Merge tag 'net-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 22 Sep 2022 17:58:13 +0000 (10:58 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 22 Sep 2022 17:58:13 +0000 (10:58 -0700)
Pull networking fixes from Jakub Kicinski:
 "Including fixes from wifi, netfilter and can.

  A handful of awaited fixes here - a revert of the FEC changes, a
  Bluetooth fix, and fixes for the iwlwifi log spew.

  We added a warning in the PHY/MDIO code which is triggering on a couple
  of platforms in a false-positive-ish way. If we can't iron that out
  over the week we'll drop it and re-add it for 6.1.

  I've added a new "follow-up fixes" section for fixes to fixes in the
  6.0-rcs, but it may give the false impression that those fixes are
  problematic or that more testing time would have caught them, so it's
  likely a one-time thing.

  Follow-up fixes:

   - nf_tables_addchain: fix nft_counters_enabled underflow

   - ebtables: fix memory leak when blob is malformed

   - nf_ct_ftp: fix deadlock when nat rewrite is needed

  Current release - regressions:

   - Revert "fec: Restart PPS after link state change" and the related
     "net: fec: Use a spinlock to guard `fep->ptp_clk_on`"

   - Bluetooth: fix HCIGETDEVINFO regression

   - wifi: mt76: fix 5 GHz connection regression on mt76x0/mt76x2

   - mptcp: fix fwd memory accounting on coalesce

   - rwlock removal fallout:
      - ipmr: always call ip{,6}_mr_forward() from RCU read-side
        critical section
      - ipv6: fix crash when IPv6 is administratively disabled

   - tcp: read multiple skbs in tcp_read_skb()

   - mdio_bus_phy_resume state warning fallout:
      - eth: ravb: fix PHY state warning splat during system resume
      - eth: sh_eth: fix PHY state warning splat during system resume

  Current release - new code bugs:

   - wifi: iwlwifi: don't spam logs with NSS>2 messages

   - eth: mtk_eth_soc: enable XDP support just for MT7986 SoC

  Previous releases - regressions:

   - bonding: fix NULL deref in bond_rr_gen_slave_id

   - wifi: iwlwifi: mark IWLMEI as broken

  Previous releases - always broken:

   - nf_conntrack helpers:
      - irc: tighten matching on DCC message
      - sip: fix ct_sip_walk_headers
      - osf: fix possible bogus match in nf_osf_find()

   - ipvlan: fix out-of-bounds bugs caused by unset skb->mac_header

   - core: fix flow symmetric hash

   - bonding, team: unsync device addresses on ndo_stop

   - phy: micrel: fix shared interrupt on LAN8814"

* tag 'net-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (83 commits)
  selftests: forwarding: add shebang for sch_red.sh
  bnxt: prevent skb UAF after handing over to PTP worker
  net: marvell: Fix refcounting bugs in prestera_port_sfp_bind()
  net: sched: fix possible refcount leak in tc_new_tfilter()
  net: sunhme: Fix packet reception for len < RX_COPY_THRESHOLD
  udp: Use WARN_ON_ONCE() in udp_read_skb()
  selftests: bonding: cause oops in bond_rr_gen_slave_id
  bonding: fix NULL deref in bond_rr_gen_slave_id
  net: phy: micrel: fix shared interrupt on LAN8814
  net/smc: Stop the CLC flow if no link to map buffers on
  ice: Fix ice_xdp_xmit() when XDP TX queue number is not sufficient
  net: atlantic: fix potential memory leak in aq_ndev_close()
  can: gs_usb: gs_usb_set_phys_id(): return with error if identify is not supported
  can: gs_usb: gs_can_open(): fix race dev->can.state condition
  can: flexcan: flexcan_mailbox_read() fix return value for drop = true
  net: sh_eth: Fix PHY state warning splat during system resume
  net: ravb: Fix PHY state warning splat during system resume
  netfilter: nf_ct_ftp: fix deadlock when nat rewrite is needed
  netfilter: ebtables: fix memory leak when blob is malformed
  netfilter: nf_tables: fix percpu memory leak at nf_tables_addchain()
  ...

337 files changed:
.mailmap
Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml
Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
Documentation/devicetree/bindings/i2c/renesas,riic.yaml
Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml
Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml
Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
Documentation/i2c/busses/i2c-piix4.rst
Documentation/i2c/i2c-topology.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/kernel/irq.c
arch/arm64/Kconfig
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/sleep.S
arch/mips/Kconfig
arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/setup.c
arch/mips/lantiq/clk.c
arch/mips/loongson32/common/platform.c
arch/mips/loongson32/ls1c/board.c
arch/parisc/Kconfig
arch/parisc/kernel/irq.c
arch/powerpc/kernel/irq.c
arch/powerpc/platforms/pseries/plpks.c
arch/riscv/boot/dts/microchip/mpfs.dtsi
arch/s390/include/asm/softirq_stack.h
arch/s390/kernel/nmi.c
arch/s390/kernel/setup.c
arch/sh/kernel/irq.c
arch/sparc/kernel/irq_64.c
arch/um/Makefile
arch/um/kernel/sysrq.c
arch/um/kernel/um_arch.c
arch/x86/include/asm/irq_stack.h
arch/x86/kernel/irq_32.c
arch/x86/um/shared/sysdep/syscalls_32.h
arch/x86/um/tls_32.c
arch/x86/um/vdso/Makefile
block/blk-core.c
block/blk-lib.c
block/blk-mq-debugfs.c
block/partitions/core.c
drivers/base/arch_topology.c
drivers/base/driver.c
drivers/base/regmap/regmap-spi.c
drivers/dma/ti/k3-udma-private.c
drivers/dma/xilinx/xilinx_dma.c
drivers/dma/xilinx/zynqmp_dma.c
drivers/firmware/efi/efibc.c
drivers/firmware/efi/libstub/secureboot.c
drivers/firmware/efi/libstub/x86-stub.c
drivers/gpio/gpio-ftgpio010.c
drivers/gpio/gpio-ixp4xx.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-mt7621.c
drivers/gpio/gpio-rockchip.c
drivers/gpio/gpio-tqmx86.c
drivers/gpio/gpiolib-cdev.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/gma_display.c
drivers/gpu/drm/gma500/oaktrail_device.c
drivers/gpu/drm/gma500/power.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_irq.c
drivers/gpu/drm/gma500/psb_irq.h
drivers/gpu/drm/hyperv/hyperv_drm_drv.c
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_vdsc.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gt/intel_llc.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/intel_rps.h
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/panel/panel-edp.c
drivers/gpu/drm/panfrost/panfrost_devfreq.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/hv/hv_fcopy.c
drivers/hv/vmbus_drv.c
drivers/hwmon/asus-ec-sensors.c
drivers/hwmon/mr75203.c
drivers/hwmon/tps23861.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/irdma/uk.c
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/umr.c
drivers/infiniband/sw/siw/siw_qp_tx.c
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/infiniband/ulp/rtrs/rtrs-srv.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/iommu/amd/iommu.c
drivers/iommu/amd/iommu_v2.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/iommu.h
drivers/iommu/iommu.c
drivers/iommu/virtio-iommu.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/nvme/host/core.c
drivers/nvme/host/tcp.c
drivers/nvme/target/core.c
drivers/nvme/target/zns.c
drivers/of/fdt.c
drivers/parisc/ccio-dma.c
drivers/parisc/iosapic.c
drivers/perf/riscv_pmu_sbi.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/qcom/pinctrl-sc8180x.c
drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
drivers/regulator/core.c
drivers/regulator/pfuze100-regulator.c
drivers/scsi/hosts.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/spi/spi-bitbang-txrx.h
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-mux.c
drivers/spi/spi.c
drivers/thunderbolt/Kconfig
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/hyperv_fb.c
drivers/virt/nitro_enclaves/Kconfig
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/space-info.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/transport.c
fs/debugfs/inode.c
fs/exec.c
fs/exfat/fatent.c
fs/nfs/internal.h
fs/nfs/nfs42proc.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/vfs.c
fs/open.c
fs/tracefs/inode.c
include/asm-generic/softirq_stack.h
include/drm/drm_connector.h
include/drm/drm_edid.h
include/kunit/test.h
include/linux/debugfs.h
include/linux/dma-mapping.h
include/linux/hp_sdc.h
include/linux/mlx5/driver.h
include/linux/of_device.h
include/linux/pci_ids.h
include/linux/spi/spi.h
include/scsi/scsi_device.h
include/scsi/scsi_host.h
io_uring/io_uring.c
io_uring/kbuf.h
io_uring/msg_ring.c
io_uring/net.c
io_uring/notif.c
io_uring/opdef.c
io_uring/rw.c
kernel/dma/debug.c
kernel/dma/mapping.c
kernel/dma/swiotlb.c
kernel/fork.c
kernel/kprobes.c
kernel/nsproxy.c
kernel/sched/debug.c
kernel/trace/rv/monitors/wip/wip.h
kernel/trace/rv/monitors/wwnr/wwnr.h
kernel/trace/rv/reactor_panic.c
kernel/trace/rv/reactor_printk.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_preemptirq.c
kernel/tracepoint.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
scripts/extract-ikconfig
scripts/gcc-ld [deleted file]
scripts/mksysmap
sound/core/init.c
sound/core/memalloc.c
sound/core/oss/pcm_oss.c
sound/drivers/aloop.c
sound/pci/emu10k1/emupcm.c
sound/pci/hda/hda_bind.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/atmel/mchp-spdiftx.c
sound/soc/codecs/cs42l42.c
sound/soc/codecs/nau8540.c
sound/soc/codecs/nau8821.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/nau8825.c
sound/soc/fsl/fsl_aud2htx.c
sound/soc/fsl/fsl_mqs.c
sound/soc/fsl/fsl_sai.c
sound/soc/mediatek/mt8186/mt8186-dai-adda.c
sound/soc/qcom/sm8250.c
sound/soc/sof/Kconfig
sound/soc/sof/ipc4-topology.c
sound/usb/card.c
sound/usb/endpoint.c
sound/usb/quirks.c
sound/usb/stream.c
tools/arch/x86/include/asm/cpufeatures.h
tools/debugging/kernel-chktaint
tools/hv/hv_kvp_daemon.c
tools/include/uapi/asm/errno.h
tools/lib/perf/evlist.c
tools/perf/Makefile.perf
tools/perf/builtin-c2c.c
tools/perf/builtin-lock.c
tools/perf/builtin-record.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/dlfilters/dlfilter-show-cycles.c
tools/perf/tests/shell/stat_bpf_counters_cgrp.sh [new file with mode: 0755]
tools/perf/tests/wp.c
tools/perf/util/affinity.c
tools/perf/util/bpf_counter_cgroup.c
tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
tools/perf/util/genelf.c
tools/perf/util/genelf.h
tools/perf/util/metricgroup.c
tools/perf/util/symbol-elf.c
tools/perf/util/synthetic-events.c
tools/testing/selftests/timens/Makefile
tools/testing/selftests/timens/vfork_exec.c [deleted file]

index 8ded2e7c2906f5c858c15893f27740424ff2eeae..d175777af07881fbff82dca171afc6e85f6b0288 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -315,6 +315,7 @@ Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
+Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
 Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
 Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
index 6cc74523ebfd3355cc2d76ca1c7cacac10819007..1748f1605cc701b55ac97687c27c23e0e6716465 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Firmware registers Interface
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Meson SoCs have a register bank with status and data shared with the
index 2e208d2fc98f22e6c60716863e1ba30d8d500495..7cdffdb131ac4ca0b8a93d7133b53d595a2ec0b6 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: /schemas/sound/name-prefix.yaml#
index 047fd69e0377006da7c112d41ddae8aad19ef171..6655a93b187402d4809a47bb00db22290e954ba8 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Display Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic Meson Display controller is composed of several components
index bce96b5b0db083463eed3dba24dc6b26b99b025f..4a5e5d9d6f909064ea15e4ba7f8b29827e6b20fb 100644 (file)
@@ -8,7 +8,7 @@ title: Analogix ANX7814 SlimPort (Full-HD Transmitter)
 
 maintainers:
   - Andrzej Hajda <andrzej.hajda@intel.com>
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Robert Foss <robert.foss@linaro.org>
 
 properties:
index c6e81f5322152b84c57cc63496d50b37b0326d1c..1b2185be92cdd32ce67f447f021f55862006a552 100644 (file)
@@ -8,7 +8,7 @@ title: ITE it66121 HDMI bridge Device Tree Bindings
 
 maintainers:
   - Phong LE <ple@baylibre.com>
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The IT66121 is a high-performance and low-power single channel HDMI
index 44e02decdf3a003bc17bfd201d6c332ca3a207df..2e75e3738ff094317e2c8bba97757b5712f43c8d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Solomon Goldentek Display GKTW70SDAE4SE 7" WVGA LVDS Display Panel
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Thierry Reding <thierry.reding@gmail.com>
 
 allOf:
index b79f069a04c2ad17462c0d8bff173ebbb54e8dba..8ea97e7743640ac2e6ff49ace9daadf276779faf 100644 (file)
@@ -48,7 +48,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - intel,vm-map
   - clocks
   - resets
   - "#thermal-sensor-cells"
index 6ecb0270d88d84e2a5b3a873f940bde0011298cc..199a354ccb9701ecd692481244e1bc7522f87172 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson I2C Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Beniamino Galvani <b.galvani@gmail.com>
 
 allOf:
index 2f315489aaaecc7fee12c359eed1ebe68cb06e29..d3c0d5c427acbcbf39fd6ad07fb525980dac3bd8 100644 (file)
@@ -60,6 +60,9 @@ properties:
   power-domains:
     maxItems: 1
 
+  resets:
+    maxItems: 1
+
 required:
   - compatible
   - reg
index 09c8948b5e251ee2d1983d378f31e00e77f8d084..fa4f7685ab2b546d63a5afb754df3c89d18fef52 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Generic i.MX bus frequency device
 
 maintainers:
-  - Leonard Crestez <leonard.crestez@nxp.com>
+  - Peng Fan <peng.fan@nxp.com>
 
 description: |
   The i.MX SoC family has multiple buses for which clock frequency (and
index 85c85b694217c16d157ee0c68509b95f9baa322c..e18107eafe7cc4038fb2f4b16fe47f0741129175 100644 (file)
@@ -96,7 +96,7 @@ properties:
               Documentation/devicetree/bindings/arm/cpus.yaml).
 
         required:
-          - fiq-index
+          - apple,fiq-index
           - cpus
 
 required:
index ea06976fbbc7ad809797391c19fcb57f12937336..dfd26b998189a10906dc043fc337586e5ee06227 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Message-Handling-Unit Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic's Meson SoCs Message-Handling-Unit (MHU) is a mailbox controller
index bee93bd8477152f216998d31ba400099a8cc1e95..e551be5e680e36cb06a96a73fe1fea3c32694499 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic GE2D Acceleration Unit
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 5044c4bb94e099750912b974f0a9c7a4d4b05e25..b827edabcafaa475437b88d3d2b66e350f3a119c 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Video Decoder
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Maxime Jourdan <mjourdan@baylibre.com>
 
 description: |
index d93aea6a0258e98dc753020dfe336b27219f8f5f..8d844f4312d157e088dc5b840f094edeae1f0f4d 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson AO-CEC Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic Meson AO-CEC module is present is Amlogic SoCs and its purpose is
index a3b976f101e8c637bfec7122746420bf36f04a9d..5750cc06e92313d575f551b43c098aef244abe87 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Khadas on-board Microcontroller Device Tree Bindings
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   Khadas embeds a microcontroller on their VIM and Edge boards adding some
index 608e1d62bed5ca44b2dcc0d83339ccefe621059f..ddd5a073c3a89183156a32c940ac7a4a829cb333 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson DWMAC Ethernet controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
 
 # We need a select here so we don't match all nodes with 'snps,dwmac'
index be485f5008870d81b3055d32a373bf5dc94e1725..5eddaed3d8535fc7020240d6f3a52d0a921510ce 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic AXG MIPI D-PHY
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 399ebde454095fa19519b9ad0718ca3fd8db9a3c..f3a5fbabbbb5971b343ea6f892a49033dddbf5d0 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic G12A USB2 PHY
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 453c083cf44cb170e5e19038a1c34e6f8ff9d5c0..868b4e6fde71f1fb68d49b9ce8f73fc5176a12e6 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic G12A USB3 + PCIE Combo PHY
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 33d1d37fdf6db1496f15bacde42e145f2575c51e..624e14f007900473711054e44a9d5c023f85734c 100644 (file)
@@ -8,7 +8,6 @@ title: Qualcomm Technologies, Inc. Low Power Audio SubSystem (LPASS)
   Low Power Island (LPI) TLMM block
 
 maintainers:
-  - Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
   - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 
 description: |
index 2d228164357c2cffbccc3a685791c82b5205c16d..2bd60c49a44219606ca67630b24b03e2f3fdf468 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies, Inc. SC7280 TLMM block
 
 maintainers:
-  - Rajendra Nayak <rnayak@codeaurora.org>
+  - Bjorn Andersson <andersson@kernel.org>
 
 description: |
   This binding describes the Top Level Mode Multiplexer block found in the
index 5390e988a9347a739ff3ae88ae067d07d1528732..43a932237a92c5d8697689fa8a3b7606025713e6 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Amlogic Meson Everything-Else Power Domains
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |+
   The Everything-Else Power Domains node should be the child of a syscon
index 0ccca493251acf1bacc1bebe63b0688580fec389..3934a2b44894de6b9faf9715d8400a20e3063c1b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm RPM/RPMh Power domains
 
 maintainers:
-  - Rajendra Nayak <rnayak@codeaurora.org>
+  - Bjorn Andersson <andersson@kernel.org>
 
 description:
   For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
index 8b7c4af4b5517e06a95b87765a6a5ed04638a31f..faa4af9fd03582c4e9457a5bbb74b98a6899c1f1 100644 (file)
@@ -35,6 +35,7 @@ patternProperties:
     description: List of regulators and its properties
     type: object
     $ref: regulator.yaml#
+    unevaluatedProperties: false
 
     properties:
       qcom,ocp-max-retries:
@@ -100,8 +101,6 @@ patternProperties:
           SAW controlled gang leader. Will be configured as SAW regulator.
         type: boolean
 
-      unevaluatedProperties: false
-
 required:
   - compatible
 
index 494a454928ce62dc85b119ec9e1d25bae44bb9a5..98db2aa74dc88f9e6d603ef1df74726f73b1eec9 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SoC Reset Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 69cdab18d6294935e22f3ef0b5e78bf2bb2cde1f..ca3b9be580584f43da83873ba2dfe2064507c871 100644 (file)
@@ -17,9 +17,6 @@ description:
   acts as directory-based coherency manager.
   All the properties in ePAPR/DeviceTree specification applies for this platform.
 
-allOf:
-  - $ref: /schemas/cache-controller.yaml#
-
 select:
   properties:
     compatible:
@@ -33,11 +30,16 @@ select:
 
 properties:
   compatible:
-    items:
-      - enum:
-          - sifive,fu540-c000-ccache
-          - sifive,fu740-c000-ccache
-      - const: cache
+    oneOf:
+      - items:
+          - enum:
+              - sifive,fu540-c000-ccache
+              - sifive,fu740-c000-ccache
+          - const: cache
+      - items:
+          - const: microchip,mpfs-ccache
+          - const: sifive,fu540-c000-ccache
+          - const: cache
 
   cache-block-size:
     const: 64
@@ -72,29 +74,46 @@ properties:
       The reference to the reserved-memory for the L2 Loosely Integrated Memory region.
       The reserved memory node should be defined as per the bindings in reserved-memory.txt.
 
-if:
-  properties:
-    compatible:
-      contains:
-        const: sifive,fu540-c000-ccache
+allOf:
+  - $ref: /schemas/cache-controller.yaml#
 
-then:
-  properties:
-    interrupts:
-      description: |
-        Must contain entries for DirError, DataError and DataFail signals.
-      maxItems: 3
-    cache-sets:
-      const: 1024
-
-else:
-  properties:
-    interrupts:
-      description: |
-        Must contain entries for DirError, DataError, DataFail, DirFail signals.
-      minItems: 4
-    cache-sets:
-      const: 2048
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - sifive,fu740-c000-ccache
+              - microchip,mpfs-ccache
+
+    then:
+      properties:
+        interrupts:
+          description: |
+            Must contain entries for DirError, DataError, DataFail, DirFail signals.
+          minItems: 4
+
+    else:
+      properties:
+        interrupts:
+          description: |
+            Must contain entries for DirError, DataError and DataFail signals.
+          maxItems: 3
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: sifive,fu740-c000-ccache
+
+    then:
+      properties:
+        cache-sets:
+          const: 2048
+
+    else:
+      properties:
+        cache-sets:
+          const: 1024
 
 additionalProperties: false
 
index 444be32a8a295ea43e1f806914b39a68fdce59ee..09c6c906b1f9733bbd3d3d66f90f7123e48eb28f 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Random number generator
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 72e8868db3e01770d1b4f3bdecd8b36a43619b59..7822705ad16c6baa77d323f3bc2aa5595a3610a4 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SoC UART Serial Interface
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic Meson SoC UART Serial Interface is present on a large range
index 17db87cb9dabc95a027d1fdbad2b8e0ebc9f10d8..c3c59909635310d6a72a09b57f26c23412c10a0e 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Canvas Video Lookup Table
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Maxime Jourdan <mjourdan@baylibre.com>
 
 description: |
index 50de0da42c138ea650e2f8d40a46315e3319081f..0c10f7678178ae0c7a4ffdc4cb2d2ae99118298d 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SPI Communication Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: "spi-controller.yaml#"
index 8a9d526d06ebf393840a3546bbe661677b8c139c..ac3b2ec300acf0d502c14b2be5193fe281e7f417 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SPI Flash Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: "spi-controller.yaml#"
index e349fa5de60611e155333c8f9cec378b139139ba..daf2a859418d40563f5c9b800074173e83816f7f 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson G12A DWC3 USB SoC Controller Glue
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic G12A embeds a DWC3 USB IP Core configured for USB2 and USB3
index c7459cf70e30341153e14baeedbaf468f980a6a9..497d60408ea04d7ba8ca49796790ef82f2926de2 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Meson GXBB SoCs Watchdog timer
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: watchdog.yaml#
index cc900025922309f0bd86658e99b021f7b2bc8c38..07fe6f6f4b186a3115623c0b04eadeb1e8aaefcd 100644 (file)
@@ -64,7 +64,7 @@ correct address for this module, you could get in big trouble (read:
 crashes, data corruption, etc.). Try this only as a last resort (try BIOS
 updates first, for example), and backup first! An even more dangerous
 option is 'force_addr=<IOPORT>'. This will not only enable the PIIX4 like
-'force' foes, but it will also set a new base I/O port address. The SMBus
+'force' does, but it will also set a new base I/O port address. The SMBus
 parts of the PIIX4 needs a range of 8 of these addresses to function
 correctly. If these addresses are already reserved by some other device,
 you will get into big trouble! DON'T USE THIS IF YOU ARE NOT VERY SURE
@@ -86,15 +86,15 @@ If you own Force CPCI735 motherboard or other OSB4 based systems you may need
 to change the SMBus Interrupt Select register so the SMBus controller uses
 the SMI mode.
 
-1) Use lspci command and locate the PCI device with the SMBus controller:
+1) Use ``lspci`` command and locate the PCI device with the SMBus controller:
    00:0f.0 ISA bridge: ServerWorks OSB4 South Bridge (rev 4f)
    The line may vary for different chipsets. Please consult the driver source
-   for all possible PCI ids (and lspci -n to match them). Lets assume the
+   for all possible PCI ids (and ``lspci -n`` to match them). Let's assume the
    device is located at 00:0f.0.
 2) Now you just need to change the value in 0xD2 register. Get it first with
-   command: lspci -xxx -s 00:0f.0
+   command: ``lspci -xxx -s 00:0f.0``
    If the value is 0x3 then you need to change it to 0x1:
-   setpci  -s 00:0f.0 d2.b=1
+   ``setpci  -s 00:0f.0 d2.b=1``
 
 Please note that you don't need to do that in all cases, just when the SMBus is
 not working properly.
@@ -109,6 +109,3 @@ which can easily get corrupted due to a state machine bug. These are mostly
 Thinkpad laptops, but desktop systems may also be affected. We have no list
 of all affected systems, so the only safe solution was to prevent access to
 the SMBus on all IBM systems (detected using DMI data.)
-
-For additional information, read:
-http://www.lm-sensors.org/browser/lm-sensors/trunk/README
index 7cb53819778e6cb7d62787a62f4c124e28872d33..48fce0f7491bf1bcd4a851f685b010386c9ce0d2 100644 (file)
@@ -5,6 +5,8 @@ I2C muxes and complex topologies
 There are a couple of reasons for building more complex I2C topologies
 than a straight-forward I2C bus with one adapter and one or more devices.
 
+Some example use cases are:
+
 1. A mux may be needed on the bus to prevent address collisions.
 
 2. The bus may be accessible from some external bus master, and arbitration
@@ -14,10 +16,10 @@ than a straight-forward I2C bus with one adapter and one or more devices.
    from the I2C bus, at least most of the time, and sits behind a gate
    that has to be operated before the device can be accessed.
 
-Etc
-===
+Several types of hardware components such as I2C muxes, I2C gates and I2C
+arbitrators allow to handle such needs.
 
-These constructs are represented as I2C adapter trees by Linux, where
+These components are represented as I2C adapter trees by Linux, where
 each adapter has a parent adapter (except the root adapter) and zero or
 more child adapters. The root adapter is the actual adapter that issues
 I2C transfers, and all adapters with a parent are part of an "i2c-mux"
@@ -35,46 +37,7 @@ Locking
 =======
 
 There are two variants of locking available to I2C muxes, they can be
-mux-locked or parent-locked muxes. As is evident from below, it can be
-useful to know if a mux is mux-locked or if it is parent-locked. The
-following list was correct at the time of writing:
-
-In drivers/i2c/muxes/:
-
-======================    =============================================
-i2c-arb-gpio-challenge    Parent-locked
-i2c-mux-gpio              Normally parent-locked, mux-locked iff
-                          all involved gpio pins are controlled by the
-                          same I2C root adapter that they mux.
-i2c-mux-gpmux             Normally parent-locked, mux-locked iff
-                          specified in device-tree.
-i2c-mux-ltc4306           Mux-locked
-i2c-mux-mlxcpld           Parent-locked
-i2c-mux-pca9541           Parent-locked
-i2c-mux-pca954x           Parent-locked
-i2c-mux-pinctrl           Normally parent-locked, mux-locked iff
-                          all involved pinctrl devices are controlled
-                          by the same I2C root adapter that they mux.
-i2c-mux-reg               Parent-locked
-======================    =============================================
-
-In drivers/iio/:
-
-======================    =============================================
-gyro/mpu3050              Mux-locked
-imu/inv_mpu6050/          Mux-locked
-======================    =============================================
-
-In drivers/media/:
-
-=======================   =============================================
-dvb-frontends/lgdt3306a   Mux-locked
-dvb-frontends/m88ds3103   Parent-locked
-dvb-frontends/rtl2830     Parent-locked
-dvb-frontends/rtl2832     Mux-locked
-dvb-frontends/si2168      Mux-locked
-usb/cx231xx/              Parent-locked
-=======================   =============================================
+mux-locked or parent-locked muxes.
 
 
 Mux-locked muxes
@@ -89,40 +52,8 @@ full transaction, unrelated I2C transfers may interleave the different
 stages of the transaction. This has the benefit that the mux driver
 may be easier and cleaner to implement, but it has some caveats.
 
-==== =====================================================================
-ML1. If you build a topology with a mux-locked mux being the parent
-     of a parent-locked mux, this might break the expectation from the
-     parent-locked mux that the root adapter is locked during the
-     transaction.
-
-ML2. It is not safe to build arbitrary topologies with two (or more)
-     mux-locked muxes that are not siblings, when there are address
-     collisions between the devices on the child adapters of these
-     non-sibling muxes.
-
-     I.e. the select-transfer-deselect transaction targeting e.g. device
-     address 0x42 behind mux-one may be interleaved with a similar
-     operation targeting device address 0x42 behind mux-two. The
-     intension with such a topology would in this hypothetical example
-     be that mux-one and mux-two should not be selected simultaneously,
-     but mux-locked muxes do not guarantee that in all topologies.
-
-ML3. A mux-locked mux cannot be used by a driver for auto-closing
-     gates/muxes, i.e. something that closes automatically after a given
-     number (one, in most cases) of I2C transfers. Unrelated I2C transfers
-     may creep in and close prematurely.
-
-ML4. If any non-I2C operation in the mux driver changes the I2C mux state,
-     the driver has to lock the root adapter during that operation.
-     Otherwise garbage may appear on the bus as seen from devices
-     behind the mux, when an unrelated I2C transfer is in flight during
-     the non-I2C mux-changing operation.
-==== =====================================================================
-
-
 Mux-locked Example
-------------------
-
+~~~~~~~~~~~~~~~~~~
 
 ::
 
@@ -153,6 +84,43 @@ This means that accesses to D2 are lockout out for the full duration
 of the entire operation. But accesses to D3 are possibly interleaved
 at any point.
 
+Mux-locked caveats
+~~~~~~~~~~~~~~~~~~
+
+When using a mux-locked mux, be aware of the following restrictions:
+
+[ML1]
+  If you build a topology with a mux-locked mux being the parent
+  of a parent-locked mux, this might break the expectation from the
+  parent-locked mux that the root adapter is locked during the
+  transaction.
+
+[ML2]
+  It is not safe to build arbitrary topologies with two (or more)
+  mux-locked muxes that are not siblings, when there are address
+  collisions between the devices on the child adapters of these
+  non-sibling muxes.
+
+  I.e. the select-transfer-deselect transaction targeting e.g. device
+  address 0x42 behind mux-one may be interleaved with a similar
+  operation targeting device address 0x42 behind mux-two. The
+  intent with such a topology would in this hypothetical example
+  be that mux-one and mux-two should not be selected simultaneously,
+  but mux-locked muxes do not guarantee that in all topologies.
+
+[ML3]
+  A mux-locked mux cannot be used by a driver for auto-closing
+  gates/muxes, i.e. something that closes automatically after a given
+  number (one, in most cases) of I2C transfers. Unrelated I2C transfers
+  may creep in and close prematurely.
+
+[ML4]
+  If any non-I2C operation in the mux driver changes the I2C mux state,
+  the driver has to lock the root adapter during that operation.
+  Otherwise garbage may appear on the bus as seen from devices
+  behind the mux, when an unrelated I2C transfer is in flight during
+  the non-I2C mux-changing operation.
+
 
 Parent-locked muxes
 -------------------
@@ -161,28 +129,10 @@ Parent-locked muxes lock the parent adapter during the full select-
 transfer-deselect transaction. The implication is that the mux driver
 has to ensure that any and all I2C transfers through that parent
 adapter during the transaction are unlocked I2C transfers (using e.g.
-__i2c_transfer), or a deadlock will follow. There are a couple of
-caveats.
-
-==== ====================================================================
-PL1. If you build a topology with a parent-locked mux being the child
-     of another mux, this might break a possible assumption from the
-     child mux that the root adapter is unused between its select op
-     and the actual transfer (e.g. if the child mux is auto-closing
-     and the parent mux issues I2C transfers as part of its select).
-     This is especially the case if the parent mux is mux-locked, but
-     it may also happen if the parent mux is parent-locked.
-
-PL2. If select/deselect calls out to other subsystems such as gpio,
-     pinctrl, regmap or iio, it is essential that any I2C transfers
-     caused by these subsystems are unlocked. This can be convoluted to
-     accomplish, maybe even impossible if an acceptably clean solution
-     is sought.
-==== ====================================================================
-
+__i2c_transfer), or a deadlock will follow.
 
 Parent-locked Example
----------------------
+~~~~~~~~~~~~~~~~~~~~~
 
 ::
 
@@ -212,10 +162,30 @@ When there is an access to D1, this happens:
  9.  M1 unlocks its parent adapter.
  10. M1 unlocks muxes on its parent.
 
-
 This means that accesses to both D2 and D3 are locked out for the full
 duration of the entire operation.
 
+Parent-locked Caveats
+~~~~~~~~~~~~~~~~~~~~~
+
+When using a parent-locked mux, be aware of the following restrictions:
+
+[PL1]
+  If you build a topology with a parent-locked mux being the child
+  of another mux, this might break a possible assumption from the
+  child mux that the root adapter is unused between its select op
+  and the actual transfer (e.g. if the child mux is auto-closing
+  and the parent mux issues I2C transfers as part of its select).
+  This is especially the case if the parent mux is mux-locked, but
+  it may also happen if the parent mux is parent-locked.
+
+[PL2]
+  If select/deselect calls out to other subsystems such as gpio,
+  pinctrl, regmap or iio, it is essential that any I2C transfers
+  caused by these subsystems are unlocked. This can be convoluted to
+  accomplish, maybe even impossible if an acceptably clean solution
+  is sought.
+
 
 Complex Examples
 ================
@@ -261,8 +231,10 @@ This is a good topology::
 When device D1 is accessed, accesses to D2 are locked out for the
 full duration of the operation (muxes on the top child adapter of M1
 are locked). But accesses to D3 and D4 are possibly interleaved at
-any point. Accesses to D3 locks out D1 and D2, but accesses to D4
-are still possibly interleaved.
+any point.
+
+Accesses to D3 locks out D1 and D2, but accesses to D4 are still possibly
+interleaved.
 
 
 Mux-locked mux as parent of parent-locked mux
@@ -394,3 +366,47 @@ This is a good topology::
 When D1 or D2 are accessed, accesses to D3 and D4 are locked out while
 accesses to D5 may interleave. When D3 or D4 are accessed, accesses to
 all other devices are locked out.
+
+
+Mux type of existing device drivers
+===================================
+
+Whether a device is mux-locked or parent-locked depends on its
+implementation. The following list was correct at the time of writing:
+
+In drivers/i2c/muxes/:
+
+======================    =============================================
+i2c-arb-gpio-challenge    Parent-locked
+i2c-mux-gpio              Normally parent-locked, mux-locked iff
+                          all involved gpio pins are controlled by the
+                          same I2C root adapter that they mux.
+i2c-mux-gpmux             Normally parent-locked, mux-locked iff
+                          specified in device-tree.
+i2c-mux-ltc4306           Mux-locked
+i2c-mux-mlxcpld           Parent-locked
+i2c-mux-pca9541           Parent-locked
+i2c-mux-pca954x           Parent-locked
+i2c-mux-pinctrl           Normally parent-locked, mux-locked iff
+                          all involved pinctrl devices are controlled
+                          by the same I2C root adapter that they mux.
+i2c-mux-reg               Parent-locked
+======================    =============================================
+
+In drivers/iio/:
+
+======================    =============================================
+gyro/mpu3050              Mux-locked
+imu/inv_mpu6050/          Mux-locked
+======================    =============================================
+
+In drivers/media/:
+
+=======================   =============================================
+dvb-frontends/lgdt3306a   Mux-locked
+dvb-frontends/m88ds3103   Parent-locked
+dvb-frontends/rtl2830     Parent-locked
+dvb-frontends/rtl2832     Mux-locked
+dvb-frontends/si2168      Mux-locked
+usb/cx231xx/              Parent-locked
+=======================   =============================================
index f0fb5ab1ffccea0a29d86a76a295a1fb3bf1afe1..efada49e2e2ec75ba72a315e5ffe53d973bd0de4 100644 (file)
@@ -1803,7 +1803,7 @@ N:        sun[x456789]i
 N:     sun50i
 
 ARM/Amlogic Meson SoC CLOCK FRAMEWORK
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 M:     Jerome Brunet <jbrunet@baylibre.com>
 L:     linux-amlogic@lists.infradead.org
 S:     Maintained
@@ -1828,7 +1828,7 @@ F:        Documentation/devicetree/bindings/sound/amlogic*
 F:     sound/soc/meson/
 
 ARM/Amlogic Meson SoC support
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 M:     Kevin Hilman <khilman@baylibre.com>
 R:     Jerome Brunet <jbrunet@baylibre.com>
 R:     Martin Blumenstingl <martin.blumenstingl@googlemail.com>
@@ -2531,7 +2531,7 @@ W:        http://www.digriz.org.uk/ts78xx/kernel
 F:     arch/arm/mach-orion5x/ts78xx-*
 
 ARM/OXNAS platform support
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-oxnas@groups.io (moderated for non-subscribers)
 S:     Maintained
@@ -6792,7 +6792,7 @@ F:        Documentation/devicetree/bindings/display/allwinner*
 F:     drivers/gpu/drm/sun4i/
 
 DRM DRIVERS FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     dri-devel@lists.freedesktop.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -6814,7 +6814,7 @@ F:        drivers/gpu/drm/atmel-hlcdc/
 
 DRM DRIVERS FOR BRIDGE CHIPS
 M:     Andrzej Hajda <andrzej.hajda@intel.com>
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 M:     Robert Foss <robert.foss@linaro.org>
 R:     Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
 R:     Jonas Karlman <jonas@kwiboo.se>
@@ -9122,7 +9122,7 @@ S:        Maintained
 F:     drivers/dma/hisi_dma.c
 
 HISILICON GPIO DRIVER
-M:     Luo Jiaxing <luojiaxing@huawei.com>
+M:     Jay Fang <f.fangjian@huawei.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     drivers/gpio/gpio-hisi.c
@@ -9208,8 +9208,8 @@ F:        Documentation/ABI/testing/debugfs-hisi-zip
 F:     drivers/crypto/hisilicon/zip/
 
 HISILICON ROCE DRIVER
+M:     Haoyue Xu <xuhaoyue1@hisilicon.com>
 M:     Wenpeng Liang <liangwenpeng@huawei.com>
-M:     Weihang Li <liweihang@huawei.com>
 L:     linux-rdma@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -10828,7 +10828,7 @@ F:      drivers/media/tuners/it913x*
 
 ITE IT66121 HDMI BRIDGE DRIVER
 M:     Phong LE <ple@baylibre.com>
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
@@ -11347,7 +11347,7 @@ F:      kernel/debug/
 F:     kernel/module/kdb.c
 
 KHADAS MCU MFD DRIVER
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-amlogic@lists.infradead.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
@@ -13218,7 +13218,7 @@ S:      Maintained
 F:     drivers/watchdog/menz69_wdt.c
 
 MESON AO CEC DRIVER FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-media@vger.kernel.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -13229,7 +13229,7 @@ F:      drivers/media/cec/platform/meson/ao-cec-g12a.c
 F:     drivers/media/cec/platform/meson/ao-cec.c
 
 MESON GE2D DRIVER FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-media@vger.kernel.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -13245,7 +13245,7 @@ F:      Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
 F:     drivers/mtd/nand/raw/meson_*
 
 MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-media@vger.kernel.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -17746,6 +17746,17 @@ L:     linux-rdma@vger.kernel.org
 S:     Maintained
 F:     drivers/infiniband/ulp/rtrs/
 
+RUNTIME VERIFICATION (RV)
+M:     Daniel Bristot de Oliveira <bristot@kernel.org>
+M:     Steven Rostedt <rostedt@goodmis.org>
+L:     linux-trace-devel@vger.kernel.org
+S:     Maintained
+F:     Documentation/trace/rv/
+F:     include/linux/rv.h
+F:     include/rv/
+F:     kernel/trace/rv/
+F:     tools/verification/
+
 RXRPC SOCKETS (AF_RXRPC)
 M:     David Howells <dhowells@redhat.com>
 M:     Marc Dionne <marc.dionne@auristor.com>
@@ -20613,6 +20624,7 @@ F:      include/*/ftrace.h
 F:     include/linux/trace*.h
 F:     include/trace/
 F:     kernel/trace/
+F:     scripts/tracing/
 F:     tools/testing/selftests/ftrace/
 
 TRACING MMIO ACCESSES (MMIOTRACE)
index a4f71076cacb8a841948beca3ddb246751285244..298f69060f104490f417c8b1e0a3f5779c460aa7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -1287,8 +1287,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
 
 PHONY += headers
 headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
-       $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
-         $(error Headers not exportable for the $(SRCARCH) architecture))
+       $(if $(filter um, $(SRCARCH)), $(error Headers not exportable for UML))
        $(Q)$(MAKE) $(hdr-inst)=include/uapi
        $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
 
index 5dbf11a5ba4e8ea2879b780f23d543d78ac31ebc..8b311e400ec140fcee6f377e11b17176d404a885 100644 (file)
@@ -923,6 +923,9 @@ config HAVE_SOFTIRQ_ON_OWN_STACK
          Architecture provides a function to run __do_softirq() on a
          separate stack.
 
+config SOFTIRQ_ON_OWN_STACK
+       def_bool HAVE_SOFTIRQ_ON_OWN_STACK && !PREEMPT_RT
+
 config ALTERNATE_USER_ADDRESS_SPACE
        bool
        help
index 034cb48c9eeb8c7e541124d9eb34d8b40cc21d63..fe28fc1f759d9372ef6f44e43a0b959d67579c8d 100644 (file)
@@ -70,7 +70,7 @@ static void __init init_irq_stacks(void)
        }
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static void ____do_softirq(void *arg)
 {
        __do_softirq();
index 9fb9fff08c94d026e3b51a3c37aecdea0577d384..1ce7685ad5de1988223b9c14b35edf51141b9011 100644 (file)
@@ -1887,6 +1887,8 @@ config ARM64_BTI_KERNEL
        depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
        depends on !CC_IS_GCC || GCC_VERSION >= 100100
+       # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671
+       depends on !CC_IS_GCC
        # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
        depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
        depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
index eb7c08dfb8348e29175e0076eb5c783bea24f8d3..041d2ae5c30ab256efb9fb77d72318b67916eab7 100644 (file)
@@ -1084,7 +1084,6 @@ static int za_set(struct task_struct *target,
        if (!target->thread.sve_state) {
                sve_alloc(target, false);
                if (!target->thread.sve_state) {
-                       clear_thread_flag(TIF_SME);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -1094,7 +1093,6 @@ static int za_set(struct task_struct *target,
        sme_alloc(target);
        if (!target->thread.za_state) {
                ret = -ENOMEM;
-               clear_tsk_thread_flag(target, TIF_SME);
                goto out;
        }
 
index 617f78ad43a185c277f9d0193252039f216dba7d..97c9de57725dfddb59ee4a19ccbd883dd3b24cc9 100644 (file)
@@ -101,6 +101,9 @@ SYM_FUNC_END(__cpu_suspend_enter)
 SYM_CODE_START(cpu_resume)
        bl      init_kernel_el
        bl      finalise_el2
+#if VA_BITS > 48
+       ldr_l   x0, vabits_actual
+#endif
        bl      __cpu_setup
        /* enable the MMU early - so we can access sleep_save_stash by va */
        adrp    x1, swapper_pg_dir
index ec21f899924985fe8e57811a35d3790f439aeafa..25dd4c5a8ef505d66e7859de647a4734a6052e20 100644 (file)
@@ -2669,7 +2669,6 @@ config ARCH_FLATMEM_ENABLE
 
 config ARCH_SPARSEMEM_ENABLE
        bool
-       select SPARSEMEM_STATIC if !SGI_IP27
 
 config NUMA
        bool "NUMA Support"
index bf13e35871b213939a6bc6b1f5c0b6df2b6dee04..aa7bbf8d0df558c74f4bb438a18985ff7c30c37c 100644 (file)
@@ -57,14 +57,11 @@ EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
 static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
 {
        char *alloc_name = "cvmx_cmd_queues";
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
        extern uint64_t octeon_reserve32_memory;
-#endif
 
        if (likely(__cvmx_cmd_queue_state_ptr))
                return CVMX_CMD_QUEUE_SUCCESS;
 
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
        if (octeon_reserve32_memory)
                __cvmx_cmd_queue_state_ptr =
                    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
@@ -73,7 +70,6 @@ static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
                                                   (CONFIG_CAVIUM_RESERVE32 <<
                                                    20) - 1, 128, alloc_name);
        else
-#endif
                __cvmx_cmd_queue_state_ptr =
                    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
                                            128,
index 9cb9ed44bcafa149c18e08fb35bbbcd663ae0d96..fd8043f6ff8ae65a5769b5c24056560ac5c1fbd9 100644 (file)
@@ -127,6 +127,16 @@ static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
 static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
                                        int irq, int line, int bit)
 {
+       struct device_node *of_node;
+       int ret;
+
+       of_node = irq_domain_get_of_node(domain);
+       if (!of_node)
+               return -EINVAL;
+       ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node));
+       if (ret < 0)
+               return ret;
+
        return irq_domain_associate(domain, irq, line << 6 | bit);
 }
 
index cbd83205518d7d1d50e1aa249964f546a843586b..e7f994393ae80842746737e3a64bc83c02dfffb7 100644 (file)
@@ -284,10 +284,8 @@ void octeon_crash_smp_send_stop(void)
 
 #endif /* CONFIG_KEXEC */
 
-#ifdef CONFIG_CAVIUM_RESERVE32
 uint64_t octeon_reserve32_memory;
 EXPORT_SYMBOL(octeon_reserve32_memory);
-#endif
 
 #ifdef CONFIG_KEXEC
 /* crashkernel cmdline parameter is parsed _after_ memory setup
@@ -666,9 +664,6 @@ void __init prom_init(void)
        int i;
        u64 t;
        int argc;
-#ifdef CONFIG_CAVIUM_RESERVE32
-       int64_t addr = -1;
-#endif
        /*
         * The bootloader passes a pointer to the boot descriptor in
         * $a3, this is available as fw_arg3.
@@ -783,7 +778,7 @@ void __init prom_init(void)
                cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
                cvmx_write_csr(CVMX_LED_EN, 1);
        }
-#ifdef CONFIG_CAVIUM_RESERVE32
+
        /*
         * We need to temporarily allocate all memory in the reserve32
         * region. This makes sure the kernel doesn't allocate this
@@ -794,14 +789,16 @@ void __init prom_init(void)
         * Allocate memory for RESERVED32 aligned on 2MB boundary. This
         * is in case we later use hugetlb entries with it.
         */
-       addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
-                                               0, 0, 2 << 20,
-                                               "CAVIUM_RESERVE32", 0);
-       if (addr < 0)
-               pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
-       else
-               octeon_reserve32_memory = addr;
-#endif
+       if (CONFIG_CAVIUM_RESERVE32) {
+               int64_t addr =
+                       cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+                                                          0, 0, 2 << 20,
+                                                          "CAVIUM_RESERVE32", 0);
+               if (addr < 0)
+                       pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+               else
+                       octeon_reserve32_memory = addr;
+       }
 
 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
        if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
@@ -1079,7 +1076,6 @@ void __init plat_mem_setup(void)
        cvmx_bootmem_unlock();
 #endif /* CONFIG_CRASH_DUMP */
 
-#ifdef CONFIG_CAVIUM_RESERVE32
        /*
         * Now that we've allocated the kernel memory it is safe to
         * free the reserved region. We free it here so that builtin
@@ -1087,7 +1083,6 @@ void __init plat_mem_setup(void)
         */
        if (octeon_reserve32_memory)
                cvmx_bootmem_free_named("CAVIUM_RESERVE32");
-#endif /* CONFIG_CAVIUM_RESERVE32 */
 
        if (total == 0)
                panic("Unable to allocate memory from "
index 7a623684d9b5ed415e167af74e0b9af22ea5d74c..2d5a0bcb0cec156dc5f0daedbdd7c56ff8d62ca4 100644 (file)
@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
 {
        return &cpu_clk_generic[2];
 }
+EXPORT_SYMBOL_GPL(clk_get_io);
 
 struct clk *clk_get_ppe(void)
 {
index 794c96c2a4cdd96cbaedb651b36a0421b09cbf37..311dc1580bbde98e59f33f508afba7695714923d 100644 (file)
@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
        if (plat_dat->bus_id) {
                __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
                             GMAC1_USE_UART0, LS1X_MUX_CTRL0);
-               switch (plat_dat->interface) {
+               switch (plat_dat->phy_interface) {
                case PHY_INTERFACE_MODE_RGMII:
                        val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
                        break;
@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
                        break;
                default:
                        pr_err("unsupported mii mode %d\n",
-                              plat_dat->interface);
+                              plat_dat->phy_interface);
                        return -ENOTSUPP;
                }
                val &= ~GMAC1_SHUT;
        } else {
-               switch (plat_dat->interface) {
+               switch (plat_dat->phy_interface) {
                case PHY_INTERFACE_MODE_RGMII:
                        val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
                        break;
@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
                        break;
                default:
                        pr_err("unsupported mii mode %d\n",
-                              plat_dat->interface);
+                              plat_dat->phy_interface);
                        return -ENOTSUPP;
                }
                val &= ~GMAC0_SHUT;
@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
        plat_dat = dev_get_platdata(&pdev->dev);
 
        val &= ~PHY_INTF_SELI;
-       if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
+       if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
                val |= 0x4 << PHY_INTF_SELI_SHIFT;
        __raw_writel(val, LS1X_MUX_CTRL1);
 
@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
        .bus_id                 = 0,
        .phy_addr               = -1,
 #if defined(CONFIG_LOONGSON1_LS1B)
-       .interface              = PHY_INTERFACE_MODE_MII,
+       .phy_interface          = PHY_INTERFACE_MODE_MII,
 #elif defined(CONFIG_LOONGSON1_LS1C)
-       .interface              = PHY_INTERFACE_MODE_RMII,
+       .phy_interface          = PHY_INTERFACE_MODE_RMII,
 #endif
        .mdio_bus_data          = &ls1x_mdio_bus_data,
        .dma_cfg                = &ls1x_eth_dma_cfg,
@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
        .bus_id                 = 1,
        .phy_addr               = -1,
-       .interface              = PHY_INTERFACE_MODE_MII,
+       .phy_interface          = PHY_INTERFACE_MODE_MII,
        .mdio_bus_data          = &ls1x_mdio_bus_data,
        .dma_cfg                = &ls1x_eth_dma_cfg,
        .has_gmac               = 1,
index e9de6da0ce51ff652ddcdbb5fdb7c79104f49454..9dcfe9de55b0ae983a2c4f8ebcf9ae23344a9b6f 100644 (file)
@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
 static int __init ls1c_platform_init(void)
 {
        ls1x_serial_set_uartclk(&ls1x_uart_pdev);
-       ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
 
        return platform_add_devices(ls1c_platform_devices,
                                   ARRAY_SIZE(ls1c_platform_devices));
index 9aede2447011bc13f2fa11081d4ddb350784f285..a98940e6424327fe5a5232dc0613404c5c559ae4 100644 (file)
@@ -224,8 +224,18 @@ config MLONGCALLS
          Enabling this option will probably slow down your kernel.
 
 config 64BIT
-       def_bool "$(ARCH)" = "parisc64"
+       def_bool y if "$(ARCH)" = "parisc64"
+       bool "64-bit kernel" if "$(ARCH)" = "parisc"
        depends on PA8X00
+       help
+         Enable this if you want to support a 64-bit kernel on the PA-RISC
+         platform.
+
+         At the moment, only people willing to use more than 2GB of RAM,
+         or having a 64-bit-only capable PA-RISC machine, should say Y here.
+
+         Since there is no 64-bit userland on PA-RISC, there is no point in
+         enabling this option otherwise. The 64-bit kernel is significantly
+         bigger and slower than the 32-bit one.
 
 choice
        prompt "Kernel page size"
index fbb882cb8dbb5f9316af05f7c9f45faf90701cec..b05055f3ba4b8aa406c3a0881bb65b5984ce51f7 100644 (file)
@@ -480,7 +480,7 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
        *irq_stack_in_use = 1;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
        execute_on_irq_stack(__do_softirq, 0);
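
This hunk, and the matching ones in the powerpc, s390, sh, sparc and x86 files below, switch the guard around do_softirq_own_stack() from "#ifndef CONFIG_PREEMPT_RT" to "#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK". A rough sketch of the intended relationship, assuming the new symbol combines the per-arch capability with the PREEMPT_RT exclusion (this is not the exact upstream Kconfig/header wording):

/* Sketch, not the upstream definitions: the point of the single symbol is
 * that each architecture tests one condition instead of open-coding the
 * PREEMPT_RT special case. */
asmlinkage void __do_softirq(void);

#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
#define SOFTIRQ_ON_OWN_STACK_SKETCH 1
#endif

#ifdef SOFTIRQ_ON_OWN_STACK_SKETCH
void do_softirq_own_stack(void);        /* arch code runs __do_softirq() on a dedicated stack */
#else
static inline void do_softirq_own_stack(void)
{
        __do_softirq();                 /* PREEMPT_RT or no arch support: stay on the current stack */
}
#endif
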
index 0f17268c1f0bbdcb6126eb2108cc0a7c30fbbbaa..9ede61a5a469efb7f4252412c2a5c98c5f77c48b 100644 (file)
@@ -199,7 +199,7 @@ static inline void check_stack_overflow(unsigned long sp)
        }
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static __always_inline void call_do_softirq(const void *sp)
 {
        /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
@@ -335,7 +335,7 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void *softirq_ctx[NR_CPUS] __read_mostly;
 void *hardirq_ctx[NR_CPUS] __read_mostly;
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
        call_do_softirq(softirq_ctx[smp_processor_id()]);
index 52aaa2894606835d0849a01e24d0fd57648b9223..f4b5b5a64db3d3a0e9ae65282f39ec7c60e160e6 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <asm/hvcall.h>
+#include <asm/machdep.h>
 
 #include "plpks.h"
 
@@ -457,4 +458,4 @@ static __init int pseries_plpks_init(void)
 
        return rc;
 }
-arch_initcall(pseries_plpks_init);
+machine_arch_initcall(pseries, pseries_plpks_init);
index 74493344ea41b440881ee6cadf9a39a9dbaa7dbc..6d9d455fa160ab12ab31582715949141c0af588f 100644 (file)
                ranges;
 
                cctrllr: cache-controller@2010000 {
-                       compatible = "sifive,fu540-c000-ccache", "cache";
+                       compatible = "microchip,mpfs-ccache", "sifive,fu540-c000-ccache", "cache";
                        reg = <0x0 0x2010000 0x0 0x1000>;
                        cache-block-size = <64>;
                        cache-level = <2>;
index af68d6c1d5840ecf0d06e65cd7dcd449d1a27219..1ac5115d3115ee5738ed27cb7411c6702b5d4aae 100644 (file)
@@ -5,7 +5,7 @@
 #include <asm/lowcore.h>
 #include <asm/stacktrace.h>
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static inline void do_softirq_own_stack(void)
 {
        call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
index 60ac66aab1635b7578be79b41ac6bd8ae2eded0e..31cb9b00a36bb41bcec1e9090066cb029fa08e9d 100644 (file)
@@ -64,7 +64,7 @@ static inline unsigned long nmi_get_mcesa_size(void)
  * structure. The structure is required for machine check happening
  * early in the boot process.
  */
-static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
+static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);
 
 void __init nmi_alloc_mcesa_early(u64 *mcesad)
 {
index ed4fbbbdd1b0787c405d4fe2ebee9042e0f51bac..bbd4bde4f65d3eb760df9857695ee58847c4012f 100644 (file)
@@ -479,6 +479,7 @@ static void __init setup_lowcore_dat_off(void)
        put_abs_lowcore(restart_data, lc->restart_data);
        put_abs_lowcore(restart_source, lc->restart_source);
        put_abs_lowcore(restart_psw, lc->restart_psw);
+       put_abs_lowcore(mcesad, lc->mcesad);
 
        mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
        if (!mcck_stack)
@@ -507,8 +508,8 @@ static void __init setup_lowcore_dat_on(void)
        S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
-       __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
+       __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS);
        put_abs_lowcore(program_new_psw, lc->program_new_psw);
        for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++)
index 90927673807802cb9ee4157994045c100e9dcc1e..4e6835de54cf8e773bd2b5b53df29e6ce7d2aa88 100644 (file)
@@ -149,7 +149,7 @@ void irq_ctx_exit(int cpu)
        hardirq_ctx[cpu] = NULL;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
        struct thread_info *curctx;
index 41fa1be980a33463adbde4c50b0dbade80203e9f..72da2e10e2559ab8ea1dc19c63af917b6ca2a492 100644 (file)
@@ -855,7 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
        set_irq_regs(old_regs);
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
        void *orig_sp, *sp = softirq_stack[smp_processor_id()];
index f2fe63bfd819f5f07971d4e4d50cbecec983173d..f1d4d67157be0bf568ca476aed3508b9c9fd2f8f 100644 (file)
@@ -132,10 +132,18 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
 # The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
 
+# Avoid binutils 2.39+ warnings by marking the stack non-executable and
+# ignoring warnings for the kallsyms sections.
+LDFLAGS_EXECSTACK = -z noexecstack
+ifeq ($(CONFIG_LD_IS_BFD),y)
+LDFLAGS_EXECSTACK += $(call ld-option,--no-warn-rwx-segments)
+endif
+
 LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt))
 
 # Used by link-vmlinux.sh which has special support for um link
 export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
+export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK)
 
 # When cleaning we don't include .config, so we don't include
 # TT or skas makefiles and don't clean skas_ptregs.h.
index 7452f70d50d06924dbe0039db140ca8a0d0db2db..746715379f12a84abcfe991b3a23218eec7e0fc3 100644 (file)
@@ -48,7 +48,8 @@ void show_stack(struct task_struct *task, unsigned long *stack,
                        break;
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
                        pr_cont("\n");
-               pr_cont(" %08lx", *stack++);
+               pr_cont(" %08lx", READ_ONCE_NOCHECK(*stack));
+               stack++;
        }
 
        printk("%sCall Trace:\n", loglvl);
index e0de60e503b983c2ab6a2f215c688bf14161e59e..d9e023c78f568fe8c22eb9f7ae07f0cba0a716df 100644 (file)
@@ -33,7 +33,7 @@
 #include "um_arch.h"
 
 #define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
-#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
 
 /* Changed in add_arg and setup_arch, which run before SMP is started */
 static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
index 63f818aedf770c65579d4bd48a38f9147a104542..147cb8fdda92e9e0a9a2e74bb876968af8ba29dc 100644 (file)
                              IRQ_CONSTRAINTS, regs, vector);           \
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 /*
  * Macro to invoke __do_softirq on the irq stack. This is only called from
  * task context when bottom halves are about to be reenabled and soft
index e5dd6da78713bc1f37def623d2bf07577ea981ae..01833ebf5e8e3582992a8860460bbe555923eb30 100644 (file)
@@ -132,7 +132,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
        return 0;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
        struct irq_stack *irqstk;
index 68fd2cf526fd7e47f0dd823db12e8389b01ed6b9..f6e9f84397e792d5f3ba3102f6c10cfe7b57aae6 100644 (file)
@@ -6,10 +6,9 @@
 #include <asm/unistd.h>
 #include <sysdep/ptrace.h>
 
-typedef long syscall_handler_t(struct pt_regs);
+typedef long syscall_handler_t(struct syscall_args);
 
 extern syscall_handler_t *sys_call_table[];
 
 #define EXECUTE_SYSCALL(syscall, regs) \
-       ((long (*)(struct syscall_args)) \
-        (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
+       ((*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
index ac8eee093f9cd0fa112fe89e945202eb60203a84..66162eafd8e8f8770935451b555c644dc5aca688 100644 (file)
@@ -65,9 +65,6 @@ static int get_free_idx(struct task_struct* task)
        struct thread_struct *t = &task->thread;
        int idx;
 
-       if (!t->arch.tls_array)
-               return GDT_ENTRY_TLS_MIN;
-
        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (!t->arch.tls_array[idx].present)
                        return idx + GDT_ENTRY_TLS_MIN;
@@ -240,9 +237,6 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info,
 {
        struct thread_struct *t = &task->thread;
 
-       if (!t->arch.tls_array)
-               goto clear;
-
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;
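
The deleted checks above tested the address of an array embedded in struct thread_struct; such an address can never be NULL, so the checks were dead code (modern compilers warn that the condition is always false). A self-contained illustration of the same C rule:

/* Sketch: tls_array mirrors an array member embedded in a struct; it decays
 * to a pointer derived from the struct's own address, which is non-NULL, so
 * the negation below always evaluates to 0. */
struct example_thread {
        int tls_array[3];
};

static int always_false(struct example_thread *t)
{
        return !t->tls_array;   /* array member, not a pointer: always 0 */
}
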
 
index 8c0396fd0e6f691e72a36bad703cf4172f8693da..6fbe97c52c991e2f71cd917d1cbaa6b4bf9ffcf4 100644 (file)
@@ -65,7 +65,7 @@ quiet_cmd_vdso = VDSO    $@
                       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv
+VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack
 GCOV_PROFILE := n
 
 #
index a0d1104c5590c16b9484c925ac387b571e1f15ed..651057c4146b2ace71a4908c937df092483357bc 100644 (file)
@@ -295,7 +295,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
        while (!blk_try_enter_queue(q, pm)) {
                if (flags & BLK_MQ_REQ_NOWAIT)
-                       return -EBUSY;
+                       return -EAGAIN;
 
                /*
                 * read pair of barrier in blk_freeze_queue_start(), we need to
@@ -325,7 +325,7 @@ int __bio_queue_enter(struct request_queue *q, struct bio *bio)
                        if (test_bit(GD_DEAD, &disk->state))
                                goto dead;
                        bio_wouldblock_error(bio);
-                       return -EBUSY;
+                       return -EAGAIN;
                }
 
                /*
index 67e6dbc1ae8179594a57e89e87d6d247af358940..e59c3069e8351f7edf0d82c6a3b376a3029a994c 100644 (file)
@@ -309,6 +309,11 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
        struct blk_plug plug;
        int ret = 0;
 
+       /* make sure that "len << SECTOR_SHIFT" doesn't overflow */
+       if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
+               max_sectors = UINT_MAX >> SECTOR_SHIFT;
+       max_sectors &= ~bs_mask;
+
        if (max_sectors == 0)
                return -EOPNOTSUPP;
        if ((sector | nr_sects) & bs_mask)
@@ -322,10 +327,10 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
 
                bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_iter.bi_size = len;
+               bio->bi_iter.bi_size = len << SECTOR_SHIFT;
 
-               sector += len << SECTOR_SHIFT;
-               nr_sects -= len << SECTOR_SHIFT;
+               sector += len;
+               nr_sects -= len;
                if (!nr_sects) {
                        ret = submit_bio_wait(bio);
                        bio_put(bio);
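
The secure-erase fix above does two things: it clamps the per-bio sector count so that converting it to a 32-bit byte count for bi_size cannot overflow, and it corrects the units so bi_size is set in bytes while sector/nr_sects advance in sectors. A small standalone sketch of the clamp, using the same constants (the helper name is made up for illustration):

#include <limits.h>

#define SECTOR_SHIFT 9

/* Sketch: cap the per-bio sector count so (sectors << SECTOR_SHIFT) still
 * fits in an unsigned 32-bit byte count, then keep the logical-block
 * alignment mask applied afterwards, as the hunk above does. */
static unsigned int clamp_bio_sectors(unsigned int max_sectors,
                                      unsigned int bs_mask)
{
        if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
                max_sectors = UINT_MAX >> SECTOR_SHIFT;

        return max_sectors & ~bs_mask;
}
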
index 8559cea7f300eefb19c8b5e67eae53de69fd4746..dee789f2f98fcbdd5111787d5e6ee69e976d493b 100644 (file)
@@ -283,7 +283,9 @@ static const char *const rqf_name[] = {
        RQF_NAME(SPECIAL_PAYLOAD),
        RQF_NAME(ZONE_WRITE_LOCKED),
        RQF_NAME(MQ_POLL_SLEPT),
+       RQF_NAME(TIMED_OUT),
        RQF_NAME(ELV),
+       RQF_NAME(RESV),
 };
 #undef RQF_NAME
 
index fc1d70384825cd804cce322038694126dbcbc4b8..b8112f52d38800742a2e51eb4af61fa116da35d6 100644 (file)
@@ -596,6 +596,9 @@ static int blk_add_partitions(struct gendisk *disk)
        if (disk->flags & GENHD_FL_NO_PART)
                return 0;
 
+       if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+               return 0;
+
        state = check_partition(disk);
        if (!state)
                return 0;
index eaa1b8d2d39d37b06682420b0560b92ecdf79ba5..46cbe4471e785f577f3d1cd9196316e681957688 100644 (file)
@@ -724,7 +724,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
         */
        if (cpumask_subset(cpu_coregroup_mask(cpu),
                           &cpu_topology[cpu].cluster_sibling))
-               return get_cpu_mask(cpu);
+               return topology_sibling_cpumask(cpu);
 
        return &cpu_topology[cpu].cluster_sibling;
 }
index 15a75afe6b8450c948e530d8d2913e7554d1e10f..676b6275d5b53606caba09373d7480c7ca073d1a 100644 (file)
@@ -63,6 +63,12 @@ int driver_set_override(struct device *dev, const char **override,
        if (len >= (PAGE_SIZE - 1))
                return -EINVAL;
 
+       /*
+        * Compute the real length of the string in case userspace sends us a
+        * bunch of \0 characters like python likes to do.
+        */
+       len = strlen(s);
+
        if (!len) {
                /* Empty string passed - clear override */
                device_lock(dev);
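
The added strlen() above matters because the sysfs write size may include trailing NUL padding; only the C-string length decides whether an override is being set or cleared. A tiny sketch of the distinction, with a hypothetical helper:

#include <string.h>

/* Sketch: "count" is the write size, possibly padded with NULs; what the fix
 * above stores into "len" is the string length instead. */
static size_t override_len(const char *buf, size_t count)
{
        size_t len = strlen(buf);       /* e.g. "vfio-pci\0\0\0" written as 11 bytes -> 8 */

        (void)count;                    /* only used for the upper-bound check */
        return len;
}
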
index 719323bc6c7f1f478587400afa9fa30d6aa5ce43..37ab23a9d0345a12991e3b273e3d009b147ce4bc 100644 (file)
@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
                                                   const struct regmap_config *config)
 {
        size_t max_size = spi_max_transfer_size(spi);
+       size_t max_msg_size, reg_reserve_size;
        struct regmap_bus *bus;
 
        if (max_size != SIZE_MAX) {
@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
                if (!bus)
                        return ERR_PTR(-ENOMEM);
 
+               max_msg_size = spi_max_message_size(spi);
+               reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+                                + config->pad_bits / BITS_PER_BYTE;
+               if (max_size + reg_reserve_size > max_msg_size)
+                       max_size -= reg_reserve_size;
+
                bus->free_on_exit = true;
                bus->max_raw_read = max_size;
                bus->max_raw_write = max_size;
+
                return bus;
        }
 
index d4f1e4e9603a400308e514350a2f3ec330c9cbaf..85e00701473cb121ab668c97a2b5c875e082615a 100644 (file)
@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
        }
 
        pdev = of_find_device_by_node(udma_node);
+       if (np != udma_node)
+               of_node_put(udma_node);
+
        if (!pdev) {
                pr_debug("UDMA device not found\n");
                return ERR_PTR(-EPROBE_DEFER);
        }
 
-       if (np != udma_node)
-               of_node_put(udma_node);
-
        ud = platform_get_drvdata(pdev);
        if (!ud) {
                pr_debug("UDMA has not been probed\n");
index 6276934d4d2be9e3345c20921c5cdfce941f4a79..8cd4e69dc7b4c24b334c450562c2a88925b0f452 100644 (file)
@@ -3040,9 +3040,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
        /* Request and map I/O memory */
        xdev->regs = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(xdev->regs))
-               return PTR_ERR(xdev->regs);
-
+       if (IS_ERR(xdev->regs)) {
+               err = PTR_ERR(xdev->regs);
+               goto disable_clks;
+       }
        /* Retrieve the DMA engine properties from the device tree */
        xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
        xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3070,7 +3071,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                if (err < 0) {
                        dev_err(xdev->dev,
                                "missing xlnx,num-fstores property\n");
-                       return err;
+                       goto disable_clks;
                }
 
                err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3090,7 +3091,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                xdev->ext_addr = false;
 
        /* Set the dma mask bits */
-       dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+       err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+       if (err < 0) {
+               dev_err(xdev->dev, "DMA mask error %d\n", err);
+               goto disable_clks;
+       }
 
        /* Initialize the DMA engine */
        xdev->common.dev = &pdev->dev;
@@ -3137,7 +3142,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        for_each_child_of_node(node, child) {
                err = xilinx_dma_child_probe(xdev, child);
                if (err < 0)
-                       goto disable_clks;
+                       goto error;
        }
 
        if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3172,12 +3177,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
        return 0;
 
-disable_clks:
-       xdma_disable_allclks(xdev);
 error:
        for (i = 0; i < xdev->dma_config->max_channels; i++)
                if (xdev->chan[i])
                        xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+       xdma_disable_allclks(xdev);
 
        return err;
 }
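
The relabelled gotos above restore the usual probe unwind order: resources are released in reverse order of acquisition, and clock disabling, being the first thing enabled, becomes the last label. A generic sketch of that shape; every helper here is a hypothetical stand-in for the driver's real steps:

/* Sketch only: the point is the label order in the unwind path, not the
 * specific operations. */
int enable_clocks(void);
void disable_clocks(void);
int map_regs(void);
int add_channels(void);
void remove_channels(void);

static int probe_sketch(void)
{
        int err;

        err = enable_clocks();
        if (err)
                return err;

        err = map_regs();
        if (err)
                goto disable_clks;

        err = add_channels();
        if (err)
                goto remove_chans;

        return 0;

remove_chans:
        remove_channels();      /* undo whatever add_channels() managed to set up */
disable_clks:
        disable_clocks();
        return err;
}
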
index dc299ab3681803ce094cafaaba1d7fa92168baf4..3f4ee39543840095c4a0a97cd2b9740afe174e7a 100644 (file)
@@ -849,7 +849,7 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
 
        zynqmp_dma_desc_config_eod(chan, desc);
        async_tx_ack(&first->async_tx);
-       first->async_tx.flags = flags;
+       first->async_tx.flags = (enum dma_ctrl_flags)flags;
        return &first->async_tx;
 }
 
index 8ced7af8e56d28d0dffe40e84506e6a88dd60b54..4f9fb086eab7b0e22252d22e59e5aae55865322d 100644 (file)
@@ -48,6 +48,9 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
                return NOTIFY_DONE;
 
        wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+       if (!wdata)
+               return NOTIFY_DONE;
+
        for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
                wdata[l] = str[l];
        wdata[l] = L'\0';
index 8a18930f3eb69561a0dcc199b398b8cd09fd55df..516f4f0069bd2fcfecc37481351e3f576fc5944b 100644 (file)
@@ -14,7 +14,7 @@
 
 /* SHIM variables */
 static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
 
 static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
                            unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
 
        /*
         * See if a user has put the shim into insecure mode. If so, and if the
-        * variable doesn't have the runtime attribute set, we might as well
-        * honor that.
+        * variable doesn't have the non-volatile attribute set, we might as
+        * well honor that.
         */
        size = sizeof(moksbstate);
        status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
        /* If it fails, we don't care why. Default to secure */
        if (status != EFI_SUCCESS)
                goto secure_boot_enabled;
-       if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+       if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
                return efi_secureboot_mode_disabled;
 
 secure_boot_enabled:
index 43ca665af610c4ff4dc91d8f954976e2ce2445be..7a7abc8959d2b0e3e44dcb06f15fa12432b93c18 100644 (file)
@@ -516,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
        hdr->ramdisk_image = 0;
        hdr->ramdisk_size = 0;
 
+       /*
+        * Disregard any setup data that was provided by the bootloader:
+        * setup_data could be pointing anywhere, and we have no way of
+        * authenticating or validating the payload.
+        */
+       hdr->setup_data = 0;
+
        efi_stub_entry(handle, sys_table_arg, boot_params);
        /* not reached */
 
index f422c3e129a0c4cc1b34c03c746608e387e013e5..f77a965f5780d8d9817f2546df480e3341cb3b68 100644 (file)
  * struct ftgpio_gpio - Gemini GPIO state container
  * @dev: containing device for this instance
  * @gc: gpiochip for this instance
- * @irq: irqchip for this instance
  * @base: remapped I/O-memory base
  * @clk: silicon clock
  */
 struct ftgpio_gpio {
        struct device *dev;
        struct gpio_chip gc;
-       struct irq_chip irq;
        void __iomem *base;
        struct clk *clk;
 };
@@ -70,6 +68,7 @@ static void ftgpio_gpio_mask_irq(struct irq_data *d)
        val = readl(g->base + GPIO_INT_EN);
        val &= ~BIT(irqd_to_hwirq(d));
        writel(val, g->base + GPIO_INT_EN);
+       gpiochip_disable_irq(gc, irqd_to_hwirq(d));
 }
 
 static void ftgpio_gpio_unmask_irq(struct irq_data *d)
@@ -78,6 +77,7 @@ static void ftgpio_gpio_unmask_irq(struct irq_data *d)
        struct ftgpio_gpio *g = gpiochip_get_data(gc);
        u32 val;
 
+       gpiochip_enable_irq(gc, irqd_to_hwirq(d));
        val = readl(g->base + GPIO_INT_EN);
        val |= BIT(irqd_to_hwirq(d));
        writel(val, g->base + GPIO_INT_EN);
@@ -221,6 +221,16 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
        return 0;
 }
 
+static const struct irq_chip ftgpio_irq_chip = {
+       .name = "FTGPIO010",
+       .irq_ack = ftgpio_gpio_ack_irq,
+       .irq_mask = ftgpio_gpio_mask_irq,
+       .irq_unmask = ftgpio_gpio_unmask_irq,
+       .irq_set_type = ftgpio_gpio_set_irq_type,
+       .flags = IRQCHIP_IMMUTABLE,
+        GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int ftgpio_gpio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -277,14 +287,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
        if (!IS_ERR(g->clk))
                g->gc.set_config = ftgpio_gpio_set_config;
 
-       g->irq.name = "FTGPIO010";
-       g->irq.irq_ack = ftgpio_gpio_ack_irq;
-       g->irq.irq_mask = ftgpio_gpio_mask_irq;
-       g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
-       g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
        girq = &g->gc.irq;
-       girq->chip = &g->irq;
+       gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
        girq->parent_handler = ftgpio_gpio_irq_handler;
        girq->num_parents = 1;
        girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
index 312309be0287db567c0a5c838078969fd4b19b1a..56656fb519f8553e783998188473c88db9a6e1b1 100644 (file)
@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
        __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
 }
 
+static void ixp4xx_gpio_mask_irq(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+       irq_chip_mask_parent(d);
+       gpiochip_disable_irq(gc, d->hwirq);
+}
+
 static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
        if (!(g->irq_edge & BIT(d->hwirq)))
                ixp4xx_gpio_irq_ack(d);
 
+       gpiochip_enable_irq(gc, d->hwirq);
        irq_chip_unmask_parent(d);
 }
 
@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
 }
 
-static struct irq_chip ixp4xx_gpio_irqchip = {
+static const struct irq_chip ixp4xx_gpio_irqchip = {
        .name = "IXP4GPIO",
        .irq_ack = ixp4xx_gpio_irq_ack,
-       .irq_mask = irq_chip_mask_parent,
+       .irq_mask = ixp4xx_gpio_mask_irq,
        .irq_unmask = ixp4xx_gpio_irq_unmask,
        .irq_set_type = ixp4xx_gpio_irq_set_type,
+       .flags = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
        g->gc.owner = THIS_MODULE;
 
        girq = &g->gc.irq;
-       girq->chip = &ixp4xx_gpio_irqchip;
+       gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
        girq->fwnode = g->fwnode;
        girq->parent_domain = parent;
        girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
index a2e505a7545cde534182d08befa46c350f1310b0..523dfd17dd922548ebabbd1cd63f7bbf033b73e2 100644 (file)
@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
        }
 
        fwnode = fwnode_create_software_node(properties, NULL);
-       if (IS_ERR(fwnode))
+       if (IS_ERR(fwnode)) {
+               kfree_strarray(line_names, ngpio);
                return PTR_ERR(fwnode);
+       }
 
        pdevinfo.name = "gpio-mockup";
        pdevinfo.id = idx;
@@ -597,9 +599,9 @@ static int __init gpio_mockup_init(void)
 
 static void __exit gpio_mockup_exit(void)
 {
+       gpio_mockup_unregister_pdevs();
        debugfs_remove_recursive(gpio_mockup_dbg_dir);
        platform_driver_unregister(&gpio_mockup_driver);
-       gpio_mockup_unregister_pdevs();
 }
 
 module_init(gpio_mockup_init);
index 15049822937a43a19c98d04562c6235b67724332..3eb08cd1fdc0818a89458b5ad72a4761b2b31a9c 100644 (file)
@@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
 
        switch (flow_type) {
        case IRQ_TYPE_EDGE_FALLING:
+       case IRQ_TYPE_LEVEL_LOW:
                raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
                gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
                        gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
index d8a26e503ca5d2fe7d99ce2745b484520a2973c0..f163f5ca857beb24d7c3eafffef482a6bb9def6b 100644 (file)
@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
        unsigned long flags;
        u32 rise, fall, high, low;
 
+       gpiochip_enable_irq(gc, d->hwirq);
+
        spin_lock_irqsave(&rg->lock, flags);
        rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
        fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
        mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
        mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
        spin_unlock_irqrestore(&rg->lock, flags);
+
+       gpiochip_disable_irq(gc, d->hwirq);
 }
 
 static int
@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
        return gpio % MTK_BANK_WIDTH;
 }
 
+static const struct irq_chip mt7621_irq_chip = {
+       .name           = "mt7621-gpio",
+       .irq_mask_ack   = mediatek_gpio_irq_mask,
+       .irq_mask       = mediatek_gpio_irq_mask,
+       .irq_unmask     = mediatek_gpio_irq_unmask,
+       .irq_set_type   = mediatek_gpio_irq_type,
+       .flags          = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int
 mediatek_gpio_bank_probe(struct device *dev, int bank)
 {
@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
                return -ENOMEM;
 
        rg->chip.offset = bank * MTK_BANK_WIDTH;
-       rg->irq_chip.name = dev_name(dev);
-       rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
-       rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
-       rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
-       rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
 
        if (mtk->gpio_irq) {
                struct gpio_irq_chip *girq;
@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
                }
 
                girq = &rg->chip.irq;
-               girq->chip = &rg->irq_chip;
+               gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
                /* This will let us handle the parent IRQ in the driver */
                girq->parent_handler = NULL;
                girq->num_parents = 0;
index f91e876fd9690cef5aa83775a2504402e6cfb42f..bb50335239ac804e615f045c8e029290d2973fb0 100644 (file)
@@ -419,11 +419,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
                        goto out;
                } else {
                        bank->toggle_edge_mode |= mask;
-                       level |= mask;
+                       level &= ~mask;
 
                        /*
                         * Determine gpio state. If 1 next interrupt should be
-                        * falling otherwise rising.
+                        * low otherwise high.
                         */
                        data = readl(bank->reg_base + bank->gpio_regs->ext_port);
                        if (data & mask)
index fa4bc7481f9a600b57d8ab6851ac9052c19cb49f..e739dcea61b231ff30387c12fef35ce59272d548 100644 (file)
@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
                girq->default_type = IRQ_TYPE_NONE;
                girq->handler = handle_simple_irq;
                girq->init_valid_mask = tqmx86_init_irq_valid_mask;
+
+               irq_domain_set_pm_device(girq->domain, dev);
        }
 
        ret = devm_gpiochip_add_data(dev, chip, gpio);
@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
                goto out_pm_dis;
        }
 
-       irq_domain_set_pm_device(girq->domain, dev);
-
        dev_info(dev, "GPIO functionality initialized with %d pins\n",
                 chip->ngpio);
 
index f8041d4898d1916ab11428b37954e04788a1fc1b..92f185575e941f438b8ed6f0b35e8c54ca497402 100644 (file)
@@ -1986,7 +1986,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
                ret = -ENODEV;
                goto out_free_le;
        }
-       le->irq = irq;
 
        if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
                irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -2000,7 +1999,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        init_waitqueue_head(&le->wait);
 
        /* Request a thread to read the events */
-       ret = request_threaded_irq(le->irq,
+       ret = request_threaded_irq(irq,
                                   lineevent_irq_handler,
                                   lineevent_irq_thread,
                                   irqflags,
@@ -2009,6 +2008,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        if (ret)
                goto out_free_le;
 
+       le->irq = irq;
+
        fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                ret = fd;
index cbd593f7d553f71e0b7b1ba80bf98f9384bcf889..2170db83e41d95dcde7e2e069f1171bcbda7d061 100644 (file)
@@ -1728,7 +1728,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
 
        if (user_addr) {
-               pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+               pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
                ret = init_user_pages(*mem, user_addr, criu_resume);
                if (ret)
                        goto allocate_init_user_pages_failed;
index 1400abee9f402e3785374516a21105d7120a7ea7..be7aff2d4a57efd7ba345a7b59526d4889dce88a 100644 (file)
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                }
                adev->ip_blocks[i].status.sw = true;
 
-               /* need to do gmc hw init early so we can allocate gpu mem */
-               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+                       /* need to do common hw init early so everything is set up for gmc */
+                       r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+                       if (r) {
+                               DRM_ERROR("hw_init %d failed %d\n", i, r);
+                               goto init_failed;
+                       }
+                       adev->ip_blocks[i].status.hw = true;
+               } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+                       /* need to do gmc hw init early so we can allocate gpu mem */
                        /* Try to reserve bad pages early */
                        if (amdgpu_sriov_vf(adev))
                                amdgpu_virt_exchange_data(adev);
@@ -3052,8 +3060,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
        int i, r;
 
        static enum amd_ip_block_type ip_order[] = {
-               AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_COMMON,
+               AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_IH,
        };
index c20922a5af9fc4d410a34bd12b7b06bbcb455363..5b09c8f4fe95d62b9f5f5267973e9296683a81fc 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -496,6 +497,7 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
        .destroy = drm_gem_fb_destroy,
        .create_handle = drm_gem_fb_create_handle,
+       .dirty = drm_atomic_helper_dirtyfb,
 };
 
 uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
index 9f7a5e393f85ed055cbf269dc2e047f19e790fcc..c9dec2434f370d9e32683e3babbc5254afecab77 100644 (file)
@@ -486,11 +486,14 @@ static int psp_sw_fini(void *handle)
                release_firmware(psp->ta_fw);
                psp->ta_fw = NULL;
        }
-       if (adev->psp.cap_fw) {
+       if (psp->cap_fw) {
                release_firmware(psp->cap_fw);
                psp->cap_fw = NULL;
        }
-
+       if (psp->toc_fw) {
+               release_firmware(psp->toc_fw);
+               psp->toc_fw = NULL;
+       }
        if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
            adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
                psp_sysfs_fini(adev);
@@ -753,7 +756,7 @@ static int psp_tmr_init(struct psp_context *psp)
        }
 
        pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-       ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+       ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
 
index c32b74bd970fc53ecb0dc6c3bc833cdf4ac0bc50..e593e8c2a54d6b0d211bd5531ea3b54be4326a50 100644 (file)
@@ -36,6 +36,7 @@
 #define PSP_CMD_BUFFER_SIZE    0x1000
 #define PSP_1_MEG              0x100000
 #define PSP_TMR_SIZE(adev)     ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT      0x100000
 #define PSP_FW_NAME_LEN                0x24
 
 enum psp_shared_mem_size {
index ff5361f5c2d4f2746a3db6fc00877733434d6cc9..12c6f97945a52cef376a9987e221fbd06a8ee2a8 100644 (file)
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
                amdgpu_ras_query_error_status(adev, &info);
 
                if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
-                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
                        if (amdgpu_ras_reset_error_status(adev, info.head.block))
                                dev_warn(adev->dev, "Failed to reset error counter and error status");
                }
index ebed3f5226dba19ae5e85ae4acf4653be48ae2b7..96b6cf4c4d54f863488a4999058b06efaa182fe1 100644 (file)
@@ -390,6 +390,7 @@ union amdgpu_firmware_header {
        struct rlc_firmware_header_v2_1 rlc_v2_1;
        struct rlc_firmware_header_v2_2 rlc_v2_2;
        struct rlc_firmware_header_v2_3 rlc_v2_3;
+       struct rlc_firmware_header_v2_4 rlc_v2_4;
        struct sdma_firmware_header_v1_0 sdma;
        struct sdma_firmware_header_v1_1 sdma_v1_1;
        struct sdma_firmware_header_v2_0 sdma_v2_0;
index b465baa267628229aaee75ec7f8826232390c59b..aa761ff3a5faec4e383b93f7467d13b27974bce6 100644 (file)
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
                WREG32_PCIE(smnPCIE_LC_CNTL, data);
 }
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
        uint32_t def, data;
 
        def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-       nbio_v2_3_program_ltr(adev);
+       /* Don't bother about LTR if LTR is not enabled
+        * in the path */
+       if (adev->pdev->ltr_path)
+               nbio_v2_3_program_ltr(adev);
 
        def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
        data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
        data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
index f7f6ddebd3e49b6ec55f411fda96cf4d35959f33..37615a77287bc00e52dab28d57d25b9df21cc57c 100644 (file)
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
                        mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
        uint32_t def, data;
 
        def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-       nbio_v6_1_program_ltr(adev);
+       /* Don't bother about LTR if LTR is not enabled
+        * in the path */
+       if (adev->pdev->ltr_path)
+               nbio_v6_1_program_ltr(adev);
 
        def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
        data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
        data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
index 11848d1e238b6b7c3724b339d8c955bc1685d664..19455a72593916989de322cb038ce42b4d7482c2 100644 (file)
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
 };
 
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
        uint32_t def, data;
 
        if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-       nbio_v7_4_program_ltr(adev);
+       /* Don't bother about LTR if LTR is not enabled
+        * in the path */
+       if (adev->pdev->ltr_path)
+               nbio_v7_4_program_ltr(adev);
 
        def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
        data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
        data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
index 1dc95ef21da6afbd38c966f6e221cc6d5ced551d..def89379b51a57b13f1ca14c61899b68ad5ccb45 100644 (file)
 #include "nbio/nbio_7_7_0_sh_mask.h"
 #include <uapi/linux/kfd_ioctl.h>
 
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+       WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+                    adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+       WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+                    adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
 static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
 {
        u32 tmp;
@@ -68,12 +76,6 @@ static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instan
                doorbell_range = REG_SET_FIELD(doorbell_range,
                                               GDC0_BIF_CSDMA_DOORBELL_RANGE,
                                               SIZE, doorbell_size);
-               doorbell_range = REG_SET_FIELD(doorbell_range,
-                                              GDC0_BIF_SDMA0_DOORBELL_RANGE,
-                                              OFFSET, doorbell_index);
-               doorbell_range = REG_SET_FIELD(doorbell_range,
-                                              GDC0_BIF_SDMA0_DOORBELL_RANGE,
-                                              SIZE, doorbell_size);
        } else {
                doorbell_range = REG_SET_FIELD(doorbell_range,
                                               GDC0_BIF_SDMA0_DOORBELL_RANGE,
@@ -342,4 +344,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
        .get_clockgating_state = nbio_v7_7_get_clockgating_state,
        .ih_control = nbio_v7_7_ih_control,
        .init_registers = nbio_v7_7_init_registers,
+       .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
 };
index 65181efba50ec73c68e44001692259f590983373..56424f75dd2cc900d078bba712b0f882f564c51d 100644 (file)
@@ -1504,6 +1504,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
                WREG32_SDMA(i, mmSDMA0_CNTL, temp);
 
                if (!amdgpu_sriov_vf(adev)) {
+                       ring = &adev->sdma.instance[i].ring;
+                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
+                               ring->use_doorbell, ring->doorbell_index,
+                               adev->doorbell_index.sdma_doorbell_range);
+
                        /* unhalt engine */
                        temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
                        temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
index fde6154f200963eb2aa81a039349dc987e1f93c0..183024d7c184e3d2be2b4c1386a827f500661bb2 100644 (file)
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
        return 0;
 }
 
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
-       int i;
-       struct amdgpu_ring *ring;
-
-       /* sdma/ih doorbell range are programed by hypervisor */
-       if (!amdgpu_sriov_vf(adev)) {
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       ring = &adev->sdma.instance[i].ring;
-                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
-                               ring->use_doorbell, ring->doorbell_index,
-                               adev->doorbell_index.sdma_doorbell_range);
-               }
-
-               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
-                                               adev->irq.ih.doorbell_index);
-       }
-}
-
 static int soc15_common_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
 
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
-       /* HW doorbell routing policy: doorbell writing not
-        * in SDMA/IH/MM/ACV range will be routed to CP. So
-        * we need to init SDMA/IH/MM/ACV doorbell range prior
-        * to CP ip block init and ring test.
-        */
-       soc15_doorbell_range_init(adev);
 
        return 0;
 }
index 55284b24f113934bcf9470be8ab8c03805dbd3b8..2e50db3b761e4697d0426f9abb3654c85975019d 100644 (file)
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
 {
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
+               return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
        case IP_VERSION(11, 0, 2):
                return false;
        default:
index 03b7066471f9ad251d4337350ced36882a10f582..1e83db0c5438d0c07b27bf1540ad6b4bdf2bc61e 100644 (file)
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
                }
        }
 
+       if (!amdgpu_sriov_vf(adev))
+               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+                                                   adev->irq.ih.doorbell_index);
+
        pci_set_master(adev->pdev);
 
        /* enable interrupts */
index 2022ffbb8dba55e6522e56e689d582c87dc6e543..59dfca093155c6a1cf5bd58b8f62a31e2adf4738 100644 (file)
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
                }
        }
 
+       if (!amdgpu_sriov_vf(adev))
+               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+                                                   adev->irq.ih.doorbell_index);
+
        pci_set_master(adev->pdev);
 
        /* enable interrupts */
index 0e48824f55e3c6be65305eca236fcff25db170b5..ee242d9d8b06011c2d8307a35d96d24c24d220bd 100644 (file)
@@ -3288,6 +3288,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
                                   &crc_win_y_end_fops);
        debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
                                   &crc_win_update_fops);
+       dput(dir);
 #endif
        debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
                            crtc, &amdgpu_current_bpc_fops);
index beb025cd3dc29671a5917a5d0f01ef2c46410d63..9781a8dbc2386abb45191c903e7636768b9bc5af 100644 (file)
@@ -670,6 +670,8 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
        }
        ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
        bw_params->vram_type = bios_info->memory_type;
+
+       bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
        bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
 
        for (i = 0; i < WM_SET_COUNT; i++) {
index f62d50901d92e8ea225d98575583481599c49bbc..0c85ab5933b4a5c7203b8cc1063437f0646379dd 100644 (file)
@@ -329,7 +329,7 @@ bool dc_stream_set_cursor_attributes(
 
        dc = stream->ctx->dc;
 
-       if (attributes->height * attributes->width * 4 > 16384)
+       if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
                if (stream->mall_stream_config.type == SUBVP_MAIN)
                        return false;
 
index 5908b60db313964c9a888b470926a5cc1d478a4c..dbf8158b832e454ead45c1032be1280944a78ea2 100644 (file)
@@ -745,6 +745,7 @@ struct dc_debug_options {
        bool disable_fixed_vs_aux_timeout_wa;
        bool force_disable_subvp;
        bool force_subvp_mclk_switch;
+       bool allow_sw_cursor_fallback;
        bool force_usr_allow;
        /* uses value at boot and disables switch */
        bool disable_dtb_ref_clk_switch;
index 09b304507badb6bc5ec6b800e53a771888edeb61..52a61b3e5a8b06a54ca13f1ca02627fe882794cf 100644 (file)
@@ -417,44 +417,42 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
        struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
        struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
        struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
-       int16_t drr_frame_us = 0;
-       int16_t min_drr_supported_us = 0;
-       int16_t max_drr_supported_us = 0;
-       int16_t max_drr_vblank_us = 0;
-       int16_t max_drr_mallregion_us = 0;
-       int16_t mall_region_us = 0;
-       int16_t prefetch_us = 0;
-       int16_t subvp_active_us = 0;
-       int16_t drr_active_us = 0;
-       int16_t min_vtotal_supported = 0;
-       int16_t max_vtotal_supported = 0;
+       uint16_t drr_frame_us = 0;
+       uint16_t min_drr_supported_us = 0;
+       uint16_t max_drr_supported_us = 0;
+       uint16_t max_drr_vblank_us = 0;
+       uint16_t max_drr_mallregion_us = 0;
+       uint16_t mall_region_us = 0;
+       uint16_t prefetch_us = 0;
+       uint16_t subvp_active_us = 0;
+       uint16_t drr_active_us = 0;
+       uint16_t min_vtotal_supported = 0;
+       uint16_t max_vtotal_supported = 0;
 
        pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
        pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
        pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
 
-       drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
-                                (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
+       drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
+                       (((uint64_t)drr_timing->pix_clk_100hz * 100)));
        // P-State allow width and FW delays are already included in phantom_timing->v_addressable
-       mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
-                                  (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
+       mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
+                       (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
        min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
-       min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
-                                        (div64_s64((int64_t)min_drr_supported_us, 1000000)),
-                                        (int64_t)drr_timing->h_total);
-
-       prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
-                               (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
-                               dc->caps.subvp_prefetch_end_to_mall_start_us);
-       subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
-                                   (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
-       drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
-                                 (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
-       max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
+       min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
+                       (((uint64_t)drr_timing->h_total * 1000000)));
+
+       prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
+                       (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+       subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
+                       (((uint64_t)main_timing->pix_clk_100hz * 100)));
+       drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
+                       (((uint64_t)drr_timing->pix_clk_100hz * 100)));
+       max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
        max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
        max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
-       max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
-                                        (int64_t)drr_timing->h_total);
+       max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
+                       (((uint64_t)drr_timing->h_total * 1000000)));
 
        pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
        pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
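
[Editor's sketch, not part of the patch] The rewritten expressions above all follow one pattern: widen to u64, scale the numerator by 1,000,000 before a single div64_u64(), and keep the result unsigned. The old form multiplied the divisor by 1,000,000 instead, so with real pixel clocks the int16_t results truncated to zero. A compact sketch of the conversion, assuming pix_clk_100hz is the pixel clock in units of 100 Hz (the helper name is illustrative):

#include <linux/math64.h>
#include <linux/types.h>

/* microseconds spent scanning 'lines' lines of 'h_total' pixels each */
static inline u64 lines_to_us(u64 lines, u64 h_total, u64 pix_clk_100hz)
{
        /* time_us = lines * h_total / pixel_clock_hz * 1e6, in one division */
        return div64_u64(lines * h_total * 1000000ULL,
                         pix_clk_100hz * 100ULL);
}

/*
 * e.g. drr_frame_us   ~ lines_to_us(v_total,       h_total, pix_clk_100hz)
 *      mall_region_us ~ lines_to_us(v_addressable, h_total, pix_clk_100hz)
 */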
@@ -548,10 +546,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
        struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
 
-       subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
-                                      (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
-       subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
-                                      (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
+       subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
+                       (uint64_t)phantom_timing0->h_total * 1000000),
+                       (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+       subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
+                       (uint64_t)phantom_timing1->h_total * 1000000),
+                       (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
 
        // Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
        // should increase its prefetch time to match the other
@@ -559,16 +559,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
                pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
                prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
                pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
-                       div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
-                                  (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
-                                 (int64_t)phantom_timing1->h_total);
+                               div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+                                       ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
+                                       ((uint64_t)phantom_timing1->h_total * 1000000));
+
        } else if (subvp1_prefetch_us >  subvp0_prefetch_us) {
                pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
                prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
                pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
-                       div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
-                                  (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
-                                 (int64_t)phantom_timing0->h_total);
+                               div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+                                       ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
+                                       ((uint64_t)phantom_timing0->h_total * 1000000));
        }
 }
 
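
[Editor's sketch, not part of the patch] prefetch_to_mall_start_lines above is a ceiling division: lines = ceil(time_us * pixel_clock_hz / (h_total * 1e6)), implemented by adding divisor - 1 to the dividend before div64_u64(). A minimal sketch of the same rounding step (helper name is illustrative only):

#include <linux/math64.h>
#include <linux/types.h>

static inline u64 us_to_lines_round_up(u64 time_us, u64 pix_clk_100hz,
                                       u64 h_total)
{
        u64 line_time_scaled = h_total * 1000000ULL;   /* h_total * 1e6 */

        /* adding (divisor - 1) to the dividend rounds the quotient up */
        return div64_u64(time_us * pix_clk_100hz * 100ULL +
                         line_time_scaled - 1,
                         line_time_scaled);
}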
@@ -630,13 +631,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
 
        // Round up
        pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
-               div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
-                          (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
-                         (int64_t)phantom_timing->h_total);
+                       div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+                                       ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
        pipe_data->pipe_config.subvp_data.processing_delay_lines =
-               div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
-                          (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
-                         (int64_t)phantom_timing->h_total);
+                       div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+                                       ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
        // Find phantom pipe index based on phantom stream
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
index e3351ddc566cf6c32d305837d4912243678b2ef2..06d8638db696a8d8892cedacca1178e392fa2d58 100644
@@ -67,8 +67,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc)
 {
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-       REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0,
-                    DIG_FIFO_READ_START_LEVEL, 0);
+       REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
 }
 
 static void enc314_dp_set_odm_combine(
index 6ec1c52535b9b0a8dda30720c517450e38869fa3..2038cbda33f7485648ec60628527b4b72f22437f 100644
@@ -103,6 +103,11 @@ void hubp32_cursor_set_attributes(
        enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
                        attr->width, attr->color_format);
 
+       //Round cursor width up to next multiple of 64
+       uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
+       uint32_t cursor_height = attr->height;
+       uint32_t cursor_size = cursor_width * cursor_height;
+
        hubp->curs_attr = *attr;
 
        REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
@@ -126,7 +131,24 @@ void hubp32_cursor_set_attributes(
                         /* used to shift the cursor chunk request deadline */
                        CURSOR0_CHUNK_HDL_ADJUST, 3);
 
-       if (attr->width * attr->height * 4 > 16384)
+       switch (attr->color_format) {
+       case CURSOR_MODE_MONO:
+               cursor_size /= 2;
+               break;
+       case CURSOR_MODE_COLOR_1BIT_AND:
+       case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+       case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+               cursor_size *= 4;
+               break;
+
+       case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+       case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+       default:
+               cursor_size *= 8;
+               break;
+       }
+
+       if (cursor_size > 16384)
                REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
        else
                REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
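
[Editor's sketch, not part of the patch] The new code sizes the cursor surface by rounding its width up to a 64-pixel pitch and scaling by the per-format byte count before comparing against the 16 KiB MALL threshold. A standalone sketch of that calculation; the dc_cursor_color_format values are the ones used in the hunk above, while the helper name and comments are assumptions for illustration:

#include <linux/types.h>

/* assumes the DC enum dc_cursor_color_format from the driver headers */
static u32 cursor_bytes(u32 width, u32 height,
                        enum dc_cursor_color_format fmt)
{
        u32 pitch = ((width + 63) / 64) * 64;   /* round width up to 64 px */
        u32 size = pitch * height;              /* surface size in pixels  */

        switch (fmt) {
        case CURSOR_MODE_MONO:
                return size / 2;                /* mono handling, as above */
        case CURSOR_MODE_COLOR_1BIT_AND:
        case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
        case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
                return size * 4;                /* 4 bytes per pixel */
        case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
        case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
        default:
                return size * 8;                /* 8 bytes per pixel */
        }
}

/* USE_MALL_FOR_CURSOR is set only when cursor_bytes(w, h, fmt) > 16384 */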
index 8d9d96c398085b7fa9bfc9a9ece5849cdedd9b08..344fe7535df5b9d46ae32424cc93bb4a515a8705 100644
@@ -741,7 +741,29 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
                struct hubp *hubp = pipe->plane_res.hubp;
 
                if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
-                       if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+                       //Round cursor width up to next multiple of 64
+                       int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
+                       int cursor_height = hubp->curs_attr.height;
+                       int cursor_size = cursor_width * cursor_height;
+
+                       switch (hubp->curs_attr.color_format) {
+                       case CURSOR_MODE_MONO:
+                               cursor_size /= 2;
+                               break;
+                       case CURSOR_MODE_COLOR_1BIT_AND:
+                       case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+                       case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+                               cursor_size *= 4;
+                               break;
+
+                       case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+                       case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+                       default:
+                               cursor_size *= 8;
+                               break;
+                       }
+
+                       if (cursor_size > 16384)
                                cache_cursor = true;
 
                        if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
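
[Editor's note] This branch repeats the per-format cursor sizing shown after hubp32_cursor_set_attributes() above. With a shared helper along the lines of the illustrative cursor_bytes() sketch, the check here would reduce to:

        cache_cursor = cursor_bytes(hubp->curs_attr.width,
                                    hubp->curs_attr.height,
                                    hubp->curs_attr.color_format) > 16384;

(The patch keeps the two computations inline; the helper is only a sketch.)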
index 8b887b552f2c764a92816fe8eb4525c7eb7c6760..c3b783cea8a03138e3e7ef343a696e5362c57513 100644
@@ -871,6 +871,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .exit_idle_opt_for_cursor_updates = true,
        .enable_single_display_2to1_odm_policy = true,
        .enable_dp_dig_pixel_rate_div_policy = 1,
+       .allow_sw_cursor_fallback = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -2039,7 +2040,8 @@ static bool dcn32_resource_construct(
        dc->caps.max_downscale_ratio = 600;
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
-       dc->caps.max_cursor_size = 256;
+       /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
+       dc->caps.max_cursor_size = 64;
        dc->caps.min_horizontal_blanking_period = 80;
        dc->caps.dmdata_alloc_size = 2048;
        dc->caps.mall_size_per_mem_channel = 0;
index 1e7e6201c88019769dc4a473c2c532eae8e5154e..cf15d0e5e9b43e1d4d2bba5fde9f29fd29b7fa97 100644
@@ -30,6 +30,9 @@
 
 #define DCN3_2_DET_SEG_SIZE 64
 #define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
+#define DCN3_2_MBLK_WIDTH 128
+#define DCN3_2_MBLK_HEIGHT_4BPE 128
+#define DCN3_2_MBLK_HEIGHT_8BPE 64
 
 #define TO_DCN32_RES_POOL(pool)\
        container_of(pool, struct dcn32_resource_pool, base)
index ab918fe38f6a57f34dc247937ea2464827124dca..1f195c5b3377d64eb482b4be63c17f05fea10663 100644
@@ -46,7 +46,6 @@
 uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
 {
        uint32_t num_ways = 0;
-       uint32_t mall_region_pixels = 0;
        uint32_t bytes_per_pixel = 0;
        uint32_t cache_lines_used = 0;
        uint32_t lines_per_way = 0;
@@ -54,20 +53,64 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
        uint32_t bytes_in_mall = 0;
        uint32_t num_mblks = 0;
        uint32_t cache_lines_per_plane = 0;
-       uint32_t i = 0;
+       uint32_t i = 0, j = 0;
+       uint32_t mblk_width = 0;
+       uint32_t mblk_height = 0;
+       uint32_t full_vp_width_blk_aligned = 0;
+       uint32_t full_vp_height_blk_aligned = 0;
+       uint32_t mall_alloc_width_blk_aligned = 0;
+       uint32_t mall_alloc_height_blk_aligned = 0;
+       uint32_t full_vp_height = 0;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                // Find the phantom pipes
-               if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+               if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
                                pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-                       bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
-                       mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
+                       struct pipe_ctx *main_pipe = NULL;
+
+                       /* Get full viewport height from main pipe (required for MBLK calculation) */
+                       for (j = 0; j < dc->res_pool->pipe_count; j++) {
+                               main_pipe = &context->res_ctx.pipe_ctx[j];
+                               if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
+                                       full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
+                                       break;
+                               }
+                       }
 
-                       // For bytes required in MALL, calculate based on number of MBlks required
-                       num_mblks = (mall_region_pixels * bytes_per_pixel +
-                                       DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES;
+                       bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+                       mblk_width = DCN3_2_MBLK_WIDTH;
+                       mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+
+                       /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+                        * FLOOR(vp_x_start, blk_width)
+                        */
+                       full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+                                       pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) +
+                                       (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+                       /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+                        * FLOOR(vp_y_start, blk_height)
+                        */
+                       full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+                                       full_vp_height + mblk_height - 1) / mblk_height * mblk_height) +
+                                       (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
+
+                       /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
+                       mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
+
+                       /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
+                       mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+                                       mblk_height * mblk_height + mblk_height;
+
+                       /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
+                        * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
+                        * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
+                        * (Should be divisible, but round up if not)
+                        */
+                       num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+                                       ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
                        bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
                        // cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
                        // (MALL is 64-byte aligned)
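
[Editor's sketch, not part of the patch] The phantom-pipe accounting above converts the block-aligned viewport into whole MALL blocks using the DCN3_2_MBLK_* geometry added earlier (128-pixel-wide blocks, 128 or 64 lines tall for 4 or 8 bytes per pixel, 64 KiB each). A minimal sketch of just that step, with illustrative helper and macro names mirroring those defines:

#include <linux/types.h>

#define MBLK_SIZE_BYTES   65536   /* DCN3_2_MALL_MBLK_SIZE_BYTES */
#define MBLK_WIDTH        128     /* DCN3_2_MBLK_WIDTH           */
#define MBLK_HEIGHT_4BPE  128     /* DCN3_2_MBLK_HEIGHT_4BPE     */
#define MBLK_HEIGHT_8BPE  64      /* DCN3_2_MBLK_HEIGHT_8BPE     */

static u32 subvp_mall_bytes(u32 vp_width_blk_aligned,
                            u32 vp_height_blk_aligned,
                            u32 bytes_per_pixel)
{
        u32 mblk_height = bytes_per_pixel == 4 ? MBLK_HEIGHT_4BPE
                                               : MBLK_HEIGHT_8BPE;
        /* one MBLK per 128 x (128|64) tile covering the aligned viewport */
        u32 num_mblks = ((vp_width_blk_aligned + MBLK_WIDTH - 1) / MBLK_WIDTH) *
                        ((vp_height_blk_aligned + mblk_height - 1) / mblk_height);

        return num_mblks * MBLK_SIZE_BYTES;
}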
index c8b7d6ff38f4fa1887aad87bb8cc6a4bb9bcf0cb..7309eed33a61c2412ce6cc29142e27d0677d26b5 100644
@@ -872,6 +872,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .exit_idle_opt_for_cursor_updates = true,
        .enable_single_display_2to1_odm_policy = true,
        .enable_dp_dig_pixel_rate_div_policy = 1,
+       .allow_sw_cursor_fallback = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -1651,7 +1652,8 @@ static bool dcn321_resource_construct(
        dc->caps.max_downscale_ratio = 600;
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
-       dc->caps.max_cursor_size = 256;
+       /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+       dc->caps.max_cursor_size = 64;
        dc->caps.min_horizontal_blanking_period = 80;
        dc->caps.dmdata_alloc_size = 2048;
        dc->caps.mall_size_per_mem_channel = 0;
index 86a3b5bfd699b2c9b5a15ea048be1b2fbe6f9990..cb81ed2fbd539ec08d7993cd2bb5b1ffee7ff81a 100644
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
@@ -123,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
 DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
 DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o
 DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o
 DML += dcn31/dcn31_fpu.o
 DML += dcn32/dcn32_fpu.o
index 876b321b30ca6517e5a55b497017758a2add3530..1cb858dd6ea0229ed3c68eaf1848fa7d59f6c646 100644
@@ -6610,8 +6610,7 @@ static double CalculateUrgentLatency(
        return ret;
 }
 
-
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
                struct display_mode_lib *mode_lib,
                int MaxInterDCNTileRepeaters,
                int MaxPrefetchMode,
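
[Editor's sketch, not part of the patch] noinline_for_stack expands to plain noinline in the kernel; marking UseMinimumDCFCLK with it keeps this locals-heavy DML helper in its own stack frame rather than letting the compiler inline it into an already large caller. A trivial usage sketch with an illustrative helper:

#include <linux/compiler.h>

/* keep the helper's large local state out of the caller's stack frame */
static noinline_for_stack void heavy_dml_helper(void)
{
        unsigned int scratch[256] = { 0 };   /* stand-in for the real locals */

        (void)scratch;
}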
index d63b4209b14c080538fb2905129e18354f163dba..8ca66f1644dc8ab00dfbf29705b339e67d6ea138 100644
@@ -251,33 +251,13 @@ static void CalculateRowBandwidth(
 
 static void CalculateFlipSchedule(
                struct display_mode_lib *mode_lib,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                double UrgentExtraLatency,
                double UrgentLatency,
-               unsigned int GPUVMMaxPageTableLevels,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               bool GPUVMEnable,
-               double HostVMMinPageSize,
                double PDEAndMetaPTEBytesPerFrame,
                double MetaRowBytes,
-               double DPTEBytesPerRow,
-               double BandwidthAvailableForImmediateFlip,
-               unsigned int TotImmediateFlipBytes,
-               enum source_format_class SourcePixelFormat,
-               double LineTime,
-               double VRatio,
-               double VRatioChroma,
-               double Tno_bw,
-               bool DCCEnable,
-               unsigned int dpte_row_height,
-               unsigned int meta_row_height,
-               unsigned int dpte_row_height_chroma,
-               unsigned int meta_row_height_chroma,
-               double *DestinationLinesToRequestVMInImmediateFlip,
-               double *DestinationLinesToRequestRowInImmediateFlip,
-               double *final_flip_bw,
-               bool *ImmediateFlipSupportedForPipe);
+               double DPTEBytesPerRow);
 static double CalculateWriteBackDelay(
                enum source_format_class WritebackPixelFormat,
                double WritebackHRatio,
@@ -311,64 +291,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
 static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                struct display_mode_lib *mode_lib,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActivePlanes,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizedVBlank,
-               unsigned int dpte_group_bytes[],
-               unsigned int MetaChunkSize,
                double UrgentLatency,
                double ExtraLatency,
-               double WritebackLatency,
-               double WritebackChunkSize,
                double SOCCLK,
-               double DRAMClockChangeLatency,
-               double SRExitTime,
-               double SREnterPlusExitTime,
-               double SRExitZ8Time,
-               double SREnterPlusExitZ8Time,
                double DCFCLKDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int vtaps[],
-               unsigned int VTAPsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerPlane[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
-               double DSTXAfterScaler[],
-               double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                int unsigned CompressedBufferSizeInkByte,
                enum clock_change_support *DRAMClockChangeSupport,
-               double *UrgentWatermark,
-               double *WritebackUrgentWatermark,
-               double *DRAMClockChangeWatermark,
-               double *WritebackDRAMClockChangeWatermark,
                double *StutterExitWatermark,
                double *StutterEnterPlusExitWatermark,
                double *Z8StutterExitWatermark,
-               double *Z8StutterEnterPlusExitWatermark,
-               double *MinActiveDRAMClockChangeLatencySupported);
+               double *Z8StutterEnterPlusExitWatermark);
 
 static void CalculateDCFCLKDeepSleep(
                struct display_mode_lib *mode_lib,
@@ -2904,33 +2848,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                                CalculateFlipSchedule(
                                                mode_lib,
+                                               k,
                                                HostVMInefficiencyFactor,
                                                v->UrgentExtraLatency,
                                                v->UrgentLatency,
-                                               v->GPUVMMaxPageTableLevels,
-                                               v->HostVMEnable,
-                                               v->HostVMMaxNonCachedPageTableLevels,
-                                               v->GPUVMEnable,
-                                               v->HostVMMinPageSize,
                                                v->PDEAndMetaPTEBytesFrame[k],
                                                v->MetaRowByte[k],
-                                               v->PixelPTEBytesPerRow[k],
-                                               v->BandwidthAvailableForImmediateFlip,
-                                               v->TotImmediateFlipBytes,
-                                               v->SourcePixelFormat[k],
-                                               v->HTotal[k] / v->PixelClock[k],
-                                               v->VRatio[k],
-                                               v->VRatioChroma[k],
-                                               v->Tno_bw[k],
-                                               v->DCCEnable[k],
-                                               v->dpte_row_height[k],
-                                               v->meta_row_height[k],
-                                               v->dpte_row_height_chroma[k],
-                                               v->meta_row_height_chroma[k],
-                                               &v->DestinationLinesToRequestVMInImmediateFlip[k],
-                                               &v->DestinationLinesToRequestRowInImmediateFlip[k],
-                                               &v->final_flip_bw[k],
-                                               &v->ImmediateFlipSupportedForPipe[k]);
+                                               v->PixelPTEBytesPerRow[k]);
                        }
 
                        v->total_dcn_read_bw_with_flip = 0.0;
@@ -3017,64 +2941,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                CalculateWatermarksAndDRAMSpeedChangeSupport(
                                mode_lib,
                                PrefetchMode,
-                               v->NumberOfActivePlanes,
-                               v->MaxLineBufferLines,
-                               v->LineBufferSize,
-                               v->WritebackInterfaceBufferSize,
                                v->DCFCLK,
                                v->ReturnBW,
-                               v->SynchronizedVBlank,
-                               v->dpte_group_bytes,
-                               v->MetaChunkSize,
                                v->UrgentLatency,
                                v->UrgentExtraLatency,
-                               v->WritebackLatency,
-                               v->WritebackChunkSize,
                                v->SOCCLK,
-                               v->DRAMClockChangeLatency,
-                               v->SRExitTime,
-                               v->SREnterPlusExitTime,
-                               v->SRExitZ8Time,
-                               v->SREnterPlusExitZ8Time,
                                v->DCFCLKDeepSleep,
                                v->DETBufferSizeY,
                                v->DETBufferSizeC,
                                v->SwathHeightY,
                                v->SwathHeightC,
-                               v->LBBitPerPixel,
                                v->SwathWidthY,
                                v->SwathWidthC,
-                               v->HRatio,
-                               v->HRatioChroma,
-                               v->vtaps,
-                               v->VTAPsChroma,
-                               v->VRatio,
-                               v->VRatioChroma,
-                               v->HTotal,
-                               v->PixelClock,
-                               v->BlendingAndTiming,
                                v->DPPPerPlane,
                                v->BytePerPixelDETY,
                                v->BytePerPixelDETC,
-                               v->DSTXAfterScaler,
-                               v->DSTYAfterScaler,
-                               v->WritebackEnable,
-                               v->WritebackPixelFormat,
-                               v->WritebackDestinationWidth,
-                               v->WritebackDestinationHeight,
-                               v->WritebackSourceHeight,
                                v->UnboundedRequestEnabled,
                                v->CompressedBufferSizeInkByte,
                                &DRAMClockChangeSupport,
-                               &v->UrgentWatermark,
-                               &v->WritebackUrgentWatermark,
-                               &v->DRAMClockChangeWatermark,
-                               &v->WritebackDRAMClockChangeWatermark,
                                &v->StutterExitWatermark,
                                &v->StutterEnterPlusExitWatermark,
                                &v->Z8StutterExitWatermark,
-                               &v->Z8StutterEnterPlusExitWatermark,
-                               &v->MinActiveDRAMClockChangeLatencySupported);
+                               &v->Z8StutterEnterPlusExitWatermark);
 
                for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                        if (v->WritebackEnable[k] == true) {
@@ -3598,61 +3486,43 @@ static void CalculateRowBandwidth(
 
 static void CalculateFlipSchedule(
                struct display_mode_lib *mode_lib,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                double UrgentExtraLatency,
                double UrgentLatency,
-               unsigned int GPUVMMaxPageTableLevels,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               bool GPUVMEnable,
-               double HostVMMinPageSize,
                double PDEAndMetaPTEBytesPerFrame,
                double MetaRowBytes,
-               double DPTEBytesPerRow,
-               double BandwidthAvailableForImmediateFlip,
-               unsigned int TotImmediateFlipBytes,
-               enum source_format_class SourcePixelFormat,
-               double LineTime,
-               double VRatio,
-               double VRatioChroma,
-               double Tno_bw,
-               bool DCCEnable,
-               unsigned int dpte_row_height,
-               unsigned int meta_row_height,
-               unsigned int dpte_row_height_chroma,
-               unsigned int meta_row_height_chroma,
-               double *DestinationLinesToRequestVMInImmediateFlip,
-               double *DestinationLinesToRequestRowInImmediateFlip,
-               double *final_flip_bw,
-               bool *ImmediateFlipSupportedForPipe)
+               double DPTEBytesPerRow)
 {
+       struct vba_vars_st *v = &mode_lib->vba;
        double min_row_time = 0.0;
        unsigned int HostVMDynamicLevelsTrips;
        double TimeForFetchingMetaPTEImmediateFlip;
        double TimeForFetchingRowInVBlankImmediateFlip;
        double ImmediateFlipBW;
+       double LineTime = v->HTotal[k] / v->PixelClock[k];
 
-       if (GPUVMEnable == true && HostVMEnable == true) {
-               HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+       if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+               HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
        } else {
                HostVMDynamicLevelsTrips = 0;
        }
 
-       if (GPUVMEnable == true || DCCEnable == true) {
-               ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+       if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+               ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
        }
 
-       if (GPUVMEnable == true) {
+       if (v->GPUVMEnable == true) {
                TimeForFetchingMetaPTEImmediateFlip = dml_max3(
-                               Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
-                               UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+                               v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+                               UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
                                LineTime / 4.0);
        } else {
                TimeForFetchingMetaPTEImmediateFlip = 0;
        }
 
-       *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
-       if ((GPUVMEnable == true || DCCEnable == true)) {
+       v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+       if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
                TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
                                (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
                                UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3661,54 +3531,54 @@ static void CalculateFlipSchedule(
                TimeForFetchingRowInVBlankImmediateFlip = 0;
        }
 
-       *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+       v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
 
-       if (GPUVMEnable == true) {
-               *final_flip_bw = dml_max(
-                               PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
-                               (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
-       } else if ((GPUVMEnable == true || DCCEnable == true)) {
-               *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+       if (v->GPUVMEnable == true) {
+               v->final_flip_bw[k] = dml_max(
+                               PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+                               (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+       } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+               v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
        } else {
-               *final_flip_bw = 0;
+               v->final_flip_bw[k] = 0;
        }
 
-       if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
-               if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
-               } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+       if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+               if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+                       min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+               } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+                       min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
                } else {
                        min_row_time = dml_min4(
-                                       dpte_row_height * LineTime / VRatio,
-                                       meta_row_height * LineTime / VRatio,
-                                       dpte_row_height_chroma * LineTime / VRatioChroma,
-                                       meta_row_height_chroma * LineTime / VRatioChroma);
+                                       v->dpte_row_height[k] * LineTime / v->VRatio[k],
+                                       v->meta_row_height[k] * LineTime / v->VRatio[k],
+                                       v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+                                       v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
                }
        } else {
-               if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dpte_row_height * LineTime / VRatio;
-               } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = meta_row_height * LineTime / VRatio;
+               if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+                       min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+               } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+                       min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
                } else {
-                       min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+                       min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
                }
        }
 
-       if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+       if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
                        || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
-               *ImmediateFlipSupportedForPipe = false;
+               v->ImmediateFlipSupportedForPipe[k] = false;
        } else {
-               *ImmediateFlipSupportedForPipe = true;
+               v->ImmediateFlipSupportedForPipe[k] = true;
        }
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
-       dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+       dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+       dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
        dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
        dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
        dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
-       dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+       dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
 #endif
 
 }
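
[Editor's sketch, not part of the patch] The CalculateFlipSchedule() rework above (and the matching CalculateWatermarksAndDRAMSpeedChangeSupport() change further down) trades roughly 25 per-plane scalar parameters and output pointers for a plane index k plus direct access to the shared vba_vars_st state, shrinking the caller's argument setup and stack usage. A minimal sketch of that pattern with illustrative types and names, not the DML structures themselves:

#define MAX_PLANES 8

struct plane_state {
        double vratio[MAX_PLANES];
        double line_time[MAX_PLANES];
        unsigned int dpte_row_height[MAX_PLANES];
        double min_row_time[MAX_PLANES];   /* per-plane result, written back */
};

/* before: calc(double vratio, double line_time, ..., double *min_row_time) */
/* after:  one shared-state pointer plus the plane index                    */
static void calc_min_row_time(struct plane_state *v, unsigned int k)
{
        v->min_row_time[k] =
                v->dpte_row_height[k] * v->line_time[k] / v->vratio[k];
}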
@@ -5300,33 +5170,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                        for (k = 0; k < v->NumberOfActivePlanes; k++) {
                                                CalculateFlipSchedule(
                                                                mode_lib,
+                                                               k,
                                                                HostVMInefficiencyFactor,
                                                                v->ExtraLatency,
                                                                v->UrgLatency[i],
-                                                               v->GPUVMMaxPageTableLevels,
-                                                               v->HostVMEnable,
-                                                               v->HostVMMaxNonCachedPageTableLevels,
-                                                               v->GPUVMEnable,
-                                                               v->HostVMMinPageSize,
                                                                v->PDEAndMetaPTEBytesPerFrame[i][j][k],
                                                                v->MetaRowBytes[i][j][k],
-                                                               v->DPTEBytesPerRow[i][j][k],
-                                                               v->BandwidthAvailableForImmediateFlip,
-                                                               v->TotImmediateFlipBytes,
-                                                               v->SourcePixelFormat[k],
-                                                               v->HTotal[k] / v->PixelClock[k],
-                                                               v->VRatio[k],
-                                                               v->VRatioChroma[k],
-                                                               v->Tno_bw[k],
-                                                               v->DCCEnable[k],
-                                                               v->dpte_row_height[k],
-                                                               v->meta_row_height[k],
-                                                               v->dpte_row_height_chroma[k],
-                                                               v->meta_row_height_chroma[k],
-                                                               &v->DestinationLinesToRequestVMInImmediateFlip[k],
-                                                               &v->DestinationLinesToRequestRowInImmediateFlip[k],
-                                                               &v->final_flip_bw[k],
-                                                               &v->ImmediateFlipSupportedForPipe[k]);
+                                                               v->DPTEBytesPerRow[i][j][k]);
                                        }
                                        v->total_dcn_read_bw_with_flip = 0.0;
                                        for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5384,64 +5234,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                        CalculateWatermarksAndDRAMSpeedChangeSupport(
                                        mode_lib,
                                        v->PrefetchModePerState[i][j],
-                                       v->NumberOfActivePlanes,
-                                       v->MaxLineBufferLines,
-                                       v->LineBufferSize,
-                                       v->WritebackInterfaceBufferSize,
                                        v->DCFCLKState[i][j],
                                        v->ReturnBWPerState[i][j],
-                                       v->SynchronizedVBlank,
-                                       v->dpte_group_bytes,
-                                       v->MetaChunkSize,
                                        v->UrgLatency[i],
                                        v->ExtraLatency,
-                                       v->WritebackLatency,
-                                       v->WritebackChunkSize,
                                        v->SOCCLKPerState[i],
-                                       v->DRAMClockChangeLatency,
-                                       v->SRExitTime,
-                                       v->SREnterPlusExitTime,
-                                       v->SRExitZ8Time,
-                                       v->SREnterPlusExitZ8Time,
                                        v->ProjectedDCFCLKDeepSleep[i][j],
                                        v->DETBufferSizeYThisState,
                                        v->DETBufferSizeCThisState,
                                        v->SwathHeightYThisState,
                                        v->SwathHeightCThisState,
-                                       v->LBBitPerPixel,
                                        v->SwathWidthYThisState,
                                        v->SwathWidthCThisState,
-                                       v->HRatio,
-                                       v->HRatioChroma,
-                                       v->vtaps,
-                                       v->VTAPsChroma,
-                                       v->VRatio,
-                                       v->VRatioChroma,
-                                       v->HTotal,
-                                       v->PixelClock,
-                                       v->BlendingAndTiming,
                                        v->NoOfDPPThisState,
                                        v->BytePerPixelInDETY,
                                        v->BytePerPixelInDETC,
-                                       v->DSTXAfterScaler,
-                                       v->DSTYAfterScaler,
-                                       v->WritebackEnable,
-                                       v->WritebackPixelFormat,
-                                       v->WritebackDestinationWidth,
-                                       v->WritebackDestinationHeight,
-                                       v->WritebackSourceHeight,
                                        UnboundedRequestEnabledThisState,
                                        CompressedBufferSizeInkByteThisState,
                                        &v->DRAMClockChangeSupport[i][j],
-                                       &v->UrgentWatermark,
-                                       &v->WritebackUrgentWatermark,
-                                       &v->DRAMClockChangeWatermark,
-                                       &v->WritebackDRAMClockChangeWatermark,
-                                       &dummy,
                                        &dummy,
                                        &dummy,
                                        &dummy,
-                                       &v->MinActiveDRAMClockChangeLatencySupported);
+                                       &dummy);
                }
        }
 
@@ -5566,64 +5380,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                struct display_mode_lib *mode_lib,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActivePlanes,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizedVBlank,
-               unsigned int dpte_group_bytes[],
-               unsigned int MetaChunkSize,
                double UrgentLatency,
                double ExtraLatency,
-               double WritebackLatency,
-               double WritebackChunkSize,
                double SOCCLK,
-               double DRAMClockChangeLatency,
-               double SRExitTime,
-               double SREnterPlusExitTime,
-               double SRExitZ8Time,
-               double SREnterPlusExitZ8Time,
                double DCFCLKDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int vtaps[],
-               unsigned int VTAPsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerPlane[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
-               double DSTXAfterScaler[],
-               double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                int unsigned CompressedBufferSizeInkByte,
                enum clock_change_support *DRAMClockChangeSupport,
-               double *UrgentWatermark,
-               double *WritebackUrgentWatermark,
-               double *DRAMClockChangeWatermark,
-               double *WritebackDRAMClockChangeWatermark,
                double *StutterExitWatermark,
                double *StutterEnterPlusExitWatermark,
                double *Z8StutterExitWatermark,
-               double *Z8StutterEnterPlusExitWatermark,
-               double *MinActiveDRAMClockChangeLatencySupported)
+               double *Z8StutterEnterPlusExitWatermark)
 {
        struct vba_vars_st *v = &mode_lib->vba;
        double EffectiveLBLatencyHidingY;
@@ -5643,103 +5421,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
        double TotalPixelBW = 0.0;
        int k, j;
 
-       *UrgentWatermark = UrgentLatency + ExtraLatency;
+       v->UrgentWatermark = UrgentLatency + ExtraLatency;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
        dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
-       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
 #endif
 
-       *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+       v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
-       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+       dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
 #endif
 
        v->TotalActiveWriteback = 0;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (WritebackEnable[k] == true) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (v->WritebackEnable[k] == true) {
                        v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
                }
        }
 
        if (v->TotalActiveWriteback <= 1) {
-               *WritebackUrgentWatermark = WritebackLatency;
+               v->WritebackUrgentWatermark = v->WritebackLatency;
        } else {
-               *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
 
        if (v->TotalActiveWriteback <= 1) {
-               *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+               v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
        } else {
-               *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                TotalPixelBW = TotalPixelBW
-                               + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
-                                               / (HTotal[k] / PixelClock[k]);
+                               + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+                                               / (v->HTotal[k] / v->PixelClock[k]);
        }
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                double EffectiveDETBufferSizeY = DETBufferSizeY[k];
 
                v->LBLatencyHidingSourceLinesY = dml_min(
-                               (double) MaxLineBufferLines,
-                               dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+                               (double) v->MaxLineBufferLines,
+                               dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
 
                v->LBLatencyHidingSourceLinesC = dml_min(
-                               (double) MaxLineBufferLines,
-                               dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+                               (double) v->MaxLineBufferLines,
+                               dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
 
-               EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
 
-               EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
 
                if (UnboundedRequestEnabled) {
                        EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
-                                       + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+                                       + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
                }
 
                LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
                LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
-               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
                if (BytePerPixelDETC[k] > 0) {
                        LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
                        LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
-                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
                } else {
                        LinesInDETC = 0;
                        FullDETBufferingTimeC = 999999;
                }
 
                ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
-                               - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+                               - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
 
-               if (NumberOfActivePlanes > 1) {
+               if (v->NumberOfActivePlanes > 1) {
                        ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
-                                       - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+                                       - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
                }
 
                if (BytePerPixelDETC[k] > 0) {
                        ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
-                                       - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+                                       - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
 
-                       if (NumberOfActivePlanes > 1) {
+                       if (v->NumberOfActivePlanes > 1) {
                                ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
-                                               - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+                                               - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
                        }
                        v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
                } else {
                        v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
                }
 
-               if (WritebackEnable[k] == true) {
-                       WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
-                                       / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
-                       if (WritebackPixelFormat[k] == dm_444_64) {
+               if (v->WritebackEnable[k] == true) {
+                       WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+                                       / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+                       if (v->WritebackPixelFormat[k] == dm_444_64) {
                                WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
                        }
                        WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5749,14 +5527,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 
        v->MinActiveDRAMClockChangeMargin = 999999;
        PlaneWithMinActiveDRAMClockChangeMargin = 0;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
                        v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
-                       if (BlendingAndTiming[k] == k) {
+                       if (v->BlendingAndTiming[k] == k) {
                                PlaneWithMinActiveDRAMClockChangeMargin = k;
                        } else {
-                               for (j = 0; j < NumberOfActivePlanes; ++j) {
-                                       if (BlendingAndTiming[k] == j) {
+                               for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+                                       if (v->BlendingAndTiming[k] == j) {
                                                PlaneWithMinActiveDRAMClockChangeMargin = j;
                                        }
                                }
@@ -5764,11 +5542,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                }
        }
 
-       *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+       v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
 
        SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
                                && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
                        SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
                }
@@ -5776,25 +5554,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 
        v->TotalNumberOfActiveOTG = 0;
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (BlendingAndTiming[k] == k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (v->BlendingAndTiming[k] == k) {
                        v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
                }
        }
 
        if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
-       } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+       } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
                        || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
        } else {
                *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
        }
 
-       *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
-       *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
-       *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
-       *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+       *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
index 34a5d0f87b5f9e4dce39ba14b04b5a3425e2ef47..4bb3b31ea7e0c87468c81cebe59217f6a82307f1 100644
@@ -194,6 +194,9 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
                dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
                dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
 
+               if (bw_params->dram_channel_width_bytes > 0)
+                       dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
+
                if (bw_params->num_channels > 0)
                        dcn3_14_soc.num_chans = bw_params->num_channels;
 
@@ -262,7 +265,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
        }
 
        if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
-               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
        else
                dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
 }
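
The new dram_channel_width_bytes hunk follows the same guarded-override pattern as the surrounding num_channels code: compiled-in SoC defaults are kept unless the clock manager reports a usable (non-zero) value. A small self-contained sketch of that idea; the struct and field names below are illustrative stand-ins, not the driver's types.

#include <stdio.h>

struct demo_soc_params {
	int dram_channel_width_bytes;
	int num_chans;
};

struct demo_bw_params {
	int dram_channel_width_bytes;	/* 0 means "not reported at runtime" */
	int num_channels;
};

/* Keep the compiled-in default unless the runtime value is known (non-zero). */
static void apply_bw_params(struct demo_soc_params *soc, const struct demo_bw_params *bw)
{
	if (bw->dram_channel_width_bytes > 0)
		soc->dram_channel_width_bytes = bw->dram_channel_width_bytes;

	if (bw->num_channels > 0)
		soc->num_chans = bw->num_channels;
}

int main(void)
{
	struct demo_soc_params soc = { .dram_channel_width_bytes = 4, .num_chans = 2 };
	struct demo_bw_params bw = { .dram_channel_width_bytes = 0, .num_channels = 4 };

	apply_bw_params(&soc, &bw);
	/* Width keeps its default (4) because 0 means "unknown"; channel count updates. */
	printf("width=%d chans=%d\n", soc.dram_channel_width_bytes, soc.num_chans);
	return 0;
}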
index fc4d7474c111e056552444220bf7378d429ba1b3..01f3fad172f33590587b6beca4321455423fd83a 100644
@@ -61,7 +61,7 @@
 // fudge factor for min dcfclk calculation
 #define __DML_MIN_DCFCLK_FACTOR__   1.15
 
-struct {
+typedef struct {
        double DPPCLK;
        double DISPCLK;
        double PixelClock;
@@ -1599,7 +1599,7 @@ static void CalculateDCCConfiguration(
        int segment_order_vert_contiguous_luma;
        int segment_order_vert_contiguous_chroma;
 
-       enum {
+       typedef enum {
                REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
        } RequestType;
        RequestType RequestLuma;
@@ -4071,9 +4071,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
 
        v->SourceFormatPixelAndScanSupport = true;
        for (k = 0; k < v->NumberOfActivePlanes; k++) {
-               if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
-                               || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
-                                               || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) {
+               if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
                        v->SourceFormatPixelAndScanSupport = false;
                }
        }
@@ -7157,12 +7155,13 @@ static double CalculateExtraLatencyBytes(
                        HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
                else
                        HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
-       else
+       } else {
                HostVMDynamicLevels = 0;
+       }
 
        ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
 
-       if (GPUVMEnable == true)
+       if (GPUVMEnable == true) {
                for (k = 0; k < NumberOfActivePlanes; ++k)
                        ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
        }
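
The two hunks above add explicit braces in CalculateExtraLatencyBytes so each branch and loop body is clearly delimited and the else pairs with the intended if. One classic reason to brace nested if/else chains in C is the dangling-else rule, illustrated by the self-contained sketch below; the values and helper names are made up for the example and are not the driver's logic.

#include <stdio.h>

static int levels_without_braces(int vm_enabled, int page_size)
{
	int levels = -1;

	if (vm_enabled)
		if (page_size < 2048)
			levels = 2;
		else		/* binds to "if (page_size < 2048)", not "if (vm_enabled)" */
			levels = 1;

	return levels;		/* vm_enabled == 0 leaves levels at -1, not at a default */
}

static int levels_with_braces(int vm_enabled, int page_size)
{
	int levels;

	if (vm_enabled) {
		if (page_size < 2048)
			levels = 2;
		else
			levels = 1;
	} else {
		levels = 0;	/* the intended "VM disabled" default */
	}

	return levels;
}

int main(void)
{
	/* Prints "-1 vs 0": the unbraced version never takes the intended outer else. */
	printf("%d vs %d\n", levels_without_braces(0, 4096), levels_with_braces(0, 4096));
	return 0;
}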
index cb2025771646b916d6d0d23224889e6d9d3921a2..9a60f27eceaaff1393c155edc31107ac54680b0f 100644
@@ -755,30 +755,18 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
-                       v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
-                                       &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
-                                       mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
-                                       mode_lib->vba.DPPCLKDelaySCL,
-                                       mode_lib->vba.DPPCLKDelaySCLLBOnly,
-                                       mode_lib->vba.DPPCLKDelayCNVCCursor,
-                                       mode_lib->vba.DISPCLKDelaySubtotal,
-                                       (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]),
-                                       mode_lib->vba.OutputFormat[k],
-                                       mode_lib->vba.MaxInterDCNTileRepeaters,
+                       v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
+                                       v,
+                                       k,
+                                       v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+                                       &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
+                                       v->DSCDelay[k],
+                                       (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
                                        dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
                                        v->MaxVStartupLines[k],
-                                       mode_lib->vba.GPUVMMaxPageTableLevels,
-                                       mode_lib->vba.GPUVMEnable,
-                                       mode_lib->vba.HostVMEnable,
-                                       mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
-                                       mode_lib->vba.HostVMMinPageSize,
-                                       mode_lib->vba.DynamicMetadataEnable[k],
-                                       mode_lib->vba.DynamicMetadataVMEnabled,
-                                       mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
-                                       mode_lib->vba.DynamicMetadataTransmittedBytes[k],
                                        v->UrgentLatency,
                                        v->UrgentExtraLatency,
-                                       mode_lib->vba.TCalc,
+                                       v->TCalc,
                                        v->PDEAndMetaPTEBytesFrame[k],
                                        v->MetaRowByte[k],
                                        v->PixelPTEBytesPerRow[k],
@@ -792,8 +780,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                                        v->MaxNumSwathC[k],
                                        v->swath_width_luma_ub[k],
                                        v->swath_width_chroma_ub[k],
-                                       mode_lib->vba.SwathHeightY[k],
-                                       mode_lib->vba.SwathHeightC[k],
+                                       v->SwathHeightY[k],
+                                       v->SwathHeightC[k],
                                        TWait,
                                        /* Output */
                                        &v->DSTXAfterScaler[k],
@@ -1163,58 +1151,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
 
                dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-                       mode_lib->vba.USRRetrainingRequiredFinal,
-                       mode_lib->vba.UsesMALLForPStateChange,
-                       mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
-                       mode_lib->vba.NumberOfActiveSurfaces,
-                       mode_lib->vba.MaxLineBufferLines,
-                       mode_lib->vba.LineBufferSizeFinal,
-                       mode_lib->vba.WritebackInterfaceBufferSize,
-                       mode_lib->vba.DCFCLK,
-                       mode_lib->vba.ReturnBW,
-                       mode_lib->vba.SynchronizeTimingsFinal,
-                       mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-                       mode_lib->vba.DRRDisplay,
-                       v->dpte_group_bytes,
-                       v->meta_row_height,
-                       v->meta_row_height_chroma,
+                       v,
+                       v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
+                       v->DCFCLK,
+                       v->ReturnBW,
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
-                       mode_lib->vba.WritebackChunkSize,
-                       mode_lib->vba.SOCCLK,
+                       v->SOCCLK,
                        v->DCFCLKDeepSleep,
-                       mode_lib->vba.DETBufferSizeY,
-                       mode_lib->vba.DETBufferSizeC,
-                       mode_lib->vba.SwathHeightY,
-                       mode_lib->vba.SwathHeightC,
-                       mode_lib->vba.LBBitPerPixel,
+                       v->DETBufferSizeY,
+                       v->DETBufferSizeC,
+                       v->SwathHeightY,
+                       v->SwathHeightC,
                        v->SwathWidthY,
                        v->SwathWidthC,
-                       mode_lib->vba.HRatio,
-                       mode_lib->vba.HRatioChroma,
-                       mode_lib->vba.vtaps,
-                       mode_lib->vba.VTAPsChroma,
-                       mode_lib->vba.VRatio,
-                       mode_lib->vba.VRatioChroma,
-                       mode_lib->vba.HTotal,
-                       mode_lib->vba.VTotal,
-                       mode_lib->vba.VActive,
-                       mode_lib->vba.PixelClock,
-                       mode_lib->vba.BlendingAndTiming,
-                       mode_lib->vba.DPPPerPlane,
+                       v->DPPPerPlane,
                        v->BytePerPixelDETY,
                        v->BytePerPixelDETC,
                        v->DSTXAfterScaler,
                        v->DSTYAfterScaler,
-                       mode_lib->vba.WritebackEnable,
-                       mode_lib->vba.WritebackPixelFormat,
-                       mode_lib->vba.WritebackDestinationWidth,
-                       mode_lib->vba.WritebackDestinationHeight,
-                       mode_lib->vba.WritebackSourceHeight,
                        v->UnboundedRequestEnabled,
                        v->CompressedBufferSizeInkByte,
 
                        /* Output */
-                       &v->Watermark,
                        &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
                        v->MaxActiveDRAMClockChangeLatencySupported,
                        v->SubViewportLinesNeededInMALL,
@@ -1806,10 +1764,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                &mode_lib->vba.Read256BlockHeightC[k],
                                &mode_lib->vba.Read256BlockWidthY[k],
                                &mode_lib->vba.Read256BlockWidthC[k],
-                               &mode_lib->vba.MicroTileHeightY[k],
-                               &mode_lib->vba.MicroTileHeightC[k],
-                               &mode_lib->vba.MicroTileWidthY[k],
-                               &mode_lib->vba.MicroTileWidthC[k]);
+                               &mode_lib->vba.MacroTileHeightY[k],
+                               &mode_lib->vba.MacroTileHeightC[k],
+                               &mode_lib->vba.MacroTileWidthY[k],
+                               &mode_lib->vba.MacroTileWidthC[k]);
        }
 
        /*Bandwidth Support Check*/
@@ -2659,10 +2617,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                        mode_lib->vba.Read256BlockWidthC,
                        mode_lib->vba.Read256BlockHeightY,
                        mode_lib->vba.Read256BlockHeightC,
-                       mode_lib->vba.MicroTileWidthY,
-                       mode_lib->vba.MicroTileWidthC,
-                       mode_lib->vba.MicroTileHeightY,
-                       mode_lib->vba.MicroTileHeightC,
+                       mode_lib->vba.MacroTileWidthY,
+                       mode_lib->vba.MacroTileWidthC,
+                       mode_lib->vba.MacroTileHeightY,
+                       mode_lib->vba.MacroTileHeightC,
 
                        /* Output */
                        mode_lib->vba.SurfaceSizeInMALL,
@@ -2709,10 +2667,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
@@ -3258,63 +3216,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                                        mode_lib->vba.NoTimeForPrefetch[i][j][k] =
                                                dml32_CalculatePrefetchSchedule(
+                                                       v,
+                                                       k,
                                                        v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
-                                                       mode_lib->vba.DSCDelayPerState[i][k],
-                                                       mode_lib->vba.DPPCLKDelaySubtotal +
-                                                               mode_lib->vba.DPPCLKDelayCNVCFormater,
-                                                       mode_lib->vba.DPPCLKDelaySCL,
-                                                       mode_lib->vba.DPPCLKDelaySCLLBOnly,
-                                                       mode_lib->vba.DPPCLKDelayCNVCCursor,
-                                                       mode_lib->vba.DISPCLKDelaySubtotal,
-                                                       mode_lib->vba.SwathWidthYThisState[k] /
-                                                               mode_lib->vba.HRatio[k],
-                                                       mode_lib->vba.OutputFormat[k],
-                                                       mode_lib->vba.MaxInterDCNTileRepeaters,
-                                                       dml_min(mode_lib->vba.MaxVStartup,
-                                                                       mode_lib->vba.MaximumVStartup[i][j][k]),
-                                                       mode_lib->vba.MaximumVStartup[i][j][k],
-                                                       mode_lib->vba.GPUVMMaxPageTableLevels,
-                                                       mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable,
-                                                       mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
-                                                       mode_lib->vba.HostVMMinPageSize,
-                                                       mode_lib->vba.DynamicMetadataEnable[k],
-                                                       mode_lib->vba.DynamicMetadataVMEnabled,
-                                                       mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
-                                                       mode_lib->vba.DynamicMetadataTransmittedBytes[k],
-                                                       mode_lib->vba.UrgLatency[i],
-                                                       mode_lib->vba.ExtraLatency,
-                                                       mode_lib->vba.TimeCalc,
-                                                       mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
-                                                       mode_lib->vba.MetaRowBytes[i][j][k],
-                                                       mode_lib->vba.DPTEBytesPerRow[i][j][k],
-                                                       mode_lib->vba.PrefetchLinesY[i][j][k],
-                                                       mode_lib->vba.SwathWidthYThisState[k],
-                                                       mode_lib->vba.PrefillY[k],
-                                                       mode_lib->vba.MaxNumSwY[k],
-                                                       mode_lib->vba.PrefetchLinesC[i][j][k],
-                                                       mode_lib->vba.SwathWidthCThisState[k],
-                                                       mode_lib->vba.PrefillC[k],
-                                                       mode_lib->vba.MaxNumSwC[k],
-                                                       mode_lib->vba.swath_width_luma_ub_this_state[k],
-                                                       mode_lib->vba.swath_width_chroma_ub_this_state[k],
-                                                       mode_lib->vba.SwathHeightYThisState[k],
-                                                       mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait,
+                                                       v->DSCDelayPerState[i][k],
+                                                       v->SwathWidthYThisState[k] / v->HRatio[k],
+                                                       dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+                                                       v->MaximumVStartup[i][j][k],
+                                                       v->UrgLatency[i],
+                                                       v->ExtraLatency,
+                                                       v->TimeCalc,
+                                                       v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+                                                       v->MetaRowBytes[i][j][k],
+                                                       v->DPTEBytesPerRow[i][j][k],
+                                                       v->PrefetchLinesY[i][j][k],
+                                                       v->SwathWidthYThisState[k],
+                                                       v->PrefillY[k],
+                                                       v->MaxNumSwY[k],
+                                                       v->PrefetchLinesC[i][j][k],
+                                                       v->SwathWidthCThisState[k],
+                                                       v->PrefillC[k],
+                                                       v->MaxNumSwC[k],
+                                                       v->swath_width_luma_ub_this_state[k],
+                                                       v->swath_width_chroma_ub_this_state[k],
+                                                       v->SwathHeightYThisState[k],
+                                                       v->SwathHeightCThisState[k], v->TWait,
 
                                                        /* Output */
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
-                                                       &mode_lib->vba.LineTimesForPrefetch[k],
-                                                       &mode_lib->vba.PrefetchBW[k],
-                                                       &mode_lib->vba.LinesForMetaPTE[k],
-                                                       &mode_lib->vba.LinesForMetaAndDPTERow[k],
-                                                       &mode_lib->vba.VRatioPreY[i][j][k],
-                                                       &mode_lib->vba.VRatioPreC[i][j][k],
-                                                       &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k],
-                                                       &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k],
-                                                       &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k],
-                                                       &mode_lib->vba.Tno_bw[k],
-                                                       &mode_lib->vba.prefetch_vmrow_bw[k],
+                                                       &v->LineTimesForPrefetch[k],
+                                                       &v->PrefetchBW[k],
+                                                       &v->LinesForMetaPTE[k],
+                                                       &v->LinesForMetaAndDPTERow[k],
+                                                       &v->VRatioPreY[i][j][k],
+                                                       &v->VRatioPreC[i][j][k],
+                                                       &v->RequiredPrefetchPixelDataBWLuma[0][0][k],
+                                                       &v->RequiredPrefetchPixelDataBWChroma[0][0][k],
+                                                       &v->NoTimeForDynamicMetadata[i][j][k],
+                                                       &v->Tno_bw[k],
+                                                       &v->prefetch_vmrow_bw[k],
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0],         // double *Tdmdl_vm
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1],         // double *Tdmdl
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2],         // double *TSetup
@@ -3557,62 +3499,32 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                        {
                                dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-                                               mode_lib->vba.USRRetrainingRequiredFinal,
-                                               mode_lib->vba.UsesMALLForPStateChange,
-                                               mode_lib->vba.PrefetchModePerState[i][j],
-                                               mode_lib->vba.NumberOfActiveSurfaces,
-                                               mode_lib->vba.MaxLineBufferLines,
-                                               mode_lib->vba.LineBufferSizeFinal,
-                                               mode_lib->vba.WritebackInterfaceBufferSize,
-                                               mode_lib->vba.DCFCLKState[i][j],
-                                               mode_lib->vba.ReturnBWPerState[i][j],
-                                               mode_lib->vba.SynchronizeTimingsFinal,
-                                               mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-                                               mode_lib->vba.DRRDisplay,
-                                               mode_lib->vba.dpte_group_bytes,
-                                               mode_lib->vba.meta_row_height,
-                                               mode_lib->vba.meta_row_height_chroma,
+                                               v,
+                                               v->PrefetchModePerState[i][j],
+                                               v->DCFCLKState[i][j],
+                                               v->ReturnBWPerState[i][j],
                                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
-                                               mode_lib->vba.WritebackChunkSize,
-                                               mode_lib->vba.SOCCLKPerState[i],
-                                               mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j],
-                                               mode_lib->vba.DETBufferSizeYThisState,
-                                               mode_lib->vba.DETBufferSizeCThisState,
-                                               mode_lib->vba.SwathHeightYThisState,
-                                               mode_lib->vba.SwathHeightCThisState,
-                                               mode_lib->vba.LBBitPerPixel,
-                                               mode_lib->vba.SwathWidthYThisState, // 24
-                                               mode_lib->vba.SwathWidthCThisState,
-                                               mode_lib->vba.HRatio,
-                                               mode_lib->vba.HRatioChroma,
-                                               mode_lib->vba.vtaps,
-                                               mode_lib->vba.VTAPsChroma,
-                                               mode_lib->vba.VRatio,
-                                               mode_lib->vba.VRatioChroma,
-                                               mode_lib->vba.HTotal,
-                                               mode_lib->vba.VTotal,
-                                               mode_lib->vba.VActive,
-                                               mode_lib->vba.PixelClock,
-                                               mode_lib->vba.BlendingAndTiming,
-                                               mode_lib->vba.NoOfDPPThisState,
-                                               mode_lib->vba.BytePerPixelInDETY,
-                                               mode_lib->vba.BytePerPixelInDETC,
+                                               v->SOCCLKPerState[i],
+                                               v->ProjectedDCFCLKDeepSleep[i][j],
+                                               v->DETBufferSizeYThisState,
+                                               v->DETBufferSizeCThisState,
+                                               v->SwathHeightYThisState,
+                                               v->SwathHeightCThisState,
+                                               v->SwathWidthYThisState, // 24
+                                               v->SwathWidthCThisState,
+                                               v->NoOfDPPThisState,
+                                               v->BytePerPixelInDETY,
+                                               v->BytePerPixelInDETC,
                                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
                                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
-                                               mode_lib->vba.WritebackEnable,
-                                               mode_lib->vba.WritebackPixelFormat,
-                                               mode_lib->vba.WritebackDestinationWidth,
-                                               mode_lib->vba.WritebackDestinationHeight,
-                                               mode_lib->vba.WritebackSourceHeight,
-                                               mode_lib->vba.UnboundedRequestEnabledThisState,
-                                               mode_lib->vba.CompressedBufferSizeInkByteThisState,
+                                               v->UnboundedRequestEnabledThisState,
+                                               v->CompressedBufferSizeInkByteThisState,
 
                                                /* Output */
-                                               &mode_lib->vba.Watermark, // Store the values in vba
-                                               &mode_lib->vba.DRAMClockChangeSupport[i][j],
+                                               &v->DRAMClockChangeSupport[i][j],
                                                &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
                                                &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
-                                               &mode_lib->vba.FCLKChangeSupport[i][j],
+                                               &v->FCLKChangeSupport[i][j],
                                                &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
                                                &mode_lib->vba.USRRetrainingSupport[i][j],
                                                mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
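
In the call sites above, dml32_CalculatePrefetchSchedule and dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport now receive the vba_vars_st pointer (plus, for the prefetch schedule, the plane index k) instead of long lists of per-plane arguments that already live in that context. A minimal sketch of the shape of that change; the cut-down struct and the single derived quantity below are hypothetical stand-ins for the real parameter lists.

#include <stdio.h>

#define DEMO_MAX_PLANES 6

struct demo_vba_vars {
	unsigned int NumberOfActivePlanes;
	unsigned int HTotal[DEMO_MAX_PLANES];
	double PixelClock[DEMO_MAX_PLANES];
};

/* Before: every per-plane value is a separate argument (the real callers passed dozens). */
static double line_time_old(unsigned int htotal, double pixel_clock)
{
	return htotal / pixel_clock;
}

/* After: the callee dereferences the shared context itself, keyed by the plane index k. */
static double line_time_new(const struct demo_vba_vars *v, unsigned int k)
{
	return v->HTotal[k] / v->PixelClock[k];
}

int main(void)
{
	struct demo_vba_vars v = {
		.NumberOfActivePlanes = 1,
		.HTotal = { 2200 },
		.PixelClock = { 594.0 },	/* MHz, so the result is in microseconds */
	};
	unsigned int k;

	for (k = 0; k < v.NumberOfActivePlanes; k++) {
		printf("old=%f new=%f\n",
		       line_time_old(v.HTotal[k], v.PixelClock[k]),
		       line_time_new(&v, k));
	}
	return 0;
}

Besides shortening the calls, passing one context pointer means the callers no longer materialize dozens of argument copies for these deeply nested DML routines.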
index 05fc14a47fba91b86cc5579c823da689bc719a59..59c2547d01b1248b790a7e917dd16b6ad3b4e9ba 100644
@@ -3363,28 +3363,14 @@ double dml32_CalculateExtraLatency(
 } // CalculateExtraLatency
 
 bool dml32_CalculatePrefetchSchedule(
+               struct vba_vars_st *v,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                DmlPipe *myPipe,
                unsigned int DSCDelay,
-               double DPPCLKDelaySubtotalPlusCNVCFormater,
-               double DPPCLKDelaySCL,
-               double DPPCLKDelaySCLLBOnly,
-               double DPPCLKDelayCNVCCursor,
-               double DISPCLKDelaySubtotal,
                unsigned int DPP_RECOUT_WIDTH,
-               enum output_format_class OutputFormat,
-               unsigned int MaxInterDCNTileRepeaters,
                unsigned int VStartup,
                unsigned int MaxVStartup,
-               unsigned int GPUVMPageTableLevels,
-               bool GPUVMEnable,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               double HostVMMinPageSize,
-               bool DynamicMetadataEnable,
-               bool DynamicMetadataVMEnabled,
-               int DynamicMetadataLinesBeforeActiveRequired,
-               unsigned int DynamicMetadataTransmittedBytes,
                double UrgentLatency,
                double UrgentExtraLatency,
                double TCalc,
@@ -3425,6 +3411,7 @@ bool dml32_CalculatePrefetchSchedule(
                double   *VUpdateWidthPix,
                double   *VReadyOffsetPix)
 {
+       double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
        bool MyError = false;
        unsigned int DPPCycles, DISPCLKCycles;
        double DSTTotalPixelsAfterScaler;
@@ -3461,27 +3448,27 @@ bool dml32_CalculatePrefetchSchedule(
        double  Tsw_est1 = 0;
        double  Tsw_est3 = 0;
 
-       if (GPUVMEnable == true && HostVMEnable == true)
-               HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+       if (v->GPUVMEnable == true && v->HostVMEnable == true)
+               HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
        else
                HostVMDynamicLevelsTrips = 0;
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
-       dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
+       dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
+       dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
        dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
-       dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
-                       __func__, HostVMEnable, HostVMInefficiencyFactor);
+       dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
+                       __func__, v->HostVMEnable, HostVMInefficiencyFactor);
 #endif
        dml32_CalculateVUpdateAndDynamicMetadataParameters(
-                       MaxInterDCNTileRepeaters,
+                       v->MaxInterDCNTileRepeaters,
                        myPipe->Dppclk,
                        myPipe->Dispclk,
                        myPipe->DCFClkDeepSleep,
                        myPipe->PixelClock,
                        myPipe->HTotal,
                        myPipe->VBlank,
-                       DynamicMetadataTransmittedBytes,
-                       DynamicMetadataLinesBeforeActiveRequired,
+                       v->DynamicMetadataTransmittedBytes[k],
+                       v->DynamicMetadataLinesBeforeActiveRequired[k],
                        myPipe->InterlaceEnable,
                        myPipe->ProgressiveToInterlaceUnitInOPP,
                        TSetup,
@@ -3496,19 +3483,19 @@ bool dml32_CalculatePrefetchSchedule(
 
        LineTime = myPipe->HTotal / myPipe->PixelClock;
        trip_to_mem = UrgentLatency;
-       Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+       Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
 
-       if (DynamicMetadataVMEnabled == true)
+       if (v->DynamicMetadataVMEnabled == true)
                *Tdmdl = TWait + Tvm_trips + trip_to_mem;
        else
                *Tdmdl = TWait + UrgentExtraLatency;
 
 #ifdef __DML_VBA_ALLOW_DELTA__
-       if (DynamicMetadataEnable == false)
+       if (v->DynamicMetadataEnable[k] == false)
                *Tdmdl = 0.0;
 #endif
 
-       if (DynamicMetadataEnable == true) {
+       if (v->DynamicMetadataEnable[k] == true) {
                if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
                        *NotEnoughTimeForDynamicMetadata = true;
 #ifdef __DML_VBA_DEBUG__
@@ -3528,17 +3515,17 @@ bool dml32_CalculatePrefetchSchedule(
                *NotEnoughTimeForDynamicMetadata = false;
        }
 
-       *Tdmdl_vm =  (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
-                       GPUVMEnable == true ? TWait + Tvm_trips : 0);
+       *Tdmdl_vm =  (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
+                       v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
 
        if (myPipe->ScalerEnabled)
-               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
        else
-               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
 
-       DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+       DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
 
-       DISPCLKCycles = DISPCLKDelaySubtotal;
+       DISPCLKCycles = v->DISPCLKDelaySubtotal;
 
        if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
                return true;
@@ -3564,7 +3551,7 @@ bool dml32_CalculatePrefetchSchedule(
        dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__,  *DSTXAfterScaler);
 #endif
 
-       if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
+       if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
                *DSTYAfterScaler = 1;
        else
                *DSTYAfterScaler = 0;
@@ -3581,13 +3568,13 @@ bool dml32_CalculatePrefetchSchedule(
 
        Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
 
-       if (GPUVMEnable == true) {
+       if (v->GPUVMEnable == true) {
                Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
                Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
-               if (GPUVMPageTableLevels >= 3) {
+               if (v->GPUVMMaxPageTableLevels >= 3) {
                        *Tno_bw = UrgentExtraLatency + trip_to_mem *
-                                       (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
-               } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
+                                       (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
+               } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
                        Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
                                        4.0 * LineTime; // VBA_ERROR
                        *Tno_bw = UrgentExtraLatency;
@@ -3622,7 +3609,7 @@ bool dml32_CalculatePrefetchSchedule(
        min_Lsw = dml_max(min_Lsw, 1.0);
        Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
 
-       if (GPUVMEnable == true) {
+       if (v->GPUVMEnable == true) {
                Tvm_oto = dml_max3(
                                Tvm_trips,
                                *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
@@ -3630,7 +3617,7 @@ bool dml32_CalculatePrefetchSchedule(
        } else
                Tvm_oto = LineTime / 4.0;
 
-       if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+       if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
                Tr0_oto = dml_max4(
                                Tr0_trips,
                                (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
@@ -3833,7 +3820,7 @@ bool dml32_CalculatePrefetchSchedule(
 #endif
 
                        if (prefetch_bw_equ > 0) {
-                               if (GPUVMEnable == true) {
+                               if (v->GPUVMEnable == true) {
                                        Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
                                                        HostVMInefficiencyFactor / prefetch_bw_equ,
                                                        Tvm_trips, LineTime / 4);
@@ -3841,7 +3828,7 @@ bool dml32_CalculatePrefetchSchedule(
                                        Tvm_equ = LineTime / 4;
                                }
 
-                               if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+                               if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
                                        Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
                                                        HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
                                                        (LineTime - Tvm_equ) / 2, LineTime / 4);
@@ -4206,58 +4193,28 @@ void dml32_CalculateFlipSchedule(
 } // CalculateFlipSchedule
 
 void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-               bool USRRetrainingRequiredFinal,
-               enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+               struct vba_vars_st *v,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActiveSurfaces,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizeTimingsFinal,
-               bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-               bool DRRDisplay[],
-               unsigned int dpte_group_bytes[],
-               unsigned int meta_row_height[],
-               unsigned int meta_row_height_chroma[],
                SOCParametersList mmSOCParameters,
-               unsigned int WritebackChunkSize,
                double SOCCLK,
                double DCFClkDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int VTaps[],
-               unsigned int VTapsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               unsigned int VTotal[],
-               unsigned int VActive[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerSurface[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
                double DSTXAfterScaler[],
                double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                unsigned int CompressedBufferSizeInkByte,
 
                /* Output */
-               Watermarks *Watermark,
                enum clock_change_support *DRAMClockChangeSupport,
                double MaxActiveDRAMClockChangeLatencySupported[],
                unsigned int SubViewportLinesNeededInMALL[],
@@ -4299,136 +4256,136 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
        unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
        unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
 
-       Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
-       Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+       v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
+       v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
                        + mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
-       Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark;
-       Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark;
-       Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+       v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
+       v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
+       v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
                        + 10 / DCFClkDeepSleep;
-       Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+       v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
                        + 10 / DCFClkDeepSleep;
-       Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+       v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
                        + 10 / DCFClkDeepSleep;
-       Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+       v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
                        + mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
        dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
        dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
-       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark);
-       dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark);
-       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark);
-       dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark);
-       dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark);
-       dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark);
-       dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark);
+       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
+       dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
+       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
+       dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
+       dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
+       dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
+       dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
        dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
-                       __func__, Watermark->Z8StutterEnterPlusExitWatermark);
+                       __func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
 #endif
 
 
        TotalActiveWriteback = 0;
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if (WritebackEnable[k] == true)
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if (v->WritebackEnable[k] == true)
                        TotalActiveWriteback = TotalActiveWriteback + 1;
        }
 
        if (TotalActiveWriteback <= 1) {
-               Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
+               v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
        } else {
-               Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
-                               + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+                               + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
-       if (USRRetrainingRequiredFinal)
-               Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+       if (v->USRRetrainingRequiredFinal)
+               v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
                                + mmSOCParameters.USRRetrainingLatency;
 
        if (TotalActiveWriteback <= 1) {
-               Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+               v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
                                + mmSOCParameters.WritebackLatency;
-               Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+               v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
                                + mmSOCParameters.WritebackLatency;
        } else {
-               Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
-                               + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
-               Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
-                               + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK;
+               v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+                               + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+                               + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
        }
 
-       if (USRRetrainingRequiredFinal)
-               Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark
+       if (v->USRRetrainingRequiredFinal)
+               v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
                                + mmSOCParameters.USRRetrainingLatency;
 
-       if (USRRetrainingRequiredFinal)
-               Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark
+       if (v->USRRetrainingRequiredFinal)
+               v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
                                + mmSOCParameters.USRRetrainingLatency;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
-                       __func__, Watermark->WritebackDRAMClockChangeWatermark);
-       dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark);
-       dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark);
-       dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal);
+                       __func__, v->Watermark.WritebackDRAMClockChangeWatermark);
+       dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
+       dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
+       dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
        dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
 #endif
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
-                               SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
+                               SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
 
-               LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
-               LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+               LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+               LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
 
 
 #ifdef __DML_VBA_DEBUG__
-               dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines);
-               dml_print("DML::%s: k=%d, LineBufferSize     = %d\n", __func__, k, LineBufferSize);
-               dml_print("DML::%s: k=%d, LBBitPerPixel      = %d\n", __func__, k, LBBitPerPixel[k]);
-               dml_print("DML::%s: k=%d, HRatio             = %f\n", __func__, k, HRatio[k]);
-               dml_print("DML::%s: k=%d, VTaps              = %d\n", __func__, k, VTaps[k]);
+               dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
+               dml_print("DML::%s: k=%d, v->LineBufferSizeFinal     = %d\n", __func__, k, v->LineBufferSizeFinal);
+               dml_print("DML::%s: k=%d, v->LBBitPerPixel      = %d\n", __func__, k, v->LBBitPerPixel[k]);
+               dml_print("DML::%s: k=%d, v->HRatio             = %f\n", __func__, k, v->HRatio[k]);
+               dml_print("DML::%s: k=%d, v->vtaps              = %d\n", __func__, k, v->vtaps[k]);
 #endif
 
-               EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
-               EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+               EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
                EffectiveDETBufferSizeY = DETBufferSizeY[k];
 
                if (UnboundedRequestEnabled) {
                        EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
                                        + CompressedBufferSizeInkByte * 1024
-                                                       * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
-                                                       / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+                                                       * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
+                                                       / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
                }
 
                LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
                LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
-               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
 
                ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
-                               - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
+                               - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
 
-               if (NumberOfActiveSurfaces > 1) {
+               if (v->NumberOfActiveSurfaces > 1) {
                        ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
-                                       - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
-                                                       / PixelClock[k] / VRatio[k];
+                                       - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+                                                       / v->PixelClock[k] / v->VRatio[k];
                }
 
                if (BytePerPixelDETC[k] > 0) {
                        LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
                        LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
-                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
-                                       / VRatioChroma[k];
+                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
+                                       / v->VRatioChroma[k];
                        ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
-                                       - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
-                                                       / PixelClock[k];
-                       if (NumberOfActiveSurfaces > 1) {
+                                       - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
+                                                       / v->PixelClock[k];
+                       if (v->NumberOfActiveSurfaces > 1) {
                                ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
-                                               - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
-                                                               / PixelClock[k] / VRatioChroma[k];
+                                               - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
+                                                               / v->PixelClock[k] / v->VRatioChroma[k];
                        }
                        ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
                                        ActiveClockChangeLatencyHidingC);
@@ -4436,24 +4393,24 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                        ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
                }
 
-               ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
-                               - Watermark->DRAMClockChangeWatermark;
-               ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
-                               - Watermark->FCLKChangeWatermark;
-               USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
-
-               if (WritebackEnable[k]) {
-                       WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
-                                       / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
-                                                       / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
-                       if (WritebackPixelFormat[k] == dm_444_64)
+               ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+                               - v->Watermark.DRAMClockChangeWatermark;
+               ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+                               - v->Watermark.FCLKChangeWatermark;
+               USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
+
+               if (v->WritebackEnable[k]) {
+                       WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+                                       / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+                                                       / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+                       if (v->WritebackPixelFormat[k] == dm_444_64)
                                WritebackLatencyHiding = WritebackLatencyHiding / 2;
 
                        WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
-                                       - Watermark->WritebackDRAMClockChangeWatermark;
+                                       - v->Watermark.WritebackDRAMClockChangeWatermark;
 
                        WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
-                                       - Watermark->WritebackFCLKChangeWatermark;
+                                       - v->Watermark.WritebackFCLKChangeWatermark;
 
                        ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
                                        WritebackFCLKChangeLatencyMargin);
@@ -4461,22 +4418,22 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                                        WritebackDRAMClockChangeLatencyMargin);
                }
                MaxActiveDRAMClockChangeLatencySupported[k] =
-                               (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
+                               (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
                                                0 :
                                                (ActiveDRAMClockChangeLatencyMargin[k]
                                                                + mmSOCParameters.DRAMClockChangeLatency);
        }
 
-       for (i = 0; i < NumberOfActiveSurfaces; ++i) {
-               for (j = 0; j < NumberOfActiveSurfaces; ++j) {
+       for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
+               for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
                        if (i == j ||
-                                       (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) ||
-                                       (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) ||
-                                       (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) ||
-                                       (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] &&
-                                       HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
-                                       VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
-                                       (DRRDisplay[i] || DRRDisplay[j]))) {
+                                       (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
+                                       (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
+                                       (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
+                                       (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
+                                       v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
+                                       v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
+                                       (v->DRRDisplay[i] || v->DRRDisplay[j]))) {
                                SynchronizedSurfaces[i][j] = true;
                        } else {
                                SynchronizedSurfaces[i][j] = false;
@@ -4484,8 +4441,8 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                }
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
                                (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
                                ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
                        FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
@@ -4497,9 +4454,9 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
        *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
 
        SameTimingForFCLKChange = true;
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
                if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
-                       if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+                       if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
                                        (SameTimingForFCLKChange ||
                                        ActiveFCLKChangeLatencyMargin[k] <
                                        SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
@@ -4519,17 +4476,17 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
        }
 
        *USRRetrainingSupport = true;
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
                                (USRRetrainingLatencyMargin[k] < 0)) {
                        *USRRetrainingSupport = false;
                }
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
-                               UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
-                               UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
+                               v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
+                               v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
                                ActiveDRAMClockChangeLatencyMargin[k] < 0) {
                        if (PrefetchMode > 0) {
                                DRAMClockChangeSupportNumber = 2;
@@ -4543,10 +4500,10 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                }
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
                        DRAMClockChangeMethod = 1;
-               else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
+               else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
                        DRAMClockChangeMethod = 2;
        }
 
@@ -4573,16 +4530,16 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                        *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
                unsigned int dst_y_pstate;
                unsigned int src_y_pstate_l;
                unsigned int src_y_pstate_c;
                unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
 
-               dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
-               src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
+               dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
+               src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
                src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
-               sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
+               sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
 
 #ifdef __DML_VBA_DEBUG__
 dml_print("DML::%s: k=%d, DETBufferSizeY               = %d\n", __func__, k, DETBufferSizeY[k]);
@@ -4593,21 +4550,21 @@ dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY  = %d\n", __func__, k, LBL
 dml_print("DML::%s: k=%d, dst_y_pstate      = %d\n", __func__, k, dst_y_pstate);
 dml_print("DML::%s: k=%d, src_y_pstate_l    = %d\n", __func__, k, src_y_pstate_l);
 dml_print("DML::%s: k=%d, src_y_ahead_l     = %d\n", __func__, k, src_y_ahead_l);
-dml_print("DML::%s: k=%d, meta_row_height   = %d\n", __func__, k, meta_row_height[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height   = %d\n", __func__, k, v->meta_row_height[k]);
 dml_print("DML::%s: k=%d, sub_vp_lines_l    = %d\n", __func__, k, sub_vp_lines_l);
 #endif
                SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
 
                if (BytePerPixelDETC[k] > 0) {
-                       src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
+                       src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
                        src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
-                       sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
+                       sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
                        SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
 
 #ifdef __DML_VBA_DEBUG__
 dml_print("DML::%s: k=%d, src_y_pstate_c            = %d\n", __func__, k, src_y_pstate_c);
 dml_print("DML::%s: k=%d, src_y_ahead_c             = %d\n", __func__, k, src_y_ahead_c);
-dml_print("DML::%s: k=%d, meta_row_height_chroma    = %d\n", __func__, k, meta_row_height_chroma[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height_chroma    = %d\n", __func__, k, v->meta_row_height_chroma[k]);
 dml_print("DML::%s: k=%d, sub_vp_lines_c            = %d\n", __func__, k, sub_vp_lines_c);
 #endif
                }
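
The dml32 hunks above replace a long scalar/array parameter list with a single struct vba_vars_st *v and read fields such as v->HTotal[k] and v->PixelClock[k] directly. A minimal standalone sketch of that "pass one context struct" refactor pattern, using toy names rather than the real DML types:

#include <stdio.h>

/* Toy context struct standing in for the real vba_vars_st; the field names
 * and array sizes here are illustrative only. */
struct ctx {
	unsigned int num_surfaces;
	double h_total[4];      /* pixels per line */
	double pixel_clock[4];  /* MHz */
};

/* Before the refactor this would have taken HTotal[] and PixelClock[] as
 * separate parameters; afterwards the context pointer is enough. */
static double line_time_us(const struct ctx *c, unsigned int k)
{
	return c->h_total[k] / c->pixel_clock[k];   /* HTotal[k] / PixelClock[k] */
}

int main(void)
{
	struct ctx c = { .num_surfaces = 1, .h_total = { 2200.0 }, .pixel_clock = { 594.0 } };
	unsigned int k;

	for (k = 0; k < c.num_surfaces; ++k)
		printf("surface %u line time: %.3f us\n", k, line_time_us(&c, k));
	return 0;
}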
index d293856ba906b4032b5e461884a37392ae01fa18..924e361ad24357ea0dda74c95a492abdf5707b97 100644 (file)
@@ -30,6 +30,7 @@
 #include "os_types.h"
 #include "../dc_features.h"
 #include "../display_mode_structs.h"
+#include "dml/display_mode_vba.h"
 
 unsigned int dml32_dscceComputeDelay(
                unsigned int bpc,
@@ -712,28 +713,14 @@ double dml32_CalculateExtraLatency(
                unsigned int HostVMMaxNonCachedPageTableLevels);
 
 bool dml32_CalculatePrefetchSchedule(
+               struct vba_vars_st *v,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                DmlPipe *myPipe,
                unsigned int DSCDelay,
-               double DPPCLKDelaySubtotalPlusCNVCFormater,
-               double DPPCLKDelaySCL,
-               double DPPCLKDelaySCLLBOnly,
-               double DPPCLKDelayCNVCCursor,
-               double DISPCLKDelaySubtotal,
                unsigned int DPP_RECOUT_WIDTH,
-               enum output_format_class OutputFormat,
-               unsigned int MaxInterDCNTileRepeaters,
                unsigned int VStartup,
                unsigned int MaxVStartup,
-               unsigned int GPUVMPageTableLevels,
-               bool GPUVMEnable,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               double HostVMMinPageSize,
-               bool DynamicMetadataEnable,
-               bool DynamicMetadataVMEnabled,
-               int DynamicMetadataLinesBeforeActiveRequired,
-               unsigned int DynamicMetadataTransmittedBytes,
                double UrgentLatency,
                double UrgentExtraLatency,
                double TCalc,
@@ -807,58 +794,28 @@ void dml32_CalculateFlipSchedule(
                bool *ImmediateFlipSupportedForPipe);
 
 void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-               bool USRRetrainingRequiredFinal,
-               enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+               struct vba_vars_st *v,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActiveSurfaces,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizeTimingsFinal,
-               bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-               bool DRRDisplay[],
-               unsigned int dpte_group_bytes[],
-               unsigned int meta_row_height[],
-               unsigned int meta_row_height_chroma[],
                SOCParametersList mmSOCParameters,
-               unsigned int WritebackChunkSize,
                double SOCCLK,
                double DCFClkDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int VTaps[],
-               unsigned int VTapsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               unsigned int VTotal[],
-               unsigned int VActive[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerSurface[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
                double DSTXAfterScaler[],
                double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                unsigned int CompressedBufferSizeInkByte,
 
                /* Output */
-               Watermarks *Watermark,
                enum clock_change_support *DRAMClockChangeSupport,
                double MaxActiveDRAMClockChangeLatencySupported[],
                unsigned int SubViewportLinesNeededInMALL[],
index 5d27ff0ebb5fa17e5c2ea7e4cce14af62191f9eb..f5400eda07a53396ad9eafb09bcf2918dbb81e8d 100644 (file)
@@ -35,6 +35,8 @@
 #include "dcn30/display_rq_dlg_calc_30.h"
 #include "dcn31/display_mode_vba_31.h"
 #include "dcn31/display_rq_dlg_calc_31.h"
+#include "dcn314/display_mode_vba_314.h"
+#include "dcn314/display_rq_dlg_calc_314.h"
 #include "dcn32/display_mode_vba_32.h"
 #include "dcn32/display_rq_dlg_calc_32.h"
 #include "dml_logger.h"
@@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = {
        .rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
 };
 
+const struct dml_funcs dml314_funcs = {
+       .validate = dml314_ModeSupportAndSystemConfigurationFull,
+       .recalculate = dml314_recalculate,
+       .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
+       .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
+};
+
 const struct dml_funcs dml32_funcs = {
        .validate = dml32_ModeSupportAndSystemConfigurationFull,
     .recalculate = dml32_recalculate,
@@ -107,6 +116,9 @@ void dml_init_instance(struct display_mode_lib *lib,
        case DML_PROJECT_DCN31_FPGA:
                lib->funcs = dml31_funcs;
                break;
+       case DML_PROJECT_DCN314:
+               lib->funcs = dml314_funcs;
+               break;
        case DML_PROJECT_DCN32:
                lib->funcs = dml32_funcs;
                break;
index 2bdd6ed22611d266223ca35f59a14e0d5cd99767..b1878a1440e2bf199bb3bd39dd75002bee1816e6 100644 (file)
@@ -41,6 +41,7 @@ enum dml_project {
        DML_PROJECT_DCN30,
        DML_PROJECT_DCN31,
        DML_PROJECT_DCN31_FPGA,
+       DML_PROJECT_DCN314,
        DML_PROJECT_DCN32,
 };
 
index 492aec634b685815a40c8cb304ae45427d98f323..2051ddaa641a78c57a64ec9d325f301eee09f4d1 100644 (file)
@@ -651,10 +651,10 @@ struct vba_vars_st {
 
        unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
        double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
-       unsigned int MicroTileHeightY[DC__NUM_DPP__MAX];
-       unsigned int MicroTileHeightC[DC__NUM_DPP__MAX];
-       unsigned int MicroTileWidthY[DC__NUM_DPP__MAX];
-       unsigned int MicroTileWidthC[DC__NUM_DPP__MAX];
+       unsigned int MacroTileHeightY[DC__NUM_DPP__MAX];
+       unsigned int MacroTileHeightC[DC__NUM_DPP__MAX];
+       unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+       unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
        bool ImmediateFlipRequiredFinal;
        bool DCCProgrammingAssumesScanDirectionUnknownFinal;
        bool EnoughWritebackUnits;
@@ -800,8 +800,6 @@ struct vba_vars_st {
        double PSCL_FACTOR[DC__NUM_DPP__MAX];
        double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
        double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
-       unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
-       unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
        double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
        double AlignedYPitch[DC__NUM_DPP__MAX];
        double AlignedCPitch[DC__NUM_DPP__MAX];
index 5d2b028e5dadf9564494ac924444e0235c7146b7..d9f1b0a4fbd4ab5874ab9a0f6b14dca6a06073c2 100644 (file)
@@ -214,6 +214,7 @@ struct dummy_pstate_entry {
 struct clk_bw_params {
        unsigned int vram_type;
        unsigned int num_channels;
+       unsigned int dram_channel_width_bytes;
        unsigned int dispclk_vco_khz;
        unsigned int dc_mode_softmax_memclk;
        struct clk_limit_table clk_table;
index 859ffd8725c5caa76882461072fc1a75b725b42c..04f7656906ca0fa77f362584d6f290daa21897b9 100644 (file)
@@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
        struct fixed31_32 lut2;
        struct fixed31_32 delta_lut;
        struct fixed31_32 delta_index;
+       const struct fixed31_32 one = dc_fixpt_from_int(1);
 
        i = 0;
        /* fixed_pt library has problems handling too small values */
@@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
                        } else
                                hw_x = coordinates_x[i].x;
 
+                       if (dc_fixpt_le(one, hw_x))
+                               hw_x = one;
+
                        norm_x = dc_fixpt_mul(norm_factor, hw_x);
                        index = dc_fixpt_floor(norm_x);
                        if (index < 0 || index > 255)
index 6db67f082d91758eece57c919e14e705e0148354..644ea150e0751890820cc79e66c24aa6f61dcb83 100644 (file)
@@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
                smu_baco->platform_support =
                        (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
                                                                        false;
+
+               /*
+                * Disable BACO entry/exit completely on below SKUs to
+                * avoid hardware intermittent failures.
+                */
+               if (((adev->pdev->device == 0x73A1) &&
+                   (adev->pdev->revision == 0x00)) ||
+                   ((adev->pdev->device == 0x73BF) &&
+                   (adev->pdev->revision == 0xCF)))
+                       smu_baco->platform_support = false;
+
        }
 }
 
index fcf24c54085929245d29421f4cb0280faa71b17c..c422bf8a09b1d081119f77af4b4556e46c4f62ff 100644 (file)
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,              0),
        MSG_MAP(Mode1Reset,             PPSMC_MSG_Mode1Reset,                  0),
        MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         0),
+       MSG_MAP(SetMGpuFanBoostLimitRpm,        PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
index 493922069c9041194a0d9ab3519f92e9e2fd4e85..01ee3febb81315d1afbbc172c94d2d56b337b559 100644 (file)
@@ -377,8 +377,8 @@ static int vrr_range_show(struct seq_file *m, void *data)
        if (connector->status != connector_status_connected)
                return -ENODEV;
 
-       seq_printf(m, "Min: %u\n", (u8)connector->display_info.monitor_range.min_vfreq);
-       seq_printf(m, "Max: %u\n", (u8)connector->display_info.monitor_range.max_vfreq);
+       seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
+       seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
 
        return 0;
 }
index bbc25e3b7220a2ffe68b8288d2af56279b1ae208..eaa819381281be2f41f47e1c717cc79322e3db04 100644 (file)
@@ -5971,12 +5971,14 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
 }
 
 static
-void get_monitor_range(const struct detailed_timing *timing,
-                      void *info_monitor_range)
+void get_monitor_range(const struct detailed_timing *timing, void *c)
 {
-       struct drm_monitor_range_info *monitor_range = info_monitor_range;
+       struct detailed_mode_closure *closure = c;
+       struct drm_display_info *info = &closure->connector->display_info;
+       struct drm_monitor_range_info *monitor_range = &info->monitor_range;
        const struct detailed_non_pixel *data = &timing->data.other_data;
        const struct detailed_data_monitor_range *range = &data->data.range;
+       const struct edid *edid = closure->drm_edid->edid;
 
        if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
                return;
@@ -5992,18 +5994,28 @@ void get_monitor_range(const struct detailed_timing *timing,
 
        monitor_range->min_vfreq = range->min_vfreq;
        monitor_range->max_vfreq = range->max_vfreq;
+
+       if (edid->revision >= 4) {
+               if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+                       monitor_range->min_vfreq += 255;
+               if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+                       monitor_range->max_vfreq += 255;
+       }
 }
 
 static void drm_get_monitor_range(struct drm_connector *connector,
                                  const struct drm_edid *drm_edid)
 {
-       struct drm_display_info *info = &connector->display_info;
+       const struct drm_display_info *info = &connector->display_info;
+       struct detailed_mode_closure closure = {
+               .connector = connector,
+               .drm_edid = drm_edid,
+       };
 
        if (!version_greater(drm_edid, 1, 1))
                return;
 
-       drm_for_each_detailed_block(drm_edid, get_monitor_range,
-                                   &info->monitor_range);
+       drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
 
        DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
                      info->monitor_range.min_vfreq,
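
The EDID hunk above routes the descriptor parse through a closure so it can also see edid->revision and apply the EDID 1.4 range-offset flags. A small standalone sketch of just the +255 arithmetic; the flag bit positions and helper name below are placeholders for illustration, not the actual DRM_EDID_RANGE_OFFSET_* definitions:

#include <stdio.h>

#define RANGE_OFFSET_MIN_VFREQ 0x01   /* assumed bit: "min rate + 255" */
#define RANGE_OFFSET_MAX_VFREQ 0x02   /* assumed bit: "max rate + 255" */

static void decode_vfreq_range(unsigned char min_byte, unsigned char max_byte,
			       unsigned char offset_flags, int edid_revision,
			       unsigned int *min_vfreq, unsigned int *max_vfreq)
{
	*min_vfreq = min_byte;
	*max_vfreq = max_byte;

	/* EDID 1.4+ range-offset bits extend the one-byte fields past 255 Hz */
	if (edid_revision >= 4) {
		if (offset_flags & RANGE_OFFSET_MIN_VFREQ)
			*min_vfreq += 255;
		if (offset_flags & RANGE_OFFSET_MAX_VFREQ)
			*max_vfreq += 255;
	}
}

int main(void)
{
	unsigned int lo, hi;

	/* e.g. a 48-360 Hz panel stores 360 - 255 = 105 with the max offset bit set */
	decode_vfreq_range(48, 105, RANGE_OFFSET_MAX_VFREQ, 4, &lo, &hi);
	printf("Min: %u\nMax: %u\n", lo, hi);
	return 0;
}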
index dd32b484dd82557b77a918c99c0953f78f4fb1e7..ce96234f3df2084f80702f060926d77d3b981068 100644 (file)
@@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = {
 static int cdv_chip_setup(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
        INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
 
-       if (pci_enable_msi(pdev))
-               dev_warn(dev->dev, "Enabling MSI failed!\n");
+       dev_priv->use_msi = true;
        dev_priv->regmap = cdv_regmap;
        gma_get_core_freq(dev);
        psb_intel_opregion_init(dev);
index dffe37490206d241cda861e2d5000ff0cd5a6094..4b7627a7263788b939025dd298699e2565b1fa6e 100644 (file)
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
 {
        struct psb_gem_object *pobj = to_psb_gem_object(obj);
 
-       drm_gem_object_release(obj);
-
        /* Undo the mmap pin if we are destroying the object */
        if (pobj->mmapping)
                psb_gem_unpin(pobj);
 
+       drm_gem_object_release(obj);
+
        WARN_ON(pobj->in_gart && !pobj->stolen);
 
        release_resource(&pobj->resource);
index bd40c040a2c91fe402ccdeb1cce63350a0541fb4..2f52eceda3a1e464255fb602b6cc42498822c525 100644 (file)
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
                gma_crtc->page_flip_event = event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
 
                /* Call this locked if we want an event at vblank interrupt. */
                ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
                if (ret) {
-                       gma_crtc->page_flip_event = NULL;
-                       drm_crtc_vblank_put(crtc);
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       if (gma_crtc->page_flip_event) {
+                               gma_crtc->page_flip_event = NULL;
+                               drm_crtc_vblank_put(crtc);
+                       }
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
                }
-
-               spin_unlock_irqrestore(&dev->event_lock, flags);
        } else {
                ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
        }
index 5923a9c8931224565545fc163b7602105546a072..f90e628cb482c097f503fc82701e4084a2b00d4d 100644 (file)
@@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
 static int oaktrail_chip_setup(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
        int ret;
 
-       if (pci_enable_msi(pdev))
-               dev_warn(dev->dev, "Enabling MSI failed!\n");
-
+       dev_priv->use_msi = true;
        dev_priv->regmap = oaktrail_regmap;
 
        ret = mid_chip_setup(dev);
index b91de6d36e412e2b4dbbd3f179fd695781f119a3..66873085d45059797cdc94d8ab37ba5513bdd70c 100644 (file)
@@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev)
        dev_priv->regs.saveBSM = bsm;
        pci_read_config_dword(pdev, 0xFC, &vbt);
        dev_priv->regs.saveVBT = vbt;
-       pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
-       pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
 
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
@@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev)
        pci_restore_state(pdev);
        pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
        pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
-       /* restoring MSI address and data in PCIx space */
-       pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
-       pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
        ret = pci_enable_device(pdev);
 
        if (ret != 0)
@@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev)
        mutex_lock(&power_mutex);
        gma_resume_pci(pdev);
        gma_resume_display(pdev);
-       gma_irq_preinstall(dev);
-       gma_irq_postinstall(dev);
+       gma_irq_install(dev);
        mutex_unlock(&power_mutex);
        return 0;
 }
index 1d8744f3e7020760d4beffba2429815a58f93035..54e756b486060b5388680a93075dfcbd6054ab44 100644 (file)
@@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
        PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 
-       gma_irq_install(dev, pdev->irq);
+       gma_irq_install(dev);
 
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 
index 0ea3d23575f3a638be95c723639938fdd7443492..731cc356c07a39d4aba86cc682134d343ed28535 100644 (file)
@@ -490,6 +490,7 @@ struct drm_psb_private {
        int rpm_enabled;
 
        /* MID specific */
+       bool use_msi;
        bool has_gct;
        struct oaktrail_gct_data gct_data;
 
@@ -499,10 +500,6 @@ struct drm_psb_private {
        /* Register state */
        struct psb_save_area regs;
 
-       /* MSI reg save */
-       uint32_t msi_addr;
-       uint32_t msi_data;
-
        /* Hotplug handling */
        struct work_struct hotplug_work;
 
index e6e6d61bbeab646d05026564d51d9b53174d44f5..038f18ed0a95ee3b43759e32d3c1d84730111dfe 100644 (file)
@@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev)
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 }
 
-int gma_irq_install(struct drm_device *dev, unsigned int irq)
+int gma_irq_install(struct drm_device *dev)
 {
+       struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
        int ret;
 
-       if (irq == IRQ_NOTCONNECTED)
+       if (dev_priv->use_msi && pci_enable_msi(pdev)) {
+               dev_warn(dev->dev, "Enabling MSI failed!\n");
+               dev_priv->use_msi = false;
+       }
+
+       if (pdev->irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;
 
        gma_irq_preinstall(dev);
 
        /* PCI devices require shared interrupts. */
-       ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
+       ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
        if (ret)
                return ret;
 
@@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev)
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 
        free_irq(pdev->irq, dev);
+       if (dev_priv->use_msi)
+               pci_disable_msi(pdev);
 }
 
 int gma_crtc_enable_vblank(struct drm_crtc *crtc)
index b51e395194fff2c50fd46cb369377f0007ce09ec..7648f69824a5d4c5b2edf381b30c56524c4cf9c6 100644 (file)
@@ -17,7 +17,7 @@ struct drm_device;
 
 void gma_irq_preinstall(struct drm_device *dev);
 void gma_irq_postinstall(struct drm_device *dev);
-int  gma_irq_install(struct drm_device *dev, unsigned int irq);
+int  gma_irq_install(struct drm_device *dev);
 void gma_irq_uninstall(struct drm_device *dev);
 
 int  gma_crtc_enable_vblank(struct drm_crtc *crtc);
index 6d11e7938c8370f4616a2e6c726c9258e1926d6f..f84d39762a72bef2a62fd904d0c9a59984ff6f4f 100644 (file)
@@ -23,9 +23,6 @@
 #define DRIVER_MAJOR 1
 #define DRIVER_MINOR 0
 
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
 DEFINE_DRM_GEM_FOPS(hv_fops);
 
 static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
        }
 
        ret = hyperv_setup_vram(hv, hdev);
-
        if (ret)
                goto err_vmbus_close;
 
@@ -150,18 +146,20 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
 
        ret = hyperv_mode_config_init(hv);
        if (ret)
-               goto err_vmbus_close;
+               goto err_free_mmio;
 
        ret = drm_dev_register(dev, 0);
        if (ret) {
                drm_err(dev, "Failed to register drm driver.\n");
-               goto err_vmbus_close;
+               goto err_free_mmio;
        }
 
        drm_fbdev_generic_setup(dev, 0);
 
        return 0;
 
+err_free_mmio:
+       vmbus_free_mmio(hv->mem->start, hv->fb_size);
 err_vmbus_close:
        vmbus_close(hdev->channel);
 err_hv_set_drv_data:
index 885c74f60366b83e8c5a4bc92f542276fe91367b..1390729401a07f1b08f9bff5c8ecd09d1ebc9488 100644 (file)
@@ -1629,6 +1629,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
        /* FIXME: initialize from VBT */
        vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
 
+       vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
        ret = intel_dsc_compute_params(crtc_state);
        if (ret)
                return ret;
index 198a2f4920cc4180494b14611909f7e6d3f55593..7d6eb9ad7a026dc2f11ce5f5c0033a15fd5a2340 100644 (file)
@@ -479,6 +479,13 @@ init_bdb_block(struct drm_i915_private *i915,
 
        block_size = get_blocksize(block);
 
+       /*
+        * Version number and new block size are considered
+        * part of the header for MIPI sequenece block v3+.
+        */
+        * part of the header for MIPI sequence block v3+.
+               block_size += 5;
+
        entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
                        GFP_KERNEL);
        if (!entry) {
index ac90d455a7c737df8e90e9a26305eb9d082da41f..3ed7eeacc706be3eb6ffdb199053bd140c8ba63c 100644 (file)
@@ -389,23 +389,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
        return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
 }
 
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
-       u32 voltage;
-
-       voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
-       return voltage == VOLTAGE_INFO_0_85V;
-}
-
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
 
-       if (intel_phy_is_combo(dev_priv, phy) &&
-           (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+       if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
                return 540000;
 
        return 810000;
@@ -413,23 +403,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
 
 static int ehl_max_source_rate(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
-       if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
-               return 540000;
-
-       return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
-       if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+       if (intel_dp_is_edp(intel_dp))
                return 540000;
 
        return 810000;
@@ -491,7 +465,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
                        max_rate = dg2_max_source_rate(intel_dp);
                else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
                         IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
-                       max_rate = dg1_max_source_rate(intel_dp);
+                       max_rate = 810000;
                else if (IS_JSL_EHL(dev_priv))
                        max_rate = ehl_max_source_rate(intel_dp);
                else
@@ -1395,6 +1369,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
         * DP_DSC_RC_BUF_SIZE for this.
         */
        vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+       vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
 
        /*
         * Slice Height of 8 works for all currently available panels. So start
index 9feaf1a589f382cb5cba18eb5441a72e06361b5e..d213d8ad1ea53b4c83960bd312c9c30ff192205a 100644 (file)
@@ -671,6 +671,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
        intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
                              &link_bw, &rate_select);
 
+       /*
+        * WaEdpLinkRateDataReload
+        *
+        * Parade PS8461E MUX (used on various TGL+ laptops) needs
+        * to snoop the link rates reported by the sink when we
+        * use LINK_RATE_SET in order to operate in jitter cleaning
+        * mode (as opposed to redriver mode). Unfortunately it
+        * loses track of the snooped link rates when powered down,
+        * so we need to make it re-snoop often. Without this high
+        * link rates are not stable.
+        */
+       if (!link_bw) {
+               struct intel_connector *connector = intel_dp->attached_connector;
+               __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+               drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+                           connector->base.base.id, connector->base.name);
+
+               drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+                                sink_rates, sizeof(sink_rates));
+       }
+
        if (link_bw)
                drm_dbg_kms(&i915->drm,
                            "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
index 43e1bbc1e3035ff989f437133d7b6e3232d941d5..ca530f0733e0e7c3e92d557d364d18c27c4fa47e 100644 (file)
@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
        u8 i = 0;
 
        vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
-       vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
        vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
                                             pipe_config->dsc.slice_count);
 
index 389e9f157ca5efcdcd719f910044058509e2dad0..85482a04d15840646131263a89892e14b07605bd 100644 (file)
@@ -723,6 +723,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
        bool lmem_placement = false;
        int i;
 
+       if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
+               return false;
+
        for (i = 0; i < obj->mm.n_placements; i++) {
                /* Compression is not allowed for the objects with smem placement */
                if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
index f131dc065f477a135760fc823c65e123fea063f1..6f3ab7ade41ad9410a11eeb7da6f517d1871d4f5 100644 (file)
@@ -297,7 +297,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
                i915_tt->is_shmem = true;
        }
 
-       if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+       if (i915_gem_object_needs_ccs_pages(obj))
                ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
                                                      NUM_BYTES_PER_CCS_BYTE),
                                         PAGE_SIZE);
index 14fe65812e42677c91a6efbd7c6cfee79b17fdd1..1d19c073ba2ec62b4a77708005a9c31cc5381d92 100644 (file)
@@ -12,6 +12,7 @@
 #include "intel_llc.h"
 #include "intel_mchbar_regs.h"
 #include "intel_pcode.h"
+#include "intel_rps.h"
 
 struct ia_constants {
        unsigned int min_gpu_freq;
@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc,
        if (!HAS_LLC(i915) || IS_DGFX(i915))
                return false;
 
-       if (rps->max_freq <= rps->min_freq)
-               return false;
-
        consts->max_ia_freq = cpu_max_MHz();
 
        consts->min_ring_freq =
@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc,
        /* convert DDR frequency from units of 266.6MHz to bandwidth */
        consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
 
-       consts->min_gpu_freq = rps->min_freq;
-       consts->max_gpu_freq = rps->max_freq;
-       if (GRAPHICS_VER(i915) >= 9) {
-               /* Convert GT frequency to 50 HZ units */
-               consts->min_gpu_freq /= GEN9_FREQ_SCALER;
-               consts->max_gpu_freq /= GEN9_FREQ_SCALER;
-       }
+       consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+       consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
 
        return true;
 }
@@ -130,6 +123,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
        if (!get_ia_constants(llc, &consts))
                return;
 
+       /*
+        * Although this is unlikely on any platform during initialization,
+        * let's ensure we don't accidentally get into an infinite loop
+        */
+       if (consts.max_gpu_freq <= consts.min_gpu_freq)
+               return;
        /*
         * For each potential GPU frequency, load a ring frequency we'd like
         * to use for memory access.  We do this by specifying the IA frequency
index fb3f57ee450bc935d1ead4ec97ce0079a13d76f7..7bb967034679a050b65925066b934a2b497a6d71 100644 (file)
@@ -2126,6 +2126,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps)
                return intel_gpu_freq(rps, rps->max_freq_softlimit);
 }
 
+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in a raw format. In newer platforms raw is in
+ * units of 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+       u32 freq;
+
+       if (rps_uses_slpc(rps)) {
+               return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+                                        GT_FREQUENCY_MULTIPLIER);
+       } else {
+               freq = rps->max_freq;
+               if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+                       /* Convert GT frequency to 50 MHz units */
+                       freq /= GEN9_FREQ_SCALER;
+               }
+               return freq;
+       }
+}
+
 u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
 {
        struct intel_guc_slpc *slpc = rps_to_slpc(rps);
@@ -2214,6 +2239,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps)
                return intel_gpu_freq(rps, rps->min_freq_softlimit);
 }
 
+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in a raw format. In newer platforms raw is in
+ * units of 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+       u32 freq;
+
+       if (rps_uses_slpc(rps)) {
+               return DIV_ROUND_CLOSEST(slpc->min_freq,
+                                        GT_FREQUENCY_MULTIPLIER);
+       } else {
+               freq = rps->min_freq;
+               if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+                       /* Convert GT frequency to 50 MHz units */
+                       freq /= GEN9_FREQ_SCALER;
+               }
+               return freq;
+       }
+}
+
 static int set_min_freq(struct intel_rps *rps, u32 val)
 {
        int ret = 0;
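
The two new helpers report frequencies in "raw" 50 MHz units. A small stand-alone sketch of that unit relationship follows; the multiplier and scaler values are assumptions for illustration, not taken from the headers.

#include <stdio.h>

/* Assumed values: one raw unit = 50 MHz; GEN9+ hardware encodes frequencies
 * in finer steps that are divided down by a scaler of 3.
 */
#define RAW_UNIT_MHZ    50
#define GEN9_SCALER     3

static unsigned int hw_to_raw(unsigned int hw_freq)
{
        return hw_freq / GEN9_SCALER;   /* hardware encoding -> 50 MHz units */
}

static unsigned int raw_to_mhz(unsigned int raw_freq)
{
        return raw_freq * RAW_UNIT_MHZ; /* 50 MHz units -> MHz */
}

int main(void)
{
        unsigned int hw_max = 66;       /* made-up hardware-encoded maximum */
        unsigned int raw = hw_to_raw(hw_max);

        printf("raw = %u (%u MHz)\n", raw, raw_to_mhz(raw));
        return 0;
}
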
index 1e8d5649130835d94e719e064a918f9701cae5cb..4509dfdc52e09d90775fec8054c8ffcae6207cba 100644 (file)
@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
 u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
 u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
 u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
 int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
 u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
 int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
 u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
 u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
index 834c707d1877b82262e13579905c45a20a325911..3e91f44829e92cf54181904af174c6fc2cef6944 100644 (file)
@@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
        if (!guc_submission_initialized(guc))
                return;
 
-       cancel_delayed_work(&guc->timestamp.work);
+       /*
+        * There is a race with suspend flow where the worker runs after suspend
+        * and causes an unclaimed register access warning. Cancel the worker
+        * synchronously here.
+        */
+       cancel_delayed_work_sync(&guc->timestamp.work);
 
        /*
         * Before parking, we should sample engine busyness stats if we need to.
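
A small pthread sketch of the ordering the synchronous cancel enforces. This is a user-space analogy with made-up names, not the GuC code: the point is only that the in-flight worker must be waited for before the "device" state changes underneath it.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool device_awake = true;        /* stands in for the GT being powered */

static void *busyness_worker(void *arg)
{
        (void)arg;
        usleep(1000);                   /* worker still in flight */
        if (device_awake)
                printf("worker sampled busyness while the device was awake\n");
        else
                printf("BUG: worker touched registers after suspend\n");
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, busyness_worker, NULL);

        /*
         * Analogue of a synchronous cancel: wait for the in-flight worker
         * before changing the device state. Dropping this wait is what
         * reintroduces the race described above.
         */
        pthread_join(worker, NULL);

        device_awake = false;           /* only now is it safe to "suspend" */
        printf("parked\n");
        return 0;
}
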
index 3168d7007e10177bdb15da551c1f5e08caa458f0..135d04c2d41c8cfc9ceb84f973e1afc5027e3ee9 100644 (file)
 
 #define GT0_PERF_LIMIT_REASONS         _MMIO(0x1381a8)
 #define   GT0_PERF_LIMIT_REASONS_MASK  0xde3
-#define   PROCHOT_MASK                 REG_BIT(1)
-#define   THERMAL_LIMIT_MASK           REG_BIT(2)
-#define   RATL_MASK                    REG_BIT(6)
-#define   VR_THERMALERT_MASK           REG_BIT(7)
-#define   VR_TDC_MASK                  REG_BIT(8)
-#define   POWER_LIMIT_4_MASK           REG_BIT(9)
-#define   POWER_LIMIT_1_MASK           REG_BIT(11)
-#define   POWER_LIMIT_2_MASK           REG_BIT(12)
+#define   PROCHOT_MASK                 REG_BIT(0)
+#define   THERMAL_LIMIT_MASK           REG_BIT(1)
+#define   RATL_MASK                    REG_BIT(5)
+#define   VR_THERMALERT_MASK           REG_BIT(6)
+#define   VR_TDC_MASK                  REG_BIT(7)
+#define   POWER_LIMIT_4_MASK           REG_BIT(8)
+#define   POWER_LIMIT_1_MASK           REG_BIT(10)
+#define   POWER_LIMIT_2_MASK           REG_BIT(11)
 
 #define CHV_CLK_CTL1                   _MMIO(0x101100)
 #define VLV_CLK_CTL2                   _MMIO(0x101104)
index 26037171649006b3a6433e5541625e2fc2bb72c9..373582cfd8f31bbe9203fe8adc0162c181585de7 100644 (file)
@@ -1882,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
                enum dma_resv_usage usage;
                int idx;
 
-               obj->read_domains = 0;
                if (flags & EXEC_OBJECT_WRITE) {
                        usage = DMA_RESV_USAGE_WRITE;
                        obj->write_domain = I915_GEM_DOMAIN_RENDER;
+                       obj->read_domains = 0;
                } else {
                        usage = DMA_RESV_USAGE_READ;
+                       obj->write_domain = 0;
                }
 
                dma_fence_array_for_each(curr, idx, fence)
index b9ac932af8d008fb75815186913a51485733c079..03acc68abf2c51791a7cd5250ee7a8cecd7ad784 100644 (file)
@@ -170,7 +170,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 
        /* Enable OSD and BLK0, set max global alpha */
        priv->viu.osd1_ctrl_stat = OSD_ENABLE |
-                                  (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+                                  (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
                                   OSD_BLK0_ENABLE;
 
        priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
index bb7e109534de1e958c69fdfd52feae53c5112a0b..d4b907889a21d199a43f0af7a7a6bcf56ec201c8 100644 (file)
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
                priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
        writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
                priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
-       writel((m[11] & 0x1fff) << 16,
+       writel((m[11] & 0x1fff),
                priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
 
        writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
index cdb154c8b866bbbee2952ab92f1c5b65bc89d000..b75c690bb0cc271b42964765e2f7fd1893dd6b55 100644 (file)
@@ -1295,7 +1295,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
        },
        .delay = {
                .hpd_absent = 200,
-               .prepare_to_enable = 80,
+               .enable = 80,
+               .disable = 50,
                .unprepare = 500,
        },
 };
index 5110cd9b2425890d3e620cbfd49186b581abd626..fe5f12f16a632cfa17819b86e63c78a29bd9c82a 100644 (file)
@@ -131,6 +131,17 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
                return PTR_ERR(opp);
 
        panfrost_devfreq_profile.initial_freq = cur_freq;
+
+       /*
+        * Set the recommend OPP this will enable and configure the regulator
+        * Set the recommended OPP: this will enable and configure the
+        * regulator, if any, and avoid a switch off by regulator_late_cleanup()
+       ret = dev_pm_opp_set_opp(dev, opp);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+               return ret;
+       }
+
        dev_pm_opp_put(opp);
 
        /*
index c204e9b95c1f749d9e2de03929b1c6aa1c76ebb1..518ee13b1d6f42b39516bd12a5f637367404d939 100644 (file)
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
-                                      struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+                           struct drm_display_mode *mode)
 {
        struct cdn_dp_device *dp = connector_to_dp(connector);
        struct drm_display_info *display_info = &dp->connector.display_info;
index e4631f515ba42a33e2a359ba83844bbce606095a..f9aa8b96c69529c58a6eac609735b3859b8cc8b3 100644 (file)
@@ -1439,11 +1439,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
                die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
                die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
                           FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+               dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+               dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
                break;
        case ROCKCHIP_VOP2_EP_EDP0:
                die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
                die |= RK3568_SYS_DSP_INFACE_EN_EDP |
                           FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+               dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+               dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
                break;
        case ROCKCHIP_VOP2_EP_MIPI0:
                die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
index 1cbfb00c1d6586313711aa837d636c1eeee6ff4b..911141d16e9547fab42dbb4841bbad32c87f23dd 100644 (file)
@@ -236,16 +236,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;
 
-       if (fbo->base.resource) {
-               ttm_resource_set_bo(fbo->base.resource, &fbo->base);
-               bo->resource = NULL;
-       }
-
        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);
 
+       if (fbo->base.resource) {
+               ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+               bo->resource = NULL;
+               ttm_bo_set_bulk_move(&fbo->base, NULL);
+       } else {
+               fbo->base.bulk_move = NULL;
+       }
+
        ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
        if (ret) {
                kfree(fbo);
index 660036da744953ef93ccd204df470b04e64daaee..922d83eb7ddfacf2a43132a438c57af494f0d1f0 100644 (file)
@@ -129,7 +129,7 @@ static void fcopy_send_data(struct work_struct *dummy)
 
        /*
         * The  strings sent from the host are encoded in
-        * in utf16; convert it to utf8 strings.
+        * utf16; convert them to utf8 strings.
         * The host assures us that the utf16 strings will not exceed
         * the max lengths specified. We will however, reserve room
         * for the string terminating character - in the utf16s_utf8s()
index 23c680d1a0f5415735f2ba9d99ec47500a7fbe44..3c833ea60db650f87252d76659b530ff9a7eb34c 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/syscore_ops.h>
 #include <linux/dma-map-ops.h>
+#include <linux/pci.h>
 #include <clocksource/hyperv_timer.h>
 #include "hyperv_vmbus.h"
 
@@ -2262,26 +2263,43 @@ static int vmbus_acpi_remove(struct acpi_device *device)
 
 static void vmbus_reserve_fb(void)
 {
-       int size;
+       resource_size_t start = 0, size;
+       struct pci_dev *pdev;
+
+       if (efi_enabled(EFI_BOOT)) {
+               /* Gen2 VM: get FB base from EFI framebuffer */
+               start = screen_info.lfb_base;
+               size = max_t(__u32, screen_info.lfb_size, 0x800000);
+       } else {
+               /* Gen1 VM: get FB base from PCI */
+               pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+                                     PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+               if (!pdev)
+                       return;
+
+               if (pdev->resource[0].flags & IORESOURCE_MEM) {
+                       start = pci_resource_start(pdev, 0);
+                       size = pci_resource_len(pdev, 0);
+               }
+
+               /*
+                * Release the PCI device so hyperv_drm or hyperv_fb driver can
+                * grab it later.
+                */
+               pci_dev_put(pdev);
+       }
+
+       if (!start)
+               return;
+
        /*
         * Make a claim for the frame buffer in the resource tree under the
         * first node, which will be the one below 4GB.  The length seems to
         * be underreported, particularly in a Generation 1 VM.  So start out
         * reserving a larger area and make it smaller until it succeeds.
         */
-
-       if (screen_info.lfb_base) {
-               if (efi_enabled(EFI_BOOT))
-                       size = max_t(__u32, screen_info.lfb_size, 0x800000);
-               else
-                       size = max_t(__u32, screen_info.lfb_size, 0x4000000);
-
-               for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
-                       fb_mmio = __request_region(hyperv_mmio,
-                                                  screen_info.lfb_base, size,
-                                                  fb_mmio_name, 0);
-               }
-       }
+       for (; !fb_mmio && (size >= 0x100000); size >>= 1)
+               fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
 }
 
 /**
@@ -2313,7 +2331,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                        bool fb_overlap_ok)
 {
        struct resource *iter, *shadow;
-       resource_size_t range_min, range_max, start;
+       resource_size_t range_min, range_max, start, end;
        const char *dev_n = dev_name(&device_obj->device);
        int retval;
 
@@ -2348,6 +2366,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                range_max = iter->end;
                start = (range_min + align - 1) & ~(align - 1);
                for (; start + size - 1 <= range_max; start += align) {
+                       end = start + size - 1;
+
+                       /* Skip the whole fb_mmio region if not fb_overlap_ok */
+                       if (!fb_overlap_ok && fb_mmio &&
+                           (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+                            ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+                               continue;
+
                        shadow = __request_region(iter, start, size, NULL,
                                                  IORESOURCE_BUSY);
                        if (!shadow)
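
The new check rejects any candidate window whose start or end lands inside the claimed framebuffer region. A stand-alone sketch with made-up addresses showing the same endpoint test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

/* Same shape as the kernel test: reject when either endpoint is inside fb. */
static bool overlaps_fb(uint64_t start, uint64_t end, const struct range *fb)
{
        return (start >= fb->start && start <= fb->end) ||
               (end >= fb->start && end <= fb->end);
}

int main(void)
{
        const struct range fb = { 0xf8000000ULL, 0xf87fffffULL }; /* assumed 8 MiB FB */
        const uint64_t size = 0x100000, align = 0x100000;         /* 1 MiB requests */

        for (uint64_t start = 0xf7e00000ULL; start < 0xf8a00000ULL; start += align) {
                uint64_t end = start + size - 1;

                if (overlaps_fb(start, end, &fb)) {
                        printf("skip %#llx-%#llx (inside framebuffer window)\n",
                               (unsigned long long)start, (unsigned long long)end);
                        continue;
                }
                printf("try  %#llx-%#llx\n",
                       (unsigned long long)start, (unsigned long long)end);
        }
        return 0;
}
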
index 61a4684fc020e1f5852bf3afc59c87efbd27bc42..81e688975c6a79583e61004f2ff4d47901521f93 100644 (file)
@@ -266,9 +266,7 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
 #define SENSOR_SET_WATER_BLOCK                                                 \
        (SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT)
 
-
 struct ec_board_info {
-       const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS];
        unsigned long sensors;
        /*
         * Defines which mutex to use for guarding access to the state and the
@@ -281,152 +279,194 @@ struct ec_board_info {
        enum board_family family;
 };
 
-static const struct ec_board_info board_info[] = {
-       {
-               .board_names = {"PRIME X470-PRO"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
-                       SENSOR_FAN_CPU_OPT |
-                       SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
-               .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
-               .family = family_amd_400_series,
-       },
-       {
-               .board_names = {"PRIME X570-PRO"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ProArt X570-CREATOR WIFI"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
-                       SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
-       },
-       {
-               .board_names = {"Pro WS X570-ACE"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
-                       SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG CROSSHAIR VIII DARK HERO"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR |
-                       SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
-                       SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
-                       SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {
-                       "ROG CROSSHAIR VIII FORMULA",
-                       "ROG CROSSHAIR VIII HERO",
-                       "ROG CROSSHAIR VIII HERO (WI-FI)",
-               },
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR |
-                       SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
-                       SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
-                       SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
-                       SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {
-                       "ROG MAXIMUS XI HERO",
-                       "ROG MAXIMUS XI HERO (WI-FI)",
-               },
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR |
-                       SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
-                       SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_intel_300_series,
-       },
-       {
-               .board_names = {"ROG CROSSHAIR VIII IMPACT"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
-                       SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
-                       SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG STRIX B550-E GAMING"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
-                       SENSOR_FAN_CPU_OPT,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG STRIX B550-I GAMING"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
-                       SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
-                       SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG STRIX X570-E GAMING"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
-                       SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
-                       SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG STRIX X570-E GAMING WIFI II"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
-                       SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG STRIX X570-F GAMING"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-                       SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG STRIX X570-I GAMING"},
-               .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
-                       SENSOR_TEMP_T_SENSOR |
-                       SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
-                       SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
-               .family = family_amd_500_series,
-       },
-       {
-               .board_names = {"ROG STRIX Z690-A GAMING WIFI D4"},
-               .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
-               .family = family_intel_600_series,
-       },
-       {
-               .board_names = {"ROG ZENITH II EXTREME"},
-               .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
-                       SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
-                       SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
-                       SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
-                       SENSOR_SET_WATER_BLOCK |
-                       SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
-                       SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
-               .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
-               .family = family_amd_500_series,
-       },
-       {}
+static const struct ec_board_info board_info_prime_x470_pro = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+               SENSOR_FAN_CPU_OPT |
+               SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+       .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+       .family = family_amd_400_series,
+};
+
+static const struct ec_board_info board_info_prime_x570_pro = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+               SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+               SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
+               SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_ws_x570_ace = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+               SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
+               SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR |
+               SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+               SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+               SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_hero = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR |
+               SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+               SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+               SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+               SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_maximus_xi_hero = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR |
+               SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+               SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_intel_300_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_impact = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+               SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+               SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_e_gaming = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+               SENSOR_FAN_CPU_OPT,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_i_gaming = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+               SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
+               SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+               SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+               SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming_wifi_ii = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
+               SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_f_gaming = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+               SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_i_gaming = {
+       .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
+               SENSOR_TEMP_T_SENSOR |
+               SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
+               SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+       .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
+       .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+       .family = family_intel_600_series,
+};
+
+static const struct ec_board_info board_info_zenith_ii_extreme = {
+       .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+               SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+               SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
+               SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
+               SENSOR_SET_WATER_BLOCK |
+               SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
+               SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
+       .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+       .family = family_amd_500_series,
+};
+
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info)                      \
+       {                                                                      \
+               .matches = {                                                   \
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR,                      \
+                                       "ASUSTeK COMPUTER INC."),              \
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, name),                 \
+               },                                                             \
+               .driver_data = (void *)board_info,                              \
+       }
+
+static const struct dmi_system_id dmi_table[] = {
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
+                                       &board_info_prime_x470_pro),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
+                                       &board_info_prime_x570_pro),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
+                                       &board_info_pro_art_x570_creator_wifi),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
+                                       &board_info_pro_ws_x570_ace),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
+                                       &board_info_crosshair_viii_dark_hero),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA",
+                                       &board_info_crosshair_viii_hero),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO",
+                                       &board_info_crosshair_viii_hero),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
+                                       &board_info_crosshair_viii_hero),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
+                                       &board_info_maximus_xi_hero),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
+                                       &board_info_maximus_xi_hero),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
+                                       &board_info_crosshair_viii_impact),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
+                                       &board_info_strix_b550_e_gaming),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
+                                       &board_info_strix_b550_i_gaming),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
+                                       &board_info_strix_x570_e_gaming),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
+                                       &board_info_strix_x570_e_gaming_wifi_ii),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-F GAMING",
+                                       &board_info_strix_x570_f_gaming),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
+                                       &board_info_strix_x570_i_gaming),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
+                                       &board_info_strix_z690_a_gaming_wifi_d4),
+       DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
+                                       &board_info_zenith_ii_extreme),
+       {},
 };
 
 struct ec_sensor {
@@ -537,12 +577,12 @@ static int find_ec_sensor_index(const struct ec_sensors_data *ec,
        return -ENOENT;
 }
 
-static int __init bank_compare(const void *a, const void *b)
+static int bank_compare(const void *a, const void *b)
 {
        return *((const s8 *)a) - *((const s8 *)b);
 }
 
-static void __init setup_sensor_data(struct ec_sensors_data *ec)
+static void setup_sensor_data(struct ec_sensors_data *ec)
 {
        struct ec_sensor *s = ec->sensors;
        bool bank_found;
@@ -574,7 +614,7 @@ static void __init setup_sensor_data(struct ec_sensors_data *ec)
        sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
 }
 
-static void __init fill_ec_registers(struct ec_sensors_data *ec)
+static void fill_ec_registers(struct ec_sensors_data *ec)
 {
        const struct ec_sensor_info *si;
        unsigned int i, j, register_idx = 0;
@@ -589,7 +629,7 @@ static void __init fill_ec_registers(struct ec_sensors_data *ec)
        }
 }
 
-static int __init setup_lock_data(struct device *dev)
+static int setup_lock_data(struct device *dev)
 {
        const char *mutex_path;
        int status;
@@ -812,7 +852,7 @@ static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
        return find_ec_sensor_index(state, type, channel) >= 0 ? S_IRUGO : 0;
 }
 
-static int __init
+static int
 asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
                             struct device *dev, int num,
                             enum hwmon_sensor_types type, u32 config)
@@ -841,27 +881,15 @@ static struct hwmon_chip_info asus_ec_chip_info = {
        .ops = &asus_ec_hwmon_ops,
 };
 
-static const struct ec_board_info * __init get_board_info(void)
+static const struct ec_board_info *get_board_info(void)
 {
-       const char *dmi_board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
-       const char *dmi_board_name = dmi_get_system_info(DMI_BOARD_NAME);
-       const struct ec_board_info *board;
-
-       if (!dmi_board_vendor || !dmi_board_name ||
-           strcasecmp(dmi_board_vendor, "ASUSTeK COMPUTER INC."))
-               return NULL;
-
-       for (board = board_info; board->sensors; board++) {
-               if (match_string(board->board_names,
-                                MAX_IDENTICAL_BOARD_VARIATIONS,
-                                dmi_board_name) >= 0)
-                       return board;
-       }
+       const struct dmi_system_id *dmi_entry;
 
-       return NULL;
+       dmi_entry = dmi_first_match(dmi_table);
+       return dmi_entry ? dmi_entry->driver_data : NULL;
 }
 
-static int __init asus_ec_probe(struct platform_device *pdev)
+static int asus_ec_probe(struct platform_device *pdev)
 {
        const struct hwmon_channel_info **ptr_asus_ec_ci;
        int nr_count[hwmon_max] = { 0 }, nr_types = 0;
@@ -970,29 +998,37 @@ static int __init asus_ec_probe(struct platform_device *pdev)
        return PTR_ERR_OR_ZERO(hwdev);
 }
 
-
-static const struct acpi_device_id acpi_ec_ids[] = {
-       /* Embedded Controller Device */
-       { "PNP0C09", 0 },
-       {}
-};
+MODULE_DEVICE_TABLE(dmi, dmi_table);
 
 static struct platform_driver asus_ec_sensors_platform_driver = {
        .driver = {
                .name   = "asus-ec-sensors",
-               .acpi_match_table = acpi_ec_ids,
        },
+       .probe = asus_ec_probe,
 };
 
-MODULE_DEVICE_TABLE(acpi, acpi_ec_ids);
-/*
- * we use module_platform_driver_probe() rather than module_platform_driver()
- * because the probe function (and its dependants) are marked with __init, which
- * means we can't put it into the .probe member of the platform_driver struct
- * above, and we can't mark the asus_ec_sensors_platform_driver object as __init
- * because the object is referenced from the module exit code.
- */
-module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
+static struct platform_device *asus_ec_sensors_platform_device;
+
+static int __init asus_ec_init(void)
+{
+       asus_ec_sensors_platform_device =
+               platform_create_bundle(&asus_ec_sensors_platform_driver,
+                                      asus_ec_probe, NULL, 0, NULL, 0);
+
+       if (IS_ERR(asus_ec_sensors_platform_device))
+               return PTR_ERR(asus_ec_sensors_platform_device);
+
+       return 0;
+}
+
+static void __exit asus_ec_exit(void)
+{
+       platform_device_unregister(asus_ec_sensors_platform_device);
+       platform_driver_unregister(&asus_ec_sensors_platform_driver);
+}
+
+module_init(asus_ec_init);
+module_exit(asus_ec_exit);
 
 module_param_named(mutex_path, mutex_path_override, charp, 0);
 MODULE_PARM_DESC(mutex_path,
index 26278b0f17a989ac0a6a23bcd5dc8d5143bd46e6..9259779cc2dff92e6380d488d73fb133f50356f3 100644 (file)
@@ -68,8 +68,9 @@
 
 /* VM Individual Macro Register */
 #define VM_COM_REG_SIZE        0x200
-#define VM_SDIF_DONE(n)        (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
-#define VM_SDIF_DATA(n)        (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
+#define VM_SDIF_DONE(vm)       (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
+#define VM_SDIF_DATA(vm, ch)   \
+       (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
 
 /* SDA Slave Register */
 #define IP_CTRL                        0x00
@@ -115,6 +116,7 @@ struct pvt_device {
        u32                     t_num;
        u32                     p_num;
        u32                     v_num;
+       u32                     c_num;
        u32                     ip_freq;
        u8                      *vm_idx;
 };
@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
 {
        struct pvt_device *pvt = dev_get_drvdata(dev);
        struct regmap *v_map = pvt->v_map;
+       u8 vm_idx, ch_idx;
        u32 n, stat;
-       u8 vm_idx;
        int ret;
 
-       if (channel >= pvt->v_num)
+       if (channel >= pvt->v_num * pvt->c_num)
                return -EINVAL;
 
-       vm_idx = pvt->vm_idx[channel];
+       vm_idx = pvt->vm_idx[channel / pvt->c_num];
+       ch_idx = channel % pvt->c_num;
 
        switch (attr) {
        case hwmon_in_input:
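
With per-VM channels exposed, the hwmon channel number becomes a flat index over every (voltage monitor, channel) pair, and the driver splits it back into the two components as shown above. A short sketch of that decomposition with made-up counts:

#include <stdio.h>

int main(void)
{
        const unsigned int v_num = 2, c_num = 4;  /* 2 VMs x 4 channels each */

        for (unsigned int channel = 0; channel < v_num * c_num; channel++) {
                unsigned int vm_idx = channel / c_num;  /* which voltage monitor */
                unsigned int ch_idx = channel % c_num;  /* channel within that VM */

                printf("hwmon in%-2u -> VM %u, channel %u\n",
                       channel, vm_idx, ch_idx);
        }
        return 0;
}
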
@@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
                if (ret)
                        return ret;
 
-               ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
+               ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
                if(ret < 0)
                        return ret;
 
                n &= SAMPLE_DATA_MSK;
-               /* Convert the N bitstream count into voltage */
-               *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
+               /*
+                * Convert the N bitstream count into voltage.
+                * To support negative voltage calculation for 64-bit machines
+                * n must be cast to long, since n and *val differ both in
+                * signedness and in size.
+                * Division is used instead of right shift, because for signed
+                * numbers, the sign bit is used to fill the vacated bit
+                * positions, and if the number is negative, 1 is used.
+                * BIT(x) may not be used instead of (1 << x) because it's
+                * unsigned.
+                */
+               *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
 
                return 0;
        default:
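
A stand-alone illustration of the arithmetic point made in the comment above; the constants are placeholders, not the driver's PVT_* values. It shows that truncating division and an arithmetic right shift disagree once the intermediate result goes negative, which is why the conversion divides instead of shifting.

#include <stdio.h>

#define N_CONST         1000L
#define R_CONST         (205 * 100 * 1000L)     /* placeholder offset */
#define CONV_BITS       10

int main(void)
{
        unsigned int n = 0x80;  /* made-up raw sample from the voltage monitor */
        long by_division, by_shift;

        /* Casting n to long keeps the whole expression in signed arithmetic. */
        by_division = (N_CONST * (long)n - R_CONST) / (1 << CONV_BITS);

        /*
         * Right-shifting a negative value is implementation-defined in C and,
         * even as an arithmetic shift, rounds toward -infinity, not zero.
         */
        by_shift = (N_CONST * (long)n - R_CONST) >> CONV_BITS;

        printf("division: %ld, shift: %ld\n", by_division, by_shift);
        return 0;
}
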
@@ -375,6 +388,19 @@ static int pvt_init(struct pvt_device *pvt)
                if (ret)
                        return ret;
 
+               val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
+                     IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
+               ret = regmap_write(v_map, SDIF_W, val);
+               if (ret < 0)
+                       return ret;
+
+               ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+                                              val, !(val & SDIF_BUSY),
+                                              PVT_POLL_DELAY_US,
+                                              PVT_POLL_TIMEOUT_US);
+               if (ret)
+                       return ret;
+
                val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
                      CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
                      SDIF_WRN_W | SDIF_PROG;
@@ -489,8 +515,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt
 
 static int mr75203_probe(struct platform_device *pdev)
 {
+       u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
        const struct hwmon_channel_info **pvt_info;
-       u32 ts_num, vm_num, pd_num, val, index, i;
        struct device *dev = &pdev->dev;
        u32 *temp_config, *in_config;
        struct device *hwmon_dev;
@@ -531,9 +557,11 @@ static int mr75203_probe(struct platform_device *pdev)
        ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
        pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
        vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
+       ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
        pvt->t_num = ts_num;
        pvt->p_num = pd_num;
        pvt->v_num = vm_num;
+       pvt->c_num = ch_num;
        val = 0;
        if (ts_num)
                val++;
@@ -570,7 +598,7 @@ static int mr75203_probe(struct platform_device *pdev)
        }
 
        if (vm_num) {
-               u32 num = vm_num;
+               u32 total_ch;
 
                ret = pvt_get_regmap(pdev, "vm", pvt);
                if (ret)
@@ -584,30 +612,30 @@ static int mr75203_probe(struct platform_device *pdev)
                ret = device_property_read_u8_array(dev, "intel,vm-map",
                                                    pvt->vm_idx, vm_num);
                if (ret) {
-                       num = 0;
+                       /*
+                        * In case the intel,vm-map property is not defined,
+                        * we assume incremental channel numbers.
+                        */
+                       for (i = 0; i < vm_num; i++)
+                               pvt->vm_idx[i] = i;
                } else {
                        for (i = 0; i < vm_num; i++)
                                if (pvt->vm_idx[i] >= vm_num ||
                                    pvt->vm_idx[i] == 0xff) {
-                                       num = i;
+                                       pvt->v_num = i;
+                                       vm_num = i;
                                        break;
                                }
                }
 
-               /*
-                * Incase intel,vm-map property is not defined, we assume
-                * incremental channel numbers.
-                */
-               for (i = num; i < vm_num; i++)
-                       pvt->vm_idx[i] = i;
-
-               in_config = devm_kcalloc(dev, num + 1,
+               total_ch = ch_num * vm_num;
+               in_config = devm_kcalloc(dev, total_ch + 1,
                                         sizeof(*in_config), GFP_KERNEL);
                if (!in_config)
                        return -ENOMEM;
 
-               memset32(in_config, HWMON_I_INPUT, num);
-               in_config[num] = 0;
+               memset32(in_config, HWMON_I_INPUT, total_ch);
+               in_config[total_ch] = 0;
                pvt_in.config = in_config;
 
                pvt_info[index++] = &pvt_in;
index 42762e87b0147bf6e908ec0602857ce5126fb864..f7c59ff7ae8ee5b476f30f6e41bf22347143145d 100644 (file)
@@ -493,18 +493,20 @@ static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
 
 static int tps23861_port_resistance(struct tps23861_data *data, int port)
 {
-       u16 regval;
+       unsigned int raw_val;
+       __le16 regval;
 
        regmap_bulk_read(data->regmap,
                         PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
                         &regval,
                         2);
 
-       switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+       raw_val = le16_to_cpu(regval);
+       switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) {
        case PORT_RESISTANCE_RSN_OTHER:
-               return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+               return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000;
        case PORT_RESISTANCE_RSN_LOW:
-               return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+               return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000;
        case PORT_RESISTANCE_RSN_SHORT:
        case PORT_RESISTANCE_RSN_OPEN:
        default:
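
The resistance register arrives as a little-endian 16-bit value, so the raw bytes are converted to host order before the field masks are applied. A user-space stand-in for that conversion (le16_to_cpu itself is a kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Assemble two bytes received LSB-first, independent of host byte order. */
static uint16_t le16_to_host(const uint8_t b[2])
{
        return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
}

int main(void)
{
        uint8_t regval[2] = { 0x34, 0x12 };     /* device sends 0x1234, LSB first */

        printf("raw_val = 0x%04x\n", le16_to_host(regval));
        return 0;
}
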
index 46d06678dfbebf935bccfc5c84456f5dcbd3c6ff..be317f2665a9ee112d67a2fc6a183d7b28423f68 100644 (file)
@@ -1841,8 +1841,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
                }
 
                if (!validate_net_dev(*net_dev,
-                                (struct sockaddr *)&req->listen_addr_storage,
-                                (struct sockaddr *)&req->src_addr_storage)) {
+                                (struct sockaddr *)&req->src_addr_storage,
+                                (struct sockaddr *)&req->listen_addr_storage)) {
                        id_priv = ERR_PTR(-EHOSTUNREACH);
                        goto err;
                }
index 186ed8859920c7be2ad646d982284baf62d2d5a0..d39e16c211e8a7a9edf021b771d45c54dd748287 100644 (file)
@@ -462,7 +462,7 @@ retry:
                mutex_unlock(&umem_odp->umem_mutex);
 
 out_put_mm:
-       mmput(owning_mm);
+       mmput_async(owning_mm);
 out_put_task:
        if (owning_process)
                put_task_struct(owning_process);
index f848eedc6a239b9c78ad110ca4490877acb3361e..d24996526c4d909c9663f44ad44ea8d394967d2e 100644 (file)
@@ -730,7 +730,6 @@ struct hns_roce_caps {
        u32             num_qps;
        u32             num_pi_qps;
        u32             reserved_qps;
-       int             num_qpc_timer;
        u32             num_srqs;
        u32             max_wqes;
        u32             max_srq_wrs;
index cbdafaac678a146b12776526a7a4817025a13f4d..c780646bd60acf11e4e396a83777a458ca8b2c91 100644 (file)
@@ -1977,7 +1977,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 
        caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
        caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
-       caps->num_qpc_timer     = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+       caps->qpc_timer_bt_num  = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
        caps->cqc_timer_bt_num  = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
 
        caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
@@ -2273,7 +2273,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
        caps->max_rq_sg              = le16_to_cpu(resp_a->max_rq_sg);
        caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
        caps->max_extend_sg          = le32_to_cpu(resp_a->max_extend_sg);
-       caps->num_qpc_timer          = le16_to_cpu(resp_a->num_qpc_timer);
        caps->max_srq_sges           = le16_to_cpu(resp_a->max_srq_sges);
        caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
        caps->num_aeq_vectors        = resp_a->num_aeq_vectors;
index f96debac30fe9d888ed11ed20fff785074548001..64797109bab63a7e374861edf8fa8f0ae7cc3309 100644 (file)
 #include <linux/bitops.h>
 
 #define HNS_ROCE_V2_MAX_QP_NUM                 0x1000
-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM          0x200
 #define HNS_ROCE_V2_MAX_WQE_NUM                        0x8000
 #define HNS_ROCE_V2_MAX_SRQ_WR                 0x8000
 #define HNS_ROCE_V2_MAX_SRQ_SGE                        64
 #define HNS_ROCE_V2_MAX_CQ_NUM                 0x100000
+#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM       0x100
 #define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM       0x100
 #define HNS_ROCE_V2_MAX_SRQ_NUM                        0x100000
 #define HNS_ROCE_V2_MAX_CQE_NUM                        0x400000
@@ -83,7 +83,7 @@
 
 #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ         PAGE_SIZE
 #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ         PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED                0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED                0xFFFF000
 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM         2
 #define HNS_ROCE_INVALID_LKEY                  0x0
 #define HNS_ROCE_INVALID_SGE_LENGTH            0x80000000
index c8af4ebd7cbd35dd1b7d0598385a63b647ab03cf..4ccb217b2841d63c679ae7f1d762c4000e33dae0 100644 (file)
@@ -725,7 +725,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
                                              HEM_TYPE_QPC_TIMER,
                                              hr_dev->caps.qpc_timer_entry_sz,
-                                             hr_dev->caps.num_qpc_timer, 1);
+                                             hr_dev->caps.qpc_timer_bt_num, 1);
                if (ret) {
                        dev_err(dev,
                                "Failed to init QPC timer memory, aborting.\n");
index 48d3616a6d71d134069f8ee345b4287d02f86c2a..7bee7f6c5e7027846d7d520d79385e310748b654 100644 (file)
@@ -462,11 +462,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
        hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
                                              hr_qp->rq.rsv_sge);
 
-       if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
-               hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
-       else
-               hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
-                                           hr_qp->rq.max_gs);
+       hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+                                   hr_qp->rq.max_gs);
 
        hr_qp->rq.wqe_cnt = cnt;
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
index daeab5daed5bc78e7d3a6abbac164b9cf1b06f88..a6e5d350a94ce8f1e3499fe736bd56cb4abf1fce 100644 (file)
@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
                i = 0;
        } else {
-               qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
+               qp->wqe_ops.iw_set_fragment(wqe, 0,
+                                           frag_cnt ? op_info->sg_list : NULL,
                                            qp->swqe_polarity);
                i = 1;
        }
@@ -1005,6 +1006,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
        int ret_code;
        bool move_cq_head = true;
        u8 polarity;
+       u8 op_type;
        bool ext_valid;
        __le64 *ext_cqe;
 
@@ -1187,7 +1189,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
                        do {
                                __le64 *sw_wqe;
                                u64 wqe_qword;
-                               u8 op_type;
                                u32 tail;
 
                                tail = qp->sq_ring.tail;
@@ -1204,6 +1205,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
                                        break;
                                }
                        } while (1);
+                       if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+                               info->minor_err = FLUSH_MW_BIND_ERR;
                        qp->sq_flush_seen = true;
                        if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
                                qp->sq_flush_complete = true;
index fdf4cc88cb91201d57e520bcb8b223253bebcc51..075defaabee53f3ae91cb89020742ea2d81ba22c 100644 (file)
@@ -590,11 +590,14 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
        cqp_error = cqp_request->compl_info.error;
        if (cqp_error) {
                err_code = -EIO;
-               if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
-                   cqp_request->compl_info.min_err_code == 0x8029) {
-                       if (!rf->reset) {
-                               rf->reset = true;
-                               rf->gen_ops.request_reset(rf);
+               if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+                       if (cqp_request->compl_info.min_err_code == 0x8002)
+                               err_code = -EBUSY;
+                       else if (cqp_request->compl_info.min_err_code == 0x8029) {
+                               if (!rf->reset) {
+                                       rf->reset = true;
+                                       rf->gen_ops.request_reset(rf);
+                               }
                        }
                }
        }
@@ -2598,7 +2601,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
                spin_unlock_irqrestore(&iwqp->lock, flags2);
                spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
                if (compl_generated)
-                       irdma_comp_handler(iwqp->iwrcq);
+                       irdma_comp_handler(iwqp->iwscq);
        } else {
                spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
                mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
index 9b07b8af2997fbf7e9985d45ef0f9a86aaf391fb..9b207f5084eb7bf6b247d4f93f2da7dc43e937b7 100644 (file)
@@ -39,15 +39,18 @@ static int irdma_query_device(struct ib_device *ibdev,
        props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
        props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
        props->max_cq = rf->max_cq - rf->used_cqs;
-       props->max_cqe = rf->max_cqe;
+       props->max_cqe = rf->max_cqe - 1;
        props->max_mr = rf->max_mr - rf->used_mrs;
        props->max_mw = props->max_mr;
        props->max_pd = rf->max_pd - rf->used_pds;
        props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
        props->max_qp_rd_atom = hw_attrs->max_hw_ird;
        props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
-       if (rdma_protocol_roce(ibdev, 1))
+       if (rdma_protocol_roce(ibdev, 1)) {
+               props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
                props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+       }
+
        props->max_ah = rf->max_ah;
        props->max_mcast_grp = rf->max_mcg;
        props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
@@ -3009,6 +3012,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
        struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
        struct irdma_cqp_request *cqp_request;
        struct cqp_cmds_info *cqp_info;
+       int status;
 
        if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
                if (iwmr->region) {
@@ -3039,8 +3043,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
        cqp_info->post_sq = 1;
        cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
        cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
-       irdma_handle_cqp_op(iwdev->rf, cqp_request);
+       status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
        irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+       if (status)
+               return status;
+
        irdma_free_stag(iwdev, iwmr->stag);
 done:
        if (iwpbl->pbl_allocated)
index 293ed709e5ed5dd2efc8c72fa25137cb21b60ad4..b4dc52392275b1a0ea99df4faed12907cdc11276 100644 (file)
@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
                mdev = dev->mdev;
                mdev_port_num = 1;
        }
+       if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+               /* set local port to one for Function-Per-Port HCA. */
+               mdev = dev->mdev;
+               mdev_port_num = 1;
+       }
+
        /* Declaring support of extended counters */
        if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
                struct ib_class_port_info cpi = {};
index fc94a1b25485d7ba65cada9af61496e1fe89d432..883d7c60143e8a8cd8289976e4aec2f69212dba6 100644 (file)
@@ -4336,7 +4336,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
        dev->mdev = mdev;
        dev->num_ports = num_ports;
 
-       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
+       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
                profile = &raw_eth_profile;
        else
                profile = &pf_profile;
index 2e2ad3918385832c62b38775b2dcc3c3b9005a80..e66bf72f1f04d27b9c1c0db3f5774b672a1d6046 100644 (file)
@@ -708,6 +708,7 @@ struct mlx5_ib_umr_context {
 };
 
 enum {
+       MLX5_UMR_STATE_UNINIT,
        MLX5_UMR_STATE_ACTIVE,
        MLX5_UMR_STATE_RECOVER,
        MLX5_UMR_STATE_ERR,
index e00b94d1b1ea1e9c039d06548185a6ae4aa03ce0..d5105b5c9979b582083fed732fff25b0f5cc2462 100644 (file)
@@ -177,6 +177,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
 
        sema_init(&dev->umrc.sem, MAX_UMR_WR);
        mutex_init(&dev->umrc.lock);
+       dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
 
        return 0;
 
@@ -191,6 +192,8 @@ destroy_pd:
 
 void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
 {
+       if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
+               return;
        ib_destroy_qp(dev->umrc.qp);
        ib_free_cq(dev->umrc.cq);
        ib_dealloc_pd(dev->umrc.pd);
index 1f4e60257700ef4dffff91a81f3305569c45ebb1..7d47b521070b1b73d69a8822d45a6e879489e669 100644 (file)
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
        dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
 
        if (paddr)
-               return virt_to_page(paddr);
+               return virt_to_page((void *)paddr);
 
        return NULL;
 }
@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
                                        kunmap_local(kaddr);
                                }
                        } else {
-                               u64 va = sge->laddr + sge_off;
+                               /*
+                                * Cast to an uintptr_t to preserve all 64 bits
+                                * in sge->laddr.
+                                */
+                               uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
 
-                               page_array[seg] = virt_to_page(va & PAGE_MASK);
+                               /*
+                                * virt_to_page() takes a (void *) pointer,
+                                * so cast to (void *); the pointer is 64 bits
+                                * wide on a 64-bit platform and 32 bits wide
+                                * on a 32-bit platform.
+                                */
+                               page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
                                if (do_crc)
                                        crypto_shash_update(
                                                c_tx->mpa_crc_hd,
-                                               (void *)(uintptr_t)va,
+                                               (void *)va,
                                                plen);
                        }
 
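Both hunks above funnel a 64-bit address through uintptr_t and then (void *) before calling virt_to_page(), so the value reaches the page lookup at the platform's native pointer width instead of being truncated through an int-sized intermediate. A minimal user-space sketch of that cast chain (illustrative only; lookup_page() is a hypothetical stand-in for virt_to_page()):

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~((uintptr_t)4096 - 1))

/* Hypothetical stand-in for virt_to_page(): it wants a pointer, not a u64. */
static void *lookup_page(void *addr)
{
	return addr;		/* identity, just to keep the sketch self-contained */
}

int main(void)
{
	uint64_t laddr = 0x0000001234567abcULL;	/* e.g. sge->laddr + sge_off */
	uintptr_t va = (uintptr_t)laddr;	/* native pointer width */
	void *page = lookup_page((void *)(va & PAGE_MASK));

	printf("page base: %p\n", page);
	return 0;
}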
index baecde41d126e5573257df5ae9b73a8473bdf8a4..449904dac0a91945089208978232f5631ebd64ec 100644 (file)
@@ -1004,7 +1004,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
                                   struct rtrs_clt_io_req *req,
                                   struct rtrs_rbuf *rbuf, bool fr_en,
-                                  u32 size, u32 imm, struct ib_send_wr *wr,
+                                  u32 count, u32 size, u32 imm,
+                                  struct ib_send_wr *wr,
                                   struct ib_send_wr *tail)
 {
        struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
@@ -1024,12 +1025,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
                num_sge = 2;
                ptail = tail;
        } else {
-               for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+               for_each_sg(req->sglist, sg, count, i) {
                        sge[i].addr   = sg_dma_address(sg);
                        sge[i].length = sg_dma_len(sg);
                        sge[i].lkey   = clt_path->s.dev->ib_pd->local_dma_lkey;
                }
-               num_sge = 1 + req->sg_cnt;
+               num_sge = 1 + count;
        }
        sge[i].addr   = req->iu->dma_addr;
        sge[i].length = size;
@@ -1142,7 +1143,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
         */
        rtrs_clt_update_all_stats(req, WRITE);
 
-       ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+       ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
                                      req->usr_len + sizeof(*msg),
                                      imm, wr, &inv_wr);
        if (ret) {
index 34c03bde50641b5ffcdfb51ec3748925947cd2da..4894e7329d88725c40166b4601d4c8fdf3c7bdaf 100644 (file)
@@ -595,7 +595,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                struct sg_table *sgt = &srv_mr->sgt;
                struct scatterlist *s;
                struct ib_mr *mr;
-               int nr, chunks;
+               int nr, nr_sgt, chunks;
 
                chunks = chunks_per_mr * mri;
                if (!always_invalidate)
@@ -610,19 +610,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                        sg_set_page(s, srv->chunks[chunks + i],
                                    max_chunk_size, 0);
 
-               nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+               nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
                                   sgt->nents, DMA_BIDIRECTIONAL);
-               if (nr < sgt->nents) {
-                       err = nr < 0 ? nr : -EINVAL;
+               if (!nr_sgt) {
+                       err = -EINVAL;
                        goto free_sg;
                }
                mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
-                                sgt->nents);
+                                nr_sgt);
                if (IS_ERR(mr)) {
                        err = PTR_ERR(mr);
                        goto unmap_sg;
                }
-               nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+               nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
                                  NULL, max_chunk_size);
                if (nr < 0 || nr < sgt->nents) {
                        err = nr < 0 ? nr : -EINVAL;
@@ -641,7 +641,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                        }
                }
                /* Eventually dma addr for each chunk can be cached */
-               for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+               for_each_sg(sgt->sgl, s, nr_sgt, i)
                        srv_path->dma_addr[chunks + i] = sg_dma_address(s);
 
                ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
index 7720ea270ed8c8f4f2e33c72ed27e2dfacdc6216..d7f69e593a63f4beb77c08444bfc12db623eeaf7 100644 (file)
@@ -1961,7 +1961,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
                if (scmnd) {
                        req = scsi_cmd_priv(scmnd);
                        scmnd = srp_claim_req(ch, req, NULL, scmnd);
-               } else {
+               }
+               if (!scmnd) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
                                     rsp->tag, ch - target->ch, ch->qp->qp_num);
index 65b8e4fd82177872a81b706b538014f1be75a8b7..828672a46a3d4fbcd8f23b680cb59d5f90ff284d 100644 (file)
@@ -939,7 +939,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
        cmd->data[1] = upper_32_bits(paddr);
-       cmd->data[2] = data;
+       cmd->data[2] = lower_32_bits(data);
+       cmd->data[3] = upper_32_bits(data);
        CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
 
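The completion-wait change above stops truncating the 64-bit wait cookie: the low half goes into command word 2 and the high half into word 3. A small self-contained illustration of the split and reassembly (lower_32_bits()/upper_32_bits() are reimplemented here for the sketch; the cmd[] layout is only schematic):

#include <assert.h>
#include <stdint.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t data = 0x1122334455667788ULL;	/* 64-bit completion cookie */
	uint32_t cmd[4] = { 0 };

	cmd[2] = lower_32_bits(data);	/* previously the only word written */
	cmd[3] = upper_32_bits(data);	/* high half is now preserved as well */

	assert((((uint64_t)cmd[3] << 32) | cmd[2]) == data);
	return 0;
}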
index 696d5555be5794bb0ef4329e3608753fee70385f..6a1f02c62dffccd32cdd435f96ba322a7da42a44 100644 (file)
@@ -777,6 +777,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
        if (dev_state->domain == NULL)
                goto out_free_states;
 
+       /* See iommu_is_default_domain() */
+       dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
        amd_iommu_domain_direct_map(dev_state->domain);
 
        ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
index 7cca030a508e1e22603a369335d6033b385f1b90..31bc50e538a341320564cfe9b5cdf0033c5e70b5 100644 (file)
@@ -163,38 +163,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
        return re->hi & VTD_PAGE_MASK;
 }
 
-static inline void context_clear_pasid_enable(struct context_entry *context)
-{
-       context->lo &= ~(1ULL << 11);
-}
-
-static inline bool context_pasid_enabled(struct context_entry *context)
-{
-       return !!(context->lo & (1ULL << 11));
-}
-
-static inline void context_set_copied(struct context_entry *context)
-{
-       context->hi |= (1ull << 3);
-}
-
-static inline bool context_copied(struct context_entry *context)
-{
-       return !!(context->hi & (1ULL << 3));
-}
-
-static inline bool __context_present(struct context_entry *context)
-{
-       return (context->lo & 1);
-}
-
-bool context_present(struct context_entry *context)
-{
-       return context_pasid_enabled(context) ?
-            __context_present(context) :
-            __context_present(context) && !context_copied(context);
-}
-
 static inline void context_set_present(struct context_entry *context)
 {
        context->lo |= 1;
@@ -242,6 +210,26 @@ static inline void context_clear_entry(struct context_entry *context)
        context->hi = 0;
 }
 
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       if (!iommu->copied_tables)
+               return false;
+
+       return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
 /*
  * This domain is a static identity mapping domain.
  *     1. This domain creates a static 1:1 mapping to all usable memory.
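The context_copied()/set_context_copied()/clear_context_copied() helpers added above keep the "copied from the old kernel" state in a per-IOMMU bitmap keyed by the 16-bit source-id (bus << 8 | devfn), which is why the same patch later allocates the bitmap with bitmap_zalloc(BIT_ULL(16), GFP_KERNEL). A plain-C sketch of the indexing, with helper names that are illustrative rather than kernel API:

#include <stdbool.h>
#include <stdint.h>

#define SOURCE_IDS	(1u << 16)	/* 256 buses x 256 devfns */
#define BITS_PER_WORD	(8 * sizeof(unsigned long))

static unsigned long copied[SOURCE_IDS / BITS_PER_WORD];

static unsigned int source_id(uint8_t bus, uint8_t devfn)
{
	return ((unsigned int)bus << 8) | devfn;	/* same key as the helpers above */
}

static void set_copied(uint8_t bus, uint8_t devfn)
{
	unsigned int i = source_id(bus, devfn);

	copied[i / BITS_PER_WORD] |= 1UL << (i % BITS_PER_WORD);
}

static bool is_copied(uint8_t bus, uint8_t devfn)
{
	unsigned int i = source_id(bus, devfn);

	return copied[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD));
}

int main(void)
{
	set_copied(0x3a, 0x10);
	return is_copied(0x3a, 0x10) ? 0 : 1;
}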
@@ -402,14 +390,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
        return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
 }
 
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+       unsigned long fl_sagaw, sl_sagaw;
+
+       fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+       sl_sagaw = cap_sagaw(iommu->cap);
+
+       /* Second level only. */
+       if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+               return sl_sagaw;
+
+       /* First level only. */
+       if (!ecap_slts(iommu->ecap))
+               return fl_sagaw;
+
+       return fl_sagaw & sl_sagaw;
+}
+
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
        unsigned long sagaw;
        int agaw;
 
-       sagaw = cap_sagaw(iommu->cap);
-       for (agaw = width_to_agaw(max_gaw);
-            agaw >= 0; agaw--) {
+       sagaw = __iommu_calculate_sagaw(iommu);
+       for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }
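__iommu_calculate_sagaw() intersects what first-level and second-level translation can each cover before the AGAW loop below it picks the widest supported width. As the patch encodes it, bit 2 stands for 4-level (48-bit) and bit 3 for 5-level (57-bit) paging. A worked toy example under made-up capability values:

#include <stdio.h>

int main(void)
{
	/* made-up capabilities: first level does 4- and 5-level, second level only 4-level */
	unsigned long fl_sagaw = (1UL << 2) | (1UL << 3);
	unsigned long sl_sagaw = (1UL << 2);
	unsigned long sagaw = fl_sagaw & sl_sagaw;	/* both enabled: intersection */

	printf("usable SAGAW mask: %#lx\n", sagaw);	/* 0x4 -> only the 4-level AGAW is usable */
	return 0;
}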
@@ -505,8 +515,9 @@ static int domain_update_device_node(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
        int nid = NUMA_NO_NODE;
+       unsigned long flags;
 
-       spin_lock(&domain->lock);
+       spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                /*
                 * There could possibly be multiple device numa nodes as devices
@@ -518,7 +529,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
                if (nid != NUMA_NO_NODE)
                        break;
        }
-       spin_unlock(&domain->lock);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        return nid;
 }
@@ -578,6 +589,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
        struct context_entry *context;
        u64 *entry;
 
+       /*
+        * Unless the caller requested to allocate a new entry,
+        * returning a copied context entry makes no sense.
+        */
+       if (!alloc && context_copied(iommu, bus, devfn))
+               return NULL;
+
        entry = &root->lo;
        if (sm_supported(iommu)) {
                if (devfn >= 0x80) {
@@ -795,32 +813,11 @@ static void free_context_table(struct intel_iommu *iommu)
 }
 
 #ifdef CONFIG_DMAR_DEBUG
-static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u8 devfn)
+static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
+                        u8 bus, u8 devfn, struct dma_pte *parent, int level)
 {
-       struct device_domain_info *info;
-       struct dma_pte *parent, *pte;
-       struct dmar_domain *domain;
-       struct pci_dev *pdev;
-       int offset, level;
-
-       pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
-       if (!pdev)
-               return;
-
-       info = dev_iommu_priv_get(&pdev->dev);
-       if (!info || !info->domain) {
-               pr_info("device [%02x:%02x.%d] not probed\n",
-                       bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-               return;
-       }
-
-       domain = info->domain;
-       level = agaw_to_level(domain->agaw);
-       parent = domain->pgd;
-       if (!parent) {
-               pr_info("no page table setup\n");
-               return;
-       }
+       struct dma_pte *pte;
+       int offset;
 
        while (1) {
                offset = pfn_level_offset(pfn, level);
@@ -847,9 +844,10 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
        struct pasid_entry *entries, *pte;
        struct context_entry *ctx_entry;
        struct root_entry *rt_entry;
+       int i, dir_index, index, level;
        u8 devfn = source_id & 0xff;
        u8 bus = source_id >> 8;
-       int i, dir_index, index;
+       struct dma_pte *pgtable;
 
        pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
 
@@ -877,8 +875,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
                ctx_entry->hi, ctx_entry->lo);
 
        /* legacy mode does not require PASID entries */
-       if (!sm_supported(iommu))
+       if (!sm_supported(iommu)) {
+               level = agaw_to_level(ctx_entry->hi & 7);
+               pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
                goto pgtable_walk;
+       }
 
        /* get the pointer to pasid directory entry */
        dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
@@ -905,8 +906,16 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
        for (i = 0; i < ARRAY_SIZE(pte->val); i++)
                pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
 
+       if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
+               level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
+               pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
+       } else {
+               level = agaw_to_level((pte->val[0] >> 2) & 0x7);
+               pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
+       }
+
 pgtable_walk:
-       pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn);
+       pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
 }
 #endif
 
@@ -1345,19 +1354,20 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
                        u8 bus, u8 devfn)
 {
        struct device_domain_info *info;
+       unsigned long flags;
 
        if (!iommu->qi)
                return NULL;
 
-       spin_lock(&domain->lock);
+       spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
-                       spin_unlock(&domain->lock);
+                       spin_unlock_irqrestore(&domain->lock, flags);
                        return info->ats_supported ? info : NULL;
                }
        }
-       spin_unlock(&domain->lock);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        return NULL;
 }
@@ -1366,8 +1376,9 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
        bool has_iotlb_device = false;
+       unsigned long flags;
 
-       spin_lock(&domain->lock);
+       spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (info->ats_enabled) {
                        has_iotlb_device = true;
@@ -1375,7 +1386,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
                }
        }
        domain->has_iotlb_device = has_iotlb_device;
-       spin_unlock(&domain->lock);
+       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1467,14 +1478,15 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
 {
        struct device_domain_info *info;
+       unsigned long flags;
 
        if (!domain->has_iotlb_device)
                return;
 
-       spin_lock(&domain->lock);
+       spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                __iommu_flush_dev_iotlb(info, addr, mask);
-       spin_unlock(&domain->lock);
+       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1688,6 +1700,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
                iommu->domain_ids = NULL;
        }
 
+       if (iommu->copied_tables) {
+               bitmap_free(iommu->copied_tables);
+               iommu->copied_tables = NULL;
+       }
+
        /* free context mapping */
        free_context_table(iommu);
 
@@ -1913,7 +1930,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                goto out_unlock;
 
        ret = 0;
-       if (context_present(context))
+       if (context_present(context) && !context_copied(iommu, bus, devfn))
                goto out_unlock;
 
        /*
@@ -1925,7 +1942,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
         * in-flight DMA will exist, and we don't need to worry anymore
         * hereafter.
         */
-       if (context_copied(context)) {
+       if (context_copied(iommu, bus, devfn)) {
                u16 did_old = context_domain_id(context);
 
                if (did_old < cap_ndoms(iommu->cap)) {
@@ -1936,6 +1953,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                        iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
                                                 DMA_TLB_DSI_FLUSH);
                }
+
+               clear_context_copied(iommu, bus, devfn);
        }
 
        context_clear_entry(context);
@@ -2429,6 +2448,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct intel_iommu *iommu;
+       unsigned long flags;
        u8 bus, devfn;
        int ret;
 
@@ -2440,9 +2460,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
        if (ret)
                return ret;
        info->domain = domain;
-       spin_lock(&domain->lock);
+       spin_lock_irqsave(&domain->lock, flags);
        list_add(&info->link, &domain->devices);
-       spin_unlock(&domain->lock);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        /* PASID table is mandatory for a PCI device in scalable mode. */
        if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2684,32 +2704,14 @@ static int copy_context_table(struct intel_iommu *iommu,
                /* Now copy the context entry */
                memcpy(&ce, old_ce + idx, sizeof(ce));
 
-               if (!__context_present(&ce))
+               if (!context_present(&ce))
                        continue;
 
                did = context_domain_id(&ce);
                if (did >= 0 && did < cap_ndoms(iommu->cap))
                        set_bit(did, iommu->domain_ids);
 
-               /*
-                * We need a marker for copied context entries. This
-                * marker needs to work for the old format as well as
-                * for extended context entries.
-                *
-                * Bit 67 of the context entry is used. In the old
-                * format this bit is available to software, in the
-                * extended format it is the PGE bit, but PGE is ignored
-                * by HW if PASIDs are disabled (and thus still
-                * available).
-                *
-                * So disable PASIDs first and then mark the entry
-                * copied. This means that we don't copy PASID
-                * translations from the old kernel, but this is fine as
-                * faults there are not fatal.
-                */
-               context_clear_pasid_enable(&ce);
-               context_set_copied(&ce);
-
+               set_context_copied(iommu, bus, devfn);
                new_ce[idx] = ce;
        }
 
@@ -2735,8 +2737,8 @@ static int copy_translation_tables(struct intel_iommu *iommu)
        bool new_ext, ext;
 
        rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
-       ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
-       new_ext    = !!ecap_ecs(iommu->ecap);
+       ext        = !!(rtaddr_reg & DMA_RTADDR_SMT);
+       new_ext    = !!sm_supported(iommu);
 
        /*
         * The RTT bit can only be changed when translation is disabled,
@@ -2747,6 +2749,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
        if (new_ext != ext)
                return -EINVAL;
 
+       iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
+       if (!iommu->copied_tables)
+               return -ENOMEM;
+
        old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
        if (!old_rt_phys)
                return -EINVAL;
@@ -4080,6 +4086,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct dmar_domain *domain = info->domain;
        struct intel_iommu *iommu = info->iommu;
+       unsigned long flags;
 
        if (!dev_is_real_dma_subdevice(info->dev)) {
                if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4091,9 +4098,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
                intel_pasid_free_table(info->dev);
        }
 
-       spin_lock(&domain->lock);
+       spin_lock_irqsave(&domain->lock, flags);
        list_del(&info->link);
-       spin_unlock(&domain->lock);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        domain_detach_iommu(domain, iommu);
        info->domain = NULL;
@@ -4412,19 +4419,20 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
 static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       unsigned long flags;
 
        if (dmar_domain->force_snooping)
                return true;
 
-       spin_lock(&dmar_domain->lock);
+       spin_lock_irqsave(&dmar_domain->lock, flags);
        if (!domain_support_force_snooping(dmar_domain)) {
-               spin_unlock(&dmar_domain->lock);
+               spin_unlock_irqrestore(&dmar_domain->lock, flags);
                return false;
        }
 
        domain_set_force_snooping(dmar_domain);
        dmar_domain->force_snooping = true;
-       spin_unlock(&dmar_domain->lock);
+       spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
        return true;
 }
index fae45bbb0c7f7a354f6e8a0a71a16e7dda9d063b..74b0e19e23eeb68e025c453225e018d7554db516 100644 (file)
 #define ecap_dis(e)            (((e) >> 27) & 0x1)
 #define ecap_nest(e)           (((e) >> 26) & 0x1)
 #define ecap_mts(e)            (((e) >> 25) & 0x1)
-#define ecap_ecs(e)            (((e) >> 24) & 0x1)
 #define ecap_iotlb_offset(e)   ((((e) >> 8) & 0x3ff) * 16)
 #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
 #define ecap_coherent(e)       ((e) & 0x1)
 #define DMA_GSTS_CFIS (((u32)1) << 23)
 
 /* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
 #define DMA_RTADDR_SMT (((u64)1) << 10)
 
 /* CCMD_REG */
@@ -579,6 +577,7 @@ struct intel_iommu {
 
 #ifdef CONFIG_INTEL_IOMMU
        unsigned long   *domain_ids; /* bitmap of domains */
+       unsigned long   *copied_tables; /* bitmap of copied tables */
        spinlock_t      lock; /* protect context, domain ids */
        struct root_entry *root_entry; /* virtual address */
 
@@ -701,6 +700,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
                (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
 }
 
+static inline bool context_present(struct context_entry *context)
+{
+       return (context->lo & 1);
+}
+
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 
 extern int dmar_enable_qi(struct intel_iommu *iommu);
@@ -784,7 +788,6 @@ static inline void intel_iommu_debugfs_init(void) {}
 #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
 
 extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
                                         u8 devfn, int alloc);
 
index 780fb70715770d710f9a806d443dd886222ce986..3a808146b50f4baf9b7c1e1b66d85139b032f58d 100644 (file)
@@ -3076,6 +3076,24 @@ out:
        return ret;
 }
 
+static bool iommu_is_default_domain(struct iommu_group *group)
+{
+       if (group->domain == group->default_domain)
+               return true;
+
+       /*
+        * If the default domain was set to identity and it is still an identity
+        * domain then we consider this a pass. This happens because
+        * amd_iommu_init_device() replaces the default identity domain with an
+        * identity domain that has a different configuration for AMDGPU.
+        */
+       if (group->default_domain &&
+           group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
+           group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
+               return true;
+       return false;
+}
+
 /**
  * iommu_device_use_default_domain() - Device driver wants to handle device
  *                                     DMA through the kernel DMA API.
@@ -3094,8 +3112,7 @@ int iommu_device_use_default_domain(struct device *dev)
 
        mutex_lock(&group->mutex);
        if (group->owner_cnt) {
-               if (group->domain != group->default_domain ||
-                   group->owner) {
+               if (group->owner || !iommu_is_default_domain(group)) {
                        ret = -EBUSY;
                        goto unlock_out;
                }
index 08eeafc9529fa0580da66a4e93fd8fb2b81fc52e..80151176ba12832a51fbfb583cde5222c7c66da6 100644 (file)
@@ -1006,7 +1006,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
        return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static bool viommu_capable(enum iommu_cap cap)
+{
+       switch (cap) {
+       case IOMMU_CAP_CACHE_COHERENCY:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static struct iommu_ops viommu_ops = {
+       .capable                = viommu_capable,
        .domain_alloc           = viommu_domain_alloc,
        .probe_device           = viommu_probe_device,
        .probe_finalize         = viommu_probe_finalize,
index c085b031abfc157cba1db0974a9cd92564d3f121..89b2d9cea33f0ec74b33ed759b2e57f9fa060443 100644 (file)
@@ -494,6 +494,24 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
        return err;
 }
 
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
+{
+       struct devlink *devlink = priv_to_devlink(dev);
+       union devlink_param_value val;
+       int err;
+
+       err = devlink_param_driverinit_value_get(devlink,
+                                                DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+                                                &val);
+
+       if (!err)
+               return val.vbool;
+
+       mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+       return MLX5_CAP_GEN(dev, roce);
+}
+EXPORT_SYMBOL(mlx5_is_roce_on);
+
 static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
 {
        void *set_hca_cap;
@@ -597,7 +615,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
                         MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
 
        if (MLX5_CAP_GEN(dev, roce_rw_supported))
-               MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+               MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
+                        mlx5_is_roce_on(dev));
 
        max_uc_list = max_uc_list_get_devlink_param(dev);
        if (max_uc_list > 0)
@@ -623,7 +642,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
  */
 static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
 {
-       return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+       return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
                (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
 }
 
index e10b9f8f89e1e5ff0ce8b40d2ac50370e48a3026..a6f99b4344d93fa813c6300aa904d3fb4c5e44da 100644 (file)
@@ -1475,10 +1475,6 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-#ifndef PCI_VENDOR_ID_MICROSOFT
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#endif
-
 static const struct pci_device_id mana_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
index af367b22871b1f71ae803165d3978d1f87f1f800..66446f1e06cf23ea95f946e98f1347269e4eb40f 100644 (file)
@@ -4703,6 +4703,8 @@ static void nvme_fw_act_work(struct work_struct *work)
        nvme_start_queues(ctrl);
        /* read FW slot information to clear the AER */
        nvme_get_fw_slot_info(ctrl);
+
+       queue_work(nvme_wq, &ctrl->async_event_work);
 }
 
 static u32 nvme_aer_type(u32 result)
@@ -4715,9 +4717,10 @@ static u32 nvme_aer_subtype(u32 result)
        return (result & 0xff00) >> 8;
 }
 
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 {
        u32 aer_notice_type = nvme_aer_subtype(result);
+       bool requeue = true;
 
        trace_nvme_async_event(ctrl, aer_notice_type);
 
@@ -4734,6 +4737,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
                 */
                if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
                        nvme_auth_stop(ctrl);
+                       requeue = false;
                        queue_work(nvme_wq, &ctrl->fw_act_work);
                }
                break;
@@ -4750,6 +4754,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
        default:
                dev_warn(ctrl->device, "async event result %08x\n", result);
        }
+       return requeue;
 }
 
 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
@@ -4765,13 +4770,14 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
        u32 result = le32_to_cpu(res->u32);
        u32 aer_type = nvme_aer_type(result);
        u32 aer_subtype = nvme_aer_subtype(result);
+       bool requeue = true;
 
        if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
                return;
 
        switch (aer_type) {
        case NVME_AER_NOTICE:
-               nvme_handle_aen_notice(ctrl, result);
+               requeue = nvme_handle_aen_notice(ctrl, result);
                break;
        case NVME_AER_ERROR:
                /*
@@ -4792,7 +4798,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
        default:
                break;
        }
-       queue_work(nvme_wq, &ctrl->async_event_work);
+
+       if (requeue)
+               queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
index 044da18c06f51249bb917ccb32aae23b2c87d1c8..d5871fd6f769b2e41750c13fa28be1b1a877a067 100644 (file)
@@ -121,7 +121,6 @@ struct nvme_tcp_queue {
        struct mutex            send_mutex;
        struct llist_head       req_list;
        struct list_head        send_list;
-       bool                    more_requests;
 
        /* recv state */
        void                    *pdu;
@@ -320,7 +319,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
 {
        return !list_empty(&queue->send_list) ||
-               !llist_empty(&queue->req_list) || queue->more_requests;
+               !llist_empty(&queue->req_list);
 }
 
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -339,9 +338,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
         */
        if (queue->io_cpu == raw_smp_processor_id() &&
            sync && empty && mutex_trylock(&queue->send_mutex)) {
-               queue->more_requests = !last;
                nvme_tcp_send_all(queue);
-               queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
        }
 
@@ -1229,7 +1226,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
                else if (unlikely(result < 0))
                        return;
 
-               if (!pending)
+               if (!pending || !queue->rd_enabled)
                        return;
 
        } while (!time_after(jiffies, deadline)); /* quota is exhausted */
index a1345790005f428ce84a718469178157975736cc..7f4083cf953a658499cd09c1047ac7dd97cf971c 100644 (file)
@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
 
 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
+       struct nvmet_ns *ns = req->ns;
+
        if (!req->sq->sqhd_disabled)
                nvmet_update_sq_head(req);
        req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 
        trace_nvmet_req_complete(req);
 
-       if (req->ns)
-               nvmet_put_namespace(req->ns);
        req->ops->queue_response(req);
+       if (ns)
+               nvmet_put_namespace(ns);
 }
 
 void nvmet_req_complete(struct nvmet_req *req, u16 status)
index c7ef69f29fe4e70cd4102f5b503112c2c801ade1..835bfda86fcf2771376847bbaf9e192fbdab4ded 100644 (file)
@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
        struct nvme_id_ns_zns *id_zns;
        u64 zsze;
        u16 status;
+       u32 mar, mor;
 
        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
        zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
                                        req->ns->blksize_shift;
        id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
-       id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
-       id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+       mor = bdev_max_open_zones(req->ns->bdev);
+       if (!mor)
+               mor = U32_MAX;
+       else
+               mor--;
+       id_zns->mor = cpu_to_le32(mor);
+
+       mar = bdev_max_active_zones(req->ns->bdev);
+       if (!mar)
+               mar = U32_MAX;
+       else
+               mar--;
+       id_zns->mar = cpu_to_le32(mar);
 
 done:
        status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
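In the Zoned Namespace identify data MOR and MAR are 0's-based and all-ones means "no limit", while the block layer reports 0 for "unlimited" and a plain count otherwise; the hunk above converts between the two conventions. A short sketch of that conversion (to_zns_limit() is an illustrative helper, not a kernel function):

#include <stdint.h>
#include <stdio.h>

/* block-layer zone limit (0 == unlimited) -> NVMe ZNS 0's-based field */
static uint32_t to_zns_limit(uint32_t bdev_limit)
{
	return bdev_limit ? bdev_limit - 1 : UINT32_MAX;
}

int main(void)
{
	printf("max_open 0  -> MOR 0x%x\n", to_zns_limit(0));	/* unlimited */
	printf("max_open 16 -> MOR %u\n", to_zns_limit(16));	/* 15, 0's-based */
	return 0;
}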
index 7bc92923104c1e21017c81d03bfc796f2f8d4d83..1c573e7a60bc854b8fa0503d4904d6415df9f929 100644 (file)
@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob,
        for (offset = 0;
             offset >= 0 && depth >= initial_depth;
             offset = fdt_next_node(blob, offset, &depth)) {
-               if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+               if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
                        continue;
 
                if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
index f223afe47d1049849e88e678e994b6549bf3d03e..a66386043aa6643d87216005c426da904a0408c7 100644 (file)
@@ -1546,6 +1546,7 @@ static int __init ccio_probe(struct parisc_device *dev)
        }
        ccio_ioc_init(ioc);
        if (ccio_init_resources(ioc)) {
+               iounmap(ioc->ioc_regs);
                kfree(ioc);
                return -ENOMEM;
        }
index 3a8c986156348465b5890317906ecb8dad4bcd5b..bdef7a8d6ab8e89ed60573c88581fcd14ff75d9c 100644 (file)
@@ -221,16 +221,7 @@ static size_t irt_num_entry;
 
 static struct irt_entry *iosapic_alloc_irt(int num_entries)
 {
-       unsigned long a;
-
-       /* The IRT needs to be 8-byte aligned for the PDC call. 
-        * Normally kmalloc would guarantee larger alignment, but
-        * if CONFIG_DEBUG_SLAB is enabled, then we can get only
-        * 4-byte alignment on 32-bit kernels
-        */
-       a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
-       a = (a + 7UL) & ~7UL;
-       return (struct irt_entry *)a;
+       return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL);
 }
 
 /**
index 6f6681bbfd36de515b52bfc58ed6e04a1d34518f..8de4ca2fef210119b04da6bd28a13460cab681ca 100644 (file)
@@ -473,7 +473,7 @@ static int pmu_sbi_get_ctrinfo(int nctr)
        if (!pmu_ctr_list)
                return -ENOMEM;
 
-       for (i = 0; i <= nctr; i++) {
+       for (i = 0; i < nctr; i++) {
                ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
                if (ret.error)
                        /* The logical counter ids are not expected to be contiguous */
index c5fd154990c8b4b61bd52ca5b733ad9a930738a8..c7df8c5fe5854462d1585782c3d0f72b7a02f5dd 100644 (file)
@@ -331,6 +331,7 @@ struct ocelot_pinctrl {
        const struct ocelot_pincfg_data *pincfg_data;
        struct ocelot_pmx_func func[FUNC_MAX];
        u8 stride;
+       struct workqueue_struct *wq;
 };
 
 struct ocelot_match_data {
@@ -338,6 +339,11 @@ struct ocelot_match_data {
        struct ocelot_pincfg_data pincfg_data;
 };
 
+struct ocelot_irq_work {
+       struct work_struct irq_work;
+       struct irq_desc *irq_desc;
+};
+
 #define LUTON_P(p, f0, f1)                                             \
 static struct ocelot_pin_caps luton_pin_##p = {                                \
        .pin = p,                                                       \
@@ -1813,6 +1819,75 @@ static void ocelot_irq_mask(struct irq_data *data)
        gpiochip_disable_irq(chip, gpio);
 }
 
+static void ocelot_irq_work(struct work_struct *work)
+{
+       struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work);
+       struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc);
+       struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc);
+       struct irq_data *data = irq_desc_get_irq_data(w->irq_desc);
+       unsigned int gpio = irqd_to_hwirq(data);
+
+       local_irq_disable();
+       chained_irq_enter(parent_chip, w->irq_desc);
+       generic_handle_domain_irq(chip->irq.domain, gpio);
+       chained_irq_exit(parent_chip, w->irq_desc);
+       local_irq_enable();
+
+       kfree(w);
+}
+
+static void ocelot_irq_unmask_level(struct irq_data *data)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+       struct irq_desc *desc = irq_data_to_desc(data);
+       unsigned int gpio = irqd_to_hwirq(data);
+       unsigned int bit = BIT(gpio % 32);
+       bool ack = false, active = false;
+       u8 trigger_level;
+       int val;
+
+       trigger_level = irqd_get_trigger_type(data);
+
+       /* Check if the interrupt line is still active. */
+       regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
+       if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) ||
+             (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH))
+               active = true;
+
+       /*
+        * Check if the interrupt controller has seen any changes in the
+        * interrupt line.
+        */
+       regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val);
+       if (val & bit)
+               ack = true;
+
+       /* Enable the interrupt now */
+       gpiochip_enable_irq(chip, gpio);
+       regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
+                          bit, bit);
+
+       /*
+        * If the interrupt line is still active but the interrupt controller
+        * has not seen any change on it, another interrupt fired while the
+        * line was already active. That interrupt was missed, so kick the
+        * interrupt handler again.
+        */
+       if (active && !ack) {
+               struct ocelot_irq_work *work;
+
+               work = kmalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work)
+                       return;
+
+               work->irq_desc = desc;
+               INIT_WORK(&work->irq_work, ocelot_irq_work);
+               queue_work(info->wq, &work->irq_work);
+       }
+}
+
 static void ocelot_irq_unmask(struct irq_data *data)
 {
        struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
@@ -1836,13 +1911,12 @@ static void ocelot_irq_ack(struct irq_data *data)
 
 static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
 
-static struct irq_chip ocelot_eoi_irqchip = {
+static struct irq_chip ocelot_level_irqchip = {
        .name           = "gpio",
        .irq_mask       = ocelot_irq_mask,
-       .irq_eoi        = ocelot_irq_ack,
-       .irq_unmask     = ocelot_irq_unmask,
-       .flags          = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
-                         IRQCHIP_IMMUTABLE,
+       .irq_ack        = ocelot_irq_ack,
+       .irq_unmask     = ocelot_irq_unmask_level,
+       .flags          = IRQCHIP_IMMUTABLE,
        .irq_set_type   = ocelot_irq_set_type,
        GPIOCHIP_IRQ_RESOURCE_HELPERS
 };
@@ -1859,14 +1933,9 @@ static struct irq_chip ocelot_irqchip = {
 
 static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
 {
-       type &= IRQ_TYPE_SENSE_MASK;
-
-       if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
-               return -EINVAL;
-
-       if (type & IRQ_TYPE_LEVEL_HIGH)
-               irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
-                                                handle_fasteoi_irq, NULL);
+       if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+               irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip,
+                                                handle_level_irq, NULL);
        if (type & IRQ_TYPE_EDGE_BOTH)
                irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
                                                 handle_edge_irq, NULL);
@@ -1996,6 +2065,10 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        if (!info->desc)
                return -ENOMEM;
 
+       info->wq = alloc_ordered_workqueue("ocelot_ordered", 0);
+       if (!info->wq)
+               return -ENOMEM;
+
        info->pincfg_data = &data->pincfg_data;
 
        reset = devm_reset_control_get_optional_shared(dev, "switch");
@@ -2018,7 +2091,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
                dev_err(dev, "Failed to create regmap\n");
                return PTR_ERR(info->map);
        }
-       dev_set_drvdata(dev, info->map);
+       dev_set_drvdata(dev, info);
        info->dev = dev;
 
        /* Pinconf registers */
@@ -2043,6 +2116,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int ocelot_pinctrl_remove(struct platform_device *pdev)
+{
+       struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+
+       destroy_workqueue(info->wq);
+
+       return 0;
+}
+
 static struct platform_driver ocelot_pinctrl_driver = {
        .driver = {
                .name = "pinctrl-ocelot",
@@ -2050,6 +2132,7 @@ static struct platform_driver ocelot_pinctrl_driver = {
                .suppress_bind_attrs = true,
        },
        .probe = ocelot_pinctrl_probe,
+       .remove = ocelot_pinctrl_remove,
 };
 module_platform_driver(ocelot_pinctrl_driver);
 MODULE_LICENSE("Dual MIT/GPL");
index 6bec7f1431348d51480c756d5af5bc029a727973..704a99d2f93cebe91e21e14ac51655bde2de9a3b 100644 (file)
@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
 DECLARE_MSM_GPIO_PINS(188);
 DECLARE_MSM_GPIO_PINS(189);
 
-static const unsigned int sdc2_clk_pins[] = { 190 };
-static const unsigned int sdc2_cmd_pins[] = { 191 };
-static const unsigned int sdc2_data_pins[] = { 192 };
-static const unsigned int ufs_reset_pins[] = { 193 };
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
 
 enum sc8180x_functions {
        msm_mux_adsp_ext,
@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
 static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
        { 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
        { 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
-       { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+       { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
        { 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
        { 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
        { 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
index afc1f5df75450bb5810f482966932bb4b0702aa1..b82ad135bf2aa6aeaaf36724ec586f179cfdb2ae 100644 (file)
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
 static struct platform_driver a100_r_pinctrl_driver = {
        .probe  = a100_r_pinctrl_probe,
        .driver = {
-               .name           = "sun50iw10p1-r-pinctrl",
+               .name           = "sun50i-a100-r-pinctrl",
                .of_match_table = a100_r_pinctrl_match,
        },
 };
index d8373cb04f9038af50c1798ea3d16b142a8159a1..d3e8dc32832ddf08a00c33ac057724d3a261ed06 100644 (file)
@@ -2733,13 +2733,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
  */
 static int _regulator_handle_consumer_enable(struct regulator *regulator)
 {
+       int ret;
        struct regulator_dev *rdev = regulator->rdev;
 
        lockdep_assert_held_once(&rdev->mutex.base);
 
        regulator->enable_count++;
-       if (regulator->uA_load && regulator->enable_count == 1)
-               return drms_uA_update(rdev);
+       if (regulator->uA_load && regulator->enable_count == 1) {
+               ret = drms_uA_update(rdev);
+               if (ret)
+                       regulator->enable_count--;
+               return ret;
+       }
 
        return 0;
 }
index 6b617024a67d1b048bf4df59806826e26ef8dd47..d899d6e98fb81d938b5daf81aab4ec17cede294e 100644 (file)
@@ -766,7 +766,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                ((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
 
        memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
-               sizeof(pfuze_chip->regulator_descs));
+               regulator_num * sizeof(struct pfuze_regulator));
 
        ret = pfuze_parse_regulators_dt(pfuze_chip);
        if (ret)
index 0738238ed6cc40e7313adadbcaaa8757bc76b650..9857dba09c951a68cfd94965191fb7dd30972d70 100644 (file)
@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
        mutex_unlock(&shost->scan_mutex);
        scsi_proc_host_rm(shost);
 
+       /*
+        * New SCSI devices cannot be attached anymore because of the SCSI host
+        * state, so drop the tag set refcnt. Wait until the tag set refcnt
+        * drops to zero because .exit_cmd_priv implementations may need the
+        * host pointer.
+        */
+       kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+       wait_for_completion(&shost->tagset_freed);
+
        spin_lock_irqsave(shost->host_lock, flags);
        if (scsi_host_set_state(shost, SHOST_DEL))
                BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
@@ -190,15 +199,6 @@ void scsi_remove_host(struct Scsi_Host *shost)
        transport_unregister_device(&shost->shost_gendev);
        device_unregister(&shost->shost_dev);
        device_del(&shost->shost_gendev);
-
-       /*
-        * After scsi_remove_host() has returned the scsi LLD module can be
-        * unloaded and/or the host resources can be released. Hence wait until
-        * the dependent SCSI targets and devices are gone before returning.
-        */
-       wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
-
-       scsi_mq_destroy_tags(shost);
 }
 EXPORT_SYMBOL(scsi_remove_host);
 
@@ -254,6 +254,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
        if (error)
                goto fail;
 
+       kref_init(&shost->tagset_refcnt);
+       init_completion(&shost->tagset_freed);
+
        /*
         * Increase usage count temporarily here so that calling
         * scsi_autopm_put_host() will trigger runtime idle if there is
@@ -309,8 +312,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
        return error;
 
        /*
-        * Any resources associated with the SCSI host in this function except
-        * the tag set will be freed by scsi_host_dev_release().
+        * Any host allocation in this function will be freed in
+        * scsi_host_dev_release().
         */
  out_del_dev:
        device_del(&shost->shost_dev);
@@ -326,7 +329,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
        pm_runtime_disable(&shost->shost_gendev);
        pm_runtime_set_suspended(&shost->shost_gendev);
        pm_runtime_put_noidle(&shost->shost_gendev);
-       scsi_mq_destroy_tags(shost);
+       kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
  fail:
        return error;
 }
@@ -406,7 +409,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        INIT_LIST_HEAD(&shost->starved_list);
        init_waitqueue_head(&shost->host_wait);
        mutex_init(&shost->scan_mutex);
-       init_waitqueue_head(&shost->targets_wq);
 
        index = ida_alloc(&host_index_ida, GFP_KERNEL);
        if (index < 0) {
index c69c5a0979ec4cf9bc9af41b0a6b3c5840dc90fe..55a1ad6eed03457a9479b81d3061f8764e4489e7 100644 (file)
@@ -8053,7 +8053,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* Allocate device driver memory */
        rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
        if (rc)
-               return -ENOMEM;
+               goto out_destroy_workqueue;
 
        /* IF Type 2 ports get initialized now. */
        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -8481,6 +8481,9 @@ out_free_bsmbx:
        lpfc_destroy_bootstrap_mbox(phba);
 out_free_mem:
        lpfc_mem_free(phba);
+out_destroy_workqueue:
+       destroy_workqueue(phba->wq);
+       phba->wq = NULL;
        return rc;
 }
 
index 084c0f9fdc3a6bc2c7f78385b68e6373b8fd02c8..938a5e43594361feef3e2c08d2aac9bd7e1417c6 100644 (file)
@@ -4272,7 +4272,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
                    lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
                    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
-                       cmd->result = DID_REQUEUE << 16;
+                       cmd->result = DID_TRANSPORT_DISRUPTED << 16;
                        break;
                }
                if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4562,7 +4562,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                            lpfc_cmd->result == IOERR_NO_RESOURCES ||
                            lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
                            lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
-                               cmd->result = DID_REQUEUE << 16;
+                               cmd->result = DID_TRANSPORT_DISRUPTED << 16;
                                break;
                        }
                        if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
index def37a7e59807dcc1cb0472a5f3fb95dc5290f82..bd6a5f1bd532b1506fdc7ac58fcc876e1adf5a33 100644 (file)
@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
                fw_event = list_first_entry(&ioc->fw_event_list,
                                struct fw_event_work, list);
                list_del_init(&fw_event->list);
+               fw_event_work_put(fw_event);
        }
        spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 
@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
                if (cancel_work_sync(&fw_event->work))
                        fw_event_work_put(fw_event);
 
-               fw_event_work_put(fw_event);
        }
        ioc->fw_events_cleanup = 0;
 }
index 086ec5b5862d0e1ec5860440f5e7c5cb761598b2..c59eac7a32f2a087f0491b9d594a9e00f7950d47 100644 (file)
@@ -586,13 +586,10 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-       /*
-        * Decreasing the module reference count before the device reference
-        * count is safe since scsi_remove_host() only returns after all
-        * devices have been removed.
-        */
-       module_put(sdev->host->hostt->module);
+       struct module *mod = sdev->host->hostt->module;
+
        put_device(&sdev->sdev_gendev);
+       module_put(mod);
 }
 EXPORT_SYMBOL(scsi_device_put);
 
index ef08029a00793e5ba12c67551296781ec8e42c03..96e7e3eaca29d2cddf11e3133d1239d1240f5020 100644 (file)
@@ -1983,9 +1983,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
        return blk_mq_alloc_tag_set(tag_set);
 }
 
-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+void scsi_mq_free_tags(struct kref *kref)
 {
+       struct Scsi_Host *shost = container_of(kref, typeof(*shost),
+                                              tagset_refcnt);
+
        blk_mq_free_tag_set(&shost->tag_set);
+       complete(&shost->tagset_freed);
 }
 
 /**
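With this series the tag set is reference counted: scsi_add_host_with_dma() takes the initial reference, each scsi_alloc_sdev() takes another, and scsi_remove_host() plus every __scsi_remove_device() drop theirs, with scsi_mq_free_tags() running as the kref release and completing tagset_freed. A stripped-down user-space sketch of that lifetime (plain integers instead of kref/completion, names illustrative):

#include <stdio.h>

struct tagset {
	int refcnt;	/* stands in for struct kref */
	int freed;	/* stands in for the tagset_freed completion */
};

static void tagset_release(struct tagset *t)
{
	/* the driver does blk_mq_free_tag_set() + complete() here */
	t->freed = 1;
	printf("tag set freed\n");
}

static void tagset_put(struct tagset *t)
{
	if (--t->refcnt == 0)
		tagset_release(t);
}

int main(void)
{
	struct tagset t = { .refcnt = 1 };	/* scsi_add_host_with_dma(): kref_init() */

	t.refcnt++;	/* scsi_alloc_sdev(): kref_get() */
	tagset_put(&t);	/* scsi_remove_host() */
	tagset_put(&t);	/* __scsi_remove_device(): last reference, release runs */
	return 0;
}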
index 429663bd78ecf74fc3c160d85e2ba8144d21fb01..f385b3f04d6ece02491ee606dd41aa18e03df04a 100644 (file)
@@ -94,7 +94,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern void scsi_requeue_run_queue(struct work_struct *work);
 extern void scsi_start_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
+extern void scsi_mq_free_tags(struct kref *kref);
 extern void scsi_exit_queue(void);
 extern void scsi_evt_thread(struct work_struct *work);
 
index ac6059702d13514d8bb9ed704e644b62117109fb..5d27f5196de6fd49537ba29d9d3814207380e0ea 100644 (file)
@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
                kfree(sdev);
                goto out;
        }
+       kref_get(&sdev->host->tagset_refcnt);
        sdev->request_queue = q;
        q->queuedata = sdev;
        __scsi_init_queue(sdev->host, q);
@@ -406,14 +407,9 @@ static void scsi_target_destroy(struct scsi_target *starget)
 static void scsi_target_dev_release(struct device *dev)
 {
        struct device *parent = dev->parent;
-       struct Scsi_Host *shost = dev_to_shost(parent);
        struct scsi_target *starget = to_scsi_target(dev);
 
        kfree(starget);
-
-       if (atomic_dec_return(&shost->target_count) == 0)
-               wake_up(&shost->targets_wq);
-
        put_device(parent);
 }
 
@@ -526,10 +522,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
        starget->state = STARGET_CREATED;
        starget->scsi_level = SCSI_2;
        starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
-       init_waitqueue_head(&starget->sdev_wq);
-
-       atomic_inc(&shost->target_count);
-
  retry:
        spin_lock_irqsave(shost->host_lock, flags);
 
index 9dad2fd5297fa6c74bac5d79e9e3b15b3d48f8ee..5d61f58399dca4d63bf733419258da4ac739cc80 100644 (file)
@@ -443,15 +443,18 @@ static void scsi_device_cls_release(struct device *class_dev)
 
 static void scsi_device_dev_release_usercontext(struct work_struct *work)
 {
-       struct scsi_device *sdev = container_of(work, struct scsi_device,
-                                               ew.work);
-       struct scsi_target *starget = sdev->sdev_target;
+       struct scsi_device *sdev;
        struct device *parent;
        struct list_head *this, *tmp;
        struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
        struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
        struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
        unsigned long flags;
+       struct module *mod;
+
+       sdev = container_of(work, struct scsi_device, ew.work);
+
+       mod = sdev->host->hostt->module;
 
        scsi_dh_release_device(sdev);
 
@@ -513,16 +516,19 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
        kfree(sdev->inquiry);
        kfree(sdev);
 
-       if (starget && atomic_dec_return(&starget->sdev_count) == 0)
-               wake_up(&starget->sdev_wq);
-
        if (parent)
                put_device(parent);
+       module_put(mod);
 }
 
 static void scsi_device_dev_release(struct device *dev)
 {
        struct scsi_device *sdp = to_scsi_device(dev);
+
+       /* Set module pointer as NULL in case of module unloading */
+       /* Clear the module pointer if the module is already being unloaded */
+       if (!try_module_get(sdp->host->hostt->module))
+               sdp->host->hostt->module = NULL;
+
        execute_in_process_context(scsi_device_dev_release_usercontext,
                                   &sdp->ew);
 }
@@ -1470,6 +1476,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
        mutex_unlock(&sdev->state_mutex);
 
        blk_mq_destroy_queue(sdev->request_queue);
+       kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
        cancel_work_sync(&sdev->requeue_work);
 
        if (sdev->host->hostt->slave_destroy)
@@ -1529,14 +1536,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
                goto restart;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
-
-       /*
-        * After scsi_remove_target() returns its caller can remove resources
-        * associated with @starget, e.g. an rport or session. Wait until all
-        * devices associated with @starget have been removed to prevent that
-        * a SCSI error handling callback function triggers a use-after-free.
-        */
-       wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
 }
 
 /**
@@ -1647,9 +1646,6 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
        list_add_tail(&sdev->same_target_siblings, &starget->devices);
        list_add_tail(&sdev->siblings, &shost->__devices);
        spin_unlock_irqrestore(shost->host_lock, flags);
-
-       atomic_inc(&starget->sdev_count);
-
        /*
         * device can now only be removed via __scsi_remove_device() so hold
         * the target.  Target will be held in CREATED state until something
index 267342dfa73880d3db4f1573f8a79ba32436e728..2dcbe166df63e31d782be489a1d59a65b3165e6e 100644 (file)
@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
 {
        /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
 
+       u8 rxbit = bits - 1;
        u32 oldbit = !(word & 1);
        /* clock starts at inactive polarity */
        for (; likely(bits); bits--) {
@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
                /* sample LSB (from slave) on leading edge */
                word >>= 1;
                if ((flags & SPI_MASTER_NO_RX) == 0)
-                       word |= getmiso(spi) << (bits - 1);
+                       word |= getmiso(spi) << rxbit;
                setsck(spi, cpol);
        }
        return word;
@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
 {
        /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
 
+       u8 rxbit = bits - 1;
        u32 oldbit = !(word & 1);
        /* clock starts at inactive polarity */
        for (; likely(bits); bits--) {
@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
                /* sample LSB (from slave) on trailing edge */
                word >>= 1;
                if ((flags & SPI_MASTER_NO_RX) == 0)
-                       word |= getmiso(spi) << (bits - 1);
+                       word |= getmiso(spi) << rxbit;
        }
        return word;
 }
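Both hunks above fix the receive shift for LSB-first transfers: bits is the loop counter and decrements every pass, so getmiso(spi) << (bits - 1) deposited each sampled bit at a different position even though word >>= 1 already makes room at the top of the word. Latching rxbit = bits - 1 once keeps every sample at the fixed topmost bit. A small runnable simulation of only the receive assembly (the MISO sample values are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* assemble an LSB-first word from a stream of sampled bits */
static uint32_t rx_fixed_shift(const int *miso, int bits)
{
	uint32_t word = 0;
	int rxbit = bits - 1;                 /* latched once: the fix */

	for (int i = 0; bits; bits--, i++) {
		word >>= 1;
		word |= (uint32_t)miso[i] << rxbit;
	}
	return word;
}

static uint32_t rx_moving_shift(const int *miso, int bits)
{
	uint32_t word = 0;

	for (int i = 0; bits; bits--, i++) {
		word >>= 1;
		word |= (uint32_t)miso[i] << (bits - 1);  /* old, broken */
	}
	return word;
}

int main(void)
{
	/* bit stream 1,0,1,1,0,0,1,0 sent LSB first => expect 0x4D */
	const int miso[8] = { 1, 0, 1, 1, 0, 0, 1, 0 };

	printf("fixed shift : 0x%02X\n", rx_fixed_shift(miso, 8));
	printf("moving shift: 0x%02X\n", rx_moving_shift(miso, 8));
	return 0;
}

With the sample stream above, the fixed shift reconstructs 0x4D while the per-iteration shift scatters the bits into a different value.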
index 72b1a5a2298c551c34a80aefd56af16d98949f40..e12ab5b43f341833e57daff15b408fd56a4d37d4 100644 (file)
@@ -39,6 +39,7 @@
 #define CQSPI_DISABLE_DAC_MODE         BIT(1)
 #define CQSPI_SUPPORT_EXTERNAL_DMA     BIT(2)
 #define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
+#define CQSPI_SLOW_SRAM                BIT(4)
 
 /* Capabilities */
 #define CQSPI_SUPPORTS_OCTAL           BIT(0)
@@ -87,6 +88,7 @@ struct cqspi_st {
        bool                    use_dma_read;
        u32                     pd_dev_id;
        bool                    wr_completion;
+       bool                    slow_sram;
 };
 
 struct cqspi_driver_platdata {
@@ -333,7 +335,10 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
                }
        }
 
-       irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+       else if (!cqspi->slow_sram)
+               irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+       else
+               irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
 
        if (irq_status)
                complete(&cqspi->transfer_complete);
@@ -673,7 +678,18 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
        /* Clear all interrupts. */
        writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
 
-       writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+       /*
+        * On the SoCFPGA platform, reading the SRAM is slow due to a
+        * hardware limitation and causes a read interrupt storm on the
+        * CPU. Enable only the watermark interrupt here so that all
+        * read interrupts can be disabled again inside the "bytes to
+        * read" loop below, which runs fastest with them off.
+        */
+
+       if (!cqspi->slow_sram)
+               writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+       else
+               writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
 
        reinit_completion(&cqspi->transfer_complete);
        writel(CQSPI_REG_INDIRECTRD_START_MASK,
@@ -684,6 +700,13 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
                                                 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
                        ret = -ETIMEDOUT;
 
+               /*
+                * Disable all read interrupts until
+                * we are out of "bytes to read"
+                */
+               if (cqspi->slow_sram)
+                       writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+
                bytes_to_read = cqspi_get_rd_sram_level(cqspi);
 
                if (ret && bytes_to_read == 0) {
@@ -715,8 +738,11 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
                        bytes_to_read = cqspi_get_rd_sram_level(cqspi);
                }
 
-               if (remaining > 0)
+               if (remaining > 0) {
                        reinit_completion(&cqspi->transfer_complete);
+                       if (cqspi->slow_sram)
+                               writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+               }
        }
 
        /* Check indirect done status */
@@ -1667,6 +1693,8 @@ static int cqspi_probe(struct platform_device *pdev)
                        cqspi->use_dma_read = true;
                if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
                        cqspi->wr_completion = false;
+               if (ddata->quirks & CQSPI_SLOW_SRAM)
+                       cqspi->slow_sram = true;
 
                if (of_device_is_compatible(pdev->dev.of_node,
                                            "xlnx,versal-ospi-1.0"))
@@ -1779,7 +1807,9 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
 };
 
 static const struct cqspi_driver_platdata socfpga_qspi = {
-       .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
+       .quirks = CQSPI_DISABLE_DAC_MODE
+                       | CQSPI_NO_SUPPORT_WR_COMPLETION
+                       | CQSPI_SLOW_SRAM,
 };
 
 static const struct cqspi_driver_platdata versal_ospi = {
index f5d32ec4634e341d51b20446e041886b07dbb871..0709e987bd5ab28ce521e13e550cca6e1dd293ec 100644 (file)
@@ -161,6 +161,7 @@ static int spi_mux_probe(struct spi_device *spi)
        ctlr->num_chipselect = mux_control_states(priv->mux);
        ctlr->bus_num = -1;
        ctlr->dev.of_node = spi->dev.of_node;
+       ctlr->must_async = true;
 
        ret = devm_spi_register_controller(&spi->dev, ctlr);
        if (ret)
index 83da8862b8f22ee414bc7f92af8550285e43e4d1..32c01e684af3def8852414bf11e517ae10030340 100644 (file)
@@ -1727,8 +1727,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
        ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
-       if (!ret)
-               kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+       kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 
        ctlr->cur_msg = NULL;
        ctlr->fallback = false;
@@ -4033,7 +4032,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
         * guard against reentrancy from a different context. The io_mutex
         * will catch those cases.
         */
-       if (READ_ONCE(ctlr->queue_empty)) {
+       if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
                message->actual_length = 0;
                message->status = -EINPROGRESS;
 
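spi-mux resubmits every message it receives to its parent controller, which does not mix well with the __spi_sync() fast path that bypasses the message queue when it is empty; the new must_async flag lets such controllers opt out so they always take the queued path. A tiny sketch of that opt-out check (illustrative names, not the SPI core's internals):

#include <stdio.h>
#include <stdbool.h>

struct controller {
	bool queue_empty;   /* fast path is only legal when nothing is queued */
	bool must_async;    /* controller opts out of the fast path entirely  */
};

static void do_queued_transfer(const char *msg) { printf("queued: %s\n", msg); }
static void do_direct_transfer(const char *msg) { printf("direct: %s\n", msg); }

/* mirrors the check added in __spi_sync(): both conditions must hold */
static void sync_transfer(struct controller *ctlr, const char *msg)
{
	if (ctlr->queue_empty && !ctlr->must_async)
		do_direct_transfer(msg);
	else
		do_queued_transfer(msg);
}

int main(void)
{
	struct controller plain = { .queue_empty = true, .must_async = false };
	struct controller mux   = { .queue_empty = true, .must_async = true  };

	sync_transfer(&plain, "plain controller");
	sync_transfer(&mux,   "mux controller");
	return 0;
}
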
index e76a6c1736376b976ad4f737dd98139eff5eafb8..f12d0a3ee3e253049aad5e2b34a4155be56f80e8 100644 (file)
@@ -29,8 +29,7 @@ config USB4_DEBUGFS_WRITE
 
 config USB4_KUNIT_TEST
        bool "KUnit tests" if !KUNIT_ALL_TESTS
-       depends on (USB4=m || KUNIT=y)
-       depends on KUNIT
+       depends on USB4 && KUNIT=y
        default KUNIT_ALL_TESTS
 
 config USB4_DMA_TEST
index db516c90a977066fb1de7de3601669019c40fcd1..8706482665d11c393cc48d4499ab8aec6e18e05b 100644 (file)
@@ -558,6 +558,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
        ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
                                    pages, NULL, NULL);
        if (ret > 0) {
+               int i;
+
+               /*
+                * The zero page is always resident, we don't need to pin it
+                * and it falls into our invalid/reserved test so we don't
+                * unpin in put_pfn().  Unpin all zero pages in the batch here.
+                */
+               for (i = 0 ; i < ret; i++) {
+                       if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
+                               unpin_user_page(pages[i]);
+               }
+
                *pfn = page_to_pfn(pages[0]);
                goto done;
        }
index 886c564787f152f8eef16d0990bf9242662bdc88..b58b445bb529b32edc134356ccac907ea2d3954f 100644 (file)
 #define SYNTHVID_DEPTH_WIN8 32
 #define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
 
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
-
 enum pipe_msg_type {
        PIPE_MSG_INVALID,
        PIPE_MSG_DATA,
index ce91add81401a22805d253a585d2b15b11572a31..dc4d25c26256e53a341ca881cbf9fa2561794de0 100644 (file)
@@ -17,7 +17,7 @@ config NITRO_ENCLAVES
 
 config NITRO_ENCLAVES_MISC_DEV_TEST
        bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS
-       depends on NITRO_ENCLAVES && KUNIT
+       depends on NITRO_ENCLAVES && KUNIT=y
        default KUNIT_ALL_TESTS
        help
          Enable KUnit tests for the misc device functionality of the Nitro
index 9ef162dbd4bc11fd84c649d6b7c8b453313a8571..df8c99c99df9278f874c053f0ed7f34987fa1385 100644 (file)
@@ -1088,8 +1088,6 @@ struct btrfs_fs_info {
 
        spinlock_t zone_active_bgs_lock;
        struct list_head zone_active_bgs;
-       /* Waiters when BTRFS_FS_NEED_ZONE_FINISH is set */
-       wait_queue_head_t zone_finish_wait;
 
        /* Updates are not protected by any lock */
        struct btrfs_commit_stats commit_stats;
index 820b1f1e6b6723dbd6ffcb4da6255331c319a26e..2633137c3e9f1efc4d07bd262152104a3633e32a 100644 (file)
@@ -3068,7 +3068,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
        init_waitqueue_head(&fs_info->transaction_blocked_wait);
        init_waitqueue_head(&fs_info->async_submit_wait);
        init_waitqueue_head(&fs_info->delayed_iputs_wait);
-       init_waitqueue_head(&fs_info->zone_finish_wait);
 
        /* Usable values until the real ones are cached from the superblock */
        fs_info->nodesize = 4096;
@@ -4475,6 +4474,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 
        set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
 
+       /*
+        * If we had UNFINISHED_DROPS we could still be processing them, so
+        * clear that bit and wake up relocation so it can stop.
+        * We must do this before stopping the block group reclaim task, because
+        * at btrfs_relocate_block_group() we wait for this bit, and after the
+        * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
+        * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
+        * return 1.
+        */
+       btrfs_wake_unfinished_drop(fs_info);
+
        /*
         * We may have the reclaim task running and relocating a data block group,
         * in which case it may create delayed iputs. So stop it before we park
@@ -4493,12 +4503,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
         */
        kthread_park(fs_info->cleaner_kthread);
 
-       /*
-        * If we had UNFINISHED_DROPS we could still be processing them, so
-        * clear that bit and wake up relocation so it can stop.
-        */
-       btrfs_wake_unfinished_drop(fs_info);
-
        /* wait for the qgroup rescan worker to stop */
        btrfs_qgroup_wait_for_completion(fs_info, false);
 
@@ -4521,6 +4525,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        /* clear out the rbtree of defraggable inodes */
        btrfs_cleanup_defrag_inodes(fs_info);
 
+       /*
+        * After we parked the cleaner kthread, ordered extents may have
+        * completed and created new delayed iputs. If one of the async reclaim
+        * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
+        * can hang forever trying to stop it, because if a delayed iput is
+        * added after it ran btrfs_run_delayed_iputs() and before it called
+        * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
+        * no one else to run iputs.
+        *
+        * So wait for all ongoing ordered extents to complete and then run
+        * delayed iputs. This works because once we reach this point no one
+        * can either create new ordered extents nor create delayed iputs
+        * through some other means.
+        *
+        * Also note that btrfs_wait_ordered_roots() is not safe here, because
+        * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
+        * but the delayed iput for the respective inode is made only when doing
+        * the final btrfs_put_ordered_extent() (which must happen at
+        * btrfs_finish_ordered_io() when we are unmounting).
+        */
+       btrfs_flush_workqueue(fs_info->endio_write_workers);
+       /* Ordered extents for free space inodes. */
+       btrfs_flush_workqueue(fs_info->endio_freespace_worker);
+       btrfs_run_delayed_iputs(fs_info);
+
        cancel_work_sync(&fs_info->async_reclaim_work);
        cancel_work_sync(&fs_info->async_data_reclaim_work);
        cancel_work_sync(&fs_info->preempt_reclaim_work);
index ad250892028d6e0387ff8ad101e0ee2a9b9452f2..1372210869b14cda075d3e10fb583ac98d2d4a07 100644 (file)
@@ -1644,10 +1644,9 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
                        done_offset = end;
 
                if (done_offset == start) {
-                       struct btrfs_fs_info *info = inode->root->fs_info;
-
-                       wait_var_event(&info->zone_finish_wait,
-                                      !test_bit(BTRFS_FS_NEED_ZONE_FINISH, &info->flags));
+                       wait_on_bit_io(&inode->root->fs_info->flags,
+                                      BTRFS_FS_NEED_ZONE_FINISH,
+                                      TASK_UNINTERRUPTIBLE);
                        continue;
                }
 
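run_delalloc_zoned() now sleeps directly on the BTRFS_FS_NEED_ZONE_FINISH bit with wait_on_bit_io() instead of an open-coded wait queue; the waker side in zoned.c (further down in this diff) pairs it with clear_and_wake_up_bit(), so the flags word itself is the synchronization point. A hedged, kernel-context-only fragment showing just that waiter/waker pairing in isolation (FLAG_BUSY and my_flags are made-up names; this is not buildable outside a kernel tree):

/* Illustrative only: the waiter/waker pairing used in this series, isolated. */
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define FLAG_BUSY	0		/* bit number inside the flags word */

static unsigned long my_flags;

static void waiter(void)
{
	/* sleep until FLAG_BUSY is cleared, uninterruptibly, as above */
	wait_on_bit_io(&my_flags, FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

static void waker(void)
{
	/* clear the bit and wake every task sleeping on it */
	clear_and_wake_up_bit(FLAG_BUSY, &my_flags);
}
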
index d0cbeb7ae81c12ba4bf69fe9cb5c1669b56b3890..435559ba94fa00d31f21fa56ec97ec40c55282bc 100644 (file)
@@ -199,7 +199,7 @@ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
        ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
 
        if (flags & BTRFS_BLOCK_GROUP_DATA)
-               return SZ_1G;
+               return BTRFS_MAX_DATA_CHUNK_SIZE;
        else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                return SZ_32M;
 
index 064ab2a79c805f5f07921a4b1f3641d6bb6736a8..f63ff91e28837a0d5b13d09a239b89c44e4ca099 100644 (file)
@@ -5267,6 +5267,9 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
                                       ctl->stripe_size);
        }
 
+       /* Stripe size should not go beyond 1G. */
+       ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
+
        /* Align to BTRFS_STRIPE_LEN */
        ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
        ctl->chunk_size = ctl->stripe_size * data_stripes;
index b150b07ba1a7663c9b04c85307324d6819bd1b73..73c6929f7be66163d72a36d4790154ba7e882978 100644 (file)
@@ -421,10 +421,19 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
         * since btrfs adds the pages one by one to a bio, and btrfs cannot
         * increase the metadata reservation even if it increases the number of
         * extents, it is safe to stick with the limit.
+        *
+        * With zoned emulation, a non-zoned device can be used in zoned
+        * mode. In that case there is no valid max zone append size, so
+        * use max_segments * PAGE_SIZE as a pseudo max_zone_append_size.
         */
-       zone_info->max_zone_append_size =
-               min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
-                     (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+       if (bdev_is_zoned(bdev)) {
+               zone_info->max_zone_append_size = min_t(u64,
+                       (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
+                       (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+       } else {
+               zone_info->max_zone_append_size =
+                       (u64)bdev_max_segments(bdev) << PAGE_SHIFT;
+       }
        if (!IS_ALIGNED(nr_sectors, zone_sectors))
                zone_info->nr_zones++;
 
@@ -1178,7 +1187,7 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
  * offset.
  */
 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
-                                  u64 *offset_ret)
+                                  u64 *offset_ret, bool new)
 {
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_root *root;
@@ -1188,6 +1197,21 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
        int ret;
        u64 length;
 
+       /*
+        * Avoid tree lookups for a new block group, there's no use for it.
+        * It must always be 0.
+        *
+        * Also, we have a lock chain of extent buffer lock -> chunk mutex.
+        * For new a block group, this function is called from
+        * btrfs_make_block_group() which is already taking the chunk mutex.
+        * Thus, we cannot call calculate_alloc_pointer() which takes extent
+        * buffer locks to avoid deadlock.
+        */
+       if (new) {
+               *offset_ret = 0;
+               return 0;
+       }
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -1323,6 +1347,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                else
                        num_conventional++;
 
+               /*
+                * Consider a zone as active if we can allow any number of
+                * active zones.
+                */
+               if (!device->zone_info->max_active_zones)
+                       __set_bit(i, active);
+
                if (!is_sequential) {
                        alloc_offsets[i] = WP_CONVENTIONAL;
                        continue;
@@ -1389,45 +1420,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                        __set_bit(i, active);
                        break;
                }
-
-               /*
-                * Consider a zone as active if we can allow any number of
-                * active zones.
-                */
-               if (!device->zone_info->max_active_zones)
-                       __set_bit(i, active);
        }
 
        if (num_sequential > 0)
                cache->seq_zone = true;
 
        if (num_conventional > 0) {
-               /*
-                * Avoid calling calculate_alloc_pointer() for new BG. It
-                * is no use for new BG. It must be always 0.
-                *
-                * Also, we have a lock chain of extent buffer lock ->
-                * chunk mutex.  For new BG, this function is called from
-                * btrfs_make_block_group() which is already taking the
-                * chunk mutex. Thus, we cannot call
-                * calculate_alloc_pointer() which takes extent buffer
-                * locks to avoid deadlock.
-                */
-
                /* Zone capacity is always zone size in emulation */
                cache->zone_capacity = cache->length;
-               if (new) {
-                       cache->alloc_offset = 0;
-                       goto out;
-               }
-               ret = calculate_alloc_pointer(cache, &last_alloc);
-               if (ret || map->num_stripes == num_conventional) {
-                       if (!ret)
-                               cache->alloc_offset = last_alloc;
-                       else
-                               btrfs_err(fs_info,
+               ret = calculate_alloc_pointer(cache, &last_alloc, new);
+               if (ret) {
+                       btrfs_err(fs_info,
                        "zoned: failed to determine allocation offset of bg %llu",
-                                         cache->start);
+                                 cache->start);
+                       goto out;
+               } else if (map->num_stripes == num_conventional) {
+                       cache->alloc_offset = last_alloc;
+                       cache->zone_is_active = 1;
                        goto out;
                }
        }
@@ -1495,13 +1504,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                goto out;
        }
 
-       if (cache->zone_is_active) {
-               btrfs_get_block_group(cache);
-               spin_lock(&fs_info->zone_active_bgs_lock);
-               list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
-               spin_unlock(&fs_info->zone_active_bgs_lock);
-       }
-
 out:
        if (cache->alloc_offset > fs_info->zone_size) {
                btrfs_err(fs_info,
@@ -1526,10 +1528,16 @@ out:
                ret = -EIO;
        }
 
-       if (!ret)
+       if (!ret) {
                cache->meta_write_pointer = cache->alloc_offset + cache->start;
-
-       if (ret) {
+               if (cache->zone_is_active) {
+                       btrfs_get_block_group(cache);
+                       spin_lock(&fs_info->zone_active_bgs_lock);
+                       list_add_tail(&cache->active_bg_list,
+                                     &fs_info->zone_active_bgs);
+                       spin_unlock(&fs_info->zone_active_bgs_lock);
+               }
+       } else {
                kfree(cache->physical_map);
                cache->physical_map = NULL;
        }
@@ -1910,10 +1918,44 @@ out_unlock:
        return ret;
 }
 
+static void wait_eb_writebacks(struct btrfs_block_group *block_group)
+{
+       struct btrfs_fs_info *fs_info = block_group->fs_info;
+       const u64 end = block_group->start + block_group->length;
+       struct radix_tree_iter iter;
+       struct extent_buffer *eb;
+       void __rcu **slot;
+
+       rcu_read_lock();
+       radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
+                                block_group->start >> fs_info->sectorsize_bits) {
+               eb = radix_tree_deref_slot(slot);
+               if (!eb)
+                       continue;
+               if (radix_tree_deref_retry(eb)) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
+
+               if (eb->start < block_group->start)
+                       continue;
+               if (eb->start >= end)
+                       break;
+
+               slot = radix_tree_iter_resume(slot, &iter);
+               rcu_read_unlock();
+               wait_on_extent_buffer_writeback(eb);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+}
+
 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 {
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct map_lookup *map;
+       const bool is_metadata = (block_group->flags &
+                       (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
        int ret = 0;
        int i;
 
@@ -1924,8 +1966,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
        }
 
        /* Check if we have unwritten allocated space */
-       if ((block_group->flags &
-            (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
+       if (is_metadata &&
            block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
                spin_unlock(&block_group->lock);
                return -EAGAIN;
@@ -1950,6 +1991,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
                /* No need to wait for NOCOW writers. Zoned mode does not allow that */
                btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
                                         block_group->length);
+               /* Wait for extent buffers to be written. */
+               if (is_metadata)
+                       wait_eb_writebacks(block_group);
 
                spin_lock(&block_group->lock);
 
@@ -2007,8 +2051,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
        /* For active_bg_list */
        btrfs_put_block_group(block_group);
 
-       clear_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
-       wake_up_all(&fs_info->zone_finish_wait);
+       clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
 
        return 0;
 }
index 81f4c15936d050273b9f1ef0a49aa72e86efaf2f..5b4a7a32bdc58c65d71e514dfb0fde0ea2dbac55 100644 (file)
@@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
 /* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 38
-#define CIFS_VERSION   "2.38"
+#define SMB3_PRODUCT_BUILD 39
+#define CIFS_VERSION   "2.39"
 #endif                         /* _CIFSFS_H */
index a0a06b6f252be3a6d205d325c735d7a17ee02c0a..7ae6f2c08153e63b17cc19262fad12562a53cecd 100644 (file)
@@ -702,9 +702,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
        int length = 0;
        int total_read;
 
-       smb_msg->msg_control = NULL;
-       smb_msg->msg_controllen = 0;
-
        for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
                try_to_freeze();
 
@@ -760,7 +757,7 @@ int
 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
                      unsigned int to_read)
 {
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
        struct kvec iov = {.iov_base = buf, .iov_len = to_read};
        iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
 
@@ -770,15 +767,13 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
 ssize_t
 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
 {
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
 
        /*
         *  iov_iter_discard already sets smb_msg.type and count and iov_offset
         *  and cifs_readv_from_socket sets msg_control and msg_controllen
         *  so little to initialize in struct msghdr
         */
-       smb_msg.msg_name = NULL;
-       smb_msg.msg_namelen = 0;
        iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
 
        return cifs_readv_from_socket(server, &smb_msg);
@@ -788,7 +783,7 @@ int
 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
        unsigned int page_offset, unsigned int to_read)
 {
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
        struct bio_vec bv = {
                .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
        iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
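The pattern in these transport hunks replaces piecemeal NULL-ing of msg_name/msg_control with a designated empty initializer, so every field of the on-stack struct msghdr starts out zeroed and fields added to the structure later can never be left uninitialized by accident. The same idiom in ordinary userspace code:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {};          /* all fields zeroed, nothing forgotten */

	msg.msg_iov = &iov;              /* set only what this call site needs   */
	msg.msg_iovlen = 1;

	printf("msg_name=%p msg_control=%p msg_controllen=%zu\n",
	       msg.msg_name, msg.msg_control, msg.msg_controllen);
	return 0;
}
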
@@ -2350,7 +2345,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        ses = tcon->ses;
        cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
        spin_lock(&cifs_tcp_ses_lock);
+       spin_lock(&tcon->tc_lock);
        if (--tcon->tc_count > 0) {
+               spin_unlock(&tcon->tc_lock);
                spin_unlock(&cifs_tcp_ses_lock);
                return;
        }
@@ -2359,6 +2356,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        WARN_ON(tcon->tc_count < 0);
 
        list_del_init(&tcon->tcon_list);
+       spin_unlock(&tcon->tc_lock);
        spin_unlock(&cifs_tcp_ses_lock);
 
        /* cancel polling of interfaces */
index fa738adc031f728d2cd553f0dcaababf2fc275ac..6f38b134a346851bb07fe72653571680211ff7e0 100644 (file)
@@ -3575,6 +3575,9 @@ static ssize_t __cifs_writev(
 
 ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
+
+       cifs_revalidate_mapping(file->f_inode);
        return __cifs_writev(iocb, from, true);
 }
 
index c2fe035e573ba01eaf366668e83c9802e5752a4d..9a2753e2117073666de3e3f0215dc7526bc8601a 100644 (file)
@@ -194,10 +194,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
 
        *sent = 0;
 
-       smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
-       smb_msg->msg_namelen = sizeof(struct sockaddr);
-       smb_msg->msg_control = NULL;
-       smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
        else
@@ -309,7 +305,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
        __be32 rfc1002_marker;
 
        if (cifs_rdma_enabled(server)) {
index 3dcf0b8b4e932dba4dc3f85d3c9e2336e8b7e541..232cfdf095aeb3ec6c67b1ee512f791a7192cdc2 100644 (file)
@@ -744,6 +744,28 @@ void debugfs_remove(struct dentry *dentry)
 }
 EXPORT_SYMBOL_GPL(debugfs_remove);
 
+/**
+ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
+ * @name: a pointer to a string containing the name of the item to look up.
+ * @parent: a pointer to the parent dentry of the item.
+ *
+ * This is the equivalent of doing something like
+ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
+ * handled for the directory being looked up.
+ */
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
+{
+       struct dentry *dentry;
+
+       dentry = debugfs_lookup(name, parent);
+       if (!dentry)
+               return;
+
+       debugfs_remove(dentry);
+       dput(dentry);
+}
+EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
+
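The new helper exists because the older open-coded pattern leaks a reference: debugfs_lookup() returns the dentry with an extra reference that debugfs_remove() alone never drops, which is exactly what the dput() above pairs with. A hedged sketch of how a caller would switch over (driver name, directory variable and counter are made up; kernel context only):

/* Illustrative caller, not from this series: "mydrv" and its names are made up. */
#include <linux/debugfs.h>

static struct dentry *mydrv_dir;
static u32 mydrv_counter;

static void mydrv_debugfs_init(void)
{
	mydrv_dir = debugfs_create_dir("mydrv", NULL);
	debugfs_create_u32("counter", 0444, mydrv_dir, &mydrv_counter);
}

static void mydrv_debugfs_exit(void)
{
	/*
	 * Old, leaky form:
	 *	debugfs_remove(debugfs_lookup("counter", mydrv_dir));
	 * The new helper looks up, removes and drops the lookup reference:
	 */
	debugfs_lookup_and_remove("counter", mydrv_dir);
	debugfs_remove(mydrv_dir);
}
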
 /**
  * debugfs_rename - rename a file/directory in the debugfs filesystem
  * @old_dir: a pointer to the parent dentry for the renamed object. This
index 9a5ca7b82bfc5ee62e1533c35fbf7e587b077572..d046dbb9cbd08317cbaef0fb18a73399dd9571c3 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -65,7 +65,6 @@
 #include <linux/io_uring.h>
 #include <linux/syscall_user_dispatch.h>
 #include <linux/coredump.h>
-#include <linux/time_namespace.h>
 
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -979,12 +978,10 @@ static int exec_mmap(struct mm_struct *mm)
 {
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;
-       bool vfork;
        int ret;
 
        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
-       vfork = !!tsk->vfork_done;
        old_mm = current->mm;
        exec_mm_release(tsk, old_mm);
        if (old_mm)
@@ -1029,10 +1026,6 @@ static int exec_mmap(struct mm_struct *mm)
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
-
-       if (vfork)
-               timens_on_fork(tsk->nsproxy, tsk);
-
        if (old_mm) {
                mmap_read_unlock(old_mm);
                BUG_ON(active_mm != old_mm);
index ee0b7cf51157059c6fee3c6fc6edd67560862c88..41ae4cce1f4203c1d8ecd6e4bd92e23fc42f7366 100644 (file)
@@ -270,8 +270,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
        struct super_block *sb = dir->i_sb;
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct buffer_head *bh;
-       sector_t blknr, last_blknr;
-       int i;
+       sector_t blknr, last_blknr, i;
 
        blknr = exfat_cluster_to_sector(sbi, clu);
        last_blknr = blknr + sbi->sect_per_clus;
index 27c720d71b4e33c65b6cf05d5053acec8a853c4c..898dd95bc7a7c705cb8e8de8e229262008efee57 100644 (file)
@@ -606,6 +606,31 @@ static inline gfp_t nfs_io_gfp_mask(void)
        return GFP_KERNEL;
 }
 
+/*
+ * Special version of should_remove_suid() that ignores capabilities.
+ */
+static inline int nfs_should_remove_suid(const struct inode *inode)
+{
+       umode_t mode = inode->i_mode;
+       int kill = 0;
+
+       /* suid always must be killed */
+       if (unlikely(mode & S_ISUID))
+               kill = ATTR_KILL_SUID;
+
+       /*
+        * sgid without any exec bits is just a mandatory locking mark; leave
+        * it alone.  If some exec bits are set, it's a real sgid; kill it.
+        */
+       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+               kill |= ATTR_KILL_SGID;
+
+       if (unlikely(kill && S_ISREG(mode)))
+               return kill;
+
+       return 0;
+}
+
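Moving the helper into internal.h lets the fallocate path in nfs42proc.c reuse the same decision the write path already made: report which setuid/setgid bits a successful write-like operation must strip, ignoring capabilities. A runnable userspace illustration of the same decision table (the KILL_* values are local stand-ins for ATTR_KILL_SUID/ATTR_KILL_SGID):

#include <stdio.h>
#include <sys/stat.h>

#define KILL_SUID 0x1   /* local stand-ins, not the kernel's ATTR_* values */
#define KILL_SGID 0x2

static int should_remove_suid(mode_t mode)
{
	int kill = 0;

	if (mode & S_ISUID)                       /* suid must always go       */
		kill = KILL_SUID;
	if ((mode & S_ISGID) && (mode & S_IXGRP)) /* sgid only if group-exec   */
		kill |= KILL_SGID;
	if (kill && S_ISREG(mode))                /* and only on regular files */
		return kill;
	return 0;
}

int main(void)
{
	printf("setuid regular file  -> %d\n", should_remove_suid(S_IFREG | S_ISUID | 0755));
	printf("sgid, no group exec  -> %d\n", should_remove_suid(S_IFREG | S_ISGID | 0644));
	printf("sgid with group exec -> %d\n", should_remove_suid(S_IFREG | S_ISGID | 0755));
	printf("setuid directory     -> %d\n", should_remove_suid(S_IFDIR | S_ISUID | 0755));
	return 0;
}
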
 /* unlink.c */
 extern struct rpc_task *
 nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
index 068c45b3bc1ab19593441cc5b336e5e50a5e4322..6dab9e40837298405f98617b64076f7a6ce79f90 100644 (file)
@@ -78,10 +78,15 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
 
        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
-       if (status == 0)
+       if (status == 0) {
+               if (nfs_should_remove_suid(inode)) {
+                       spin_lock(&inode->i_lock);
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+                       spin_unlock(&inode->i_lock);
+               }
                status = nfs_post_op_update_inode_force_wcc(inode,
                                                            res.falloc_fattr);
-
+       }
        if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
                trace_nfs4_fallocate(inode, &args, status);
        else
index 82944e14fcea19533f9bb2ea9e1819ea0da784ec..ee66ffdb985e8094131a5226baecb876cb05c4a0 100644 (file)
@@ -1051,22 +1051,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
        if (ctx->bsize)
                sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
 
-       if (server->nfs_client->rpc_ops->version != 2) {
-               /* The VFS shouldn't apply the umask to mode bits. We will do
-                * so ourselves when necessary.
+       switch (server->nfs_client->rpc_ops->version) {
+       case 2:
+               sb->s_time_gran = 1000;
+               sb->s_time_min = 0;
+               sb->s_time_max = U32_MAX;
+               break;
+       case 3:
+               /*
+                * The VFS shouldn't apply the umask to mode bits.
+                * We will do so ourselves when necessary.
                 */
                sb->s_flags |= SB_POSIXACL;
                sb->s_time_gran = 1;
-               sb->s_export_op = &nfs_export_ops;
-       } else
-               sb->s_time_gran = 1000;
-
-       if (server->nfs_client->rpc_ops->version != 4) {
                sb->s_time_min = 0;
                sb->s_time_max = U32_MAX;
-       } else {
+               sb->s_export_op = &nfs_export_ops;
+               break;
+       case 4:
+               sb->s_flags |= SB_POSIXACL;
+               sb->s_time_gran = 1;
                sb->s_time_min = S64_MIN;
                sb->s_time_max = S64_MAX;
+               if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
+                       sb->s_export_op = &nfs_export_ops;
+               break;
        }
 
        sb->s_magic = NFS_SUPER_MAGIC;
index 1843fa235d9b64c045cc89f3767b2ed9a1cbf6e5..f41d24b54fd1f8b19a3e34e8cfad1bdb7b5ff206 100644 (file)
@@ -1496,31 +1496,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
        NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
 }
 
-/*
- * Special version of should_remove_suid() that ignores capabilities.
- */
-static int nfs_should_remove_suid(const struct inode *inode)
-{
-       umode_t mode = inode->i_mode;
-       int kill = 0;
-
-       /* suid always must be killed */
-       if (unlikely(mode & S_ISUID))
-               kill = ATTR_KILL_SUID;
-
-       /*
-        * sgid without any exec bits is just a mandatory locking mark; leave
-        * it alone.  If some exec bits are set, it's a real sgid; kill it.
-        */
-       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-               kill |= ATTR_KILL_SGID;
-
-       if (unlikely(kill && S_ISREG(mode)))
-               return kill;
-
-       return 0;
-}
-
 static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
                struct nfs_fattr *fattr)
 {
index 9f486b788ed09fdd007ca342bc4f4e9fe34273cb..fc17b0ac8729792ba277d5117ea77da40372d47c 100644 (file)
@@ -300,6 +300,10 @@ commit_metadata(struct svc_fh *fhp)
 static void
 nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 {
+       /* Ignore mode updates on symlinks */
+       if (S_ISLNK(inode->i_mode))
+               iap->ia_valid &= ~ATTR_MODE;
+
        /* sanitize the mode change */
        if (iap->ia_valid & ATTR_MODE) {
                iap->ia_mode &= S_IALLUGO;
@@ -353,7 +357,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
        int             accmode = NFSD_MAY_SATTR;
        umode_t         ftype = 0;
        __be32          err;
-       int             host_err;
+       int             host_err = 0;
        bool            get_write_count;
        bool            size_change = (iap->ia_valid & ATTR_SIZE);
 
@@ -391,13 +395,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
        dentry = fhp->fh_dentry;
        inode = d_inode(dentry);
 
-       /* Ignore any mode updates on symlinks */
-       if (S_ISLNK(inode->i_mode))
-               iap->ia_valid &= ~ATTR_MODE;
-
-       if (!iap->ia_valid)
-               return 0;
-
        nfsd_sanitize_attrs(inode, iap);
 
        if (check_guard && guardtime != inode->i_ctime.tv_sec)
@@ -448,8 +445,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
                        goto out_unlock;
        }
 
-       iap->ia_valid |= ATTR_CTIME;
-       host_err = notify_change(&init_user_ns, dentry, iap, NULL);
+       if (iap->ia_valid) {
+               iap->ia_valid |= ATTR_CTIME;
+               host_err = notify_change(&init_user_ns, dentry, iap, NULL);
+       }
 
 out_unlock:
        if (attr->na_seclabel && attr->na_seclabel->len)
@@ -846,10 +845,14 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                  struct splice_desc *sd)
 {
        struct svc_rqst *rqstp = sd->u.data;
-
-       svc_rqst_replace_page(rqstp, buf->page);
-       if (rqstp->rq_res.page_len == 0)
-               rqstp->rq_res.page_base = buf->offset;
+       struct page *page = buf->page;  // may be a compound one
+       unsigned offset = buf->offset;
+
+       page += offset / PAGE_SIZE;
+       for (int i = sd->len; i > 0; i -= PAGE_SIZE)
+               svc_rqst_replace_page(rqstp, page++);
+       if (rqstp->rq_res.page_len == 0)        // first call
+               rqstp->rq_res.page_base = offset % PAGE_SIZE;
        rqstp->rq_res.page_len += sd->len;
        return sd->len;
 }
index 8a813fa5ca56511fd946903376d40cfcc2cb8d73..cf7e5c350a54be924b40fff310ba03177d539905 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -716,6 +716,8 @@ int chown_common(const struct path *path, uid_t user, gid_t group)
        fs_userns = i_user_ns(inode);
 
 retry_deleg:
+       newattrs.ia_vfsuid = INVALID_VFSUID;
+       newattrs.ia_vfsgid = INVALID_VFSGID;
        newattrs.ia_valid =  ATTR_CTIME;
        if ((user != (uid_t)-1) && !setattr_vfsuid(&newattrs, uid))
                return -EINVAL;
index 81d26abf486fa7b6b969d21039af2e13e390e600..da85b3979195747d09b788ca7884f7575bf3ff15 100644 (file)
@@ -141,6 +141,8 @@ struct tracefs_mount_opts {
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
+       /* Opt_* bitfield. */
+       unsigned int opts;
 };
 
 enum {
@@ -241,6 +243,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
        kgid_t gid;
        char *p;
 
+       opts->opts = 0;
        opts->mode = TRACEFS_DEFAULT_MODE;
 
        while ((p = strsep(&data, ",")) != NULL) {
@@ -275,24 +278,36 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
                 * but traditionally tracefs has ignored all mount options
                 */
                }
+
+               opts->opts |= BIT(token);
        }
 
        return 0;
 }
 
-static int tracefs_apply_options(struct super_block *sb)
+static int tracefs_apply_options(struct super_block *sb, bool remount)
 {
        struct tracefs_fs_info *fsi = sb->s_fs_info;
        struct inode *inode = d_inode(sb->s_root);
        struct tracefs_mount_opts *opts = &fsi->mount_opts;
 
-       inode->i_mode &= ~S_IALLUGO;
-       inode->i_mode |= opts->mode;
+       /*
+        * On remount, only reset mode/uid/gid if they were provided as mount
+        * options.
+        */
+
+       if (!remount || opts->opts & BIT(Opt_mode)) {
+               inode->i_mode &= ~S_IALLUGO;
+               inode->i_mode |= opts->mode;
+       }
 
-       inode->i_uid = opts->uid;
+       if (!remount || opts->opts & BIT(Opt_uid))
+               inode->i_uid = opts->uid;
 
-       /* Set all the group ids to the mount option */
-       set_gid(sb->s_root, opts->gid);
+       if (!remount || opts->opts & BIT(Opt_gid)) {
+               /* Set all the group ids to the mount option */
+               set_gid(sb->s_root, opts->gid);
+       }
 
        return 0;
 }
@@ -307,7 +322,7 @@ static int tracefs_remount(struct super_block *sb, int *flags, char *data)
        if (err)
                goto fail;
 
-       tracefs_apply_options(sb);
+       tracefs_apply_options(sb, true);
 
 fail:
        return err;
@@ -359,7 +374,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_op = &tracefs_super_operations;
 
-       tracefs_apply_options(sb);
+       tracefs_apply_options(sb, false);
 
        return 0;
 
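The tracefs change records which of uid, gid and mode actually appeared on the mount command line (one bit per Opt_* token), so a later remount only overrides the options the user re-specified instead of silently resetting the rest to defaults. A small runnable sketch of that "remember what was explicitly set" pattern (the option names mirror tracefs, the parsing is deliberately simplified):

#include <stdio.h>
#include <string.h>

enum { OPT_UID, OPT_GID, OPT_MODE };

struct mount_opts {
	unsigned int set;      /* one bit per option that appeared in the string */
	unsigned int uid, gid, mode;
};

static void parse(struct mount_opts *o, char *data)
{
	o->set = 0;
	for (char *p = strtok(data, ","); p; p = strtok(NULL, ",")) {
		if (sscanf(p, "uid=%u", &o->uid) == 1)
			o->set |= 1u << OPT_UID;
		else if (sscanf(p, "gid=%u", &o->gid) == 1)
			o->set |= 1u << OPT_GID;
		else if (sscanf(p, "mode=%o", &o->mode) == 1)
			o->set |= 1u << OPT_MODE;
	}
}

static void apply(const struct mount_opts *o, int remount,
		  unsigned int *uid, unsigned int *gid, unsigned int *mode)
{
	/* on remount, only touch what the user explicitly passed */
	if (!remount || (o->set & (1u << OPT_UID)))
		*uid = o->uid;
	if (!remount || (o->set & (1u << OPT_GID)))
		*gid = o->gid;
	if (!remount || (o->set & (1u << OPT_MODE)))
		*mode = o->mode;
}

int main(void)
{
	unsigned int uid = 0, gid = 0, mode = 0755;
	struct mount_opts o = { .uid = 0, .gid = 0, .mode = 0755 };
	char first[] = "uid=1000,gid=1000,mode=700", again[] = "gid=0";

	parse(&o, first);
	apply(&o, 0, &uid, &gid, &mode);    /* initial mount */
	parse(&o, again);
	apply(&o, 1, &uid, &gid, &mode);    /* remount changing only gid */
	printf("uid=%u gid=%u mode=%o\n", uid, gid, mode);  /* uid and mode kept */
	return 0;
}
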
index d3e2d81656e042f3a74579403470311dc7f3c732..2a67aed9ac5287bc5c6df3bd374b9f2493fd803a 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
 #define __ASM_GENERIC_SOFTIRQ_STACK_H
 
-#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void);
 #else
 static inline void do_softirq_own_stack(void)
index a1705d6b3fba20d9be64a651b9f2f3cb4a5c36e8..7df7876b2ad5634fe373d2ef6aabe245a57ab26d 100644 (file)
@@ -319,8 +319,8 @@ enum drm_panel_orientation {
  *             EDID's detailed monitor range
  */
 struct drm_monitor_range_info {
-       u8 min_vfreq;
-       u8 max_vfreq;
+       u16 min_vfreq;
+       u16 max_vfreq;
 };
 
 /**
index 2181977ae6839c6caedb0957c43306dcf4e8da35..1ed61e2b30a41c70b40682f01bd011d4047a886a 100644 (file)
@@ -92,6 +92,11 @@ struct detailed_data_string {
        u8 str[13];
 } __attribute__((packed));
 
+#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MIN_HFREQ (1 << 2) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_HFREQ (1 << 3) /* 1.4 */
+
 #define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG   0x00
 #define DRM_EDID_RANGE_LIMITS_ONLY_FLAG     0x01
 #define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
index c958855681ccdd205bc8aa358aa0a9829d0cfede..840a2c375065b941a0043a660298102061bf18e7 100644 (file)
@@ -826,7 +826,7 @@ do {                                                                               \
 
 #define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...)                      \
        KUNIT_BINARY_INT_ASSERTION(test,                                       \
-                                  KUNIT_ASSERTION,                            \
+                                  KUNIT_EXPECTATION,                          \
                                   left, <=, right,                            \
                                   fmt,                                        \
                                    ##__VA_ARGS__)
@@ -1116,7 +1116,7 @@ do {                                                                             \
 
 #define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...)                      \
        KUNIT_BINARY_INT_ASSERTION(test,                                       \
-                                  KUNIT_EXPECTATION,                          \
+                                  KUNIT_ASSERTION,                            \
                                   left, <, right,                             \
                                   fmt,                                        \
                                    ##__VA_ARGS__)
@@ -1157,7 +1157,7 @@ do {                                                                             \
 
 #define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...)                      \
        KUNIT_BINARY_INT_ASSERTION(test,                                       \
-                                  KUNIT_EXPECTATION,                          \
+                                  KUNIT_ASSERTION,                            \
                                   left, >, right,                             \
                                   fmt,                                        \
                                    ##__VA_ARGS__)
index c869f1e73d755832c739e88ed7f73699fbf46c2a..f60674692d36578ae81c0c55aace120f4a444687 100644 (file)
@@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name,
 void debugfs_remove(struct dentry *dentry);
 #define debugfs_remove_recursive debugfs_remove
 
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
 const struct file_operations *debugfs_real_fops(const struct file *filp);
 
 int debugfs_file_get(struct dentry *dentry);
@@ -225,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry)
 static inline void debugfs_remove_recursive(struct dentry *dentry)
 { }
 
+static inline void debugfs_lookup_and_remove(const char *name,
+                                            struct dentry *parent)
+{ }
+
 const struct file_operations *debugfs_real_fops(const struct file *filp);
 
 static inline int debugfs_file_get(struct dentry *dentry)
index 25a30906289d987e9fc0936132bb526e3bf7a4d0..0ee20b764000cb705a238a3483689be5d4b468fd 100644 (file)
@@ -139,7 +139,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
 bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
 bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -248,10 +247,6 @@ static inline bool dma_can_mmap(struct device *dev)
 {
        return false;
 }
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-       return 0;
-}
 static inline bool dma_pci_p2pdma_supported(struct device *dev)
 {
        return false;
index 6f1dee7e67e068663ab26e288fe1c93c912ecb3c..9be8704e2d38e20e678f62a4c8de365be3497824 100644 (file)
@@ -180,7 +180,7 @@ switch (val) {                                              \
 
 #define HP_SDC_CMD_SET_IM      0x40    /* 010xxxxx == set irq mask */
 
-/* The documents provided do not explicitly state that all registers betwee
+/* The documents provided do not explicitly state that all registers between
  * 0x01 and 0x1f inclusive can be read by sending their register index as a 
  * command, but this is implied and appears to be the case.
  */
index 7b7ce602c8080956fed90daa083a7fa55b67d156..c32de987fa71c59857eaa5c105ba79624984e9fe 100644 (file)
@@ -1280,16 +1280,17 @@ enum {
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
 };
 
-static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
 {
-       struct devlink *devlink = priv_to_devlink(dev);
-       union devlink_param_value val;
-       int err;
-
-       err = devlink_param_driverinit_value_get(devlink,
-                                                DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
-                                                &val);
-       return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
+       if (MLX5_CAP_GEN(dev, roce_rw_supported))
+               return MLX5_CAP_GEN(dev, roce);
+
+       /* If RoCE cap is read-only in FW, get RoCE state from devlink
+        * in order to support RoCE enable/disable feature
+        */
+       return mlx5_is_roce_on(dev);
 }
 
 #endif /* MLX5_DRIVER_H */
index 1d7992a02e36e283438254ef84ad704e209e2848..1a803e4335d30528aae1ba07674d82d47ba84ff3 100644 (file)
@@ -101,8 +101,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
 }
 
 static inline int of_dma_configure_id(struct device *dev,
-                                  struct device_node *np,
-                                  bool force_dma)
+                                     struct device_node *np,
+                                     bool force_dma,
+                                     const u32 *id)
 {
        return 0;
 }
index 6feade66efdbdccf28e85ecb9e37bfd8aa80e227..15b49e655ce3649bd51d958b32fd08703034b0ba 100644 (file)
 #define PCI_DEVICE_ID_ICE_1712         0x1712
 #define PCI_DEVICE_ID_VT1724           0x1724
 
+#define PCI_VENDOR_ID_MICROSOFT                0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO     0x5353
+
 #define PCI_VENDOR_ID_OXSEMI           0x1415
 #define PCI_DEVICE_ID_OXSEMI_12PCI840  0x8403
 #define PCI_DEVICE_ID_OXSEMI_PCIe840           0xC000
index e6c73d5ff1a8298dcde9641f187cd6d8806ca20d..f089ee1ead5800ae64a8fff137d774ff14adbc45 100644 (file)
@@ -469,6 +469,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  *     SPI_TRANS_FAIL_NO_START.
  * @queue_empty: signal green light for opportunistically skipping the queue
  *     for spi_sync transfers.
+ * @must_async: disable all fast paths in the core
  *
  * Each SPI controller can communicate with one or more @spi_device
  * children.  These make a small bus, sharing MOSI, MISO and SCK signals
@@ -690,6 +691,7 @@ struct spi_controller {
 
        /* Flag for enabling opportunistic skipping of the queue in spi_sync */
        bool                    queue_empty;
+       bool                    must_async;
 };
 
 static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
index 3113471ca375bb1d05874293d6f17d7aa768e472..2493bd65351a6af6fc4bc6963808ba0c91b24856 100644 (file)
@@ -309,8 +309,6 @@ struct scsi_target {
        struct list_head        devices;
        struct device           dev;
        struct kref             reap_ref; /* last put renders target invisible */
-       atomic_t                sdev_count;
-       wait_queue_head_t       sdev_wq;
        unsigned int            channel;
        unsigned int            id; /* target id ... replace
                                     * scsi_device.id eventually */
index aa7b7496c93aa16a6c1487425ec1de6dc862923b..9b0a028bf053af2e165ed255122343b4b16f527b 100644 (file)
@@ -557,6 +557,8 @@ struct Scsi_Host {
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;
 
+       struct kref             tagset_refcnt;
+       struct completion       tagset_freed;
        /* Area to keep a shared tag map */
        struct blk_mq_tag_set   tag_set;
 
@@ -690,9 +692,6 @@ struct Scsi_Host {
        /* ldm bits */
        struct device           shost_gendev, shost_dev;
 
-       atomic_t                target_count;
-       wait_queue_head_t       targets_wq;
-
        /*
         * Points to the transport data (if any) which is allocated
         * separately
index f9be9b7eb654f38f150937deac3ad13e283f7587..b9640ad5069f3127bfbd9961c56a96147d9f2ad7 100644 (file)
@@ -1728,6 +1728,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 
        switch (io_arm_poll_handler(req, 0)) {
        case IO_APOLL_READY:
+               io_kbuf_recycle(req, 0);
                io_req_task_queue(req);
                break;
        case IO_APOLL_ABORTED:
index d6af208d109ffe2956741c42a0a3daeb150745aa..746fbf31a703ecb1d5362fe565c642bb716611e8 100644 (file)
@@ -91,9 +91,13 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
         * buffer data. However if that buffer is recycled the original request
         * data stored in addr is lost. Therefore forbid recycling for now.
         */
-       if (req->opcode == IORING_OP_READV)
+       if (req->opcode == IORING_OP_READV) {
+               if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
+                       req->buf_list->head++;
+                       req->buf_list = NULL;
+               }
                return;
-
+       }
        if (req->flags & REQ_F_BUFFER_SELECTED)
                io_kbuf_recycle_legacy(req, issue_flags);
        if (req->flags & REQ_F_BUFFER_RING)
index 976c4ba68ee7ec07052d0a5473a487ed8a1c9822..4a7e5d030c782f1d175b69afb463b2cc52c7f814 100644 (file)
@@ -165,7 +165,8 @@ done:
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        /* put file to avoid an attempt to IOPOLL the req */
-       io_put_file(req->file);
+       if (!(req->flags & REQ_F_FIXED_FILE))
+               io_put_file(req->file);
        req->file = NULL;
        return IOU_OK;
 }
index 7047c13425419b7247d2532a1c18dff86886e92d..60e392f7f2dcdc91c5390be2386a604a741af355 100644 (file)
@@ -905,6 +905,13 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
                          IORING_RECVSEND_FIXED_BUF))
                return -EINVAL;
+       notif = zc->notif = io_alloc_notif(ctx);
+       if (!notif)
+               return -ENOMEM;
+       notif->cqe.user_data = req->cqe.user_data;
+       notif->cqe.res = 0;
+       notif->cqe.flags = IORING_CQE_F_NOTIF;
+       req->flags |= REQ_F_NEED_CLEANUP;
        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                unsigned idx = READ_ONCE(sqe->buf_index);
 
@@ -912,15 +919,8 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                        return -EFAULT;
                idx = array_index_nospec(idx, ctx->nr_user_bufs);
                req->imu = READ_ONCE(ctx->user_bufs[idx]);
-               io_req_set_rsrc_node(req, ctx, 0);
+               io_req_set_rsrc_node(notif, ctx, 0);
        }
-       notif = zc->notif = io_alloc_notif(ctx);
-       if (!notif)
-               return -ENOMEM;
-       notif->cqe.user_data = req->cqe.user_data;
-       notif->cqe.res = 0;
-       notif->cqe.flags = IORING_CQE_F_NOTIF;
-       req->flags |= REQ_F_NEED_CLEANUP;
 
        zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
        zc->len = READ_ONCE(sqe->len);
@@ -1003,9 +1003,6 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
        unsigned msg_flags, cflags;
        int ret, min_ret = 0;
 
-       if (!(req->flags & REQ_F_POLLED) &&
-           (zc->flags & IORING_RECVSEND_POLL_FIRST))
-               return -EAGAIN;
        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;
@@ -1030,6 +1027,10 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
                msg.msg_namelen = zc->addr_len;
        }
 
+       if (!(req->flags & REQ_F_POLLED) &&
+           (zc->flags & IORING_RECVSEND_POLL_FIRST))
+               return io_setup_async_addr(req, addr, issue_flags);
+
        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
                                        (u64)(uintptr_t)zc->buf, zc->len);
index 38d77165edc3a233c5a2e932f09c7cb924a1fe5b..e37c6569d82e8da251357e9e30105757037a0b9e 100644 (file)
@@ -21,14 +21,6 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
        io_req_task_complete(notif, locked);
 }
 
-static inline void io_notif_complete(struct io_kiocb *notif)
-       __must_hold(&notif->ctx->uring_lock)
-{
-       bool locked = true;
-
-       __io_notif_complete_tw(notif, &locked);
-}
-
 static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
                                          struct ubuf_info *uarg,
                                          bool success)
index c61494e0a6022e955ca90f50229a1ca8e3f06a67..c4dddd0fd70949c300e519c54c0f6f7d8f13c246 100644 (file)
@@ -471,7 +471,7 @@ const struct io_op_def io_op_defs[] = {
                .prep_async             = io_uring_cmd_prep_async,
        },
        [IORING_OP_SEND_ZC] = {
-               .name                   = "SENDZC_NOTIF",
+               .name                   = "SEND_ZC",
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
index 1babd77da79c7e1b97600f7c5084f9c9bc3fe5fa..76ebcfebc9a6e32f93b78f42fab0da9ae38c9893 100644 (file)
@@ -206,6 +206,20 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
        return false;
 }
 
+static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
+{
+       struct io_async_rw *io = req->async_data;
+
+       /* add previously done IO, if any */
+       if (req_has_async_data(req) && io->bytes_done > 0) {
+               if (res < 0)
+                       res = io->bytes_done;
+               else
+                       res += io->bytes_done;
+       }
+       return res;
+}
+
 static void io_complete_rw(struct kiocb *kiocb, long res)
 {
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
@@ -213,7 +227,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 
        if (__io_complete_rw_common(req, res))
                return;
-       io_req_set_res(req, res, 0);
+       io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(req);
 }
@@ -240,22 +254,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                       unsigned int issue_flags)
 {
-       struct io_async_rw *io = req->async_data;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-       /* add previously done IO, if any */
-       if (req_has_async_data(req) && io->bytes_done > 0) {
-               if (ret < 0)
-                       ret = io->bytes_done;
-               else
-                       ret += io->bytes_done;
-       }
+       unsigned final_ret = io_fixup_rw_res(req, ret);
 
        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
                if (!__io_complete_rw_common(req, ret)) {
-                       io_req_set_res(req, req->cqe.res,
+                       io_req_set_res(req, final_ret,
                                       io_put_kbuf(req, issue_flags));
                        return IOU_OK;
                }
@@ -268,7 +274,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                if (io_resubmit_prep(req))
                        io_req_task_queue_reissue(req);
                else
-                       io_req_task_queue_fail(req, ret);
+                       io_req_task_queue_fail(req, final_ret);
        }
        return IOU_ISSUE_SKIP_COMPLETE;
 }
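
The new io_fixup_rw_res() helper folds bytes moved by earlier partial attempts into the final result, so both completion paths report the same number. A stand-alone sketch of that arithmetic; the inputs below are made up:

    #include <stdio.h>
    #include <errno.h>

    /* Same shape as the helper above: if earlier attempts already transferred
     * bytes_done bytes, a later error is replaced by that byte count and a
     * later success is added to it. */
    static long fixup_rw_res(long res, long bytes_done)
    {
            if (bytes_done > 0) {
                    if (res < 0)
                            res = bytes_done;
                    else
                            res += bytes_done;
            }
            return res;
    }

    int main(void)
    {
            printf("%ld\n", fixup_rw_res(-EAGAIN, 4096)); /* 4096: partial I/O is reported */
            printf("%ld\n", fixup_rw_res(512, 4096));     /* 4608: attempts are summed */
            printf("%ld\n", fixup_rw_res(-EAGAIN, 0));    /* -11: nothing was done yet */
            return 0;
    }
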
index 2caafd13f8aac27f92e0675d94050175c45d585d..18c93c2276cae436c89341e11830cb694fc87e8f 100644 (file)
@@ -350,11 +350,10 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                                                   unsigned long *flags)
 {
 
-       unsigned int max_range = dma_get_max_seg_size(ref->dev);
        struct dma_debug_entry *entry, index = *ref;
-       unsigned int range = 0;
+       int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
 
-       while (range <= max_range) {
+       for (int i = 0; i < limit; i++) {
                entry = __hash_bucket_find(*bucket, ref, containing_match);
 
                if (entry)
@@ -364,7 +363,6 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                 * Nothing found, go back a hash bucket
                 */
                put_hash_bucket(*bucket, *flags);
-               range          += (1 << HASH_FN_SHIFT);
                index.dev_addr -= (1 << HASH_FN_SHIFT);
                *bucket = get_hash_bucket(&index, flags);
        }
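
The rewritten loop bounds the backward bucket walk by how many buckets lie between the reference address and zero, capped by the table size, instead of by the device's maximum segment size. A sketch of that limit computation; HASH_SIZE_SKETCH and HASH_FN_SHIFT_SKETCH are illustrative stand-ins for the constants in kernel/dma/debug.c:

    #include <stdio.h>

    #define HASH_SIZE_SKETCH     16384u  /* illustrative */
    #define HASH_FN_SHIFT_SKETCH 13      /* illustrative */

    static unsigned int scan_limit(unsigned long long dev_addr)
    {
            /* one bucket per (1 << shift) bytes of dev_addr; never step below
             * address zero and never visit more buckets than the table has */
            unsigned long long buckets = (dev_addr >> HASH_FN_SHIFT_SKETCH) + 1;

            return buckets < HASH_SIZE_SKETCH ? (unsigned int)buckets : HASH_SIZE_SKETCH;
    }

    int main(void)
    {
            printf("%u\n", scan_limit(0x1000));   /* 1: already in the lowest bucket */
            printf("%u\n", scan_limit(0x12345));  /* 10: a short walk back */
            printf("%u\n", scan_limit(~0ull));    /* 16384: capped at the table size */
            return 0;
    }
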
index 49cbf3e33de71cb8fbff2d67fc4b23e1e7b4c4f5..27f272381cf27e88888e489a63048712636882f5 100644 (file)
@@ -707,7 +707,7 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
 
-int dma_supported(struct device *dev, u64 mask)
+static int dma_supported(struct device *dev, u64 mask)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -721,7 +721,6 @@ int dma_supported(struct device *dev, u64 mask)
                return 1;
        return ops->dma_supported(dev, mask);
 }
-EXPORT_SYMBOL(dma_supported);
 
 bool dma_pci_p2pdma_supported(struct device *dev)
 {
index c5a9190b218f959d3a050e23fc25472ab8fbfeb5..0ef6b12f961d53ffc5e5cc7bc98cc660b2934953 100644 (file)
@@ -326,9 +326,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
                swiotlb_adjust_nareas(num_possible_cpus());
 
        nslabs = default_nslabs;
-       if (nslabs < IO_TLB_MIN_SLABS)
-               panic("%s: nslabs = %lu too small\n", __func__, nslabs);
-
        /*
         * By default allocate the bounce buffer memory from low memory, but
         * allow to pick a location everywhere for hypervisors with guest
@@ -341,8 +338,7 @@ retry:
        else
                tlb = memblock_alloc_low(bytes, PAGE_SIZE);
        if (!tlb) {
-               pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
-                       __func__, bytes);
+               pr_warn("%s: failed to allocate tlb structure\n", __func__);
                return;
        }
 
@@ -579,7 +575,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
        }
 }
 
-#define slot_addr(start, idx)  ((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+       return start + (idx << IO_TLB_SHIFT);
+}
 
 /*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
@@ -765,7 +764,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
        /*
         * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
         * to the tlb buffer, if we knew for sure the device will
-        * overwirte the entire current content. But we don't. Thus
+        * overwrite the entire current content. But we don't. Thus
         * unconditional bounce may prevent leaking swiotlb content (i.e.
         * kernel memory) to user-space.
         */
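
Turning the slot_addr() macro into an inline that takes phys_addr_t parameters makes the shift happen at phys_addr_t width, which is the point of the change: with a 32-bit index the shift can wrap before the addition sees it. A small demonstration, assuming the usual 2 KiB slot size (IO_TLB_SHIFT = 11) and a deliberately oversized index:

    #include <stdio.h>
    #include <stdint.h>

    #define IO_TLB_SHIFT 11   /* 2 KiB slots */

    int main(void)
    {
            uint64_t start = 0x100000000ull;   /* made-up pool start address */
            unsigned int idx = 3u << 21;       /* deliberately large slot index */

            /* the 32-bit shift wraps to 0 before being widened */
            uint64_t wrapped = start + (uint64_t)(idx << IO_TLB_SHIFT);
            /* widening idx first, as the inline's phys_addr_t parameter does */
            uint64_t correct = start + ((uint64_t)idx << IO_TLB_SHIFT);

            printf("wrapped: %#llx\n", (unsigned long long)wrapped);
            printf("correct: %#llx\n", (unsigned long long)correct);
            return 0;
    }
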
index 90c85b17bf698744c12cb165c05396fa55db7c3a..2b6bd511c6ed1cdff351f63a6f657e912ffe2ac1 100644 (file)
@@ -1225,6 +1225,7 @@ void mmput_async(struct mm_struct *mm)
                schedule_work(&mm->async_put_work);
        }
 }
+EXPORT_SYMBOL_GPL(mmput_async);
 #endif
 
 /**
@@ -2046,11 +2047,8 @@ static __latent_entropy struct task_struct *copy_process(
        /*
         * If the new process will be in a different time namespace
         * do not allow it to share VM or a thread group with the forking task.
-        *
-        * On vfork, the child process enters the target time namespace only
-        * after exec.
         */
-       if ((clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM) {
+       if (clone_flags & (CLONE_THREAD | CLONE_VM)) {
                if (nsp->time_ns != nsp->time_ns_for_children)
                        return ERR_PTR(-EINVAL);
        }
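
With the hunk above, any clone that shares the VM or the thread group is refused once the parent's children-time-namespace differs from its own; the old vfork() exception is gone. A quick check of which flag combinations now trip the test; this only evaluates the condition with constants from <sched.h>, it does not call clone():

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the new condition: reject when VM or thread group is shared
     * while time_ns != time_ns_for_children. */
    static bool rejected(unsigned long clone_flags, bool timens_differs)
    {
            return (clone_flags & (CLONE_THREAD | CLONE_VM)) && timens_differs;
    }

    int main(void)
    {
            printf("fork():         %d\n", rejected(0, true));                       /* 0 */
            printf("vfork():        %d\n", rejected(CLONE_VM | CLONE_VFORK, true));  /* 1 */
            printf("thread create:  %d\n", rejected(CLONE_VM | CLONE_THREAD, true)); /* 1 */
            return 0;
    }
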
index 08350e35aba240b8042c74aa8993012a8277c344..ca9d834d0b843b5cd6e3ff9584cbce727dcdc9d4 100644 (file)
@@ -1562,6 +1562,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
        /* Ensure it is not in reserved area nor out of text */
        if (!(core_kernel_text((unsigned long) p->addr) ||
            is_module_text_address((unsigned long) p->addr)) ||
+           in_gate_area_no_mm((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
            jump_label_text_reserved(p->addr, p->addr) ||
            static_call_text_reserved(p->addr, p->addr) ||
index b4cbb406bc2840af0029107e3bbbff030081937f..eec72ca962e249c94266192b77a3c1f92ec8e889 100644 (file)
@@ -179,8 +179,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
        if (IS_ERR(new_ns))
                return  PTR_ERR(new_ns);
 
-       if ((flags & CLONE_VM) == 0)
-               timens_on_fork(new_ns, tsk);
+       timens_on_fork(new_ns, tsk);
 
        tsk->nsproxy = new_ns;
        return 0;
index bb3d63bdf4ae87558782b143a6dc238ad07cfd5a..667876da8382d31ec987946e0b16bdd6cbdab16d 100644 (file)
@@ -416,7 +416,7 @@ void update_sched_domain_debugfs(void)
                char buf[32];
 
                snprintf(buf, sizeof(buf), "cpu%d", cpu);
-               debugfs_remove(debugfs_lookup(buf, sd_dentry));
+               debugfs_lookup_and_remove(buf, sd_dentry);
                d_cpu = debugfs_create_dir(buf, sd_dentry);
 
                i = 0;
index c1c47e2305efa2e6c43642536ff99fcebc9cb30e..dacc37b62a2c5d675fef631a92f626189d5e95d9 100644 (file)
@@ -27,7 +27,7 @@ struct automaton_wip {
        bool final_states[state_max_wip];
 };
 
-struct automaton_wip automaton_wip = {
+static struct automaton_wip automaton_wip = {
        .state_names = {
                "preemptive",
                "non_preemptive"
index d1afe55cdd4c11acb152485a6de1fb03ec8891b6..118e576b91b4b1414d39522eb0d8307032379e67 100644 (file)
@@ -27,7 +27,7 @@ struct automaton_wwnr {
        bool final_states[state_max_wwnr];
 };
 
-struct automaton_wwnr automaton_wwnr = {
+static struct automaton_wwnr automaton_wwnr = {
        .state_names = {
                "not_running",
                "running"
index b698d05dd06955e883aee394837dd54d2781c5e9..d65f6c25a87cdd31a9079e3217103c72207f1fe7 100644 (file)
@@ -24,13 +24,13 @@ static struct rv_reactor rv_panic = {
        .react = rv_panic_reaction
 };
 
-static int register_react_panic(void)
+static int __init register_react_panic(void)
 {
        rv_register_reactor(&rv_panic);
        return 0;
 }
 
-static void unregister_react_panic(void)
+static void __exit unregister_react_panic(void)
 {
        rv_unregister_reactor(&rv_panic);
 }
index 31899f953af4e0f9b7baec97ababa8dbd995e36b..4b6b7106a477c2c105823ed798a05a006513e1ae 100644 (file)
@@ -23,13 +23,13 @@ static struct rv_reactor rv_printk = {
        .react = rv_printk_reaction
 };
 
-static int register_react_printk(void)
+static int __init register_react_printk(void)
 {
        rv_register_reactor(&rv_printk);
        return 0;
 }
 
-static void unregister_react_printk(void)
+static void __exit unregister_react_printk(void)
 {
        rv_unregister_reactor(&rv_printk);
 }
index cb866c3141af2323001595386dd22a6eff4e9a35..918730d749325147c3772c2f858cb18e805b1291 100644 (file)
@@ -142,7 +142,8 @@ static bool check_user_trigger(struct trace_event_file *file)
 {
        struct event_trigger_data *data;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       list_for_each_entry_rcu(data, &file->triggers, list,
+                               lockdep_is_held(&event_mutex)) {
                if (data->flags & EVENT_TRIGGER_FL_PROBE)
                        continue;
                return true;
index 95b58bd757ce400f1766e296088650850d57ae30..1e130da1b742c663e6c578ccf581ec05635baa6a 100644 (file)
@@ -95,14 +95,14 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
        }
 
        lockdep_hardirqs_on_prepare();
-       lockdep_hardirqs_on(CALLER_ADDR0);
+       lockdep_hardirqs_on(caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
-       lockdep_hardirqs_off(CALLER_ADDR0);
+       lockdep_hardirqs_off(caller_addr);
 
        if (!this_cpu_read(tracing_irq_cpu)) {
                this_cpu_write(tracing_irq_cpu, 1);
index 64ea283f2f86dddb19b4c8e2d683544a14b3eb63..ef42c1a1192053cc05b45ccb61358a4996453add 100644 (file)
@@ -571,7 +571,8 @@ static void for_each_tracepoint_range(
 bool trace_module_has_bad_taint(struct module *mod)
 {
        return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
-                              (1 << TAINT_UNSIGNED_MODULE));
+                              (1 << TAINT_UNSIGNED_MODULE) |
+                              (1 << TAINT_TEST));
 }
 
 static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
@@ -647,7 +648,7 @@ static int tracepoint_module_coming(struct module *mod)
        /*
         * We skip modules that taint the kernel, especially those with different
         * module headers (for forced load), to make sure we don't cause a crash.
-        * Staging, out-of-tree, and unsigned GPL modules are fine.
+        * Staging, out-of-tree, unsigned GPL, and test modules are fine.
         */
        if (trace_module_has_bad_taint(mod))
                return 0;
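
trace_module_has_bad_taint() is an allow-mask check: clear the taint bits the tracer tolerates and see whether anything remains. A tiny sketch of the same test; the bit positions below are made up, the real TAINT_* values live in the kernel headers:

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative bit positions only */
    enum { T_CRAP = 0, T_OOT = 1, T_UNSIGNED = 2, T_TEST = 3, T_FORCED = 4 };

    static bool has_bad_taint(unsigned long taints)
    {
            unsigned long ok = (1UL << T_CRAP) | (1UL << T_OOT) |
                               (1UL << T_UNSIGNED) | (1UL << T_TEST);

            return taints & ~ok;   /* any bit outside the allow list is "bad" */
    }

    int main(void)
    {
            printf("%d\n", has_bad_taint(1UL << T_TEST));    /* 0: test modules now pass */
            printf("%d\n", has_bad_taint(1UL << T_FORCED));  /* 1: still refused */
            return 0;
    }
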
index 7d268a291486ba54f692cbc8dfd1a8dd79529530..c284efa3d1efc2ef4ccab136911365ead224f22d 100644 (file)
@@ -2873,6 +2873,9 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
 
        task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
                        &rpc_cb_add_xprt_call_ops, data);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+
        data->xps->xps_nunique_destaddr_xprts++;
        rpc_put_task(task);
 success:
index d71eec494826b269a76c887cfa82972fd805e59b..f8fae78156494c19e0aae8270d187155d696608c 100644 (file)
@@ -1179,11 +1179,8 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
-       if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
+       if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
                xprt_request_rb_remove(req->rq_xprt, req);
-               xdr_free_bvec(&req->rq_rcv_buf);
-               req->rq_private_buf.bvec = NULL;
-       }
 }
 
 /**
@@ -1221,6 +1218,8 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
 
        xprt->stat.recvs++;
 
+       xdr_free_bvec(&req->rq_rcv_buf);
+       req->rq_private_buf.bvec = NULL;
        req->rq_private_buf.len = copied;
        /* Ensure all writes are done before we update */
        /* req->rq_reply_bytes_recvd */
@@ -1453,6 +1452,7 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
                xprt_request_dequeue_transmit_locked(task);
                xprt_request_dequeue_receive_locked(task);
                spin_unlock(&xprt->queue_lock);
+               xdr_free_bvec(&req->rq_rcv_buf);
        }
 }
 
index 3b42f255e2baa85738240c6bd847e4fe21a378f5..8df33e7d6daa3122856aa10b54995f56d058da30 100755 (executable)
@@ -62,6 +62,7 @@ try_decompress 'BZh'          xy    bunzip2
 try_decompress '\135\0\0\0'   xxx   unlzma
 try_decompress '\211\114\132' xy    'lzop -d'
 try_decompress '\002\041\114\030' xyy 'lz4 -d -l'
+try_decompress '\050\265\057\375' xxx unzstd
 
 # Bail out:
 echo "$me: Cannot find kernel config." >&2
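
The new try_decompress line matches the zstd frame magic: the octal escapes \050\265\057\375 are the bytes 0x28 0xB5 0x2F 0xFD, the little-endian encoding of 0xFD2FB528. The same signature check as a minimal C sketch (buffer contents are made up):

    #include <stdio.h>
    #include <string.h>

    static int looks_like_zstd(const unsigned char *buf, size_t len)
    {
            static const unsigned char magic[4] = { 0x28, 0xB5, 0x2F, 0xFD };

            return len >= 4 && memcmp(buf, magic, 4) == 0;
    }

    int main(void)
    {
            unsigned char hdr[] = { 0x28, 0xB5, 0x2F, 0xFD, 0x00 };

            printf("%s\n", looks_like_zstd(hdr, sizeof(hdr)) ? "zstd" : "unknown");
            return 0;
    }
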
diff --git a/scripts/gcc-ld b/scripts/gcc-ld
deleted file mode 100755 (executable)
index 997b818..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# run gcc with ld options
-# used as a wrapper to execute link time optimizations
-# yes virginia, this is not pretty
-
-ARGS="-nostdlib"
-
-while [ "$1" != "" ] ; do
-       case "$1" in
-       -save-temps|-m32|-m64) N="$1" ;;
-       -r) N="$1" ;;
-       -[Wg]*) N="$1" ;;
-       -[olv]|-[Ofd]*|-nostdlib) N="$1" ;;
-       --end-group|--start-group)
-                N="-Wl,$1" ;;
-       -[RTFGhIezcbyYu]*|\
---script|--defsym|-init|-Map|--oformat|-rpath|\
--rpath-link|--sort-section|--section-start|-Tbss|-Tdata|-Ttext|\
---version-script|--dynamic-list|--version-exports-symbol|--wrap|-m)
-               A="$1" ; shift ; N="-Wl,$A,$1" ;;
-       -[m]*) N="$1" ;;
-       -*) N="-Wl,$1" ;;
-       *)  N="$1" ;;
-       esac
-       ARGS="$ARGS $N"
-       shift
-done
-
-exec $CC $ARGS
index 9aa23d15862a02b0ae86646b0cb6d7f84f13c5f8..ad8bbc52267d052d0d083a0f6b12e8faf7342a90 100755 (executable)
@@ -41,4 +41,4 @@
 # so we just ignore them to let readprofile continue to work.
 # (At least sparc64 has __crc_ in the middle).
 
-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2
+$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)\|\( L0\)' > $2
index 193dae361fac39d7d9f3a6a1cd413944eb0e6a2a..5377f94eb2111f844e1c439926f1a27bfb728fd4 100644 (file)
@@ -178,10 +178,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
                return -ENOMEM;
 
        err = snd_card_init(card, parent, idx, xid, module, extra_size);
-       if (err < 0) {
-               kfree(card);
-               return err;
-       }
+       if (err < 0)
+               return err; /* card is freed by error handler */
 
        *card_ret = card;
        return 0;
@@ -233,7 +231,7 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
        card->managed = true;
        err = snd_card_init(card, parent, idx, xid, module, extra_size);
        if (err < 0) {
-               devres_free(card);
+               devres_free(card); /* in managed mode, we need to free manually */
                return err;
        }
 
@@ -297,6 +295,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
                mutex_unlock(&snd_card_mutex);
                dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n",
                         idx, snd_ecards_limit - 1, err);
+               if (!card->managed)
+                       kfree(card); /* manually free here, as no destructor called */
                return err;
        }
        set_bit(idx, snd_cards_lock);           /* lock it */
index b665ac66ccbe8aa9bb1c76af8bc6a7548c83592a..cfcd8eff41398eb935fcc16dab51a520628fd198 100644 (file)
@@ -543,10 +543,13 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
        dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
                                            sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
-       if (p)
+       if (p) {
                dmab->private_data = sgt;
-       else
+               /* store the first page address for convenience */
+               dmab->addr = snd_sgbuf_get_addr(dmab, 0);
+       } else {
                dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+       }
        return p;
 }
 
@@ -780,6 +783,8 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
        if (!p)
                goto error;
        dmab->private_data = sgbuf;
+       /* store the first page address for convenience */
+       dmab->addr = snd_sgbuf_get_addr(dmab, 0);
        return p;
 
  error:
index 90c3a367d7de9acde49b816bbd4853e55eba2497..02df915eb3c66ab9ee6ab5617e8b206115a65fa6 100644 (file)
@@ -1672,14 +1672,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
                runtime = substream->runtime;
                if (atomic_read(&substream->mmap_count))
                        goto __direct;
-               err = snd_pcm_oss_make_ready(substream);
-               if (err < 0)
-                       return err;
                atomic_inc(&runtime->oss.rw_ref);
                if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
                        atomic_dec(&runtime->oss.rw_ref);
                        return -ERESTARTSYS;
                }
+               err = snd_pcm_oss_make_ready_locked(substream);
+               if (err < 0)
+                       goto unlock;
                format = snd_pcm_oss_format_from(runtime->oss.format);
                width = snd_pcm_format_physical_width(format);
                if (runtime->oss.buffer_used > 0) {
index 9b4a7cdb103ad8c142bd5fe4a4e73914a21df553..12f12a294df5a462f6de7e11137ac8738533b011 100644 (file)
@@ -605,17 +605,18 @@ static unsigned int loopback_jiffies_timer_pos_update
                        cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
        struct loopback_pcm *dpcm_capt =
                        cable->streams[SNDRV_PCM_STREAM_CAPTURE];
-       unsigned long delta_play = 0, delta_capt = 0;
+       unsigned long delta_play = 0, delta_capt = 0, cur_jiffies;
        unsigned int running, count1, count2;
 
+       cur_jiffies = jiffies;
        running = cable->running ^ cable->pause;
        if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
-               delta_play = jiffies - dpcm_play->last_jiffies;
+               delta_play = cur_jiffies - dpcm_play->last_jiffies;
                dpcm_play->last_jiffies += delta_play;
        }
 
        if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
-               delta_capt = jiffies - dpcm_capt->last_jiffies;
+               delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
                dpcm_capt->last_jiffies += delta_capt;
        }
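
Reading jiffies once into cur_jiffies keeps the playback and capture deltas consistent even if the tick advances in the middle of the function. The same snapshot pattern in a self-contained sketch; the counter and the last_* values are stand-ins:

    #include <stdio.h>

    static unsigned long fake_jiffies = 1000;   /* stand-in for the tick counter */

    int main(void)
    {
            unsigned long last_play = 990, last_capt = 985;

            /* take one snapshot; both deltas are measured against the same instant */
            unsigned long cur = fake_jiffies;
            unsigned long delta_play = cur - last_play;
            unsigned long delta_capt = cur - last_capt;

            /* had each delta re-read the counter, a tick in between could make
             * the two streams disagree about "now" */
            printf("delta_play=%lu delta_capt=%lu\n", delta_play, delta_capt);
            return 0;
    }
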
 
index b2701a4452d86dd15123b272b38c2c2b7b35dcb7..48af77ae8020f5a5c2ecca1c2bd974cc186f1779 100644 (file)
@@ -124,7 +124,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic
        epcm->voices[0]->epcm = epcm;
        if (voices > 1) {
                for (i = 1; i < voices; i++) {
-                       epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i];
+                       epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];
                        epcm->voices[i]->epcm = epcm;
                }
        }
index cae9a975cbcca809bdd59993e1343eebd4c6ad76..1a868dd9dc4b6886a8dda83514052910e52014cc 100644 (file)
@@ -157,10 +157,10 @@ static int hda_codec_driver_remove(struct device *dev)
                return codec->bus->core.ext_ops->hdev_detach(&codec->core);
        }
 
-       refcount_dec(&codec->pcm_ref);
        snd_hda_codec_disconnect_pcms(codec);
        snd_hda_jack_tbl_disconnect(codec);
-       wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
+       if (!refcount_dec_and_test(&codec->pcm_ref))
+               wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
        snd_power_sync_ref(codec->bus->card);
 
        if (codec->patch_ops.free)
index a77165bd92a983c35bfa56e6f0ca4407b1ff7aa1..6f30c374f896ea72411a926a6e51b747dcede921 100644 (file)
@@ -1817,7 +1817,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
 
        /* use the non-cached pages in non-snoop mode */
        if (!azx_snoop(chip))
-               azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC;
+               azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC_SG;
 
        if (chip->driver_type == AZX_DRIVER_NVIDIA) {
                dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
@@ -2550,6 +2550,8 @@ static const struct pci_device_id azx_ids[] = {
        /* 5 Series/3400 */
        { PCI_DEVICE(0x8086, 0x3b56),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+       { PCI_DEVICE(0x8086, 0x3b57),
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Poulsbo */
        { PCI_DEVICE(0x8086, 0x811b),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
index 7debb2c76aa62b778cec1d2c110694d4a57c91dc..976a112c7d0061c932c82310b376bacf381fc2c8 100644 (file)
@@ -474,7 +474,8 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match);
 static int hda_tegra_probe(struct platform_device *pdev)
 {
        const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR |
-                                         AZX_DCAPS_PM_RUNTIME;
+                                         AZX_DCAPS_PM_RUNTIME |
+                                         AZX_DCAPS_4K_BDLE_BOUNDARY;
        struct snd_card *card;
        struct azx *chip;
        struct hda_tegra *hda;
index 6c209cd26c0cab1ed9e835e118e6bcfaf37a7f05..c239d9dbbaefe518e83a1a9a1ea16343ce12c409 100644 (file)
@@ -170,6 +170,8 @@ struct hdmi_spec {
        bool dyn_pcm_no_legacy;
        /* hdmi interrupt trigger control flag for Nvidia codec */
        bool hdmi_intr_trig_ctrl;
+       bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
+
        bool intel_hsw_fixup;   /* apply Intel platform-specific fixups */
        /*
         * Non-generic VIA/NVIDIA specific
@@ -679,15 +681,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
                                     int ca, int active_channels,
                                     int conn_type)
 {
+       struct hdmi_spec *spec = codec->spec;
        union audio_infoframe ai;
 
        memset(&ai, 0, sizeof(ai));
-       if (conn_type == 0) { /* HDMI */
+       if ((conn_type == 0) || /* HDMI */
+               /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */
+               (conn_type == 1 && spec->nv_dp_workaround)) {
                struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
 
-               hdmi_ai->type           = 0x84;
-               hdmi_ai->ver            = 0x01;
-               hdmi_ai->len            = 0x0a;
+               if (conn_type == 0) { /* HDMI */
+                       hdmi_ai->type           = 0x84;
+                       hdmi_ai->ver            = 0x01;
+                       hdmi_ai->len            = 0x0a;
+               } else {/* Nvidia DP */
+                       hdmi_ai->type           = 0x84;
+                       hdmi_ai->ver            = 0x1b;
+                       hdmi_ai->len            = 0x11 << 2;
+               }
                hdmi_ai->CC02_CT47      = active_channels - 1;
                hdmi_ai->CA             = ca;
                hdmi_checksum_audio_infoframe(hdmi_ai);
@@ -1267,6 +1278,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        set_bit(pcm_idx, &spec->pcm_in_use);
        per_pin = get_pin(spec, pin_idx);
        per_pin->cvt_nid = per_cvt->cvt_nid;
+       per_pin->silent_stream = false;
        hinfo->nid = per_cvt->cvt_nid;
 
        /* flip stripe flag for the assigned stream if supported */
@@ -3617,6 +3629,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
        spec->pcm_playback.rates = SUPPORTED_RATES;
        spec->pcm_playback.maxbps = SUPPORTED_MAXBPS;
        spec->pcm_playback.formats = SUPPORTED_FORMATS;
+       spec->nv_dp_workaround = true;
        return 0;
 }
 
@@ -3756,6 +3769,7 @@ static int patch_nvhdmi(struct hda_codec *codec)
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+       spec->nv_dp_workaround = true;
 
        codec->link_down_at_suspend = 1;
 
@@ -3779,6 +3793,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec)
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+       spec->nv_dp_workaround = true;
 
        codec->link_down_at_suspend = 1;
 
@@ -3984,6 +3999,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
 
        generic_hdmi_init_per_pins(codec);
 
+       codec->depop_delay = 10;
        codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
@@ -3992,6 +4008,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+       spec->nv_dp_workaround = true;
 
        return 0;
 }
index 38930cf5aace021ff46ae1120318c4e22dd26aca..f9d46ae4c7b71308d70b6da4f73d98070dc2ae83 100644 (file)
@@ -7067,6 +7067,8 @@ enum {
        ALC294_FIXUP_ASUS_GU502_HP,
        ALC294_FIXUP_ASUS_GU502_PINS,
        ALC294_FIXUP_ASUS_GU502_VERBS,
+       ALC294_FIXUP_ASUS_G513_PINS,
+       ALC285_FIXUP_ASUS_G533Z_PINS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_GPIO_LED,
@@ -8405,6 +8407,24 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC294_FIXUP_ASUS_GU502_HP] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc294_fixup_gu502_hp,
+       },
+        [ALC294_FIXUP_ASUS_G513_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                               { 0x19, 0x03a11050 }, /* front HP mic */
+                               { 0x1a, 0x03a11c30 }, /* rear external mic */
+                               { 0x21, 0x03211420 }, /* front HP out */
+                               { }
+               },
+       },
+       [ALC285_FIXUP_ASUS_G533Z_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x90170120 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
        },
        [ALC294_FIXUP_ASUS_COEF_1B] = {
                .type = HDA_FIXUP_VERBS,
@@ -9149,6 +9169,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+       SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
@@ -9165,6 +9186,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -9292,6 +9314,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -9339,10 +9362,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+       SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+       SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
-       SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9358,14 +9382,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
        SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+       SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
        SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+       SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
-       SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
-       SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -9569,6 +9595,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
        SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
        SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+       SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
        SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
        SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
index 61df4d33c48ffa95dc5e6adb11e48ff364984c1f..7f340f18599c98b3a070def9311f7eb639974b1c 100644 (file)
@@ -209,6 +209,7 @@ struct sigmatel_spec {
 
        /* beep widgets */
        hda_nid_t anabeep_nid;
+       bool beep_power_on;
 
        /* SPDIF-out mux */
        const char * const *spdif_labels;
@@ -4443,6 +4444,28 @@ static int stac_suspend(struct hda_codec *codec)
 
        return 0;
 }
+
+static int stac_check_power_status(struct hda_codec *codec, hda_nid_t nid)
+{
+#ifdef CONFIG_SND_HDA_INPUT_BEEP
+       struct sigmatel_spec *spec = codec->spec;
+#endif
+       int ret = snd_hda_gen_check_power_status(codec, nid);
+
+#ifdef CONFIG_SND_HDA_INPUT_BEEP
+       if (nid == spec->gen.beep_nid && codec->beep) {
+               if (codec->beep->enabled != spec->beep_power_on) {
+                       spec->beep_power_on = codec->beep->enabled;
+                       if (spec->beep_power_on)
+                               snd_hda_power_up_pm(codec);
+                       else
+                               snd_hda_power_down_pm(codec);
+               }
+               ret |= spec->beep_power_on;
+       }
+#endif
+       return ret;
+}
 #else
 #define stac_suspend           NULL
 #endif /* CONFIG_PM */
@@ -4455,6 +4478,7 @@ static const struct hda_codec_ops stac_patch_ops = {
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
        .suspend = stac_suspend,
+       .check_power_status = stac_check_power_status,
 #endif
 };
 
index 4850a177803dbee786c34a5aa93793cbc917d167..ab2d7a791f39ce3dc742fe325e786d75d98c2943 100644 (file)
@@ -196,7 +196,7 @@ struct mchp_spdiftx_dev {
        struct clk                              *pclk;
        struct clk                              *gclk;
        unsigned int                            fmt;
-       int                                     gclk_enabled:1;
+       unsigned int                            gclk_enabled:1;
 };
 
 static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev)
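
The gclk_enabled change matters because, on compilers where a plain int bit-field is signed (GCC's default), a 1-bit field can only hold 0 and -1: storing 1 reads back as -1 and a later comparison against 1 quietly fails. A short demonstration:

    #include <stdio.h>

    struct flags_sketch {
            int          s : 1;   /* signed: holds 0 or -1 */
            unsigned int u : 1;   /* unsigned: holds 0 or 1 */
    };

    int main(void)
    {
            struct flags_sketch f = { 0 };
            int one = 1;

            f.s = one;
            f.u = one;
            printf("signed reads back %d, unsigned reads back %u\n", f.s, f.u); /* -1, 1 */
            printf("s == 1? %d   u == 1? %d\n", f.s == 1, f.u == 1);            /* 0, 1 */
            return 0;
    }
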
index d545a593a2516603979ddf6c731cd80e0c64d2d6..daafd4251ce669599f90b7e0cc7fd860bc67df86 100644 (file)
@@ -1617,7 +1617,6 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
        unsigned int current_plug_status;
        unsigned int current_button_status;
        unsigned int i;
-       int report = 0;
 
        mutex_lock(&cs42l42->irq_lock);
        if (cs42l42->suspended) {
@@ -1711,13 +1710,15 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
 
                        if (current_button_status & CS42L42_M_DETECT_TF_MASK) {
                                dev_dbg(cs42l42->dev, "Button released\n");
-                               report = 0;
+                               snd_soc_jack_report(cs42l42->jack, 0,
+                                                   SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+                                                   SND_JACK_BTN_2 | SND_JACK_BTN_3);
                        } else if (current_button_status & CS42L42_M_DETECT_FT_MASK) {
-                               report = cs42l42_handle_button_press(cs42l42);
-
+                               snd_soc_jack_report(cs42l42->jack,
+                                                   cs42l42_handle_button_press(cs42l42),
+                                                   SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+                                                   SND_JACK_BTN_2 | SND_JACK_BTN_3);
                        }
-                       snd_soc_jack_report(cs42l42->jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
-                                                                  SND_JACK_BTN_2 | SND_JACK_BTN_3);
                }
        }
 
index 58f70a02f18aad83654420d8bd843bf4a1e37dc9..0626d5694c2244003bdcee52ed0fc9d7b4e985fe 100644 (file)
@@ -357,17 +357,32 @@ static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
        {"AIFTX", NULL, "Digital CH4 Mux"},
 };
 
-static int nau8540_clock_check(struct nau8540 *nau8540, int rate, int osr)
+static const struct nau8540_osr_attr *
+nau8540_get_osr(struct nau8540 *nau8540)
 {
+       unsigned int osr;
+
+       regmap_read(nau8540->regmap, NAU8540_REG_ADC_SAMPLE_RATE, &osr);
+       osr &= NAU8540_ADC_OSR_MASK;
        if (osr >= ARRAY_SIZE(osr_adc_sel))
-               return -EINVAL;
+               return NULL;
+       return &osr_adc_sel[osr];
+}
+
+static int nau8540_dai_startup(struct snd_pcm_substream *substream,
+                              struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct nau8540 *nau8540 = snd_soc_component_get_drvdata(component);
+       const struct nau8540_osr_attr *osr;
 
-       if (rate * osr > CLK_ADC_MAX) {
-               dev_err(nau8540->dev, "exceed the maximum frequency of CLK_ADC\n");
+       osr = nau8540_get_osr(nau8540);
+       if (!osr || !osr->osr)
                return -EINVAL;
-       }
 
-       return 0;
+       return snd_pcm_hw_constraint_minmax(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_RATE,
+                                           0, CLK_ADC_MAX / osr->osr);
 }
 
 static int nau8540_hw_params(struct snd_pcm_substream *substream,
@@ -375,7 +390,8 @@ static int nau8540_hw_params(struct snd_pcm_substream *substream,
 {
        struct snd_soc_component *component = dai->component;
        struct nau8540 *nau8540 = snd_soc_component_get_drvdata(component);
-       unsigned int val_len = 0, osr;
+       unsigned int val_len = 0;
+       const struct nau8540_osr_attr *osr;
 
        /* CLK_ADC = OSR * FS
         * ADC clock frequency is defined as Over Sampling Rate (OSR)
@@ -383,13 +399,14 @@ static int nau8540_hw_params(struct snd_pcm_substream *substream,
         * values must be selected such that the maximum frequency is less
         * than 6.144 MHz.
         */
-       regmap_read(nau8540->regmap, NAU8540_REG_ADC_SAMPLE_RATE, &osr);
-       osr &= NAU8540_ADC_OSR_MASK;
-       if (nau8540_clock_check(nau8540, params_rate(params), osr))
+       osr = nau8540_get_osr(nau8540);
+       if (!osr || !osr->osr)
+               return -EINVAL;
+       if (params_rate(params) * osr->osr > CLK_ADC_MAX)
                return -EINVAL;
        regmap_update_bits(nau8540->regmap, NAU8540_REG_CLOCK_SRC,
                NAU8540_CLK_ADC_SRC_MASK,
-               osr_adc_sel[osr].clk_src << NAU8540_CLK_ADC_SRC_SFT);
+               osr->clk_src << NAU8540_CLK_ADC_SRC_SFT);
 
        switch (params_width(params)) {
        case 16:
@@ -515,6 +532,7 @@ static int nau8540_set_tdm_slot(struct snd_soc_dai *dai,
 
 
 static const struct snd_soc_dai_ops nau8540_dai_ops = {
+       .startup = nau8540_dai_startup,
        .hw_params = nau8540_hw_params,
        .set_fmt = nau8540_set_fmt,
        .set_tdm_slot = nau8540_set_tdm_slot,
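
The new startup callback turns the old late failure in hw_params into an up-front rate constraint: with CLK_ADC = OSR * FS capped at 6.144 MHz, as the driver comment states, the highest usable sample rate is simply CLK_ADC_MAX / OSR, which is the upper bound handed to snd_pcm_hw_constraint_minmax(). A worked sketch of that arithmetic; the constant is taken from the comment and the OSR values are chosen for illustration:

    #include <stdio.h>

    #define CLK_ADC_MAX_SKETCH 6144000u   /* 6.144 MHz, per the driver comment */

    int main(void)
    {
            unsigned int osr_table[] = { 32, 64, 128, 256 };  /* example OSR settings */

            for (unsigned int i = 0; i < sizeof(osr_table) / sizeof(osr_table[0]); i++)
                    printf("OSR %3u -> max rate %u Hz\n",
                           osr_table[i], CLK_ADC_MAX_SKETCH / osr_table[i]);
            /* prints 192000, 96000, 48000 and 24000 Hz respectively */
            return 0;
    }
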
index 2d21339932e651bdf24bf3b6a6ebf6bef5bfde40..4a72b94e8410424023fdef82365e413d0dc256f7 100644 (file)
@@ -670,28 +670,40 @@ static const struct snd_soc_dapm_route nau8821_dapm_routes[] = {
        {"HPOR", NULL, "Class G"},
 };
 
-static int nau8821_clock_check(struct nau8821 *nau8821,
-       int stream, int rate, int osr)
+static const struct nau8821_osr_attr *
+nau8821_get_osr(struct nau8821 *nau8821, int stream)
 {
-       int osrate = 0;
+       unsigned int osr;
 
        if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               regmap_read(nau8821->regmap, NAU8821_R2C_DAC_CTRL1, &osr);
+               osr &= NAU8821_DAC_OVERSAMPLE_MASK;
                if (osr >= ARRAY_SIZE(osr_dac_sel))
-                       return -EINVAL;
-               osrate = osr_dac_sel[osr].osr;
+                       return NULL;
+               return &osr_dac_sel[osr];
        } else {
+               regmap_read(nau8821->regmap, NAU8821_R2B_ADC_RATE, &osr);
+               osr &= NAU8821_ADC_SYNC_DOWN_MASK;
                if (osr >= ARRAY_SIZE(osr_adc_sel))
-                       return -EINVAL;
-               osrate = osr_adc_sel[osr].osr;
+                       return NULL;
+               return &osr_adc_sel[osr];
        }
+}
 
-       if (!osrate || rate * osrate > CLK_DA_AD_MAX) {
-               dev_err(nau8821->dev,
-                       "exceed the maximum frequency of CLK_ADC or CLK_DAC");
+static int nau8821_dai_startup(struct snd_pcm_substream *substream,
+                              struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct nau8821 *nau8821 = snd_soc_component_get_drvdata(component);
+       const struct nau8821_osr_attr *osr;
+
+       osr = nau8821_get_osr(nau8821, substream->stream);
+       if (!osr || !osr->osr)
                return -EINVAL;
-       }
 
-       return 0;
+       return snd_pcm_hw_constraint_minmax(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_RATE,
+                                           0, CLK_DA_AD_MAX / osr->osr);
 }
 
 static int nau8821_hw_params(struct snd_pcm_substream *substream,
@@ -699,7 +711,8 @@ static int nau8821_hw_params(struct snd_pcm_substream *substream,
 {
        struct snd_soc_component *component = dai->component;
        struct nau8821 *nau8821 = snd_soc_component_get_drvdata(component);
-       unsigned int val_len = 0, osr, ctrl_val, bclk_fs, clk_div;
+       unsigned int val_len = 0, ctrl_val, bclk_fs, clk_div;
+       const struct nau8821_osr_attr *osr;
 
        nau8821->fs = params_rate(params);
        /* CLK_DAC or CLK_ADC = OSR * FS
@@ -708,27 +721,19 @@ static int nau8821_hw_params(struct snd_pcm_substream *substream,
         * values must be selected such that the maximum frequency is less
         * than 6.144 MHz.
         */
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               regmap_read(nau8821->regmap, NAU8821_R2C_DAC_CTRL1, &osr);
-               osr &= NAU8821_DAC_OVERSAMPLE_MASK;
-               if (nau8821_clock_check(nau8821, substream->stream,
-                       nau8821->fs, osr)) {
-                       return -EINVAL;
-               }
+       osr = nau8821_get_osr(nau8821, substream->stream);
+       if (!osr || !osr->osr)
+               return -EINVAL;
+       if (nau8821->fs * osr->osr > CLK_DA_AD_MAX)
+               return -EINVAL;
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                regmap_update_bits(nau8821->regmap, NAU8821_R03_CLK_DIVIDER,
                        NAU8821_CLK_DAC_SRC_MASK,
-                       osr_dac_sel[osr].clk_src << NAU8821_CLK_DAC_SRC_SFT);
-       } else {
-               regmap_read(nau8821->regmap, NAU8821_R2B_ADC_RATE, &osr);
-               osr &= NAU8821_ADC_SYNC_DOWN_MASK;
-               if (nau8821_clock_check(nau8821, substream->stream,
-                       nau8821->fs, osr)) {
-                       return -EINVAL;
-               }
+                       osr->clk_src << NAU8821_CLK_DAC_SRC_SFT);
+       else
                regmap_update_bits(nau8821->regmap, NAU8821_R03_CLK_DIVIDER,
                        NAU8821_CLK_ADC_SRC_MASK,
-                       osr_adc_sel[osr].clk_src << NAU8821_CLK_ADC_SRC_SFT);
-       }
+                       osr->clk_src << NAU8821_CLK_ADC_SRC_SFT);
 
        /* make BCLK and LRC divde configuration if the codec as master. */
        regmap_read(nau8821->regmap, NAU8821_R1D_I2S_PCM_CTRL2, &ctrl_val);
@@ -843,6 +848,7 @@ static int nau8821_digital_mute(struct snd_soc_dai *dai, int mute,
 }
 
 static const struct snd_soc_dai_ops nau8821_dai_ops = {
+       .startup = nau8821_dai_startup,
        .hw_params = nau8821_hw_params,
        .set_fmt = nau8821_set_dai_fmt,
        .mute_stream = nau8821_digital_mute,
index ad54d70f7d8e75d4eda4f527548852d60a132363..15596452ca374949d47f9cbf7fe76f44e68b04bf 100644 (file)
@@ -1014,27 +1014,42 @@ static irqreturn_t nau8824_interrupt(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static int nau8824_clock_check(struct nau8824 *nau8824,
-       int stream, int rate, int osr)
+static const struct nau8824_osr_attr *
+nau8824_get_osr(struct nau8824 *nau8824, int stream)
 {
-       int osrate;
+       unsigned int osr;
 
        if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               regmap_read(nau8824->regmap,
+                           NAU8824_REG_DAC_FILTER_CTRL_1, &osr);
+               osr &= NAU8824_DAC_OVERSAMPLE_MASK;
                if (osr >= ARRAY_SIZE(osr_dac_sel))
-                       return -EINVAL;
-               osrate = osr_dac_sel[osr].osr;
+                       return NULL;
+               return &osr_dac_sel[osr];
        } else {
+               regmap_read(nau8824->regmap,
+                           NAU8824_REG_ADC_FILTER_CTRL, &osr);
+               osr &= NAU8824_ADC_SYNC_DOWN_MASK;
                if (osr >= ARRAY_SIZE(osr_adc_sel))
-                       return -EINVAL;
-               osrate = osr_adc_sel[osr].osr;
+                       return NULL;
+               return &osr_adc_sel[osr];
        }
+}
+
+static int nau8824_dai_startup(struct snd_pcm_substream *substream,
+                              struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
+       const struct nau8824_osr_attr *osr;
 
-       if (!osrate || rate * osr > CLK_DA_AD_MAX) {
-               dev_err(nau8824->dev, "exceed the maximum frequency of CLK_ADC or CLK_DAC\n");
+       osr = nau8824_get_osr(nau8824, substream->stream);
+       if (!osr || !osr->osr)
                return -EINVAL;
-       }
 
-       return 0;
+       return snd_pcm_hw_constraint_minmax(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_RATE,
+                                           0, CLK_DA_AD_MAX / osr->osr);
 }
 
 static int nau8824_hw_params(struct snd_pcm_substream *substream,
@@ -1042,7 +1057,9 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
 {
        struct snd_soc_component *component = dai->component;
        struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
-       unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div;
+       unsigned int val_len = 0, ctrl_val, bclk_fs, bclk_div;
+       const struct nau8824_osr_attr *osr;
+       int err = -EINVAL;
 
        nau8824_sema_acquire(nau8824, HZ);
 
@@ -1053,27 +1070,19 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
         * than 6.144 MHz.
         */
        nau8824->fs = params_rate(params);
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               regmap_read(nau8824->regmap,
-                       NAU8824_REG_DAC_FILTER_CTRL_1, &osr);
-               osr &= NAU8824_DAC_OVERSAMPLE_MASK;
-               if (nau8824_clock_check(nau8824, substream->stream,
-                       nau8824->fs, osr))
-                       return -EINVAL;
+       osr = nau8824_get_osr(nau8824, substream->stream);
+       if (!osr || !osr->osr)
+               goto error;
+       if (nau8824->fs * osr->osr > CLK_DA_AD_MAX)
+               goto error;
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
                        NAU8824_CLK_DAC_SRC_MASK,
-                       osr_dac_sel[osr].clk_src << NAU8824_CLK_DAC_SRC_SFT);
-       } else {
-               regmap_read(nau8824->regmap,
-                       NAU8824_REG_ADC_FILTER_CTRL, &osr);
-               osr &= NAU8824_ADC_SYNC_DOWN_MASK;
-               if (nau8824_clock_check(nau8824, substream->stream,
-                       nau8824->fs, osr))
-                       return -EINVAL;
+                       osr->clk_src << NAU8824_CLK_DAC_SRC_SFT);
+       else
                regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
                        NAU8824_CLK_ADC_SRC_MASK,
-                       osr_adc_sel[osr].clk_src << NAU8824_CLK_ADC_SRC_SFT);
-       }
+                       osr->clk_src << NAU8824_CLK_ADC_SRC_SFT);
 
        /* make BCLK and LRC divde configuration if the codec as master. */
        regmap_read(nau8824->regmap,
@@ -1090,7 +1099,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
                else if (bclk_fs <= 256)
                        bclk_div = 0;
                else
-                       return -EINVAL;
+                       goto error;
                regmap_update_bits(nau8824->regmap,
                        NAU8824_REG_PORT0_I2S_PCM_CTRL_2,
                        NAU8824_I2S_LRC_DIV_MASK | NAU8824_I2S_BLK_DIV_MASK,
@@ -1111,15 +1120,17 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
                val_len |= NAU8824_I2S_DL_32;
                break;
        default:
-               return -EINVAL;
+               goto error;
        }
 
        regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
                NAU8824_I2S_DL_MASK, val_len);
+       err = 0;
 
+ error:
        nau8824_sema_release(nau8824);
 
-       return 0;
+       return err;
 }
 
 static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
@@ -1128,8 +1139,6 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
        unsigned int ctrl1_val = 0, ctrl2_val = 0;
 
-       nau8824_sema_acquire(nau8824, HZ);
-
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBM_CFM:
                ctrl2_val |= NAU8824_I2S_MS_MASTER;
@@ -1171,6 +1180,8 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                return -EINVAL;
        }
 
+       nau8824_sema_acquire(nau8824, HZ);
+
        regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
                NAU8824_I2S_DF_MASK | NAU8824_I2S_BP_MASK |
                NAU8824_I2S_PCMB_EN, ctrl1_val);
@@ -1547,6 +1558,7 @@ static const struct snd_soc_component_driver nau8824_component_driver = {
 };
 
 static const struct snd_soc_dai_ops nau8824_dai_ops = {
+       .startup = nau8824_dai_startup,
        .hw_params = nau8824_hw_params,
        .set_fmt = nau8824_set_fmt,
        .set_tdm_slot = nau8824_set_tdm_slot,
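
Besides sharing the OSR lookup, the nau8824 rework routes every failure through one error label so the semaphore taken at the top of hw_params is always released on the way out. The shape of that single-exit pattern, reduced to a stand-alone sketch with a pthread mutex standing in for the driver's semaphore (build with -pthread):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int configure(int width)
    {
            int err = -EINVAL;          /* pessimistic default, cleared on success */

            pthread_mutex_lock(&lock);

            if (width != 16 && width != 24 && width != 32)
                    goto error;         /* every bail-out still unlocks below */

            /* ... hardware programming would go here ... */
            err = 0;

    error:
            pthread_mutex_unlock(&lock);
            return err;
    }

    int main(void)
    {
            printf("%d %d\n", configure(24), configure(20));   /* 0 -22 */
            return 0;
    }
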
index 54ef7b0fa87860ec7d6199f884dd7a5aa255492c..8213273f501eb1055cbaf73f3ac8bc5f1bb7d248 100644 (file)
@@ -1247,27 +1247,42 @@ static const struct snd_soc_dapm_route nau8825_dapm_routes[] = {
        {"HPOR", NULL, "Class G"},
 };
 
-static int nau8825_clock_check(struct nau8825 *nau8825,
-       int stream, int rate, int osr)
+static const struct nau8825_osr_attr *
+nau8825_get_osr(struct nau8825 *nau8825, int stream)
 {
-       int osrate;
+       unsigned int osr;
 
        if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               regmap_read(nau8825->regmap,
+                           NAU8825_REG_DAC_CTRL1, &osr);
+               osr &= NAU8825_DAC_OVERSAMPLE_MASK;
                if (osr >= ARRAY_SIZE(osr_dac_sel))
-                       return -EINVAL;
-               osrate = osr_dac_sel[osr].osr;
+                       return NULL;
+               return &osr_dac_sel[osr];
        } else {
+               regmap_read(nau8825->regmap,
+                           NAU8825_REG_ADC_RATE, &osr);
+               osr &= NAU8825_ADC_SYNC_DOWN_MASK;
                if (osr >= ARRAY_SIZE(osr_adc_sel))
-                       return -EINVAL;
-               osrate = osr_adc_sel[osr].osr;
+                       return NULL;
+               return &osr_adc_sel[osr];
        }
+}
 
-       if (!osrate || rate * osr > CLK_DA_AD_MAX) {
-               dev_err(nau8825->dev, "exceed the maximum frequency of CLK_ADC or CLK_DAC\n");
+static int nau8825_dai_startup(struct snd_pcm_substream *substream,
+                              struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct nau8825 *nau8825 = snd_soc_component_get_drvdata(component);
+       const struct nau8825_osr_attr *osr;
+
+       osr = nau8825_get_osr(nau8825, substream->stream);
+       if (!osr || !osr->osr)
                return -EINVAL;
-       }
 
-       return 0;
+       return snd_pcm_hw_constraint_minmax(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_RATE,
+                                           0, CLK_DA_AD_MAX / osr->osr);
 }
 
 static int nau8825_hw_params(struct snd_pcm_substream *substream,
@@ -1276,7 +1291,9 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
 {
        struct snd_soc_component *component = dai->component;
        struct nau8825 *nau8825 = snd_soc_component_get_drvdata(component);
-       unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div;
+       unsigned int val_len = 0, ctrl_val, bclk_fs, bclk_div;
+       const struct nau8825_osr_attr *osr;
+       int err = -EINVAL;
 
        nau8825_sema_acquire(nau8825, 3 * HZ);
 
@@ -1286,29 +1303,19 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
         * values must be selected such that the maximum frequency is less
         * than 6.144 MHz.
         */
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
-               osr &= NAU8825_DAC_OVERSAMPLE_MASK;
-               if (nau8825_clock_check(nau8825, substream->stream,
-                       params_rate(params), osr)) {
-                       nau8825_sema_release(nau8825);
-                       return -EINVAL;
-               }
+       osr = nau8825_get_osr(nau8825, substream->stream);
+       if (!osr || !osr->osr)
+               goto error;
+       if (params_rate(params) * osr->osr > CLK_DA_AD_MAX)
+               goto error;
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
                        NAU8825_CLK_DAC_SRC_MASK,
-                       osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
-       } else {
-               regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
-               osr &= NAU8825_ADC_SYNC_DOWN_MASK;
-               if (nau8825_clock_check(nau8825, substream->stream,
-                       params_rate(params), osr)) {
-                       nau8825_sema_release(nau8825);
-                       return -EINVAL;
-               }
+                       osr->clk_src << NAU8825_CLK_DAC_SRC_SFT);
+       else
                regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
                        NAU8825_CLK_ADC_SRC_MASK,
-                       osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
-       }
+                       osr->clk_src << NAU8825_CLK_ADC_SRC_SFT);
 
        /* make BCLK and LRC divider configuration if the codec is master. */
        regmap_read(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2, &ctrl_val);
@@ -1321,10 +1328,8 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
                        bclk_div = 1;
                else if (bclk_fs <= 128)
                        bclk_div = 0;
-               else {
-                       nau8825_sema_release(nau8825);
-                       return -EINVAL;
-               }
+               else
+                       goto error;
                regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
                        NAU8825_I2S_LRC_DIV_MASK | NAU8825_I2S_BLK_DIV_MASK,
                        ((bclk_div + 1) << NAU8825_I2S_LRC_DIV_SFT) | bclk_div);
@@ -1344,17 +1349,18 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
                val_len |= NAU8825_I2S_DL_32;
                break;
        default:
-               nau8825_sema_release(nau8825);
-               return -EINVAL;
+               goto error;
        }
 
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
                NAU8825_I2S_DL_MASK, val_len);
+       err = 0;
 
+ error:
        /* Release the semaphore. */
        nau8825_sema_release(nau8825);
 
-       return 0;
+       return err;
 }
 
 static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
@@ -1420,6 +1426,7 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 }
 
 static const struct snd_soc_dai_ops nau8825_dai_ops = {
+       .startup        = nau8825_dai_startup,
        .hw_params      = nau8825_hw_params,
        .set_fmt        = nau8825_set_dai_fmt,
 };
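The new nau8825_dai_startup() turns the old post-hoc clock check into an upfront rate constraint: the allowed sample rate is bounded by CLK_DA_AD_MAX / OSR. Assuming CLK_DA_AD_MAX is 6144000 Hz (the "6.144 MHz" limit quoted in the comment in the hunk above), a quick standalone sanity check of the resulting ceilings:

    #include <stdio.h>

    #define CLK_DA_AD_MAX 6144000   /* assumed from the "6.144 MHz" comment */

    int main(void)
    {
            const unsigned int osrs[] = { 32, 64, 128, 256 };

            for (unsigned int i = 0; i < sizeof(osrs) / sizeof(osrs[0]); i++)
                    printf("OSR %3u -> max rate %u Hz\n",
                           osrs[i], CLK_DA_AD_MAX / osrs[i]);
            return 0;
    }

With an OSR of 128, for example, the constraint caps the stream at 48 kHz, which is the same condition the removed nau8825_clock_check() enforced only after hw_params.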
index 873295f59ad7b9dde877ce1f990bb098f77385ca..1e421d9a03fbe45ec48865bd748d1dfaeba48b98 100644 (file)
@@ -234,18 +234,26 @@ static int fsl_aud2htx_probe(struct platform_device *pdev)
 
        regcache_cache_only(aud2htx->regmap, true);
 
+       /*
+        * Register the platform component before registering the CPU DAI:
+        * snd_soc_add_pcm_runtime() has no deferred probe for the platform component.
+        */
+       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to pcm register\n");
+               pm_runtime_disable(&pdev->dev);
+               return ret;
+       }
+
        ret = devm_snd_soc_register_component(&pdev->dev,
                                              &fsl_aud2htx_component,
                                              &fsl_aud2htx_dai, 1);
        if (ret) {
                dev_err(&pdev->dev, "failed to register ASoC DAI\n");
+               pm_runtime_disable(&pdev->dev);
                return ret;
        }
 
-       ret = imx_pcm_dma_init(pdev);
-       if (ret)
-               dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
-
        return ret;
 }
 
index c1e2f671191b5fb0a60e3b7ec425c3cbf56035a0..4922e6795b73f02a8d53a33f0fad079e4ccd0739 100644 (file)
@@ -122,7 +122,7 @@ static int fsl_mqs_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        }
 
        switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
-       case SND_SOC_DAIFMT_BP_FP:
+       case SND_SOC_DAIFMT_CBC_CFC:
                break;
        default:
                return -EINVAL;
index 7523bb944b216da4c601e8c5332683fa669fa8cd..d430eece1d6b1504cb42b209c79a3dce4547f163 100644 (file)
@@ -1306,7 +1306,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
                sai->mclk_clk[i] = devm_clk_get(dev, tmp);
                if (IS_ERR(sai->mclk_clk[i])) {
                        dev_err(dev, "failed to get mclk%d clock: %ld\n",
-                                       i + 1, PTR_ERR(sai->mclk_clk[i]));
+                                       i, PTR_ERR(sai->mclk_clk[i]));
                        sai->mclk_clk[i] = NULL;
                }
        }
index 266704556f37d923ecb827ba7e1963a64abfa0f9..094402470dc2383e8045a1991223f6c749a23156 100644 (file)
@@ -271,9 +271,6 @@ static int mtk_adda_ul_event(struct snd_soc_dapm_widget *w,
                /* should delay 1/fs (smallest is 8k) = 125us before afe off */
                usleep_range(125, 135);
                mt8186_afe_gpio_request(afe->dev, false, MT8186_DAI_ADDA, 1);
-
-               /* reset dmic */
-               afe_priv->mtkaif_dmic = 0;
                break;
        default:
                break;
index ce4a5713386a3681f6b17ce7ad3c59436554323c..98a2fde9e0041f4f4952971c4b94c9e8c546a5df 100644 (file)
@@ -270,6 +270,7 @@ static int sm8250_platform_probe(struct platform_device *pdev)
        if (!card)
                return -ENOMEM;
 
+       card->owner = THIS_MODULE;
        /* Allocate the private data */
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
index e90f173d067c9aba54b4e8e99933eca47913b003..37f7df5fde175cef88525ba40f617020ae83cf56 100644 (file)
@@ -196,6 +196,7 @@ config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE
 
 config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
        tristate "SOF enable IPC flood test"
+       depends on SND_SOC_SOF
        select SND_SOC_SOF_CLIENT
        help
          This option enables a separate client device for IPC flood test
@@ -214,6 +215,7 @@ config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST_NUM
 
 config SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR
        tristate "SOF enable IPC message injector"
+       depends on SND_SOC_SOF
        select SND_SOC_SOF_CLIENT
        help
          This option enables the IPC message injector which can be used to send
index af072b484a6072eb1b83823ff94db3dac4cc56ce..64929dc9af397e1119fa86577f61ca8b93cc9ff2 100644 (file)
@@ -771,7 +771,7 @@ static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
                goto err;
 
        ret = sof_update_ipc_object(scomp, src, SOF_SRC_TOKENS, swidget->tuples,
-                                   swidget->num_tuples, sizeof(src), 1);
+                                   swidget->num_tuples, sizeof(*src), 1);
        if (ret) {
                dev_err(scomp->dev, "Parsing SRC tokens failed\n");
                goto err;
@@ -1251,7 +1251,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
                        if (blob->alh_cfg.count > 1) {
                                int group_id;
 
-                               group_id = ida_alloc_max(&alh_group_ida, ALH_MULTI_GTW_COUNT,
+                               group_id = ida_alloc_max(&alh_group_ida, ALH_MULTI_GTW_COUNT - 1,
                                                         GFP_KERNEL);
 
                                if (group_id < 0)
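The one-line ALH fix above relies on ida_alloc_max() treating its max argument as inclusive: it hands out IDs in the range [0, max], so allowing ALH_MULTI_GTW_COUNT distinct group IDs requires passing ALH_MULTI_GTW_COUNT - 1. A kernel-style sketch of the pattern (not a standalone program; it assumes the usual <linux/idr.h> API and a hypothetical group_ida):

    #include <linux/idr.h>

    static DEFINE_IDA(group_ida);

    /* Allocate one of 'count' group IDs, numbered 0..count-1. */
    static int alloc_group_id(unsigned int count)
    {
            /* max is inclusive, so pass count - 1 to get exactly count slots */
            return ida_alloc_max(&group_ida, count - 1, GFP_KERNEL);
    }

    static void free_group_id(int id)
    {
            if (id >= 0)
                    ida_free(&group_ida, id);
    }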
index d356743de2ff9b720505ae1e856c4beccb4f7b0a..706d249a9ad6b81b53bd79de28b0b21b0e8c7eb9 100644 (file)
@@ -699,7 +699,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface)
                if (delayed_register[i] &&
                    sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
                    id == chip->usb_id)
-                       return inum != iface;
+                       return iface < inum;
        }
 
        return false;
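For context on the hunk above: delayed_register entries have the form "<usb_id>:<iface>", parsed with sscanf("%x:%x"), and after the change card registration is postponed only while the probed interface is still below the configured one. A tiny standalone illustration of that parsing and comparison (the helper name and the device/interface values are made up):

    #include <stdio.h>
    #include <stdbool.h>

    /* Returns true if registration should be delayed at this interface. */
    static bool delay_register(const char *opt, unsigned int usb_id, int iface)
    {
            unsigned int id, inum;

            if (sscanf(opt, "%x:%x", &id, &inum) != 2 || id != usb_id)
                    return false;
            return (unsigned int)iface < inum;  /* delay only before the target iface */
    }

    int main(void)
    {
            /* hypothetical option: device 0x0d8c0066, register at interface 2 */
            const char *opt = "0d8c0066:2";

            for (int iface = 0; iface < 4; iface++)
                    printf("iface %d: delay=%d\n", iface,
                           delay_register(opt, 0x0d8c0066, iface));
            return 0;
    }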
index 0d7b73bf7945063cda7bd50d81c65327dde5ef2a..eb71df9da831a3d2dc75f52be797337fa17f41cb 100644 (file)
@@ -924,6 +924,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
                endpoint_set_interface(chip, ep, false);
 
        if (!--ep->opened) {
+               if (ep->clock_ref && !atomic_read(&ep->clock_ref->locked))
+                       ep->clock_ref->rate = 0;
                ep->iface = 0;
                ep->altsetting = 0;
                ep->cur_audiofmt = NULL;
index 9bfead5efc4c1b078f20af92785fa839d228719d..5b4d8f5eade2093ebe1e7875ab90cdf9f2c4ecdb 100644 (file)
@@ -1764,7 +1764,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface)
 
        for (q = registration_quirks; q->usb_id; q++)
                if (chip->usb_id == q->usb_id)
-                       return iface != q->interface;
+                       return iface < q->interface;
 
        /* Register as normal */
        return false;
index ceb93d798182cfba087848a86609dd23bfbd7ce9..f10f4e6d3fb851661c3553f43084f1d678ca1752 100644 (file)
@@ -495,6 +495,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
                        return 0;
                }
        }
+
+       if (chip->card->registered)
+               chip->need_delayed_register = true;
+
        /* look for an empty stream */
        list_for_each_entry(as, &chip->pcm_list, list) {
                if (as->fmt_type != fp->fmt_type)
@@ -502,9 +506,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
                subs = &as->substream[stream];
                if (subs->ep_num)
                        continue;
-               if (snd_device_get_state(chip->card, as->pcm) !=
-                   SNDRV_DEV_BUILD)
-                       chip->need_delayed_register = true;
                err = snd_pcm_new_stream(as->pcm, stream, 1);
                if (err < 0)
                        return err;
@@ -1105,7 +1106,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
         * Dallas DS4201 workaround: It presents 5 altsettings, but the last
         * one misses syncpipe, and does not produce any sound.
         */
-       if (chip->usb_id == USB_ID(0x04fa, 0x4201))
+       if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4)
                num = 4;
 
        for (i = 0; i < num; i++) {
index 235dc85c91c3e372980b8b428e9713869115ead4..ef4775c6db01c128ab112a3d741c1b38ef7cdf64 100644 (file)
 #define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA                X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_RETBLEED               X86_BUG(26) /* CPU is affected by RETBleed */
-#define X86_BUG_EIBRS_PBRSB            X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_MMIO_UNKNOWN           X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED               X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB            X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index f1af27ce9f200f32c4811d2be30a74056057306c..279be06332be990754490662ee00fa4257cecbda 100755 (executable)
@@ -187,6 +187,7 @@ else
        echo " * auxiliary taint, defined for and used by distros (#16)"
 
 fi
+
 T=`expr $T / 2`
 if [ `expr $T % 2` -eq 0 ]; then
        addout " "
@@ -195,6 +196,14 @@ else
        echo " * kernel was built with the struct randomization plugin (#17)"
 fi
 
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+       addout " "
+else
+       addout "N"
+       echo " * an in-kernel test (such as a KUnit test) has been run (#18)"
+fi
+
 echo "For a more detailed explanation of the various taint flags see"
 echo " Documentation/admin-guide/tainted-kernels.rst in the Linux kernel sources"
 echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
index 1e6fd6ca513bdf446e2b56816de69a6afc1054eb..27f5e7dfc2f761ad2d2ec9ff8fba996ef046d319 100644 (file)
@@ -44,7 +44,7 @@
 
 /*
  * KVP protocol: The user mode component first registers with the
- * the kernel component. Subsequently, the kernel component requests, data
+ * kernel component. Subsequently, the kernel component requests data
  * for the specified keys. In response to this message the user mode component
  * fills in the value corresponding to the specified key. We overload the
  * sequence field in the cn_msg header to define our KVP message types.
@@ -772,11 +772,11 @@ static int kvp_process_ip_address(void *addrp,
        const char *str;
 
        if (family == AF_INET) {
-               addr = (struct sockaddr_in *)addrp;
+               addr = addrp;
                str = inet_ntop(family, &addr->sin_addr, tmp, 50);
                addr_length = INET_ADDRSTRLEN;
        } else {
-               addr6 = (struct sockaddr_in6 *)addrp;
+               addr6 = addrp;
                str = inet_ntop(family, &addr6->sin6_addr.s6_addr, tmp, 50);
                addr_length = INET6_ADDRSTRLEN;
        }
index d30439b4b8ab45c7a851891e69ca6ebea007a319..869379f91fe487ad3611966b09bda47462da0b70 100644 (file)
@@ -9,8 +9,8 @@
 #include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
 #include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__xtensa__)
-#include "../../../arch/xtensa/include/uapi/asm/errno.h"
+#elif defined(__hppa__)
+#include "../../../arch/parisc/include/uapi/asm/errno.h"
 #else
 #include <asm-generic/errno.h>
 #endif
index e6c98a6e3908e20f8c68946f677b7161540f0e47..8ec5b9f344e02cbb1985f36c66234359e2a00eea 100644 (file)
@@ -441,6 +441,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 
        perf_evlist__for_each_entry(evlist, evsel) {
                bool overwrite = evsel->attr.write_backward;
+               enum fdarray_flags flgs;
                struct perf_mmap *map;
                int *output, fd, cpu;
 
@@ -486,6 +487,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
                        if (ops->idx)
                                ops->idx(evlist, evsel, mp, idx);
 
+                       pr_debug("idx %d: mmapping fd %d\n", idx, *output);
                        if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
                                return -1;
 
@@ -494,6 +496,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
                        if (!idx)
                                perf_evlist__set_mmap_first(evlist, map, overwrite);
                } else {
+                       pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
                                return -1;
 
@@ -502,8 +505,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 
                revent = !overwrite ? POLLIN : 0;
 
-               if (!evsel->system_wide &&
-                   perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
+               flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
+               if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
                        perf_mmap__put(map);
                        return -1;
                }
@@ -519,6 +522,48 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
        return 0;
 }
 
+static int
+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+               struct perf_mmap_param *mp)
+{
+       int nr_threads = perf_thread_map__nr(evlist->threads);
+       int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
+       int cpu, thread, idx = 0;
+       int nr_mmaps = 0;
+
+       pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
+                __func__, nr_cpus, nr_threads);
+
+       /* per-thread mmaps */
+       for (thread = 0; thread < nr_threads; thread++, idx++) {
+               int output = -1;
+               int output_overwrite = -1;
+
+               if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
+                                  &output_overwrite, &nr_mmaps))
+                       goto out_unmap;
+       }
+
+       /* system-wide mmaps i.e. per-cpu */
+       for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
+               int output = -1;
+               int output_overwrite = -1;
+
+               if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
+                                  &output_overwrite, &nr_mmaps))
+                       goto out_unmap;
+       }
+
+       if (nr_mmaps != evlist->nr_mmaps)
+               pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
+       return 0;
+
+out_unmap:
+       perf_evlist__munmap(evlist);
+       return -1;
+}
+
 static int
 mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
             struct perf_mmap_param *mp)
@@ -528,6 +573,8 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
        int nr_mmaps = 0;
        int cpu, thread;
 
+       pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);
+
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;
                int output_overwrite = -1;
@@ -569,6 +616,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
                          struct perf_evlist_mmap_ops *ops,
                          struct perf_mmap_param *mp)
 {
+       const struct perf_cpu_map *cpus = evlist->all_cpus;
        struct perf_evsel *evsel;
 
        if (!ops || !ops->get || !ops->mmap)
@@ -588,6 +636,9 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;
 
+       if (perf_cpu_map__empty(cpus))
+               return mmap_per_thread(evlist, ops, mp);
+
        return mmap_per_cpu(evlist, ops, mp);
 }
 
index e5921b3471535d45c49f957c9b787733ddc69a8d..bd947885a639bd56600e77a4f2318194b263b8c0 100644 (file)
@@ -954,11 +954,11 @@ ifndef NO_LIBBPF
        $(call QUIET_INSTALL, bpf-headers) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'; \
-               $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
-               $(INSTALL) include/bpf/linux/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'
+               $(INSTALL) include/bpf/*.h -m 644 -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+               $(INSTALL) include/bpf/linux/*.h -m 644 -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'
        $(call QUIET_INSTALL, bpf-examples) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
-               $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+               $(INSTALL) examples/bpf/*.c -m 644 -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
 endif
        $(call QUIET_INSTALL, perf-archive) \
                $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
@@ -967,13 +967,13 @@ endif
 ifndef NO_LIBAUDIT
        $(call QUIET_INSTALL, strace/groups) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'; \
-               $(INSTALL) trace/strace/groups/* -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
+               $(INSTALL) trace/strace/groups/* -m 644 -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
 endif
 ifndef NO_LIBPERL
        $(call QUIET_INSTALL, perl-scripts) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
-               $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
-               $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \
+               $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
+               $(INSTALL) scripts/perl/*.pl -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'; \
                $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
 endif
@@ -990,23 +990,23 @@ endif
                $(INSTALL) $(DLFILTERS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/dlfilters';
        $(call QUIET_INSTALL, perf_completion-script) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
-               $(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
+               $(INSTALL) perf-completion.sh -m 644 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
        $(call QUIET_INSTALL, perf-tip) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(tip_instdir_SQ)'; \
-               $(INSTALL) Documentation/tips.txt -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
+               $(INSTALL) Documentation/tips.txt -m 644 -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
 
 install-tests: all install-gtk
        $(call QUIET_INSTALL, tests) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
-               $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
+               $(INSTALL) tests/attr.py -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
                $(INSTALL) tests/pe-file.exe* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
-               $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
+               $(INSTALL) tests/attr/* -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
                $(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
-               $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
-               $(INSTALL) tests/shell/lib/*.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
+               $(INSTALL) tests/shell/lib/*.sh -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
+               $(INSTALL) tests/shell/lib/*.py -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
 
 install-bin: install-tools install-tests install-traceevent-plugins
 
index 653e13b5037ec074fa4e51a0df367b64c6d40ae5..438fc222e2138d81041a4fbe592d0e00d67aa411 100644 (file)
@@ -146,15 +146,15 @@ static void *c2c_he_zalloc(size_t size)
 
        c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
        if (!c2c_he->cpuset)
-               return NULL;
+               goto out_free;
 
        c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
        if (!c2c_he->nodeset)
-               return NULL;
+               goto out_free;
 
        c2c_he->node_stats = zalloc(c2c.nodes_cnt * sizeof(*c2c_he->node_stats));
        if (!c2c_he->node_stats)
-               return NULL;
+               goto out_free;
 
        init_stats(&c2c_he->cstats.lcl_hitm);
        init_stats(&c2c_he->cstats.rmt_hitm);
@@ -163,6 +163,12 @@ static void *c2c_he_zalloc(size_t size)
        init_stats(&c2c_he->cstats.load);
 
        return &c2c_he->he;
+
+out_free:
+       free(c2c_he->nodeset);
+       free(c2c_he->cpuset);
+       free(c2c_he);
+       return NULL;
 }
 
 static void c2c_he_free(void *he)
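The c2c_he_zalloc() fix above replaces the bare "return NULL" statements with a single out_free label so that partially allocated members are released on failure instead of leaking. The same centralized-unwind pattern in a small standalone form (hypothetical struct and field names, not the perf types):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            unsigned long *cpuset;
            unsigned long *nodeset;
    };

    static struct entry *entry_alloc(size_t ncpus, size_t nnodes)
    {
            struct entry *e = calloc(1, sizeof(*e));

            if (!e)
                    return NULL;

            e->cpuset = calloc(ncpus, sizeof(*e->cpuset));
            if (!e->cpuset)
                    goto out_free;

            e->nodeset = calloc(nnodes, sizeof(*e->nodeset));
            if (!e->nodeset)
                    goto out_free;

            return e;

    out_free:
            /* only members allocated so far are non-NULL; free() tolerates NULL */
            free(e->cpuset);
            free(e);
            return NULL;
    }

    int main(void)
    {
            struct entry *e = entry_alloc(8, 2);

            printf("alloc %s\n", e ? "ok" : "failed");
            if (e) {
                    free(e->cpuset);
                    free(e->nodeset);
                    free(e);
            }
            return 0;
    }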
index dd11d3471baf3b1f87512681eabc6f6e24f14d98..ea40ae52cd2c7efc788c4423129dda25f479ad90 100644 (file)
@@ -1874,8 +1874,7 @@ int cmd_lock(int argc, const char **argv)
                NULL
        };
        const char *const lock_subcommands[] = { "record", "report", "script",
-                                                "info", "contention",
-                                                "contention", NULL };
+                                                "info", "contention", NULL };
        const char *lock_usage[] = {
                NULL,
                NULL
index 4713f0f3a6cf15add5416e3ad5fc2f6e919f2a89..0f711f88894cf9ebf87cfa00c390b2e781b8e8b2 100644 (file)
@@ -1906,14 +1906,18 @@ static int record__synthesize(struct record *rec, bool tail)
 
        err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
                                                machine, opts);
-       if (err < 0)
+       if (err < 0) {
                pr_warning("Couldn't synthesize bpf events.\n");
+               err = 0;
+       }
 
        if (rec->opts.synth & PERF_SYNTH_CGROUP) {
                err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
                                                     machine);
-               if (err < 0)
+               if (err < 0) {
                        pr_warning("Couldn't synthesize cgroup events.\n");
+                       err = 0;
+               }
        }
 
        if (rec->opts.nr_threads_synthesize > 1) {
@@ -3358,16 +3362,24 @@ static struct option __record_options[] = {
 
 struct option *record_options = __record_options;
 
-static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
+static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
 {
        struct perf_cpu cpu;
        int idx;
 
        if (cpu_map__is_dummy(cpus))
-               return;
+               return 0;
 
-       perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+       perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+               if (cpu.cpu == -1)
+                       continue;
+               /* Return -ENODEV if the input cpu is greater than max cpu */
+               if ((unsigned long)cpu.cpu > mask->nbits)
+                       return -ENODEV;
                set_bit(cpu.cpu, mask->bits);
+       }
+
+       return 0;
 }
 
 static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
@@ -3379,7 +3391,9 @@ static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const cha
                return -ENOMEM;
 
        bitmap_zero(mask->bits, mask->nbits);
-       record__mmap_cpu_mask_init(mask, cpus);
+       if (record__mmap_cpu_mask_init(mask, cpus))
+               return -ENODEV;
+
        perf_cpu_map__put(cpus);
 
        return 0;
@@ -3461,7 +3475,12 @@ static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_ma
                pr_err("Failed to allocate CPUs mask\n");
                return ret;
        }
-       record__mmap_cpu_mask_init(&cpus_mask, cpus);
+
+       ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
+       if (ret) {
+               pr_err("Failed to init cpu mask\n");
+               goto out_free_cpu_mask;
+       }
 
        ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
        if (ret) {
@@ -3702,7 +3721,8 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
        if (ret)
                return ret;
 
-       record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
+       if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
+               return -ENODEV;
 
        rec->nr_threads = 1;
 
index 13580a9c50b8d6d539e9f15d1305946a5639018a..029b4330e59b12fa6aa02692da099123d20d7552 100644 (file)
@@ -445,6 +445,9 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
        struct perf_event_attr *attr = &evsel->core.attr;
        bool allow_user_set;
 
+       if (evsel__is_dummy_event(evsel))
+               return 0;
+
        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;
 
@@ -566,6 +569,8 @@ static struct evsel *find_first_output_type(struct evlist *evlist,
        struct evsel *evsel;
 
        evlist__for_each_entry(evlist, evsel) {
+               if (evsel__is_dummy_event(evsel))
+                       continue;
                if (output_type(evsel->core.attr.type) == (int)type)
                        return evsel;
        }
index 54cd29d07ca8d4cce18816795e7896e2925bec47..0b4a62e4ff6752d78a0db4ab6a4300fa9dd39a13 100644 (file)
@@ -1932,6 +1932,9 @@ setup_metrics:
                free(str);
        }
 
+       if (!stat_config.topdown_level)
+               stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
+
        if (!evsel_list->core.nr_entries) {
                if (target__has_cpu(&target))
                        default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
@@ -1948,8 +1951,6 @@ setup_metrics:
                }
                if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
                        return -1;
-
-               stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
                /* Platform specific attrs */
                if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
                        return -1;
index 9eccc97bff82f64108545ca6a5eedb59a70aa6e8..6d47298ebe9f61fc1fc1c0a799a0f4ec5bd2f9b2 100644 (file)
@@ -98,9 +98,9 @@ int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, vo
 static void print_vals(__u64 cycles, __u64 delta)
 {
        if (delta)
-               printf("%10llu %10llu ", cycles, delta);
+               printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta);
        else
-               printf("%10llu %10s ", cycles, "");
+               printf("%10llu %10s ", (unsigned long long)cycles, "");
 }
 
 int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
new file mode 100755 (executable)
index 0000000..d724855
--- /dev/null
@@ -0,0 +1,83 @@
+#!/bin/sh
+# perf stat --bpf-counters --for-each-cgroup test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+test_cgroups=
+if [ "$1" = "-v" ]; then
+       verbose="1"
+fi
+
+# skip if --bpf-counters --for-each-cgroup is not supported
+check_bpf_counter()
+{
+       if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
+               if [ "${verbose}" = "1" ]; then
+                       echo "Skipping: --bpf-counters --for-each-cgroup not supported"
+                       perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
+               fi
+               exit 2
+       fi
+}
+
+# find two cgroups to measure
+find_cgroups()
+{
+       # try usual systemd slices first
+       if [ -d /sys/fs/cgroup/system.slice -a -d /sys/fs/cgroup/user.slice ]; then
+               test_cgroups="system.slice,user.slice"
+               return
+       fi
+
+       # try root and self cgroups
+       local self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+       if [ -z ${self_cgrp} ]; then
+               # cgroup v2 doesn't specify perf_event
+               self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+       fi
+
+       if [ -z ${self_cgrp} ]; then
+               test_cgroups="/"
+       else
+               test_cgroups="/,${self_cgrp}"
+       fi
+}
+
+# As cgroup events are cpu-wide, we cannot simply compare the result.
+# Just check if it runs without failure and has non-zero results.
+check_system_wide_counted()
+{
+       local output
+
+       output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1  2>&1)
+       if echo ${output} | grep -q -F "<not "; then
+               echo "Some system-wide events are not counted"
+               if [ "${verbose}" = "1" ]; then
+                       echo ${output}
+               fi
+               exit 1
+       fi
+}
+
+check_cpu_list_counted()
+{
+       local output
+
+       output=$(perf stat -C 1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1  2>&1)
+       if echo ${output} | grep -q -F "<not "; then
+               echo "Some CPU events are not counted"
+               if [ "${verbose}" = "1" ]; then
+                       echo ${output}
+               fi
+               exit 1
+       fi
+}
+
+check_bpf_counter
+find_cgroups
+
+check_system_wide_counted
+check_cpu_list_counted
+
+exit 0
index 9d4c45184e715daec8c78ac10189efcc6d456b1e..56455da30341b4ff597d86f71cc41f098d5a0d61 100644 (file)
@@ -2,7 +2,9 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+#include <errno.h>
 #include <sys/ioctl.h>
+#include <linux/compiler.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/kernel.h>
 #include "tests.h"
@@ -137,8 +139,7 @@ static int test__wp_rw(struct test_suite *test __maybe_unused,
 #endif
 }
 
-static int test__wp_modify(struct test_suite *test __maybe_unused,
-                          int subtest __maybe_unused)
+static int test__wp_modify(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
 #if defined(__s390x__)
        return TEST_SKIP;
@@ -160,6 +161,11 @@ static int test__wp_modify(struct test_suite *test __maybe_unused,
        new_attr.disabled = 1;
        ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr);
        if (ret < 0) {
+               if (errno == ENOTTY) {
+                       test->test_cases[subtest].skip_reason = "missing kernel support";
+                       ret = TEST_SKIP;
+               }
+
                pr_debug("ioctl(PERF_EVENT_IOC_MODIFY_ATTRIBUTES) failed\n");
                close(fd);
                return ret;
index 4d216c0dc4259b9ef812b41a793f911f74e44e9f..4ee96b3c755b73d5bdddaad55ae290254329cec1 100644 (file)
@@ -49,8 +49,14 @@ void affinity__set(struct affinity *a, int cpu)
 {
        int cpu_set_size = get_cpu_set_size();
 
-       if (cpu == -1)
+       /*
+        * Return early:
+        * - if cpu is -1
+        * - if cpu is out of bounds for sched_cpus
+        */
+       if (cpu == -1 || ((cpu >= (cpu_set_size * 8))))
                return;
+
        a->changed = true;
        set_bit(cpu, a->sched_cpus);
        /*
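The added guard in affinity__set() works because the mask size is tracked in bytes, so the last valid bit index is cpu_set_size * 8 - 1; any CPU number at or beyond cpu_set_size * 8 would write outside sched_cpus. Assuming get_cpu_set_size() returns the mask size in bytes (which is what the "* 8" in the new check implies), the bytes-to-bits relationship can be illustrated standalone:

    #include <stdbool.h>
    #include <stdio.h>

    /* true if bit 'cpu' fits inside a mask of 'set_size_bytes' bytes */
    static bool cpu_fits(int cpu, int set_size_bytes)
    {
            return cpu >= 0 && cpu < set_size_bytes * 8;
    }

    int main(void)
    {
            int set_size_bytes = 512;   /* hypothetical mask size: 4096 bits */

            printf("cpu 4095 fits: %d\n", cpu_fits(4095, set_size_bytes));
            printf("cpu 4096 fits: %d\n", cpu_fits(4096, set_size_bytes));
            return 0;
    }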
index 63b9db657442510a6e8a4cc69266ad4aa4b49bde..3c2df7522f6fcbdc63a918a6d322cd2c3afbcf56 100644 (file)
@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist)
 
        perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
-                                                     FD(cgrp_switch, cpu.cpu));
+                                                     FD(cgrp_switch, i));
                if (IS_ERR(link)) {
                        pr_err("Failed to attach cgroup program\n");
                        err = PTR_ERR(link);
@@ -115,15 +115,15 @@ static int bperf_load_program(struct evlist *evlist)
                        evsel->cgrp = NULL;
 
                        /* open single copy of the events w/o cgroup */
-                       err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
+                       err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
                        if (err) {
                                pr_err("Failed to open first cgroup events\n");
                                goto out;
                        }
 
                        map_fd = bpf_map__fd(skel->maps.events);
-                       perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
-                               int fd = FD(evsel, cpu.cpu);
+                       perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
+                               int fd = FD(evsel, j);
                                __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
 
                                err = bpf_map_update_elem(map_fd, &idx, &fd,
@@ -269,7 +269,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
                        goto out;
                }
 
-               perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+               perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
                        counts = perf_counts(evsel->counts, i, 0);
                        counts->val = values[cpu.cpu].counter;
                        counts->ena = values[cpu.cpu].enabled;
index 292c430768b525afdde468eca9b6ecea40955517..c72f8ad96f75191b9c017207d675cd25a39ca23d 100644 (file)
@@ -176,7 +176,7 @@ static int bperf_cgroup_count(void)
 }
 
 // This will be attached to cgroup-switches event for each cpu
-SEC("perf_events")
+SEC("perf_event")
 int BPF_PROG(on_cgrp_switch)
 {
        return bperf_cgroup_count();
index 953338b9e887e26f943831903e72c6df1c18927d..d81b54563e962b8a0389c7aad0954673b5c7825b 100644 (file)
 
 #define BUILD_ID_URANDOM /* different uuid for each run */
 
-// FIXME, remove this and fix the deprecation warnings before its removed and
-// We'll break for good here...
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
 #ifdef HAVE_LIBCRYPTO_SUPPORT
 
 #define BUILD_ID_MD5
@@ -45,6 +41,7 @@
 #endif
 
 #ifdef BUILD_ID_MD5
+#include <openssl/evp.h>
 #include <openssl/md5.h>
 #endif
 #endif
@@ -142,15 +139,20 @@ gen_build_id(struct buildid_note *note,
 static void
 gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
 {
-       MD5_CTX context;
+       EVP_MD_CTX *mdctx;
 
        if (sizeof(note->build_id) < 16)
                errx(1, "build_id too small for MD5");
 
-       MD5_Init(&context);
-       MD5_Update(&context, &load_addr, sizeof(load_addr));
-       MD5_Update(&context, code, csize);
-       MD5_Final((unsigned char *)note->build_id, &context);
+       mdctx = EVP_MD_CTX_new();
+       if (!mdctx)
+               errx(2, "failed to create EVP_MD_CTX");
+
+       EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
+       EVP_DigestUpdate(mdctx, &load_addr, sizeof(load_addr));
+       EVP_DigestUpdate(mdctx, code, csize);
+       EVP_DigestFinal_ex(mdctx, (unsigned char *)note->build_id, NULL);
+       EVP_MD_CTX_free(mdctx);
 }
 #endif
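The gen_build_id() conversion in the hunk above replaces the MD5_Init/MD5_Update/MD5_Final calls, which OpenSSL 3.0 marks as deprecated, with the EVP digest interface. A standalone sketch of the same EVP sequence (compile with -lcrypto; the input data here is arbitrary):

    #include <stdio.h>
    #include <openssl/evp.h>

    int main(void)
    {
            const char msg[] = "example build-id input";
            unsigned char digest[EVP_MAX_MD_SIZE];
            unsigned int len = 0;
            EVP_MD_CTX *mdctx = EVP_MD_CTX_new();

            if (!mdctx)
                    return 1;

            EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
            EVP_DigestUpdate(mdctx, msg, sizeof(msg) - 1);
            EVP_DigestFinal_ex(mdctx, digest, &len);
            EVP_MD_CTX_free(mdctx);

            for (unsigned int i = 0; i < len; i++)
                    printf("%02x", digest[i]);
            printf("\n");
            return 0;
    }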
 
@@ -251,6 +253,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
        Elf_Data *d;
        Elf_Scn *scn;
        Elf_Ehdr *ehdr;
+       Elf_Phdr *phdr;
        Elf_Shdr *shdr;
        uint64_t eh_frame_base_offset;
        char *strsym = NULL;
@@ -285,6 +288,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
        ehdr->e_version = EV_CURRENT;
        ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
 
+       /*
+        * setup program header
+        */
+       phdr = elf_newphdr(e, 1);
+       phdr[0].p_type = PT_LOAD;
+       phdr[0].p_offset = 0;
+       phdr[0].p_vaddr = 0;
+       phdr[0].p_paddr = 0;
+       phdr[0].p_filesz = csize;
+       phdr[0].p_memsz = csize;
+       phdr[0].p_flags = PF_X | PF_R;
+       phdr[0].p_align = 8;
+
        /*
         * setup text section
         */
index ae138afe6c56345a7def862fdc9ae50771616125..b5c909546e3f2041c2c864c12177bb08834cca23 100644 (file)
@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 
 #if GEN_ELF_CLASS == ELFCLASS64
 #define elf_newehdr    elf64_newehdr
+#define elf_newphdr    elf64_newphdr
 #define elf_getshdr    elf64_getshdr
 #define Elf_Ehdr       Elf64_Ehdr
+#define Elf_Phdr       Elf64_Phdr
 #define Elf_Shdr       Elf64_Shdr
 #define Elf_Sym                Elf64_Sym
 #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 #define ELF_ST_VIS(a)  ELF64_ST_VISIBILITY(a)
 #else
 #define elf_newehdr    elf32_newehdr
+#define elf_newphdr    elf32_newphdr
 #define elf_getshdr    elf32_getshdr
 #define Elf_Ehdr       Elf32_Ehdr
+#define Elf_Phdr       Elf32_Phdr
 #define Elf_Shdr       Elf32_Shdr
 #define Elf_Sym                Elf32_Sym
 #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
index 464475fd6b9a3a799e0e3d72ed56c727bb6a6113..c93bcaf6d55d08e1b25382fa30b2e8d5a28b7e1b 100644 (file)
@@ -1655,6 +1655,9 @@ int metricgroup__parse_groups(const struct option *opt,
        struct evlist *perf_evlist = *(struct evlist **)opt->value;
        const struct pmu_events_table *table = pmu_events_table__find();
 
+       if (!table)
+               return -EINVAL;
+
        return parse_groups(perf_evlist, str, metric_no_group,
                            metric_no_merge, NULL, metric_events, table);
 }
index 75bec32d4f571319e8577e96899dee2a24bb4381..647b7dff8ef36f6bf245df547330fc7cd408e541 100644 (file)
@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
  * unusual.  One significant peculiarity is that the mapping (start -> pgoff)
  * is not the same for the kernel map and the modules map.  That happens because
  * the data is copied adjacently whereas the original kcore has gaps.  Finally,
- * kallsyms and modules files are compared with their copies to check that
- * modules have not been loaded or unloaded while the copies were taking place.
+ * the kallsyms file is compared with its copy to check that modules have not been
+ * loaded or unloaded while the copies were taking place.
  *
  * Return: %0 on success, %-1 on failure.
  */
@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
                        goto out_extract_close;
        }
 
-       if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
-               goto out_extract_close;
-
        if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
                goto out_extract_close;
 
index 812424dbf2d5b95f1b19df7f704f37643a87dbd6..538790758e242cf16c877356cd7351a951f1e13b 100644 (file)
@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
                                             bool is_kernel)
 {
        struct build_id bid;
+       struct nsinfo *nsi;
+       struct nscookie nc;
        int rc;
 
-       if (is_kernel)
+       if (is_kernel) {
                rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
-       else
-               rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+               goto out;
+       }
+
+       nsi = nsinfo__new(event->pid);
+       nsinfo__mountns_enter(nsi, &nc);
 
+       rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+       nsinfo__mountns_exit(&nc);
+       nsinfo__put(nsi);
+
+out:
        if (rc == 0) {
                memcpy(event->build_id, bid.data, sizeof(bid.data));
                event->build_id_size = (u8) bid.size;
index f0d51d4d2c879cffaea6143793096e19a9162ec5..3a5936cc10abc29347f1e03d49e7bf4b1600000d 100644 (file)
@@ -1,4 +1,4 @@
-TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex vfork_exec
+TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex
 TEST_GEN_PROGS_EXTENDED := gettime_perf
 
 CFLAGS := -Wall -Werror -pthread
diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
deleted file mode 100644 (file)
index e6ccd90..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <time.h>
-#include <unistd.h>
-#include <string.h>
-
-#include "log.h"
-#include "timens.h"
-
-#define OFFSET (36000)
-
-int main(int argc, char *argv[])
-{
-       struct timespec now, tst;
-       int status, i;
-       pid_t pid;
-
-       if (argc > 1) {
-               if (sscanf(argv[1], "%ld", &now.tv_sec) != 1)
-                       return pr_perror("sscanf");
-
-               for (i = 0; i < 2; i++) {
-                       _gettime(CLOCK_MONOTONIC, &tst, i);
-                       if (abs(tst.tv_sec - now.tv_sec) > 5)
-                               return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
-               }
-               return 0;
-       }
-
-       nscheck();
-
-       ksft_set_plan(1);
-
-       clock_gettime(CLOCK_MONOTONIC, &now);
-
-       if (unshare_timens())
-               return 1;
-
-       if (_settime(CLOCK_MONOTONIC, OFFSET))
-               return 1;
-
-       for (i = 0; i < 2; i++) {
-               _gettime(CLOCK_MONOTONIC, &tst, i);
-               if (abs(tst.tv_sec - now.tv_sec) > 5)
-                       return pr_fail("%ld %ld\n",
-                                       now.tv_sec, tst.tv_sec);
-       }
-
-       pid = vfork();
-       if (pid < 0)
-               return pr_perror("fork");
-
-       if (pid == 0) {
-               char now_str[64];
-               char *cargv[] = {"exec", now_str, NULL};
-               char *cenv[] = {NULL};
-
-               // Check that we are still in the source timens.
-               for (i = 0; i < 2; i++) {
-                       _gettime(CLOCK_MONOTONIC, &tst, i);
-                       if (abs(tst.tv_sec - now.tv_sec) > 5)
-                               return pr_fail("%ld %ld\n",
-                                               now.tv_sec, tst.tv_sec);
-               }
-
-               /* Check for proper vvar offsets after execve. */
-               snprintf(now_str, sizeof(now_str), "%ld", now.tv_sec + OFFSET);
-               execve("/proc/self/exe", cargv, cenv);
-               return pr_perror("execve");
-       }
-
-       if (waitpid(pid, &status, 0) != pid)
-               return pr_perror("waitpid");
-
-       if (status)
-               ksft_exit_fail();
-
-       ksft_test_result_pass("exec\n");
-       ksft_exit_pass();
-       return 0;
-}