git.ipfire.org Git - people/ms/linux.git/commitdiff
Merge tag 'at91-fixes-6.0-2' of https://git.kernel.org/pub/scm/linux/kernel/git/at91...
author Arnd Bergmann <arnd@arndb.de>
Thu, 15 Sep 2022 19:54:06 +0000 (21:54 +0200)
committer Arnd Bergmann <arnd@arndb.de>
Thu, 15 Sep 2022 19:54:07 +0000 (21:54 +0200)
AT91 fixes for 6.0 #2

It contains a fix for LAN966 SoCs that corrects the interrupt
number for internal PHYs.

* tag 'at91-fixes-6.0-2' of https://git.kernel.org/pub/scm/linux/kernel/git/at91/linux:
  ARM: dts: lan966x: Fix the interrupt number for internal PHYs

Link: https://lore.kernel.org/r/20220915105833.4159850-1-claudiu.beznea@microchip.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
800 files changed:
.get_maintainer.ignore
.mailmap
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/sysctl/net.rst
Documentation/arm64/elf_hwcaps.rst
Documentation/arm64/silicon-errata.rst
Documentation/atomic_bitops.txt
Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
Documentation/devicetree/bindings/net/qcom-emac.txt
Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
Documentation/devicetree/bindings/spi/cdns,qspi-nor-peripheral-props.yaml
Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
Documentation/devicetree/bindings/thermal/thermal-zones.yaml
Documentation/kbuild/kconfig-language.rst
Documentation/tools/rtla/rtla-timerlat-hist.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/bitops.h
arch/arm/boot/dts/arm-realview-eb.dtsi
arch/arm/boot/dts/arm-realview-pb1176.dts
arch/arm/boot/dts/arm-realview-pb11mp.dts
arch/arm/boot/dts/arm-realview-pbx.dtsi
arch/arm/boot/dts/bcm63178.dtsi
arch/arm/boot/dts/bcm6846.dtsi
arch/arm/boot/dts/bcm6878.dtsi
arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
arch/arm/boot/dts/imx6qdl-vicut1.dtsi
arch/arm/boot/dts/integratorap-im-pd1.dts
arch/arm/boot/dts/moxart-uc7112lx.dts
arch/arm/boot/dts/moxart.dtsi
arch/arm/boot/dts/versatile-ab.dts
arch/arm/mach-ixp4xx/ixp4xx-of.c
arch/arm64/Kconfig
arch/arm64/boot/dts/arm/juno-base.dtsi
arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts
arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
arch/arm64/boot/dts/freescale/imx8ulp.dtsi
arch/arm64/boot/dts/renesas/r8a779g0.dtsi
arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
arch/arm64/configs/defconfig
arch/arm64/include/asm/cache.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/setup.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kernel/cacheinfo.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/pi/kaslr_early.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/topology.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/mmu.c
arch/arm64/tools/cpucaps
arch/hexagon/include/asm/bitops.h
arch/ia64/include/asm/bitops.h
arch/loongarch/Kconfig
arch/loongarch/include/asm/addrspace.h
arch/loongarch/include/asm/cmpxchg.h
arch/loongarch/include/asm/io.h
arch/loongarch/include/asm/irq.h
arch/loongarch/include/asm/page.h
arch/loongarch/include/asm/percpu.h
arch/loongarch/include/asm/pgtable.h
arch/loongarch/include/asm/reboot.h [deleted file]
arch/loongarch/kernel/reset.c
arch/loongarch/mm/fault.c
arch/loongarch/mm/mmap.c
arch/loongarch/vdso/vgetcpu.c
arch/loongarch/vdso/vgettimeofday.c
arch/m68k/include/asm/bitops.h
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/mmu.c
arch/nios2/include/asm/entry.h
arch/nios2/include/asm/ptrace.h
arch/nios2/kernel/entry.S
arch/nios2/kernel/signal.c
arch/nios2/kernel/syscall_table.c
arch/parisc/Kconfig
arch/parisc/include/asm/bitops.h
arch/parisc/kernel/head.S
arch/parisc/kernel/unaligned.c
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/kernel/pci-common.c
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv_nested.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/e500_mmu_host.c
arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
arch/riscv/boot/dts/microchip/mpfs.dtsi
arch/riscv/include/asm/signal.h [new file with mode: 0644]
arch/riscv/include/asm/thread_info.h
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/traps.c
arch/riscv/kvm/mmu.c
arch/s390/hypfs/hypfs_diag.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/bitops.h
arch/s390/kernel/process.c
arch/s390/mm/fault.c
arch/sh/include/asm/bitops-op32.h
arch/um/drivers/virtio_uml.c
arch/um/include/asm/cpufeature.h
arch/x86/Makefile
arch/x86/boot/compressed/misc.h
arch/x86/boot/compressed/sev.c
arch/x86/configs/xen.config
arch/x86/entry/entry_64_compat.S
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/uncore_snb.c
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/extable_fixup_types.h
arch/x86/include/asm/ibt.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/rmwcc.h
arch/x86/include/asm/sev.h
arch/x86/include/asm/word-at-a-time.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/sev.c
arch/x86/kernel/unwind_orc.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/mm/extable.c
arch/x86/mm/init_64.c
arch/x86/mm/pat/memtype.c
block/blk-mq.c
drivers/acpi/processor_thermal.c
drivers/acpi/property.c
drivers/android/binder_alloc.c
drivers/ata/libata-eh.c
drivers/block/loop.c
drivers/block/ublk_drv.c
drivers/block/zram/zram_drv.c
drivers/block/zram/zram_drv.h
drivers/cpufreq/cpufreq.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/optee.c
drivers/firmware/arm_scmi/reset.c
drivers/firmware/arm_scmi/scmi_pm_domain.c
drivers/firmware/arm_scmi/sensors.c
drivers/firmware/dmi_scan.c
drivers/gpu/drm/amd/amdgpu/aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
drivers/gpu/drm/amd/display/dc/basics/conversion.c
drivers/gpu/drm/amd/display/dc/basics/conversion.h
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
drivers/gpu/drm/amd/display/dc/dcn314/Makefile
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/display/include/logger_types.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/bridge/lvds-codec.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt.h
drivers/gpu/drm/i915/gt/intel_gt_pm.h
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/intel_migrate.c
drivers/gpu/drm/i915/gt/intel_ppgtt.c
drivers/gpu/drm/i915/gt/intel_region_lmem.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/i915_vma_resource.c
drivers/gpu/drm/i915/i915_vma_resource.h
drivers/gpu/drm/imx/dcss/dcss-kms.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vc4/Kconfig
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-scmi.c
drivers/infiniband/core/umem_dmabuf.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/erdma/erdma_qp.c
drivers/infiniband/hw/erdma/erdma_verbs.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/iommu/hyperv-iommu.c
drivers/irqchip/irq-loongarch-cpu.c
drivers/irqchip/irq-loongson-eiointc.c
drivers/irqchip/irq-loongson-liointc.c
drivers/irqchip/irq-loongson-pch-msi.c
drivers/irqchip/irq-loongson-pch-pic.c
drivers/md/md.c
drivers/md/raid10.c
drivers/mmc/host/Kconfig
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sdhci-of-dwcmshc.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/ocelot/seville_vsc9953.c
drivers/net/dsa/sja1105/sja1105_devlink.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/iavf/iavf_adminq.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_fltr.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_vf_lib.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/mscc/ocelot_vsc7514.c
drivers/net/ethernet/mscc/vsc7514_regs.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/fddi/skfp/h/hwmtm.h
drivers/net/ipa/ipa_mem.c
drivers/net/ipa/ipa_reg.h
drivers/net/ipvlan/ipvtap.c
drivers/net/macsec.c
drivers/net/phy/phy_device.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/nfc/pn533/uart.c
drivers/parisc/ccio-dma.c
drivers/parisc/led.c
drivers/perf/riscv_pmu_legacy.c
drivers/platform/mellanox/mlxbf-tmfifo.c
drivers/platform/x86/serial-multi-instantiate.c
drivers/regulator/core.c
drivers/remoteproc/remoteproc_virtio.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/storvsc_drv.c
drivers/soc/bcm/brcmstb/pm/pm-arm.c
drivers/soc/fsl/Kconfig
drivers/soc/imx/gpcv2.c
drivers/soc/imx/imx8m-blk-ctrl.c
drivers/spi/spi-meson-spicc.c
drivers/spi/spi.c
drivers/tee/tee_shm.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/thermal_core.c
drivers/ufs/core/ufshcd.c
drivers/ufs/host/ufs-exynos.c
drivers/video/console/sticore.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/radeon_base.c
drivers/video/fbdev/bw2.c
drivers/video/fbdev/chipsfb.c
drivers/video/fbdev/cirrusfb.c
drivers/video/fbdev/clps711x-fb.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbsysfs.c
drivers/video/fbdev/cyber2000fb.c
drivers/video/fbdev/ffb.c
drivers/video/fbdev/geode/gx1fb_core.c
drivers/video/fbdev/gxt4500.c
drivers/video/fbdev/i740fb.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/matrox/matroxfb_base.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
drivers/video/fbdev/pm2fb.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/pxafb.c
drivers/video/fbdev/s3fb.c
drivers/video/fbdev/simplefb.c
drivers/video/fbdev/sis/sis_main.c
drivers/video/fbdev/sm501fb.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/sstfb.c
drivers/video/fbdev/sunxvr1000.c
drivers/video/fbdev/sunxvr2500.c
drivers/video/fbdev/sunxvr500.c
drivers/video/fbdev/tcx.c
drivers/video/fbdev/tdfxfb.c
drivers/video/fbdev/tgafb.c
drivers/video/fbdev/tridentfb.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_ring.c
drivers/virtio/virtio_vdpa.c
drivers/xen/privcmd.c
drivers/xen/xen-scsiback.c
drivers/xen/xenbus/xenbus_probe_frontend.c
fs/btrfs/block-group.c
fs/btrfs/block-group.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/locking.c
fs/btrfs/locking.h
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/xattr.c
fs/cifs/cifs_debug.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifsroot.c
fs/cifs/connect.c
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/smb2file.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/cifs/transport.c
fs/dcache.c
fs/exec.c
fs/fs-writeback.c
fs/inode.c
fs/ksmbd/ksmbd_netlink.h
fs/ksmbd/mgmt/share_config.c
fs/ksmbd/mgmt/share_config.h
fs/ksmbd/mgmt/tree_connect.c
fs/ksmbd/smb2pdu.c
fs/locks.c
fs/namespace.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/nfs4file.c
fs/nfs/pnfs.c
fs/nfs/write.c
fs/ntfs3/attrib.c
fs/ntfs3/bitmap.c
fs/ntfs3/file.c
fs/ntfs3/frecord.c
fs/ntfs3/fslog.c
fs/ntfs3/fsntfs.c
fs/ntfs3/index.c
fs/ntfs3/inode.c
fs/ntfs3/namei.c
fs/ntfs3/ntfs_fs.h
fs/ntfs3/record.c
fs/ntfs3/run.c
fs/ntfs3/super.c
fs/ntfs3/xattr.c
fs/ocfs2/dlmglue.c
fs/ocfs2/super.c
fs/overlayfs/inode.c
fs/posix_acl.c
fs/proc/task_mmu.c
fs/squashfs/file.c
fs/squashfs/file_direct.c
fs/squashfs/page_actor.c
fs/squashfs/page_actor.h
fs/userfaultfd.c
include/asm-generic/bitops/atomic.h
include/asm-generic/bitops/generic-non-atomic.h
include/asm-generic/bitops/instrumented-non-atomic.h
include/asm-generic/bitops/non-atomic.h
include/asm-generic/bitops/non-instrumented-non-atomic.h
include/asm-generic/sections.h
include/linux/bitops.h
include/linux/blk-mq.h
include/linux/buffer_head.h
include/linux/cgroup.h
include/linux/cpumask.h
include/linux/kvm_host.h
include/linux/libata.h
include/linux/memcontrol.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/netdevice.h
include/linux/netfilter_bridge/ebtables.h
include/linux/nfs_fs.h
include/linux/psi.h
include/linux/scmi_protocol.h
include/linux/shmem_fs.h
include/linux/userfaultfd_k.h
include/linux/virtio.h
include/linux/virtio_config.h
include/linux/vm_event_item.h
include/linux/wait_bit.h
include/net/bond_3ad.h
include/net/busy_poll.h
include/net/gro.h
include/net/neighbour.h
include/net/netfilter/nf_flow_table.h
include/net/netfilter/nf_tables.h
include/net/netns/conntrack.h
include/net/sock.h
include/soc/mscc/ocelot.h
include/trace/events/scmi.h
include/uapi/linux/io_uring.h
include/uapi/linux/virtio_ring.h
include/uapi/linux/xfrm.h
include/ufs/ufshci.h
init/Kconfig
init/main.c
io_uring/cancel.c
io_uring/io_uring.c
io_uring/net.c
io_uring/net.h
io_uring/notif.c
io_uring/notif.h
io_uring/opdef.c
io_uring/opdef.h
io_uring/uring_cmd.c
kernel/audit_fsnotify.c
kernel/auditsc.c
kernel/bpf/reuseport_array.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/crash_core.c
kernel/kprobes.c
kernel/module/main.c
kernel/sched/psi.c
kernel/sched/wait_bit.c
kernel/sys_ni.c
kernel/trace/ftrace.c
kernel/trace/trace_eprobe.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_probe.c
lib/Kconfig.debug
lib/Makefile
lib/cpumask.c
lib/cpumask_kunit.c [moved from lib/test_cpumask.c with 58% similarity]
lib/ratelimit.c
mm/backing-dev.c
mm/bootmem_info.c
mm/damon/dbgfs.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/mmap.c
mm/mprotect.c
mm/page-writeback.c
mm/shmem.c
mm/userfaultfd.c
mm/vmstat.c
mm/zsmalloc.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/ebtables.c
net/core/bpf_sk_storage.c
net/core/dev.c
net/core/filter.c
net/core/gen_stats.c
net/core/gro_cells.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock.c
net/core/sysctl_net_core.c
net/dsa/port.c
net/dsa/slave.c
net/ipv4/devinet.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/key/af_key.c
net/mptcp/protocol.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_sane.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_osf.c
net/netfilter/nft_payload.c
net/netfilter/nft_tproxy.c
net/netfilter/nft_tunnel.c
net/netlink/genetlink.c
net/netlink/policy.c
net/qrtr/mhi.c
net/rds/ib_recv.c
net/rose/rose_loopback.c
net/rxrpc/call_object.c
net/rxrpc/sendmsg.c
net/sched/cls_route.c
net/sched/sch_generic.c
net/socket.c
net/sunrpc/clnt.c
net/sunrpc/sysfs.c
net/tls/tls_sw.c
net/xfrm/espintcp.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
scripts/Makefile.extrawarn
scripts/Makefile.gcc-plugins
scripts/clang-tools/run-clang-tools.py
scripts/dummy-tools/gcc
scripts/gcc-goto.sh [deleted file]
scripts/mod/modpost.c
security/loadpin/loadpin.c
sound/core/info.c
sound/pci/hda/cs35l41_hda.c
sound/pci/hda/patch_cs8409-tables.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/tas2770.c
sound/soc/codecs/tas2770.h
sound/soc/codecs/tlv320aic32x4.c
sound/soc/intel/avs/pcm.c
sound/soc/intel/boards/sof_es8336.c
sound/soc/sh/rz-ssi.c
sound/soc/soc-pcm.c
sound/soc/sof/debug.c
sound/soc/sof/intel/hda.c
sound/soc/sof/ipc3-topology.c
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/asm/rmwcc.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/include/linux/compiler_types.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/fscrypt.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/vhost.h
tools/lib/perf/cpumap.c
tools/lib/perf/evsel.c
tools/lib/perf/include/perf/cpumap.h
tools/lib/perf/include/perf/event.h
tools/lib/perf/include/perf/evsel.h
tools/lib/perf/tests/test-evsel.c
tools/objtool/check.c
tools/perf/Documentation/intel-hybrid.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Makefile.config
tools/perf/builtin-sched.c
tools/perf/builtin-stat.c
tools/perf/tests/cpumap.c
tools/perf/tests/sample-parsing.c
tools/perf/tests/shell/stat.sh
tools/perf/trace/beauty/include/linux/socket.h
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/event.h
tools/perf/util/evsel.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/stat-shadow.c
tools/perf/util/synthetic-events.c
tools/perf/util/synthetic-events.h
tools/testing/selftests/Makefile
tools/testing/selftests/drivers/net/bonding/Makefile [new file with mode: 0644]
tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/bonding/config [new file with mode: 0644]
tools/testing/selftests/drivers/net/bonding/settings [new file with mode: 0644]
tools/testing/selftests/landlock/Makefile
tools/testing/selftests/lib.mk
tools/testing/selftests/netfilter/nft_flowtable.sh
tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore [new file with mode: 0644]
tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore
tools/testing/selftests/sgx/sigstruct.c
tools/tracing/rtla/Makefile
tools/tracing/rtla/src/timerlat_hist.c
tools/tracing/rtla/src/timerlat_top.c
virt/kvm/kvm_main.c
virt/kvm/pfncache.c

index a64d219137455f407a7b1f2c6b156c5575852e9e..c298bab3d3207fc5c6dd81d843b8a145ea8c655d 100644 (file)
@@ -1,2 +1,4 @@
+Alan Cox <alan@lxorguk.ukuu.org.uk>
+Alan Cox <root@hraefn.swansea.linux.org.uk>
 Christoph Hellwig <hch@lst.de>
 Marc Gonzalez <marc.w.gonzalez@free.fr>
index 38255d412f0b3a82f26d63b4c52f84e3c50d4cc0..8ded2e7c2906f5c858c15893f27740424ff2eeae 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -98,8 +98,7 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
 Christian Marangi <ansuelsmth@gmail.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
-Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
-Colin Ian King <colin.king@intel.com> <colin.i.king@gmail.com>
+Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
@@ -150,6 +149,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com>
 Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
 Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
@@ -253,6 +254,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
+Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
index 5bf61881f01269196fa97a4d5ec325cd330cdb1a..760c889b6cd1441582d7049a1a33215c5f90a7f1 100644 (file)
@@ -523,6 +523,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
                /sys/devices/system/cpu/vulnerabilities/itlb_multihit
                /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+               /sys/devices/system/cpu/vulnerabilities/retbleed
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Information about CPU vulnerabilities
index 9393c50b5afc9c9fe8b9ac90ed9fe4774e1d1550..c98fd11907cc8754878ea89f371f5e753d9e0269 100644 (file)
@@ -230,6 +230,20 @@ The possible values in this file are:
      * - 'Mitigation: Clear CPU buffers'
        - The processor is vulnerable and the CPU buffer clearing mitigation is
          enabled.
+     * - 'Unknown: No mitigations'
+       - The processor vulnerability status is unknown because it is
+        out of Servicing period. Mitigation is not attempted.
+
+Definitions:
+------------
+
+Servicing period: The process of providing functional and security updates to
+Intel processors or platforms, utilizing the Intel Platform Update (IPU)
+process or other similar mechanisms.
+
+End of Servicing Updates (ESU): ESU is the date at which Intel will no
+longer provide Servicing, such as through IPU or other similar update
+processes. ESU dates will typically be aligned to end of quarter.
 
 If the processor is vulnerable then the following information is appended to
 the above information:
index d7f30902fda02fe09ea9eaa6563065b1655b0c2c..426fa892d311a308e38131edf21de630db0654aa 100644 (file)
        rodata=         [KNL]
                on      Mark read-only kernel memory as read-only (default).
                off     Leave read-only kernel memory writable for debugging.
+               full    Mark read-only kernel memory and aliases as read-only
+                       [arm64]
 
        rockchip.usb_uart
                        Enable the uart passthrough on the designated usb port
index 805f2281e000ae0677c3f399b12f27208de6cfb6..60d44165fba76e147d7bd21f10ca1c00f800bce9 100644 (file)
@@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
 netdev_max_backlog
 ------------------
 
-Maximum number  of  packets,  queued  on  the  INPUT  side, when the interface
+Maximum number of packets, queued on the INPUT side, when the interface
 receives packets faster than kernel can process them.
 
 netdev_rss_key
index 52b75a25c20541bd056d83872d25cbcffbf9606c..311021f2e5600dd37ba1370861e47f160b25a6f5 100644 (file)
@@ -242,44 +242,34 @@ HWCAP2_MTE3
     by Documentation/arm64/memory-tagging-extension.rst.
 
 HWCAP2_SME
-
     Functionality implied by ID_AA64PFR1_EL1.SME == 0b0001, as described
     by Documentation/arm64/sme.rst.
 
 HWCAP2_SME_I16I64
-
     Functionality implied by ID_AA64SMFR0_EL1.I16I64 == 0b1111.
 
 HWCAP2_SME_F64F64
-
     Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.
 
 HWCAP2_SME_I8I32
-
     Functionality implied by ID_AA64SMFR0_EL1.I8I32 == 0b1111.
 
 HWCAP2_SME_F16F32
-
     Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1.
 
 HWCAP2_SME_B16F32
-
     Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1.
 
 HWCAP2_SME_F32F32
-
     Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1.
 
 HWCAP2_SME_FA64
-
     Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1.
 
 HWCAP2_WFXT
-
     Functionality implied by ID_AA64ISAR2_EL1.WFXT == 0b0010.
 
 HWCAP2_EBF16
-
     Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
 
 4. Unused AT_HWCAP bits
index 33b04db8408f98dce58be3e71cbef70fcdb2f66e..fda97b3fcf0184ad0442f5a8db9522e0fbb85b92 100644 (file)
@@ -52,6 +52,8 @@ stable kernels.
 | Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2064142        | ARM64_ERRATUM_2064142       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2038923        | ARM64_ERRATUM_2038923       |
index 093cdaefdb3733ecd076b5ffbfe3ad880b2c3afd..edea4656c5c05f3da3f6ef46a8e8faf2b2a3f1a1 100644 (file)
@@ -58,13 +58,11 @@ Like with atomic_t, the rule of thumb is:
 
  - RMW operations that have a return value are fully ordered.
 
- - RMW operations that are conditional are unordered on FAILURE,
-   otherwise the above rules apply. In the case of test_and_{}_bit() operations,
-   if the bit in memory is unchanged by the operation then it is deemed to have
-   failed.
+ - RMW operations that are conditional are fully ordered.
 
-Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
-clear_bit_unlock() which has RELEASE semantics.
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics,
+clear_bit_unlock() which has RELEASE semantics and test_bit_acquire which has
+ACQUIRE semantics.
 
 Since a platform only has a single means of achieving atomic operations
 the same barriers as for atomic_t are used, see atomic_t.txt.
index 4a92a4c7dcd70ef2f4cc63446cb48bf323ae9476..f8168986a0a9eaf02b33fce13a8eddaed57c087d 100644 (file)
@@ -233,6 +233,7 @@ allOf:
               - allwinner,sun8i-a83t-tcon-lcd
               - allwinner,sun8i-v3s-tcon
               - allwinner,sun9i-a80-tcon-lcd
+              - allwinner,sun20i-d1-tcon-lcd
 
     then:
       properties:
@@ -252,6 +253,7 @@ allOf:
               - allwinner,sun8i-a83t-tcon-tv
               - allwinner,sun8i-r40-tcon-tv
               - allwinner,sun9i-a80-tcon-tv
+              - allwinner,sun20i-d1-tcon-tv
 
     then:
       properties:
@@ -278,6 +280,7 @@ allOf:
               - allwinner,sun9i-a80-tcon-lcd
               - allwinner,sun4i-a10-tcon
               - allwinner,sun8i-a83t-tcon-lcd
+              - allwinner,sun20i-d1-tcon-lcd
 
     then:
       required:
@@ -294,6 +297,7 @@ allOf:
               - allwinner,sun8i-a23-tcon
               - allwinner,sun8i-a33-tcon
               - allwinner,sun8i-a83t-tcon-lcd
+              - allwinner,sun20i-d1-tcon-lcd
 
     then:
       properties:
index 8a9f3559335b50f0c663bcfe49ca6cc336745e6f..7e14e26676ec9216f610198fb7571ab0e28a558b 100644 (file)
@@ -34,8 +34,8 @@ Example:
 Use specific request line passing from dma
 For example, MMC request line is 5
 
-       sdhci: sdhci@98e00000 {
-               compatible = "moxa,moxart-sdhci";
+       mmc: mmc@98e00000 {
+               compatible = "moxa,moxart-mmc";
                reg = <0x98e00000 0x5C>;
                interrupts = <5 0>;
                clocks = <&clk_apb>;
index 445e46feda69200ee05c86014501909174e13838..2b39fce5f6504b05694e6f5e184004aaa0c72edc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: i.MX8M DDR Controller
 
 maintainers:
-  - Leonard Crestez <leonard.crestez@nxp.com>
+  - Peng Fan <peng.fan@nxp.com>
 
 description:
   The DDRC block is integrated in i.MX8M for interfacing with DDR based
index e6cb2291471c4c1161eba37876b49249c1e10f22..7ae8aa14863454d2dca9e87815e30b48ed4729b5 100644 (file)
@@ -14,7 +14,7 @@ MAC node:
 - mac-address : The 6-byte MAC address. If present, it is the default
        MAC address.
 - internal-phy : phandle to the internal PHY node
-- phy-handle : phandle the external PHY node
+- phy-handle : phandle to the external PHY node
 
 Internal PHY node:
 - compatible : Should be "qcom,fsm9900-emac-sgmii" or "qcom,qdf2432-emac-sgmii".
index b539781e39aa45989fde397e206a13077cc464a6..835b53302db803817e4d37a03ae3b149ee4e895c 100644 (file)
@@ -47,12 +47,6 @@ properties:
         description:
           Properties for single LDO regulator.
 
-        properties:
-          regulator-name:
-            pattern: "^LDO[1-5]$"
-            description:
-              should be "LDO1", ..., "LDO5"
-
         unevaluatedProperties: false
 
       "^BUCK[1-6]$":
@@ -62,11 +56,6 @@ properties:
           Properties for single BUCK regulator.
 
         properties:
-          regulator-name:
-            pattern: "^BUCK[1-6]$"
-            description:
-              should be "BUCK1", ..., "BUCK6"
-
           nxp,dvs-run-voltage:
             $ref: "/schemas/types.yaml#/definitions/uint32"
             minimum: 600000
index 553601a441a7dc2a2a0cf3f87b0357e4861222bd..510b82c177c059048bb3b900a966870f329551db 100644 (file)
@@ -10,7 +10,7 @@ description:
   See spi-peripheral-props.yaml for more info.
 
 maintainers:
-  - Pratyush Yadav <p.yadav@ti.com>
+  - Vaishnav Achath <vaishnav.a@ti.com>
 
 properties:
   # cdns,qspi-nor.yaml
index 0a537fa3a6410cf0f28bce7317aadd3d0c2300fb..4707294d8f596888eb1a3063106bcfe71ce237c0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Cadence Quad SPI controller
 
 maintainers:
-  - Pratyush Yadav <p.yadav@ti.com>
+  - Vaishnav Achath <vaishnav.a@ti.com>
 
 allOf:
   - $ref: spi-controller.yaml#
index ce048e782e80422d2a5474b2c94262ac55e6abc1..a4abe15880053419cb0926330c5955d4cbf8e529 100644 (file)
@@ -16,7 +16,7 @@ description:
   their own separate schema that should be referenced from here.
 
 maintainers:
-  - Pratyush Yadav <p.yadav@ti.com>
+  - Mark Brown <broonie@kernel.org>
 
 properties:
   reg:
index 00dcbdd361442981fb6f102e0deb32fb3c41d64c..119998d10ff418369c7e1d2ea22cc6cf5551422d 100644 (file)
@@ -42,7 +42,7 @@ properties:
     description:
       Address ranges of the thermal registers. If more then one range is given
       the first one must be the common registers followed by each sensor
-      according the datasheet.
+      according to the datasheet.
     minItems: 1
     maxItems: 4
 
index 2d34f3ccb2572ddb9caafd2f290d6c7e2779b4ed..8d2c6d74b605a1ebde7d2e3d3b9cb78610d40dec 100644 (file)
@@ -214,6 +214,7 @@ patternProperties:
       - polling-delay
       - polling-delay-passive
       - thermal-sensors
+      - trips
 
     additionalProperties: false
 
index 7fb398649f5108ef52d271a7c206e3341ca0fdc0..858ed5d80defeaf2dc2ec8afaeefa260321315ac 100644 (file)
@@ -525,8 +525,8 @@ followed by a test macro::
 If you need to expose a compiler capability to makefiles and/or C source files,
 `CC_HAS_` is the recommended prefix for the config option::
 
-  config CC_HAS_ASM_GOTO
-       def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
+  config CC_HAS_FOO
+       def_bool $(success,$(srctree)/scripts/cc-check-foo.sh $(CC))
 
 Build as module only
 ~~~~~~~~~~~~~~~~~~~~
index e12eae1f33019675c6fa772ce32d3b23b2d36298..6bf7f0ca45564ad61f02288df4e93ebbe9a4d3b6 100644 (file)
@@ -33,7 +33,7 @@ EXAMPLE
 =======
 In the example below, **rtla timerlat hist** is set to run for *10* minutes,
 in the cpus *0-4*, *skipping zero* only lines. Moreover, **rtla timerlat
-hist** will change the priority of the *timelat* threads to run under
+hist** will change the priority of the *timerlat* threads to run under
 *SCHED_DEADLINE* priority, with a *10us* runtime every *1ms* period. The
 *1ms* period is also passed to the *timerlat* tracer::
 
index 8a5012ba6ff98ac6f4a1415b3fce51423329c2d2..c2bd60ad921fc591155d09ea3676e523e886d39e 100644 (file)
@@ -2178,7 +2178,7 @@ M:        Jean-Marie Verdun <verdun@hpe.com>
 M:     Nick Hawkins <nick.hawkins@hpe.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/arm/hpe,gxp.yaml
-F:     Documentation/devicetree/bindings/spi/hpe,gxp-spi.yaml
+F:     Documentation/devicetree/bindings/spi/hpe,gxp-spifi.yaml
 F:     Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
 F:     arch/arm/boot/dts/hpe-bmc*
 F:     arch/arm/boot/dts/hpe-gxp*
@@ -3612,6 +3612,7 @@ F:        include/linux/find.h
 F:     include/linux/nodemask.h
 F:     lib/bitmap.c
 F:     lib/cpumask.c
+F:     lib/cpumask_kunit.c
 F:     lib/find_bit.c
 F:     lib/find_bit_benchmark.c
 F:     lib/test_bitmap.c
@@ -3679,6 +3680,7 @@ F:        Documentation/networking/bonding.rst
 F:     drivers/net/bonding/
 F:     include/net/bond*
 F:     include/uapi/linux/if_bonding.h
+F:     tools/testing/selftests/drivers/net/bonding/
 
 BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
 M:     Dan Robertson <dan@dlrobertson.com>
@@ -5145,6 +5147,7 @@ T:        git git://git.samba.org/sfrench/cifs-2.6.git
 F:     Documentation/admin-guide/cifs/
 F:     fs/cifs/
 F:     fs/smbfs_common/
+F:     include/uapi/linux/cifs
 
 COMPACTPCI HOTPLUG CORE
 M:     Scott Murray <scott@spiteful.org>
@@ -9780,7 +9783,7 @@ M:        Christian Brauner <brauner@kernel.org>
 M:     Seth Forshee <sforshee@kernel.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/vfs/idmapping.git
 F:     Documentation/filesystems/idmappings.rst
 F:     tools/testing/selftests/mount_setattr/
 F:     include/linux/mnt_idmapping.h
@@ -10657,6 +10660,7 @@ T:      git git://git.kernel.dk/linux-block
 T:     git git://git.kernel.dk/liburing
 F:     io_uring/
 F:     include/linux/io_uring.h
+F:     include/linux/io_uring_types.h
 F:     include/uapi/linux/io_uring.h
 F:     tools/io_uring/
 
@@ -17527,9 +17531,19 @@ M:     Conor Dooley <conor.dooley@microchip.com>
 M:     Daire McNamara <daire.mcnamara@microchip.com>
 L:     linux-riscv@lists.infradead.org
 S:     Supported
+F:     Documentation/devicetree/bindings/clock/microchip,mpfs.yaml
+F:     Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
+F:     Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml
+F:     Documentation/devicetree/bindings/mailbox/microchip,mpfs-mailbox.yaml
+F:     Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml
+F:     Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
+F:     Documentation/devicetree/bindings/soc/microchip/microchip,mpfs-sys-controller.yaml
+F:     Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
+F:     Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
 F:     arch/riscv/boot/dts/microchip/
 F:     drivers/char/hw_random/mpfs-rng.c
 F:     drivers/clk/microchip/clk-mpfs.c
+F:     drivers/i2c/busses/i2c-microchip-core.c
 F:     drivers/mailbox/mailbox-mpfs.c
 F:     drivers/pci/controller/pcie-microchip-host.c
 F:     drivers/rtc/rtc-mpfs.c
index f09673b6c11d730b9f805ede60897e71d8223bef..952d354069a43f0c269e1ad662a08f89d06fd6ad 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -1113,13 +1113,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
                     $(patsubst %/,%,$(filter %/, $(core-) \
                        $(drivers-) $(libs-))))
 
-subdir-modorder := $(addsuffix modules.order,$(filter %/, \
-                       $(core-y) $(core-m) $(libs-y) $(libs-m) \
-                       $(drivers-y) $(drivers-m)))
-
 build-dirs     := $(vmlinux-dirs)
 clean-dirs     := $(vmlinux-alldirs)
 
+subdir-modorder := $(addsuffix /modules.order, $(build-dirs))
+
 # Externally visible symbols (used by link-vmlinux.sh)
 KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
 KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
index f330410da63a6f940431170e7d0e29d6cd48ee38..5dbf11a5ba4e8ea2879b780f23d543d78ac31ebc 100644 (file)
@@ -53,7 +53,6 @@ config KPROBES
 config JUMP_LABEL
        bool "Optimize very unlikely/likely branches"
        depends on HAVE_ARCH_JUMP_LABEL
-       depends on CC_HAS_ASM_GOTO
        select OBJTOOL if HAVE_JUMP_LABEL_HACK
        help
         This option enables a transparent branch optimization that
@@ -1361,7 +1360,7 @@ config HAVE_PREEMPT_DYNAMIC_CALL
 
 config HAVE_PREEMPT_DYNAMIC_KEY
        bool
-       depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
+       depends on HAVE_ARCH_JUMP_LABEL
        select HAVE_PREEMPT_DYNAMIC
        help
           An architecture should select this if it can handle the preemption
index 492c7713ddae6d1d9cea6075326e3c6315fb3bf7..bafb1c1f0fdc169384128156505a47151e176d3c 100644 (file)
@@ -283,11 +283,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
        return (old & mask) != 0;
 }
 
-static __always_inline bool
-arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
-{
-       return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
index 2dfb32bf9d482d364364b6e9c62f96dacc1849bc..fbb2258b451f904856d6fedd41af6e67afeea5ad 100644 (file)
                        compatible = "arm,pl022", "arm,primecell";
                        reg = <0x1000d000 0x1000>;
                        clocks = <&sspclk>, <&pclk>;
-                       clock-names = "SSPCLK", "apb_pclk";
+                       clock-names = "sspclk", "apb_pclk";
                };
 
                wdog: watchdog@10010000 {
index 06b8723b09eb955e1894c9326dd44ab116f43e39..efed325af88d206fc5cc6f4d9b6e6d7f4f05cb77 100644 (file)
                        interrupt-parent = <&intc_dc1176>;
                        interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&sspclk>, <&pclk>;
-                       clock-names = "SSPCLK", "apb_pclk";
+                       clock-names = "sspclk", "apb_pclk";
                };
 
                pb1176_serial0: serial@1010c000 {
index 295aef4481239bdee6541ecc4c7b69e628ff224d..89103d54ecc15c9afbdca380ab01b9427924cc9c 100644 (file)
                        interrupt-parent = <&intc_pb11mp>;
                        interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&sspclk>, <&pclk>;
-                       clock-names = "SSPCLK", "apb_pclk";
+                       clock-names = "sspclk", "apb_pclk";
                };
 
                watchdog@1000f000 {
index 6f61f968d689222e4ed9d56927137693c9ed7b58..ec1507c5147c64ea978e133b5bc6bafc5238fa69 100644 (file)
                        compatible = "arm,pl022", "arm,primecell";
                        reg = <0x1000d000 0x1000>;
                        clocks = <&sspclk>, <&pclk>;
-                       clock-names = "SSPCLK", "apb_pclk";
+                       clock-names = "sspclk", "apb_pclk";
                };
 
                wdog0: watchdog@1000f000 {
index 5463443f07620e9af98814ff2e85c071ccc8b40e..cbd094dde6d07e2715d03510a15795ba4b656508 100644 (file)
@@ -32,6 +32,7 @@
                        next-level-cache = <&L2_0>;
                        enable-method = "psci";
                };
+
                CA7_2: cpu@2 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a7";
@@ -39,6 +40,7 @@
                        next-level-cache = <&L2_0>;
                        enable-method = "psci";
                };
+
                L2_0: l2-cache0 {
                        compatible = "cache";
                };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>;
                arm,cpu-registers-not-fw-configured;
        };
 
        psci {
                compatible = "arm,psci-0.2";
                method = "smc";
-               cpu_off = <1>;
-               cpu_on = <2>;
        };
 
        axi@81000000 {
                compatible = "simple-bus";
                #address-cells = <1>;
                #size-cells = <1>;
-               ranges = <0 0x81000000 0x4000>;
+               ranges = <0 0x81000000 0x8000>;
 
                gic: interrupt-controller@1000 {
                        compatible = "arm,cortex-a7-gic";
                        #interrupt-cells = <3>;
-                       #address-cells = <0>;
                        interrupt-controller;
+                       interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_HIGH)>;
                        reg = <0x1000 0x1000>,
-                               <0x2000 0x2000>;
+                               <0x2000 0x2000>,
+                               <0x4000 0x2000>,
+                               <0x6000 0x2000>;
                };
        };
 
index e610c102498fac9f70b90f323f1a3fd6ea1984f3..8aa47a2583b2985276ae7f3685237b2afdac0f6e 100644 (file)
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
                arm,cpu-registers-not-fw-configured;
        };
 
        psci {
                compatible = "arm,psci-0.2";
                method = "smc";
-               cpu_off = <1>;
-               cpu_on = <2>;
        };
 
        axi@81000000 {
                compatible = "simple-bus";
                #address-cells = <1>;
                #size-cells = <1>;
-               ranges = <0 0x81000000 0x4000>;
+               ranges = <0 0x81000000 0x8000>;
 
                gic: interrupt-controller@1000 {
                        compatible = "arm,cortex-a7-gic";
                        #interrupt-cells = <3>;
-                       #address-cells = <0>;
                        interrupt-controller;
+                       interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
                        reg = <0x1000 0x1000>,
-                               <0x2000 0x2000>;
+                               <0x2000 0x2000>,
+                               <0x4000 0x2000>,
+                               <0x6000 0x2000>;
                };
        };
 
index a7dff596fe1e673505be4fa6438c86417a419d70..1e8b5fa96c256f57ff5830ecaf3a91d6900c2e1e 100644 (file)
@@ -32,6 +32,7 @@
                        next-level-cache = <&L2_0>;
                        enable-method = "psci";
                };
+
                L2_0: l2-cache0 {
                        compatible = "cache";
                };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                       <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
                arm,cpu-registers-not-fw-configured;
        };
 
index 095c9143d99a35496ac043171e457e15277ede3e..6b791d515e2940c021eff71b0faf444a3ce330a5 100644 (file)
                vin-supply = <&reg_3p3v_s5>;
        };
 
-       reg_3p3v_s0: regulator-3p3v-s0 {
-               compatible = "regulator-fixed";
-               regulator-name = "V_3V3_S0";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-               regulator-always-on;
-               regulator-boot-on;
-               vin-supply = <&reg_3p3v_s5>;
-       };
-
        reg_3p3v_s5: regulator-3p3v-s5 {
                compatible = "regulator-fixed";
                regulator-name = "V_3V3_S5";
 
        /* default boot source: workaround #1 for errata ERR006282 */
        smarc_flash: flash@0 {
-               compatible = "winbond,w25q16dw", "jedec,spi-nor";
+               compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <20000000>;
        };
index a1676b5d2980fbd6c466ac00edc629f36bb1962f..c5a98b0110dd3eb94945f27c942160da89795f26 100644 (file)
@@ -28,7 +28,7 @@
                enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
        };
 
-       backlight_led: backlight_led {
+       backlight_led: backlight-led {
                compatible = "pwm-backlight";
                pwms = <&pwm3 0 5000000 0>;
                brightness-levels = <0 16 64 255>;
index d47bfb66d0692a0b0cc7c7842b887df0355e8801..4c22e44362718377625345785e1954ae5e1ec5b2 100644 (file)
                clock-names = "uartclk", "apb_pclk";
        };
 
-       ssp@300000 {
+       spi@300000 {
                compatible = "arm,pl022", "arm,primecell";
                reg = <0x00300000 0x1000>;
                interrupts-extended = <&impd1_vic 3>;
                clocks = <&impd1_sspclk>, <&sysclk>;
-               clock-names = "spiclk", "apb_pclk";
+               clock-names = "sspclk", "apb_pclk";
        };
 
        impd1_gpio0: gpio@400000 {
index eb5291b0ee3aa15a6928b08c52806340a234de4a..e07b807b4cec569bbdbf3dd1eb390212b178cb0b 100644 (file)
@@ -79,7 +79,7 @@
        clocks = <&ref12>;
 };
 
-&sdhci {
+&mmc {
        status = "okay";
 };
 
index f5f070a8748231077f623ceac4ce1fa785262624..764832ddfa78ace56941070a49915cc276669d11 100644 (file)
@@ -93,8 +93,8 @@
                        clock-names = "PCLK";
                };
 
-               sdhci: sdhci@98e00000 {
-                       compatible = "moxa,moxart-sdhci";
+               mmc: mmc@98e00000 {
+                       compatible = "moxa,moxart-mmc";
                        reg = <0x98e00000 0x5C>;
                        interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clk_apb>;
index 79f7cc2412824fb680cdec15fad5af8ae406a3c8..a520615f4d8dbc64bee0589b4d026cc164a1d588 100644 (file)
                        reg = <0x101f4000 0x1000>;
                        interrupts = <11>;
                        clocks = <&xtal24mhz>, <&pclk>;
-                       clock-names = "SSPCLK", "apb_pclk";
+                       clock-names = "sspclk", "apb_pclk";
                };
 
                fpga {
index f9904716ec7f15866db8db4f03cd84ea96192980..f543e2adae0cd154e93de751efd4188200153752 100644 (file)
@@ -46,7 +46,7 @@ static void __init ixp4xx_of_map_io(void)
 }
 
 /*
- * We handle 4 differen SoC families. These compatible strings are enough
+ * We handle 4 different SoC families. These compatible strings are enough
  * to provide the core so that different boards can add their more detailed
  * specifics.
  */
index 571cc234d0b3f320b81d63f38bc87725b6451211..9fb9fff08c94d026e3b51a3c37aecdea0577d384 100644 (file)
@@ -917,6 +917,23 @@ config ARM64_ERRATUM_1902691
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_2457168
+       bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
+       depends on ARM64_AMU_EXTN
+       default y
+       help
+         This option adds the workaround for ARM Cortex-A510 erratum 2457168.
+
+         The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
+         as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
+         incorrectly giving a significantly higher output value.
+
+         Work around this problem by returning 0 when reading the affected counter in
+         key locations that results in disabling all users of this counter. This effect
+         is the same to firmware disabling affected counters.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
index 8d0d45d168d1338e286229dddb8fcd29d5bcb2cc..2f27619d8abd59855c46cd6f899575ae278f75e1 100644 (file)
@@ -26,7 +26,8 @@
                compatible = "arm,mhu", "arm,primecell";
                reg = <0x0 0x2b1f0000 0x0 0x1000>;
                interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+                            <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
                #mbox-cells = <1>;
                clocks = <&soc_refclk100mhz>;
                clock-names = "apb_pclk";
index ba88d1596f6f1f734135f2441a2e052cc16f1914..09d2b692e9e1f2b01f76e88077e18ffddd0eb6a9 100644 (file)
@@ -67,7 +67,6 @@
                        port@0 {
                                reg = <0>;
                                csys2_funnel_in_port0: endpoint {
-                                       slave-mode;
                                        remote-endpoint = <&etf0_out_port>;
                                };
                        };
@@ -75,7 +74,6 @@
                        port@1 {
                                reg = <1>;
                                csys2_funnel_in_port1: endpoint {
-                                       slave-mode;
                                        remote-endpoint = <&etf1_out_port>;
                                };
                        };
index 40d34c8384a5e10392896320a383a449559df086..b949cac0374272714a1d1d66e6d8238ceb726d22 100644 (file)
@@ -25,7 +25,6 @@
 &enetc_port0 {
        phy-handle = <&slot1_sgmii>;
        phy-mode = "2500base-x";
-       managed = "in-band-status";
        status = "okay";
 };
 
index c97f4e06ae5f304831a2f2e40af4253521f7ee87..32f6f2f50c10ca29c4c80bac23614df665657337 100644 (file)
         * CPLD_reset is RESET_SOFT in schematic
         */
        gpio-line-names =
-               "CPLD_D[1]", "CPLD_int", "CPLD_reset", "",
-               "", "CPLD_D[0]", "", "",
-               "", "", "", "CPLD_D[2]",
-               "CPLD_D[3]", "CPLD_D[4]", "CPLD_D[5]", "CPLD_D[6]",
-               "CPLD_D[7]", "", "", "",
+               "CPLD_D[6]", "CPLD_int", "CPLD_reset", "",
+               "", "CPLD_D[7]", "", "",
+               "", "", "", "CPLD_D[5]",
+               "CPLD_D[4]", "CPLD_D[3]", "CPLD_D[2]", "CPLD_D[1]",
+               "CPLD_D[0]", "", "", "",
                "", "", "", "",
                "", "", "", "KBD_intK",
                "", "", "", "";
index 286d2df01cfa72e38fdacd7777d7b9516efebd66..7e0aeb2db30549b7c9469b7b1d3d1ee908e7e2a1 100644 (file)
@@ -5,7 +5,6 @@
 
 /dts-v1/;
 
-#include <dt-bindings/phy/phy-imx8-pcie.h>
 #include "imx8mm-tqma8mqml.dtsi"
 #include "mba8mx.dtsi"
 
index 16ee9b5179e6e356ad6ad022d6b098b8c8947179..f649dfacb4b696caef22fc9e17a2d55cc20fb8e0 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright 2020-2021 TQ-Systems GmbH
  */
 
+#include <dt-bindings/phy/phy-imx8-pcie.h>
 #include "imx8mm.dtsi"
 
 / {
index 35fb929e7bccecef9d739844fd616054d55743a0..d3ee6fc4baabd7bdba3e5936dc6fdaecc5707bac 100644 (file)
                        lan1: port@0 {
                                reg = <0>;
                                label = "lan1";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan2: port@1 {
                                reg = <1>;
                                label = "lan2";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan3: port@2 {
                                reg = <2>;
                                label = "lan3";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan4: port@3 {
                                reg = <3>;
                                label = "lan4";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
index d1b4582f44c4d79230cd2f85c23cbf0527a97caa..3ec0c9ac3170588839adcb66d9cbc22777b4b6f5 100644 (file)
        };
 
        /* Fixed clock dedicated to SPI CAN controller */
-       clk20m: oscillator {
+       clk40m: oscillator {
                compatible = "fixed-clock";
                #clock-cells = <0>;
-               clock-frequency = <20000000>;
+               clock-frequency = <40000000>;
        };
 
        gpio-keys {
 
        can1: can@0 {
                compatible = "microchip,mcp251xfd";
-               clocks = <&clk20m>;
-               interrupts-extended = <&gpio1 6 IRQ_TYPE_EDGE_FALLING>;
+               clocks = <&clk40m>;
+               interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_can1_int>;
                reg = <0>;
                                nxp,dvs-standby-voltage = <850000>;
                                regulator-always-on;
                                regulator-boot-on;
-                               regulator-max-microvolt = <950000>;
-                               regulator-min-microvolt = <850000>;
+                               regulator-max-microvolt = <1050000>;
+                               regulator-min-microvolt = <805000>;
                                regulator-name = "On-module +VDD_ARM (BUCK2)";
                                regulator-ramp-delay = <3125>;
                        };
                        reg_vdd_dram: BUCK3 {
                                regulator-always-on;
                                regulator-boot-on;
-                               regulator-max-microvolt = <950000>;
-                               regulator-min-microvolt = <850000>;
+                               regulator-max-microvolt = <1000000>;
+                               regulator-min-microvolt = <805000>;
                                regulator-name = "On-module +VDD_GPU_VPU_DDR (BUCK3)";
                        };
 
                        reg_vdd_snvs: LDO2 {
                                regulator-always-on;
                                regulator-boot-on;
-                               regulator-max-microvolt = <900000>;
+                               regulator-max-microvolt = <800000>;
                                regulator-min-microvolt = <800000>;
                                regulator-name = "On-module +V0.8_SNVS (LDO2)";
                        };
                pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>;
                reg = <0x4a>;
                /* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */
-               reset-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
                status = "disabled";
        };
 
 };
 
 &usbphynop2 {
+       power-domains = <&pgc_otg2>;
        vcc-supply = <&reg_vdd_3v3>;
 };
 
index 0c71b740a3166fbaa187439665ea58bcc20b87ad..cb2836bfbd95c7a3d7d354a6b63bea2e885f5925 100644 (file)
                                                         <&clk IMX8MN_CLK_GPU_SHADER>,
                                                         <&clk IMX8MN_CLK_GPU_BUS_ROOT>,
                                                         <&clk IMX8MN_CLK_GPU_AHB>;
-                                               resets = <&src IMX8MQ_RESET_GPU_RESET>;
                                        };
 
                                        pgc_dispmix: power-domain@3 {
index a616eb3780025fe892953b416f46996c4871875f..0f13ee36277151479f98e3ed3b31531620888856 100644 (file)
@@ -70,7 +70,7 @@
 &ecspi1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_ecspi1>;
-       cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+       cs-gpios = <&gpio5 17 GPIO_ACTIVE_LOW>;
        status = "disabled";
 };
 
        pinctrl-names = "default", "gpio";
        pinctrl-0 = <&pinctrl_i2c5>;
        pinctrl-1 = <&pinctrl_i2c5_gpio>;
-       scl-gpios = <&gpio5 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
-       sda-gpios = <&gpio5 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+       scl-gpios = <&gpio3 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+       sda-gpios = <&gpio3 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
        status = "okay";
 };
 
 
        pinctrl_ecspi1: dhcom-ecspi1-grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK           0x44
-                       MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI           0x44
-                       MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO           0x44
-                       MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09             0x40
+                       MX8MP_IOMUXC_I2C1_SCL__ECSPI1_SCLK              0x44
+                       MX8MP_IOMUXC_I2C1_SDA__ECSPI1_MOSI              0x44
+                       MX8MP_IOMUXC_I2C2_SCL__ECSPI1_MISO              0x44
+                       MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17               0x40
                >;
        };
 
index d8ca52976170e8ce342d4c7a0c06567841a97645..0e237b2f95415613b225b6b02188970f28b0882d 100644 (file)
                switch-1 {
                        label = "S12";
                        linux,code = <BTN_0>;
-                       gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
+                       gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
                };
 
                switch-2 {
                        label = "S13";
                        linux,code = <BTN_1>;
-                       gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
+                       gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
                };
        };
 
 
 &pcf85063 {
        /* RTC_EVENT# is connected on MBa8MPxL */
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_pcf85063>;
        interrupt-parent = <&gpio4>;
        interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
 };
                fsl,pins = <MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20           0x10>; /* Power enable */
        };
 
+       pinctrl_pcf85063: pcf85063grp {
+               fsl,pins = <MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28          0x80>;
+       };
+
        /* LVDS Backlight */
        pinctrl_pwm2: pwm2grp {
                fsl,pins = <MX8MP_IOMUXC_SAI5_RXD0__PWM2_OUT            0x14>;
index 521215520a0f40acb3a9310f82eef1354328ee7c..211e6a1b296e1355c45f020a2fb3bdb502237558 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_can>;
                regulator-name = "can2_stby";
-               gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>;
-               enable-active-high;
+               gpio = <&gpio3 19 GPIO_ACTIVE_LOW>;
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
        };
                        lan1: port@0 {
                                reg = <0>;
                                label = "lan1";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan2: port@1 {
                                reg = <1>;
                                label = "lan2";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan3: port@2 {
                                reg = <2>;
                                label = "lan3";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan4: port@3 {
                                reg = <3>;
                                label = "lan4";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan5: port@4 {
                                reg = <4>;
                                label = "lan5";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
-                       port@6 {
-                               reg = <6>;
+                       port@5 {
+                               reg = <5>;
                                label = "cpu";
                                ethernet = <&fec>;
                                phy-mode = "rgmii-id";
 
        pinctrl_sai2: sai2grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC
-                       MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00
-                       MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK
-                       MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK
+                       MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC   0xd6
+                       MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00 0xd6
+                       MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK    0xd6
+                       MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK      0xd6
                >;
        };
 
index c5987bdbb383cac59f7f1ca1ef303af3ea1a8b36..1c74c6a1944911bbad28f19ae0fe6e0923d0c8ac 100644 (file)
                interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
                reg = <0x4a>;
                /* Verdin GPIO_2 (SODIMM 208) */
-               reset-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
                status = "disabled";
        };
 };
                pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>;
                reg = <0x4a>;
                /* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */
-               reset-gpios = <&gpio5 0 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
                status = "disabled";
        };
 
index 899e8e7dbc24f20ac2097245f71f3cb43345dc1f..802ad6e5cef61790c98147ac9f596c4e1bdfbb21 100644 (file)
                reg = <0x51>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_rtc>;
-               interrupt-names = "irq";
                interrupt-parent = <&gpio1>;
                interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
                quartz-load-femtofarads = <7000>;
index 60c1b018bf03db0ed9ea1ac5201129229d336b86..bb56390b8f5412f69e6539b4f8e7c0dcbfe86c66 100644 (file)
                                compatible = "fsl,imx8ulp-pcc3";
                                reg = <0x292d0000 0x10000>;
                                #clock-cells = <1>;
+                               #reset-cells = <1>;
                        };
 
                        tpm5: tpm@29340000 {
                                compatible = "fsl,imx8ulp-pcc4";
                                reg = <0x29800000 0x10000>;
                                #clock-cells = <1>;
+                               #reset-cells = <1>;
                        };
 
                        lpi2c6: i2c@29840000 {
                                compatible = "fsl,imx8ulp-pcc5";
                                reg = <0x2da70000 0x10000>;
                                #clock-cells = <1>;
+                               #reset-cells = <1>;
                        };
                };
 
index 7cbb0de060ddc03b6df2cdcbd0a4087a56c00d84..1c15726cff8bf10fc80fb62368c5d386f01124ad 100644 (file)
@@ -85,7 +85,7 @@
                                     "renesas,rcar-gen4-hscif",
                                     "renesas,hscif";
                        reg = <0 0xe6540000 0 96>;
-                       interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&cpg CPG_MOD 514>,
                                 <&cpg CPG_CORE R8A779G0_CLK_S0D3_PER>,
                                 <&scif_clk>;
index 7249871530ab96c778b09a6366a7a8ed817b3a00..5eecbefa8a336e83435c661cc189129ad08ac355 100644 (file)
@@ -2,8 +2,8 @@
 /*
  * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
  * Copyright (c) 2020 Engicam srl
- * Copyright (c) 2020 Amarula Solutons
- * Copyright (c) 2020 Amarula Solutons(India)
+ * Copyright (c) 2020 Amarula Solutions
+ * Copyright (c) 2020 Amarula Solutions(India)
  */
 
 #include <dt-bindings/gpio/gpio.h>
index 31ebb4e5fd3307335008ecafd1ca8be1df54a607..0f9cc042d9bf06b3445c2cb125435c823f3b26b4 100644 (file)
@@ -88,3 +88,8 @@
                };
        };
 };
+
+&wlan_host_wake_l {
+       /* Kevin has an external pull up, but Bob does not. */
+       rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+};
index cd074641884bfe51cd12293954eb83000e54d95d..ee6095baba4d3a0679f1ce01106ae81c05920760 100644 (file)
 &edp {
        status = "okay";
 
+       /*
+        * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
+        * set this here, because rk3399-gru.dtsi ensures we can generate this
+        * off GPLL=600MHz, whereas some other RK3399 boards may not.
+        */
+       assigned-clocks = <&cru PCLK_EDP>;
+       assigned-clock-rates = <24000000>;
+
        ports {
                edp_out: port@1 {
                        reg = <1>;
@@ -578,6 +586,7 @@ ap_i2c_tp: &i2c5 {
        };
 
        wlan_host_wake_l: wlan-host-wake-l {
+               /* Kevin has an external pull up, but Bob does not */
                rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
        };
 };
index b1ac3a89f259cdcab1842f2e161774a554f814a6..aa3e21bd6c8f44ff5f9b249dd43b56316dbc2a60 100644 (file)
@@ -62,7 +62,6 @@
        vcc5v0_host: vcc5v0-host-regulator {
                compatible = "regulator-fixed";
                gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
-               enable-active-low;
                pinctrl-names = "default";
                pinctrl-0 = <&vcc5v0_host_en>;
                regulator-name = "vcc5v0_host";
index d943559b157cec08d62dbc2cc4748f033131e5a6..a05460b924153ec3df73fb9e311096b9ed3e4f87 100644 (file)
 
        vcc3v3_sd: vcc3v3_sd {
                compatible = "regulator-fixed";
-               enable-active-low;
                gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&vcc_sd_h>;
index 02d5f5a8ca036171f715114212eb9df5b7042d0c..528bb4e8ac776beee1b111dda93c2db93157236a 100644 (file)
        disable-wp;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
-       sd-uhs-sdr104;
+       sd-uhs-sdr50;
        vmmc-supply = <&vcc3v3_sd>;
        vqmmc-supply = <&vccio_sd>;
        status = "okay";
index 5e34bd0b214d624c0ec13c0c9683a25403d68965..93d383b8be878ac20d891b5fcadf3a88f24443bb 100644 (file)
 };
 
 &usb_host0_xhci {
-       extcon = <&usb2phy0>;
+       dr_mode = "host";
        status = "okay";
 };
 
index 6ff89ff95ad1c9ff00ca3855af6e7be3fc8e8bd9..674792567fa6e288c94169406cd69b0e2262d5b2 100644 (file)
 };
 
 &usb2phy0_otg {
-       vbus-supply = <&vcc5v0_usb_otg>;
+       phy-supply = <&vcc5v0_usb_otg>;
        status = "okay";
 };
 
index 6b5093a1a6cf5ca242309eb635b7adac49a4185e..b2e040dffb5913da994c24343050881ae40df7df 100644 (file)
 };
 
 &usb2phy0_otg {
-       vbus-supply = <&vcc5v0_usb_otg>;
+       phy-supply = <&vcc5v0_usb_otg>;
        status = "okay";
 };
 
index d5b2d2dd49043d26aaa71a7e996c083fc75963df..5b167649097edc5aee5f7564cfeb529903095948 100644 (file)
@@ -48,6 +48,7 @@ CONFIG_ARCH_KEEMBAY=y
 CONFIG_ARCH_MEDIATEK=y
 CONFIG_ARCH_MESON=y
 CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_NXP=y
 CONFIG_ARCH_MXC=y
 CONFIG_ARCH_NPCM=y
 CONFIG_ARCH_QCOM=y
index ca9b487112ccbfa6c1c2d55dea2a781004356bdc..34256bda0da9de81f19c61983298cdf9cf52f0d6 100644 (file)
@@ -71,7 +71,7 @@ static __always_inline int icache_is_vpipt(void)
 
 static inline u32 cache_type_cwg(void)
 {
-       return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
+       return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
 }
 
 #define __read_mostly __section(".data..read_mostly")
index 9bb1873f529515a9ec9e1ee487a4e9e8a02e1f3b..6f86b7ab6c28f00d38573336c9e0425df75d80c1 100644 (file)
@@ -153,7 +153,7 @@ struct vl_info {
 
 #ifdef CONFIG_ARM64_SVE
 
-extern void sve_alloc(struct task_struct *task);
+extern void sve_alloc(struct task_struct *task, bool flush);
 extern void fpsimd_release_task(struct task_struct *task);
 extern void fpsimd_sync_to_sve(struct task_struct *task);
 extern void fpsimd_force_sync_to_sve(struct task_struct *task);
@@ -256,7 +256,7 @@ size_t sve_state_size(struct task_struct const *task);
 
 #else /* ! CONFIG_ARM64_SVE */
 
-static inline void sve_alloc(struct task_struct *task) { }
+static inline void sve_alloc(struct task_struct *task, bool flush) { }
 static inline void fpsimd_release_task(struct task_struct *task) { }
 static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
 static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
index f38ef299f13bd53f5de8abc76263357c3f46798c..e9c9388ccc024e5fd9b1dfcb932af2140ece0950 100644 (file)
@@ -929,6 +929,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
        (system_supports_mte() &&                               \
         test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
 
+#define kvm_supports_32bit_el0()                               \
+       (system_supports_32bit_el0() &&                         \
+        !static_branch_unlikely(&arm64_mismatched_32bit_el0))
+
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 extern phys_addr_t hyp_mem_base;
index 6437df661700920473aa9ccb1f217985436f856c..f4af547ef54caa70a521387d442ffeb6d04a230b 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef __ARM64_ASM_SETUP_H
 #define __ARM64_ASM_SETUP_H
 
+#include <linux/string.h>
+
 #include <uapi/asm/setup.h>
 
 void *get_early_fdt_ptr(void);
@@ -14,4 +16,19 @@ void early_fdt_map(u64 dt_phys);
 extern phys_addr_t __fdt_pointer __initdata;
 extern u64 __cacheline_aligned boot_args[4];
 
+static inline bool arch_parse_debug_rodata(char *arg)
+{
+       extern bool rodata_enabled;
+       extern bool rodata_full;
+
+       if (arg && !strcmp(arg, "full")) {
+               rodata_enabled = true;
+               rodata_full = true;
+               return true;
+       }
+
+       return false;
+}
+#define arch_parse_debug_rodata arch_parse_debug_rodata
+
 #endif
index 7c71358d44c4ab811fc05af7ff6d405aa737d4cf..818df938a7ad01669a7d35771d7c9bca2acaf7f4 100644 (file)
 
 #else
 
+#include <linux/bitfield.h>
 #include <linux/build_bug.h>
 #include <linux/types.h>
 #include <asm/alternative.h>
        par;                                                            \
 })
 
-#endif
-
 #define SYS_FIELD_GET(reg, field, val)         \
                 FIELD_GET(reg##_##field##_MASK, val)
 
 #define SYS_FIELD_PREP_ENUM(reg, field, val)           \
                 FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
 
+#endif
+
 #endif /* __ASM_SYSREG_H */
index 3bb134355874c8bf12e81da517aa661dbb60ea6c..316917b9870704de245f002cb4261c46f8a2fea4 100644 (file)
@@ -75,9 +75,11 @@ struct kvm_regs {
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT      0
-#define KVM_ARM_DEVICE_TYPE_MASK       (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK       GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+                                               KVM_ARM_DEVICE_TYPE_SHIFT)
 #define KVM_ARM_DEVICE_ID_SHIFT                16
-#define KVM_ARM_DEVICE_ID_MASK         (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK         GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+                                               KVM_ARM_DEVICE_ID_SHIFT)
 
 /* Supported device IDs */
 #define KVM_ARM_DEVICE_VGIC_V2         0
index 587543c6c51cb0fdbc7a55595be1310d1f6d6221..97c42be71338a9ee29b79ce823592d238c5ba0fe 100644 (file)
@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 
 int init_cache_level(unsigned int cpu)
 {
-       unsigned int ctype, level, leaves, fw_level;
+       unsigned int ctype, level, leaves;
+       int fw_level;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 
        for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
        else
                fw_level = acpi_find_last_cache_level(cpu);
 
+       if (fw_level < 0)
+               return fw_level;
+
        if (level < fw_level) {
                /*
                 * some external caches not specified in CLIDR_EL1
index 7e6289e709fc898f983967e0cb7b067deb257b47..53b973b6059f7c8007c24c9883cc8500b29a0076 100644 (file)
@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+       },
+       {
                /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
                ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
        },
@@ -654,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2457168
+       {
+               .desc = "ARM erratum 2457168",
+               .capability = ARM64_WORKAROUND_2457168,
+               .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+
+               /* Cortex-A510 r0p0-r1p1 */
+               CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
+       },
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2038923
        {
                .desc = "ARM erratum 2038923",
index 907401e4fffb19cf91b3620a8db8cf90306cfc14..af4de817d7123a3608fdcab8b2c1dc42bcdc42aa 100644 (file)
@@ -1870,7 +1870,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
                pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
                        smp_processor_id());
                cpumask_set_cpu(smp_processor_id(), &amu_cpus);
-               update_freq_counters_refs();
+
+               /* 0 reference values signal broken/disabled counters */
+               if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
+                       update_freq_counters_refs();
        }
 }
 
index 254fe31c03a07d048c41e2156a95aff997d68d06..2d73b3e793b2bc56878b8ef62cde5d98773b54db 100644 (file)
@@ -502,7 +502,7 @@ tsk .req    x28             // current thread_info
 SYM_CODE_START(vectors)
        kernel_ventry   1, t, 64, sync          // Synchronous EL1t
        kernel_ventry   1, t, 64, irq           // IRQ EL1t
-       kernel_ventry   1, t, 64, fiq           // FIQ EL1h
+       kernel_ventry   1, t, 64, fiq           // FIQ EL1t
        kernel_ventry   1, t, 64, error         // Error EL1t
 
        kernel_ventry   1, h, 64, sync          // Synchronous EL1h
index dd63ffc3a2fa2782823fec804c8327405c69a4cd..23834d96d1e781d16e34e760f5a7e72af7422525 100644 (file)
@@ -715,10 +715,12 @@ size_t sve_state_size(struct task_struct const *task)
  * do_sve_acc() case, there is no ABI requirement to hide stale data
  * written previously be task.
  */
-void sve_alloc(struct task_struct *task)
+void sve_alloc(struct task_struct *task, bool flush)
 {
        if (task->thread.sve_state) {
-               memset(task->thread.sve_state, 0, sve_state_size(task));
+               if (flush)
+                       memset(task->thread.sve_state, 0,
+                              sve_state_size(task));
                return;
        }
 
@@ -1388,7 +1390,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs)
                return;
        }
 
-       sve_alloc(current);
+       sve_alloc(current, true);
        if (!current->thread.sve_state) {
                force_sig(SIGKILL);
                return;
@@ -1439,7 +1441,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
                return;
        }
 
-       sve_alloc(current);
+       sve_alloc(current, false);
        sme_alloc(current);
        if (!current->thread.sve_state || !current->thread.za_state) {
                force_sig(SIGKILL);
@@ -1460,17 +1462,6 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
                fpsimd_bind_task_to_cpu();
        }
 
-       /*
-        * If SVE was not already active initialise the SVE registers,
-        * any non-shared state between the streaming and regular SVE
-        * registers is architecturally guaranteed to be zeroed when
-        * we enter streaming mode.  We do not need to initialize ZA
-        * since ZA must be disabled at this point and enabling ZA is
-        * architecturally defined to zero ZA.
-        */
-       if (system_supports_sve() && !test_thread_flag(TIF_SVE))
-               sve_init_regs();
-
        put_cpu_fpsimd_context();
 }
 
index 6c3855e693956499b3f20f497574efc8111a1cfb..17bff6e399e46b0bb33efa2bf9f1ebe58d1a770d 100644 (file)
@@ -94,11 +94,9 @@ asmlinkage u64 kaslr_early_init(void *fdt)
 
        seed = get_kaslr_seed(fdt);
        if (!seed) {
-#ifdef CONFIG_ARCH_RANDOM
-                if (!__early_cpu_has_rndr() ||
-                    !__arm64_rndr((unsigned long *)&seed))
-#endif
-               return 0;
+               if (!__early_cpu_has_rndr() ||
+                   !__arm64_rndr((unsigned long *)&seed))
+                       return 0;
        }
 
        /*
index 21da83187a602c499e4240d538d28da3bed5624d..eb7c08dfb8348e29175e0076eb5c783bea24f8d3 100644 (file)
@@ -882,7 +882,7 @@ static int sve_set_common(struct task_struct *target,
                 * state and ensure there's storage.
                 */
                if (target->thread.svcr != old_svcr)
-                       sve_alloc(target);
+                       sve_alloc(target, true);
        }
 
        /* Registers: FPSIMD-only case */
@@ -912,7 +912,7 @@ static int sve_set_common(struct task_struct *target,
                goto out;
        }
 
-       sve_alloc(target);
+       sve_alloc(target, true);
        if (!target->thread.sve_state) {
                ret = -ENOMEM;
                clear_tsk_thread_flag(target, TIF_SVE);
@@ -1082,7 +1082,7 @@ static int za_set(struct task_struct *target,
 
        /* Ensure there is some SVE storage for streaming mode */
        if (!target->thread.sve_state) {
-               sve_alloc(target);
+               sve_alloc(target, false);
                if (!target->thread.sve_state) {
                        clear_thread_flag(TIF_SME);
                        ret = -ENOMEM;
index 3e6d0352d7d36469d85853af6059a9523a5fa8e4..9ad911f1647c8a204424b992255f11de92279332 100644 (file)
@@ -91,7 +91,7 @@ static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
  * not taken into account.  This limit is not a guarantee and is
  * NOT ABI.
  */
-#define SIGFRAME_MAXSZ SZ_64K
+#define SIGFRAME_MAXSZ SZ_256K
 
 static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
                            unsigned long *offset, size_t size, bool extend)
@@ -310,7 +310,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
        fpsimd_flush_task_state(current);
        /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
 
-       sve_alloc(current);
+       sve_alloc(current, true);
        if (!current->thread.sve_state) {
                clear_thread_flag(TIF_SVE);
                return -ENOMEM;
@@ -926,6 +926,16 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 
        /* Signal handlers are invoked with ZA and streaming mode disabled */
        if (system_supports_sme()) {
+               /*
+                * If we were in streaming mode the saved register
+                * state was SVE but we will exit SM and use the
+                * FPSIMD register state - flush the saved FPSIMD
+                * register state in case it gets loaded.
+                */
+               if (current->thread.svcr & SVCR_SM_MASK)
+                       memset(&current->thread.uw.fpsimd_state, 0,
+                              sizeof(current->thread.uw.fpsimd_state));
+
                current->thread.svcr &= ~(SVCR_ZA_MASK |
                                          SVCR_SM_MASK);
                sme_smstop();
index 869ffc4d4484777310d2bf905c3d7346cef04c4c..ad2bfc794257dbb0d6d48a6a70790b3e97aa5198 100644 (file)
@@ -296,12 +296,25 @@ core_initcall(init_amu_fie);
 
 static void cpu_read_corecnt(void *val)
 {
+       /*
+        * A value of 0 can be returned if the current CPU does not support AMUs
+        * or if the counter is disabled for this CPU. A return value of 0 at
+        * counter read is properly handled as an error case by the users of the
+        * counter.
+        */
        *(u64 *)val = read_corecnt();
 }
 
 static void cpu_read_constcnt(void *val)
 {
-       *(u64 *)val = read_constcnt();
+       /*
+        * Return 0 if the current CPU is affected by erratum 2457168. A value
+        * of 0 is also returned if the current CPU does not support AMUs or if
+        * the counter is disabled. A return value of 0 at counter read is
+        * properly handled as an error case by the users of the counter.
+        */
+       *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
+                     0UL : read_constcnt();
 }
 
 static inline
@@ -328,7 +341,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
  */
 bool cpc_ffh_supported(void)
 {
-       return freq_counters_valid(get_cpu_with_amu_feat());
+       int cpu = get_cpu_with_amu_feat();
+
+       /*
+        * FFH is considered supported if there is at least one present CPU that
+        * supports AMUs. Using FFH to read core and reference counters for CPUs
+        * that do not support AMUs, have counters disabled or that are affected
+        * by errata, will result in a return value of 0.
+        *
+        * This is done to allow any enabled and valid counters to be read
+        * through FFH, knowing that potentially returning 0 as counter value is
+        * properly handled by the users of these counters.
+        */
+       if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
+               return false;
+
+       return true;
 }
 
 int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
index 986cee6fbc7f2267de120f1137417c2e5f315386..2ff0ef62abadc8cbf01a23b4b1df4f46039b55f7 100644 (file)
@@ -757,8 +757,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
        if (likely(!vcpu_mode_is_32bit(vcpu)))
                return false;
 
-       return !system_supports_32bit_el0() ||
-               static_branch_unlikely(&arm64_mismatched_32bit_el0);
+       return !kvm_supports_32bit_el0();
 }
 
 /**
index 8c607199cad14d4dee8320d8847dc3ab9dab52ce..f802a3b3f8dbc86664d280601b1180ff2da0378f 100644 (file)
@@ -242,7 +242,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
-                       if (!system_supports_32bit_el0())
+                       if (!kvm_supports_32bit_el0())
                                return -EINVAL;
                        break;
                case PSR_AA32_MODE_FIQ:
index 87f1cd0df36ea7360ac5e3b597009292bc9ef45a..c9a13e487187cc790eb546d96a9b3d55db425a51 100644 (file)
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
                 * THP doesn't start to split while we are adjusting the
                 * refcounts.
                 *
-                * We are sure this doesn't happen, because mmu_notifier_retry
+                * We are sure this doesn't happen, because mmu_invalidate_retry
                 * was successful and we are holding the mmu_lock, so if this
                 * THP is trying to split, it will be blocked in the mmu
                 * notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        return ret;
        }
 
-       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       mmu_seq = vcpu->kvm->mmu_invalidate_seq;
        /*
-        * Ensure the read of mmu_notifier_seq happens before we call
+        * Ensure the read of mmu_invalidate_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
         * the page we just got a reference to gets unmapped before we have a
         * chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        else
                write_lock(&kvm->mmu_lock);
        pgt = vcpu->arch.hw_mmu->pgt;
-       if (mmu_notifier_retry(kvm, mmu_seq))
+       if (mmu_invalidate_retry(kvm, mmu_seq))
                goto out_unlock;
 
        /*
index c059b259aea63203a5c586d7b4f3e1fb05046c77..3234f50b8c4b25160aea442a1c212fa88f6ecd2b 100644 (file)
@@ -652,7 +652,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-       if (!system_supports_32bit_el0())
+       if (!kvm_supports_32bit_el0())
                val |= ARMV8_PMU_PMCR_LC;
        __vcpu_sys_reg(vcpu, r->reg) = val;
 }
@@ -701,7 +701,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
-               if (!system_supports_32bit_el0())
+               if (!kvm_supports_32bit_el0())
                        val |= ARMV8_PMU_PMCR_LC;
                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
index db7c4e6ae57bbe02774748c9e050d246486b4f9f..e7ad44585f40abab6293dc4ffbfd08775f1e53ac 100644 (file)
@@ -642,24 +642,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
        vm_area_add_early(vma);
 }
 
-static int __init parse_rodata(char *arg)
-{
-       int ret = strtobool(arg, &rodata_enabled);
-       if (!ret) {
-               rodata_full = false;
-               return 0;
-       }
-
-       /* permit 'full' in addition to boolean options */
-       if (strcmp(arg, "full"))
-               return -EINVAL;
-
-       rodata_enabled = true;
-       rodata_full = true;
-       return 0;
-}
-early_param("rodata", parse_rodata);
-
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __init map_entry_trampoline(void)
 {
index 779653771507a03c5d83960c8d687d132161da62..63b2484ce6c3d001e24f767d95c712b478450828 100644 (file)
@@ -67,6 +67,7 @@ WORKAROUND_1902691
 WORKAROUND_2038923
 WORKAROUND_2064142
 WORKAROUND_2077057
+WORKAROUND_2457168
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
 WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
index da500471ac73ccb0a6569992c4f189d1bce5f2f4..160d8f37fa1a34c237282940ae6cf2947893f382 100644 (file)
@@ -179,6 +179,21 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
        return retval;
 }
 
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+       int retval;
+
+       asm volatile(
+       "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+       : "=&r" (retval)
+       : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+       : "p0", "memory"
+       );
+
+       return retval;
+}
+
 /*
  * ffz - find first zero in word.
  * @word: The word to search
index 9f62af7fd7c42c9d0f621a06058274863786a0a3..1accb7842f58806446720a8992ce0d1d2f71c99c 100644 (file)
@@ -331,11 +331,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
        return (old & bit) != 0;
 }
 
-static __always_inline bool
-arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
-{
-       return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 /**
  * ffz - find the first zero bit in a long word
index 4abc9a28aba4eee5959f7a1e17f4814a945165c0..26aeb1408e56937ac20491937b05d58507ff5719 100644 (file)
@@ -111,6 +111,7 @@ config LOONGARCH
        select PCI_ECAM if ACPI
        select PCI_LOONGSON
        select PCI_MSI_ARCH_FALLBACKS
+       select PCI_QUIRKS
        select PERF_USE_VMALLOC
        select RTC_LIB
        select SMP
index b91e0733b2e571aa79885d8764f11148384586d0..d342935e5a72d1de92c496087ba3f2e96e46d352 100644 (file)
@@ -109,4 +109,20 @@ extern unsigned long vm_map_base;
  */
 #define PHYSADDR(a)            ((_ACAST64_(a)) & TO_PHYS_MASK)
 
+/*
+ * On LoongArch, the I/O ports mapping is as follows:
+ *
+ *              |         ....          |
+ *              |-----------------------|
+ *              | pci io ports(16K~32M) |
+ *              |-----------------------|
+ *              | isa io ports(0  ~16K) |
+ * PCI_IOBASE ->|-----------------------|
+ *              |         ....          |
+ */
+#define PCI_IOBASE     ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
+#define PCI_IOSIZE     SZ_32M
+#define ISA_IOSIZE     SZ_16K
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
 #endif /* _ASM_ADDRSPACE_H */
index 0a9b0fac1eeeb6115bfcd444d35b1975f45a1277..ae19e33c77548aed5f94c59f0c727c1d535eb647 100644 (file)
@@ -5,8 +5,9 @@
 #ifndef __ASM_CMPXCHG_H
 #define __ASM_CMPXCHG_H
 
-#include <asm/barrier.h>
+#include <linux/bits.h>
 #include <linux/build_bug.h>
+#include <asm/barrier.h>
 
 #define __xchg_asm(amswap_db, m, val)          \
 ({                                             \
                __ret;                          \
 })
 
+static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
+                                       unsigned int size)
+{
+       unsigned int shift;
+       u32 old32, mask, temp;
+       volatile u32 *ptr32;
+
+       /* Mask value to the correct size. */
+       mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+       val &= mask;
+
+       /*
+        * Calculate a shift & mask that correspond to the value we wish to
+        * exchange within the naturally aligned 4 byte integer that includes
+        * it.
+        */
+       shift = (unsigned long)ptr & 0x3;
+       shift *= BITS_PER_BYTE;
+       mask <<= shift;
+
+       /*
+        * Calculate a pointer to the naturally aligned 4 byte integer that
+        * includes our byte of interest, and load its value.
+        */
+       ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+
+       asm volatile (
+       "1:     ll.w            %0, %3          \n"
+       "       andn            %1, %0, %z4     \n"
+       "       or              %1, %1, %z5     \n"
+       "       sc.w            %1, %2          \n"
+       "       beqz            %1, 1b          \n"
+       : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
+       : "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
+       : "memory");
+
+       return (old32 & mask) >> shift;
+}
+
 static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
                                   int size)
 {
        switch (size) {
+       case 1:
+       case 2:
+               return __xchg_small(ptr, x, size);
+
        case 4:
                return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
 
@@ -67,10 +111,62 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
        __ret;                                                          \
 })
 
+static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
+                                          unsigned int new, unsigned int size)
+{
+       unsigned int shift;
+       u32 old32, mask, temp;
+       volatile u32 *ptr32;
+
+       /* Mask inputs to the correct size. */
+       mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+       old &= mask;
+       new &= mask;
+
+       /*
+        * Calculate a shift & mask that correspond to the value we wish to
+        * compare & exchange within the naturally aligned 4 byte integer
+        * that includes it.
+        */
+       shift = (unsigned long)ptr & 0x3;
+       shift *= BITS_PER_BYTE;
+       old <<= shift;
+       new <<= shift;
+       mask <<= shift;
+
+       /*
+        * Calculate a pointer to the naturally aligned 4 byte integer that
+        * includes our byte of interest, and load its value.
+        */
+       ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+
+       asm volatile (
+       "1:     ll.w            %0, %3          \n"
+       "       and             %1, %0, %z4     \n"
+       "       bne             %1, %z5, 2f     \n"
+       "       andn            %1, %0, %z4     \n"
+       "       or              %1, %1, %z6     \n"
+       "       sc.w            %1, %2          \n"
+       "       beqz            %1, 1b          \n"
+       "       b               3f              \n"
+       "2:                                     \n"
+       __WEAK_LLSC_MB
+       "3:                                     \n"
+       : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
+       : "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
+       : "memory");
+
+       return (old32 & mask) >> shift;
+}
+
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, unsigned int size)
 {
        switch (size) {
+       case 1:
+       case 2:
+               return __cmpxchg_small(ptr, old, new, size);
+
        case 4:
                return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
                                     (u32)old, new);
index 884599739b3676d3e693dc1d56918f3cbb964f75..999944ea1cea46e3a0062b987c69880766fc5f34 100644 (file)
@@ -7,34 +7,15 @@
 
 #define ARCH_HAS_IOREMAP_WC
 
-#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
-#include <asm/bug.h>
-#include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/page.h>
 #include <asm/pgtable-bits.h>
 #include <asm/string.h>
 
-/*
- * On LoongArch, I/O ports mappring is following:
- *
- *              |         ....          |
- *              |-----------------------|
- *              | pci io ports(64K~32M) |
- *              |-----------------------|
- *              | isa io ports(0  ~16K) |
- * PCI_IOBASE ->|-----------------------|
- *              |         ....          |
- */
-#define PCI_IOBASE     ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
-#define PCI_IOSIZE     SZ_32M
-#define ISA_IOSIZE     SZ_16K
-#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
-
 /*
  * Change "struct page" to physical address.
  */
index 4b130199ceae716b2d76d15549c302230705578c..d06d4542b634c35a9e29992f68a6a572cc730e03 100644 (file)
@@ -81,7 +81,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
 #define GSI_MIN_PCH_IRQ                LOONGSON_PCH_IRQ_BASE
 #define GSI_MAX_PCH_IRQ                (LOONGSON_PCH_IRQ_BASE + 256 - 1)
 
-extern int find_pch_pic(u32 gsi);
 struct acpi_madt_lio_pic;
 struct acpi_madt_eio_pic;
 struct acpi_madt_ht_pic;
index a37324ac460b6e0a002ca1abd00fc255f42ff08f..53f284a961823c9e3f9cad4613a45d42eece9617 100644 (file)
@@ -95,7 +95,7 @@ static inline int pfn_valid(unsigned long pfn)
 
 #endif
 
-#define virt_to_pfn(kaddr)     PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_pfn(kaddr)     PFN_DOWN(PHYSADDR(kaddr))
 #define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
 
 extern int __virt_addr_valid(volatile void *kaddr);
index e6569f18c6ddfcd5e8d75bde426c4e4b5a7071fe..0bd6b0110198f776a7bc154f052cfc0d370ac1d8 100644 (file)
@@ -123,6 +123,10 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
                                                int size)
 {
        switch (size) {
+       case 1:
+       case 2:
+               return __xchg_small((volatile void *)ptr, val, size);
+
        case 4:
                return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
 
@@ -204,9 +208,13 @@ do {                                                                       \
 #define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
 #define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
 
+#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
 #define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
 #define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
 
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
 #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
 #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
 
index e03443abaf7daab97400a1e5f954d9f124282e9f..8ea57e2f0e04c46fda7a3d0c4ef1c872c754271b 100644 (file)
@@ -59,7 +59,6 @@
 #include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #include <asm/fixmap.h>
-#include <asm/io.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -145,7 +144,7 @@ static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
        *p4d = p4dval;
 }
 
-#define p4d_phys(p4d)          virt_to_phys((void *)p4d_val(p4d))
+#define p4d_phys(p4d)          PHYSADDR(p4d_val(p4d))
 #define p4d_page(p4d)          (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
 
 #endif
@@ -188,7 +187,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 
 #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
 
-#define pud_phys(pud)          virt_to_phys((void *)pud_val(pud))
+#define pud_phys(pud)          PHYSADDR(pud_val(pud))
 #define pud_page(pud)          (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
 
 #endif
@@ -221,7 +220,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
 
-#define pmd_phys(pmd)          virt_to_phys((void *)pmd_val(pmd))
+#define pmd_phys(pmd)          PHYSADDR(pmd_val(pmd))
 
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_page(pmd)          (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
diff --git a/arch/loongarch/include/asm/reboot.h b/arch/loongarch/include/asm/reboot.h
deleted file mode 100644 (file)
index 5115174..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_REBOOT_H
-#define _ASM_REBOOT_H
-
-extern void (*pm_restart)(void);
-
-#endif /* _ASM_REBOOT_H */
index 800c965a17eaa81dc5b5d94064e7df5a802c4b1f..8c82021eb2f447d867560a77b3f5e6c276804bf4 100644 (file)
 #include <acpi/reboot.h>
 #include <asm/idle.h>
 #include <asm/loongarch.h>
-#include <asm/reboot.h>
 
-static void default_halt(void)
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_halt(void)
 {
+#ifdef CONFIG_SMP
+       preempt_disable();
+       smp_send_stop();
+#endif
        local_irq_disable();
        clear_csr_ecfg(ECFG0_IM);
 
@@ -30,18 +36,29 @@ static void default_halt(void)
        }
 }
 
-static void default_poweroff(void)
+void machine_power_off(void)
 {
+#ifdef CONFIG_SMP
+       preempt_disable();
+       smp_send_stop();
+#endif
+       do_kernel_power_off();
 #ifdef CONFIG_EFI
        efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
 #endif
+
        while (true) {
                __arch_cpu_idle();
        }
 }
 
-static void default_restart(void)
+void machine_restart(char *command)
 {
+#ifdef CONFIG_SMP
+       preempt_disable();
+       smp_send_stop();
+#endif
+       do_kernel_restart(command);
 #ifdef CONFIG_EFI
        if (efi_capsule_pending(NULL))
                efi_reboot(REBOOT_WARM, NULL);
@@ -55,47 +72,3 @@ static void default_restart(void)
                __arch_cpu_idle();
        }
 }
-
-void (*pm_restart)(void);
-EXPORT_SYMBOL(pm_restart);
-
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
-
-void machine_halt(void)
-{
-#ifdef CONFIG_SMP
-       preempt_disable();
-       smp_send_stop();
-#endif
-       default_halt();
-}
-
-void machine_power_off(void)
-{
-#ifdef CONFIG_SMP
-       preempt_disable();
-       smp_send_stop();
-#endif
-       pm_power_off();
-}
-
-void machine_restart(char *command)
-{
-#ifdef CONFIG_SMP
-       preempt_disable();
-       smp_send_stop();
-#endif
-       do_kernel_restart(command);
-       pm_restart();
-}
-
-static int __init loongarch_reboot_setup(void)
-{
-       pm_restart = default_restart;
-       pm_power_off = default_poweroff;
-
-       return 0;
-}
-
-arch_initcall(loongarch_reboot_setup);
index 605579b19a002ee0252e7506f126d55314bbd5b2..1ccd53655cab097f02ed09a5c1bd566de38d01ec 100644 (file)
@@ -216,6 +216,10 @@ good_area:
                return;
        }
 
+       /* The fault is fully completed (including releasing mmap lock) */
+       if (fault & VM_FAULT_COMPLETED)
+               return;
+
        if (unlikely(fault & VM_FAULT_RETRY)) {
                flags |= FAULT_FLAG_TRIED;
 
index 52e40f0ba732d715de4d9c12878694ebedcd6f1a..381a569635a9dc435ee4b1fee61fe2a5b4096724 100644 (file)
@@ -2,16 +2,9 @@
 /*
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
-#include <linux/compiler.h>
-#include <linux/elf-randomize.h>
-#include <linux/errno.h>
+#include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/export.h>
-#include <linux/personality.h>
-#include <linux/random.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
 
 unsigned long shm_align_mask = PAGE_SIZE - 1;  /* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
@@ -120,6 +113,6 @@ int __virt_addr_valid(volatile void *kaddr)
        if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
                return 0;
 
-       return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+       return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
 }
 EXPORT_SYMBOL_GPL(__virt_addr_valid);
index 43a0078e441854cf2787a8fcf3fb97f2ed442c9f..e02e775f53608119ccd32969398f8279634ea814 100644 (file)
@@ -24,6 +24,8 @@ static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
        return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE);
 }
 
+extern
+int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
 int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
 {
        int cpu_id;
index b1f4548dae924c455df460a8650397e6babc8481..8f22863bd7ea813bee095b2d377328fecd4af709 100644 (file)
@@ -6,20 +6,23 @@
  */
 #include <linux/types.h>
 
-int __vdso_clock_gettime(clockid_t clock,
-                        struct __kernel_timespec *ts)
+extern
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 {
        return __cvdso_clock_gettime(clock, ts);
 }
 
-int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
-                       struct timezone *tz)
+extern
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 {
        return __cvdso_gettimeofday(tv, tz);
 }
 
-int __vdso_clock_getres(clockid_t clock_id,
-                       struct __kernel_timespec *res)
+extern
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
 {
        return __cvdso_clock_getres(clock_id, res);
 }
index 470aed978590394b63c3337471e2cfc1ef3c81e9..e984af71df6bee14a90af4ad9d21b57c95e6f8ed 100644 (file)
@@ -157,11 +157,8 @@ arch___change_bit(unsigned long nr, volatile unsigned long *addr)
        change_bit(nr, addr);
 }
 
-static __always_inline bool
-arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
-{
-       return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 static inline int bset_reg_test_and_set_bit(int nr,
                                            volatile unsigned long *vaddr)
index 717716cc51c5716470a1ea1560bf12682e35556c..5cedb28e8a4086a9f5808b0170a264ebc502a909 100644 (file)
@@ -84,8 +84,6 @@
 
 
 #define KVM_MAX_VCPUS          16
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS  0
 
 #define KVM_HALT_POLL_NS_DEFAULT 500000
 
index db17e870bdff5f1e6192fa692052824de61379b1..74cd64a24d059af6a13627dc1a884b653f5f2b90 100644 (file)
@@ -615,17 +615,17 @@ retry:
         * Used to check for invalidations in progress, of the pfn that is
         * returned by pfn_to_pfn_prot below.
         */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        /*
-        * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
-        * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+        * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+        * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
-        * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+        * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
         *
         * This smp_rmb() pairs with the effective smp_wmb() of the combination
         * of the pte_unmap_unlock() after the PTE is zapped, and the
         * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
-        * mmu_notifier_seq is incremented.
+        * mmu_invalidate_seq is incremented.
         */
        smp_rmb();
 
@@ -638,7 +638,7 @@ retry:
 
        spin_lock(&kvm->mmu_lock);
        /* Check if an invalidation has taken place since we got pfn */
-       if (mmu_notifier_retry(kvm, mmu_seq)) {
+       if (mmu_invalidate_retry(kvm, mmu_seq)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
index cf37f55efbc228def6770597ca7f3ad643975a3d..bafb7b2ca59fcb32a410cffd1666eb1c5e06d6bb 100644 (file)
@@ -50,7 +50,8 @@
        stw     r13, PT_R13(sp)
        stw     r14, PT_R14(sp)
        stw     r15, PT_R15(sp)
-       stw     r2, PT_ORIG_R2(sp)
+       movi    r24, -1
+       stw     r24, PT_ORIG_R2(sp)
        stw     r7, PT_ORIG_R7(sp)
 
        stw     ra, PT_RA(sp)
index 64246214487288eb5cc352dd625b4e55538dded2..9da34c3022a272890bcfcee05a86b080bbdbb29f 100644 (file)
@@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *);
        ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\
                - 1)
 
+#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1)
+
 int do_syscall_trace_enter(void);
 void do_syscall_trace_exit(void);
 #endif /* __ASSEMBLY__ */
index 0794cd7803dfe0fe68e7ee4105bfa4ba805a329e..99f0a65e62347e24f8b7efb8d482888ef860d4b7 100644 (file)
@@ -185,6 +185,7 @@ ENTRY(handle_system_call)
        ldw     r5, PT_R5(sp)
 
 local_restart:
+       stw     r2, PT_ORIG_R2(sp)
        /* Check that the requested system call is within limits */
        movui   r1, __NR_syscalls
        bgeu    r2, r1, ret_invsyscall
@@ -192,7 +193,6 @@ local_restart:
        movhi   r11, %hiadj(sys_call_table)
        add     r1, r1, r11
        ldw     r1, %lo(sys_call_table)(r1)
-       beq     r1, r0, ret_invsyscall
 
        /* Check if we are being traced */
        GET_THREAD_INFO r11
@@ -213,6 +213,9 @@ local_restart:
 translate_rc_and_ret:
        movi    r1, 0
        bge     r2, zero, 3f
+       ldw     r1, PT_ORIG_R2(sp)
+       addi    r1, r1, 1
+       beq     r1, zero, 3f
        sub     r2, zero, r2
        movi    r1, 1
 3:
@@ -255,9 +258,9 @@ traced_system_call:
        ldw     r6, PT_R6(sp)
        ldw     r7, PT_R7(sp)
 
-       /* Fetch the syscall function, we don't need to check the boundaries
-        * since this is already done.
-        */
+       /* Fetch the syscall function. */
+       movui   r1, __NR_syscalls
+       bgeu    r2, r1, traced_invsyscall
        slli    r1, r2, 2
        movhi   r11,%hiadj(sys_call_table)
        add     r1, r1, r11
@@ -276,6 +279,9 @@ traced_system_call:
 translate_rc_and_ret2:
        movi    r1, 0
        bge     r2, zero, 4f
+       ldw     r1, PT_ORIG_R2(sp)
+       addi    r1, r1, 1
+       beq     r1, zero, 4f
        sub     r2, zero, r2
        movi    r1, 1
 4:
@@ -287,6 +293,11 @@ end_translate_rc_and_ret2:
        RESTORE_SWITCH_STACK
        br      ret_from_exception
 
+       /* If the syscall number was invalid return ENOSYS */
+traced_invsyscall:
+       movi    r2, -ENOSYS
+       br      translate_rc_and_ret2
+
 Luser_return:
        GET_THREAD_INFO r11                     /* get thread_info pointer */
        ldw     r10, TI_FLAGS(r11)              /* get thread_info->flags */
@@ -336,9 +347,6 @@ external_interrupt:
        /* skip if no interrupt is pending */
        beq     r12, r0, ret_from_interrupt
 
-       movi    r24, -1
-       stw     r24, PT_ORIG_R2(sp)
-
        /*
         * Process an external hardware interrupt.
         */
index cb0b91589cf202f3d81243ffa7be7bd8bc0a2d26..a5b93a30c6eb21142037d7db38ef9cd7fc608c9d 100644 (file)
@@ -242,7 +242,7 @@ static int do_signal(struct pt_regs *regs)
        /*
         * If we were from a system call, check for system call restarting...
         */
-       if (regs->orig_r2 >= 0) {
+       if (regs->orig_r2 >= 0 && regs->r1) {
                continue_addr = regs->ea;
                restart_addr = continue_addr - 4;
                retval = regs->r2;
@@ -264,6 +264,7 @@ static int do_signal(struct pt_regs *regs)
                        regs->ea = restart_addr;
                        break;
                }
+               regs->orig_r2 = -1;
        }
 
        if (get_signal(&ksig)) {
index 6176d63023c1dca24c992e341c6cd88689f9e118..c2875a6dd5a4a2bf3f76d0b2f8f58e9590c59234 100644 (file)
@@ -13,5 +13,6 @@
 #define __SYSCALL(nr, call) [nr] = (call),
 
 void *sys_call_table[__NR_syscalls] = {
+       [0 ... __NR_syscalls-1] = sys_ni_syscall,
 #include <asm/unistd.h>
 };
index 7f059cd1196a15e99dbe31a577697735113f0c97..9aede2447011bc13f2fa11081d4ddb350784f285 100644 (file)
@@ -146,10 +146,10 @@ menu "Processor type and features"
 
 choice
        prompt "Processor type"
-       default PA7000
+       default PA7000 if "$(ARCH)" = "parisc"
 
 config PA7000
-       bool "PA7000/PA7100"
+       bool "PA7000/PA7100" if "$(ARCH)" = "parisc"
        help
          This is the processor type of your CPU.  This information is
          used for optimizing purposes.  In order to compile a kernel
@@ -160,21 +160,21 @@ config PA7000
          which is required on some machines.
 
 config PA7100LC
-       bool "PA7100LC"
+       bool "PA7100LC" if "$(ARCH)" = "parisc"
        help
          Select this option for the PCX-L processor, as used in the
          712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
          D200, D210, D300, D310 and E-class
 
 config PA7200
-       bool "PA7200"
+       bool "PA7200" if "$(ARCH)" = "parisc"
        help
          Select this option for the PCX-T' processor, as used in the
          C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
          K100, K200, K210, K220, K400, K410 and K420
 
 config PA7300LC
-       bool "PA7300LC"
+       bool "PA7300LC" if "$(ARCH)" = "parisc"
        help
          Select this option for the PCX-L2 processor, as used in the
          744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
@@ -224,17 +224,8 @@ config MLONGCALLS
          Enabling this option will probably slow down your kernel.
 
 config 64BIT
-       bool "64-bit kernel"
+       def_bool "$(ARCH)" = "parisc64"
        depends on PA8X00
-       help
-         Enable this if you want to support 64bit kernel on PA-RISC platform.
-
-         At the moment, only people willing to use more than 2GB of RAM,
-         or having a 64bit-only capable PA-RISC machine should say Y here.
-
-         Since there is no 64bit userland on PA-RISC, there is no point to
-         enable this option otherwise. The 64bit kernel is significantly bigger
-         and slower than the 32bit one.
 
 choice
        prompt "Kernel page size"
index 56ffd260c669b7d9ce50838edb07acb1de004600..0ec9cfc5131fc3d23b9694f1c8814d0e2d415630 100644 (file)
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 
-/* compiler build environment sanity checks: */
-#if !defined(CONFIG_64BIT) && defined(__LP64__)
-#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
-#endif
-#if defined(CONFIG_64BIT) && !defined(__LP64__)
-#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
-#endif
-
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  *     *_bit() want use of volatile.
index e0a9e96576221a614522acc63e45f13e5248462e..fd15fd4bbb61b8f4cd7567b20a7b6d29f36b7864 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/init.h>
 #include <linux/pgtable.h>
 
-       .level  PA_ASM_LEVEL
+       .level  1.1
 
        __INITDATA
 ENTRY(boot_args)
@@ -70,6 +70,47 @@ $bss_loop:
        stw,ma          %arg2,4(%r1)
        stw,ma          %arg3,4(%r1)
 
+#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
+       /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
+        * and halt kernel if we detect a PA1.x CPU. */
+       ldi             32,%r10
+       mtctl           %r10,%cr11
+       .level 2.0
+       mfctl,w         %cr11,%r10
+       .level 1.1
+       comib,<>,n      0,%r10,$cpu_ok
+
+       load32          PA(msg1),%arg0
+       ldi             msg1_end-msg1,%arg1
+$iodc_panic:
+       copy            %arg0, %r10
+       copy            %arg1, %r11
+       load32          PA(init_stack),%sp
+#define MEM_CONS 0x3A0
+       ldw             MEM_CONS+32(%r0),%arg0  // HPA
+       ldi             ENTRY_IO_COUT,%arg1
+       ldw             MEM_CONS+36(%r0),%arg2  // SPA
+       ldw             MEM_CONS+8(%r0),%arg3   // layers
+       load32          PA(__bss_start),%r1
+       stw             %r1,-52(%sp)            // arg4
+       stw             %r0,-56(%sp)            // arg5
+       stw             %r10,-60(%sp)           // arg6 = ptr to text
+       stw             %r11,-64(%sp)           // arg7 = len
+       stw             %r0,-68(%sp)            // arg8
+       load32          PA(.iodc_panic_ret), %rp
+       ldw             MEM_CONS+40(%r0),%r1    // ENTRY_IODC
+       bv,n            (%r1)
+.iodc_panic_ret:
+       b .                             /* wait endless with ... */
+       or              %r10,%r10,%r10  /* qemu idle sleep */
+msg1:  .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
+msg1_end:
+
+$cpu_ok:
+#endif
+
+       .level  PA_ASM_LEVEL
+
        /* Initialize startup VM. Just map first 16/32 MB of memory */
        load32          PA(swapper_pg_dir),%r4
        mtctl           %r4,%cr24       /* Initialize kernel root pointer */
index bac581b5ecfc5f85e3cb9a3c63efaa020690756b..e8a4d77cff53a77ee20d0e0319fa360d208cdd6d 100644 (file)
@@ -93,7 +93,7 @@
 #define R1(i) (((i)>>21)&0x1f)
 #define R2(i) (((i)>>16)&0x1f)
 #define R3(i) ((i)&0x1f)
-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
+#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
 #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
 #define IM5_2(i) IM((i)>>16,5)
 #define IM5_3(i) IM((i),5)
index 4def2bd17b9b865fc410866876fb5e43860afb1f..d49065af08e955900187a5c0876b52f86c81cd44 100644 (file)
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                "%s called with kvm mmu_lock not held \n", __func__);
 
-       if (mmu_notifier_retry(kvm, mmu_seq))
+       if (mmu_invalidate_retry(kvm, mmu_seq))
                return NULL;
 
        pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
index bdd3332200c55fa697a90083b457b118ca6ea16c..31de91c8359c11ff6b44952e568c5f78eef82be4 100644 (file)
@@ -68,10 +68,6 @@ void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
        pci_dma_ops = dma_ops;
 }
 
-/*
- * This function should run under locking protection, specifically
- * hose_spinlock.
- */
 static int get_phb_number(struct device_node *dn)
 {
        int ret, phb_id = -1;
@@ -108,15 +104,20 @@ static int get_phb_number(struct device_node *dn)
        if (!ret)
                phb_id = (int)(prop & (MAX_PHBS - 1));
 
+       spin_lock(&hose_spinlock);
+
        /* We need to be sure to not use the same PHB number twice. */
        if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
-               return phb_id;
+               goto out_unlock;
 
        /* If everything fails then fallback to dynamic PHB numbering. */
        phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
        BUG_ON(phb_id >= MAX_PHBS);
        set_bit(phb_id, phb_bitmap);
 
+out_unlock:
+       spin_unlock(&hose_spinlock);
+
        return phb_id;
 }
 
@@ -127,10 +128,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
        phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
        if (phb == NULL)
                return NULL;
-       spin_lock(&hose_spinlock);
+
        phb->global_number = get_phb_number(dev);
+
+       spin_lock(&hose_spinlock);
        list_add_tail(&phb->list_node, &hose_list);
        spin_unlock(&hose_spinlock);
+
        phb->dn = dev;
        phb->is_dynamic = slab_is_available();
 #ifdef CONFIG_PPC64
index 1ae09992c9ea3a8d6327069390048e39d3183815..bc6a381b53463795dbd7723a42bb13a6420566e4 100644 (file)
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
        unsigned long pfn;
 
        /* used to check for invalidations in progress */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        /* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
        cpte = kvmppc_mmu_hpte_cache_next(vcpu);
 
        spin_lock(&kvm->mmu_lock);
-       if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+       if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
                r = -EAGAIN;
                goto out_unlock;
        }
index 514fd45c199478cde822f3e278ddb329090691a7..e9744b41a226ca9a13b89f524d24f057dbfea9d0 100644 (file)
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
                return -EFAULT;
 
        /* used to check for invalidations in progress */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 
        /* Check if we might have been invalidated; let the guest retry if so */
        ret = RESUME_GUEST;
-       if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+       if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
                unlock_rmap(rmap);
                goto out_unlock;
        }
index 9d4b3feda3b6ce2a946eb62c3b51f48d592cbee7..5d5e12f3bf864a89ad9985788a4bb26b78a87745 100644 (file)
@@ -640,7 +640,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
        /* Check if we might have been invalidated; let the guest retry if so */
        spin_lock(&kvm->mmu_lock);
        ret = -EAGAIN;
-       if (mmu_notifier_retry(kvm, mmu_seq))
+       if (mmu_invalidate_retry(kvm, mmu_seq))
                goto out_unlock;
 
        /* Now traverse again under the lock and change the tree */
@@ -830,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
        bool large_enable;
 
        /* used to check for invalidations in progress */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        /*
@@ -1191,7 +1191,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
         * Increase the mmu notifier sequence number to prevent any page
         * fault that read the memslot earlier from writing a PTE.
         */
-       kvm->mmu_notifier_seq++;
+       kvm->mmu_invalidate_seq++;
        spin_unlock(&kvm->mmu_lock);
 }
 
index be8249cc61078179d7647cff94a7d63c219db091..5a64a1341e6f1de1e7505b4b54468697357faf19 100644 (file)
@@ -1580,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
        /* 2. Find the host pte for this L1 guest real address */
 
        /* Used to check for invalidations in progress */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        /* See if can find translation in our partition scoped tables for L1 */
index 2257fb18cb72e5840d0634db7f41747b2197ce70..5a05953ae13fe27e17dacc151b9c4b8e071c89fd 100644 (file)
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
        g_ptel = ptel;
 
        /* used later to detect if we might have been invalidated */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        /* Find the memslot (if any) for this address */
@@ -366,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
-               if (mmu_notifier_retry(kvm, mmu_seq)) {
+               if (mmu_invalidate_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
@@ -932,7 +932,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
        int i;
 
        /* Used later to detect if we might have been invalidated */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
        long ret = H_SUCCESS;
 
        /* Used later to detect if we might have been invalidated */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
index 7f16afc331efdb5d01f3883ae76e05209d1fc15a..05668e96414066d49ed259dc9c2413e366403218 100644 (file)
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        unsigned long flags;
 
        /* used to check for invalidations in progress */
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();
 
        /*
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        }
 
        spin_lock(&kvm->mmu_lock);
-       if (mmu_notifier_retry(kvm, mmu_seq)) {
+       if (mmu_invalidate_retry(kvm, mmu_seq)) {
                ret = -EAGAIN;
                goto out;
        }
index 044982a11df5045b2eb7abd13d74dec8ebb69523..f3f87ed2007f3cf4b59ccc8b1bd411ab74afa409 100644 (file)
 
        phy1: ethernet-phy@9 {
                reg = <9>;
-               ti,fifo-depth = <0x1>;
        };
 
        phy0: ethernet-phy@8 {
                reg = <8>;
-               ti,fifo-depth = <0x1>;
        };
 };
 
        disable-wp;
        cap-sd-highspeed;
        cap-mmc-highspeed;
-       card-detect-delay = <200>;
        mmc-ddr-1_8v;
        mmc-hs200-1_8v;
        sd-uhs-sdr12;
index 82c93c8f5c17e6c39cdb532296ba1bcb1d2546ed..c87cc2d8fe29fa2174bbedca08c272c7331283b8 100644 (file)
 
        phy1: ethernet-phy@5 {
                reg = <5>;
-               ti,fifo-depth = <0x01>;
        };
 
        phy0: ethernet-phy@4 {
                reg = <4>;
-               ti,fifo-depth = <0x01>;
        };
 };
 
@@ -72,7 +70,6 @@
        disable-wp;
        cap-sd-highspeed;
        cap-mmc-highspeed;
-       card-detect-delay = <200>;
        mmc-ddr-1_8v;
        mmc-hs200-1_8v;
        sd-uhs-sdr12;
index 499c2e63ad35e160f0adaf3b6f319496622ab4b8..74493344ea41b440881ee6cadf9a39a9dbaa7dbc 100644 (file)
                        cache-size = <2097152>;
                        cache-unified;
                        interrupt-parent = <&plic>;
-                       interrupts = <1>, <2>, <3>;
+                       interrupts = <1>, <3>, <4>, <2>;
                };
 
                clint: clint@2000000 {
                        ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
                        msi-parent = <&pcie>;
                        msi-controller;
-                       microchip,axi-m-atr0 = <0x10 0x0>;
                        status = "disabled";
-                       pcie_intc: legacy-interrupt-controller {
+                       pcie_intc: interrupt-controller {
                                #address-cells = <0>;
                                #interrupt-cells = <1>;
                                interrupt-controller;
diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
new file mode 100644 (file)
index 0000000..532c29e
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL_H
+#define __ASM_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+asmlinkage __visible
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
+
+#endif
index 78933ac04995b2d22f545bd8d7853743440c5e49..67322f878e0d7828178a51fb0cff047270d59a2a 100644 (file)
@@ -42,6 +42,8 @@
 
 #ifndef __ASSEMBLY__
 
+extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+
 #include <asm/processor.h>
 #include <asm/csr.h>
 
index 553d755483ed6d2c86051e5e4decb4a21d030874..3b5583db9d80ee4a175a5290ccb77ceea33c97c4 100644 (file)
@@ -28,7 +28,7 @@ unsigned long elf_hwcap __read_mostly;
 /* Host ISA bitmap */
 static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
 
-__ro_after_init DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
+DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
 EXPORT_SYMBOL(riscv_isa_ext_keys);
 
 /**
index 38b05ca6fe66983426e7bfba231c0775ae8c81d4..5a2de6b6f882239c37d471b40cfeb683529c5554 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/ucontext.h>
 #include <asm/vdso.h>
+#include <asm/signal.h>
 #include <asm/signal32.h>
 #include <asm/switch_to.h>
 #include <asm/csr.h>
index 39d0f8bba4b40e3e22ac8125c628f50d919bd346..635e6ec269380214b86f766897ae05ee18090ecb 100644 (file)
 
 #include <asm/asm-prototypes.h>
 #include <asm/bug.h>
+#include <asm/csr.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
-#include <asm/csr.h>
+#include <asm/thread_info.h>
 
 int show_unhandled_signals = 1;
 
index 3a35b2d95697c469ce74db840fb634194bef6307..3620ecac2fa146f321921c5674acf9f514e3c32a 100644 (file)
@@ -666,7 +666,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                return ret;
        }
 
-       mmu_seq = kvm->mmu_notifier_seq;
+       mmu_seq = kvm->mmu_invalidate_seq;
 
        hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
        if (hfn == KVM_PFN_ERR_HWPOISON) {
@@ -686,7 +686,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 
        spin_lock(&kvm->mmu_lock);
 
-       if (mmu_notifier_retry(kvm, mmu_seq))
+       if (mmu_invalidate_retry(kvm, mmu_seq))
                goto out_unlock;
 
        if (writable) {
index f0bc4dc3e9bf0c486500840a36bdb22def45a462..6511d15ace45e4b4eae5629ab6b36ed8bbabd6c7 100644 (file)
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
        int rc;
 
        if (diag204_probe()) {
-               pr_err("The hardware system does not support hypfs\n");
+               pr_info("The hardware system does not support hypfs\n");
                return -ENODATA;
        }
 
index 5c97f48cea91d3a40dcecdc74ac821770d7cc8de..ee919bfc818678973ca55b49b67681dfdd0c549d 100644 (file)
@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
        hypfs_vm_exit();
 fail_hypfs_diag_exit:
        hypfs_diag_exit();
+       pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 fail_dbfs_exit:
        hypfs_dbfs_exit();
-       pr_err("Initialization of hypfs failed with rc=%i\n", rc);
        return rc;
 }
 device_initcall(hypfs_init)
index 9a7d15da966e34db8b485cec09f02907542f2a46..2de74fcd0578f42c25d178f4f3ef1dbbaa30158c 100644 (file)
@@ -176,14 +176,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
        return old & mask;
 }
 
-static __always_inline bool
-arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
-{
-       const volatile unsigned long *p = __bitops_word(nr, addr);
-       unsigned long mask = __bitops_mask(nr);
-
-       return *p & mask;
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 static inline bool arch_test_and_set_bit_lock(unsigned long nr,
                                              volatile unsigned long *ptr)
index 89949b9f3cf88eb12af09377eda67030f9a5f126..d5119e039d8551194e19fd634267780c4b7acb94 100644 (file)
@@ -91,6 +91,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
        memcpy(dst, src, arch_task_struct_size);
        dst->thread.fpu.regs = dst->thread.fpu.fprs;
+
+       /*
+        * Don't transfer over the runtime instrumentation or the guarded
+        * storage control block pointers. These fields are cleared here instead
+        * of in copy_thread() to avoid premature freeing of associated memory
+        * on fork() failure. Wait to clear the RI flag because ->stack still
+        * refers to the source thread.
+        */
+       dst->thread.ri_cb = NULL;
+       dst->thread.gs_cb = NULL;
+       dst->thread.gs_bc_cb = NULL;
+
        return 0;
 }
 
@@ -150,13 +162,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        frame->childregs.flags = 0;
        if (new_stackp)
                frame->childregs.gprs[15] = new_stackp;
-
-       /* Don't copy runtime instrumentation info */
-       p->thread.ri_cb = NULL;
+       /*
+        * Clear the runtime instrumentation flag after the above childregs
+        * copy. The CB pointer was already cleared in arch_dup_task_struct().
+        */
        frame->childregs.psw.mask &= ~PSW_MASK_RI;
-       /* Don't copy guarded storage control block */
-       p->thread.gs_cb = NULL;
-       p->thread.gs_bc_cb = NULL;
 
        /* Set a new TLS ?  */
        if (clone_flags & CLONE_SETTLS) {
index 13449941516c2502bfd0974482abefc590238324..09b6e756d521d5b70b234ca1054419f7a40ff490 100644 (file)
@@ -379,7 +379,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
-       if (access == VM_WRITE || is_write)
+       if (is_write)
+               access = VM_WRITE;
+       if (access == VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
        mmap_read_lock(mm);
 
index 565a85d8b7fb014275ce981de1df7fe0bbf95da8..5ace89b4650791d2bfb0d3c3567219a52c6b07fc 100644 (file)
@@ -135,16 +135,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
        return (old & mask) != 0;
 }
 
-/**
- * arch_test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static __always_inline bool
-arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
-{
-       return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 #include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
index 79e38afd4b91efced1baeb0b9c16f0693d26474a..e719af8bdf56d3e9b6f98e0cf685f1ef0a78715d 100644 (file)
@@ -1011,7 +1011,7 @@ error_kzalloc:
 
 static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                       struct virtqueue *vqs[], vq_callback_t *callbacks[],
-                      const char * const names[], u32 sizes[], const bool *ctx,
+                      const char * const names[], const bool *ctx,
                       struct irq_affinity *desc)
 {
        struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
index 19cd7ed6ec3cd8804aabcef5ea6dc3bb30726215..4b6d1b526bc1217e2e89d4670f9c4385e68dacc7 100644 (file)
@@ -65,20 +65,6 @@ extern void setup_clear_cpu_cap(unsigned int bit);
 
 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
 
-#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
-
-/*
- * Workaround for the sake of BPF compilation which utilizes kernel
- * headers, but clang does not support ASM GOTO and fails the build.
- */
-#ifndef __BPF_TRACING__
-#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
-#endif
-
-#define static_cpu_has(bit)            boot_cpu_has(bit)
-
-#else
-
 /*
  * Static testing of CPU features. Used the same as boot_cpu_has(). It
  * statically patches the target code for additional performance. Use
@@ -137,7 +123,6 @@ t_no:
                boot_cpu_has(bit) :                             \
                _static_cpu_has(bit)                            \
 )
-#endif
 
 #define cpu_has_bug(c, bit)            cpu_has(c, (bit))
 #define set_cpu_bug(c, bit)            set_cpu_cap(c, (bit))
index 7854685c5f25b7926a6a722af83c134bd6ec6cd5..bafbd905e6e7c4857064156d0d70a8275b9dd19d 100644 (file)
@@ -286,10 +286,6 @@ vdso_install:
 
 archprepare: checkbin
 checkbin:
-ifndef CONFIG_CC_HAS_ASM_GOTO
-       @echo Compiler lacks asm-goto support.
-       @exit 1
-endif
 ifdef CONFIG_RETPOLINE
 ifeq ($(RETPOLINE_CFLAGS),)
        @echo "You are building kernel with non-retpoline compiler." >&2
index 4910bf230d7b4afbb44f83d958e0b1902fe26458..62208ec04ca4b800e7cf3eb8949f6cf6a228fee2 100644 (file)
@@ -132,7 +132,17 @@ void snp_set_page_private(unsigned long paddr);
 void snp_set_page_shared(unsigned long paddr);
 void sev_prep_identity_maps(unsigned long top_level_pgt);
 #else
-static inline void sev_enable(struct boot_params *bp) { }
+static inline void sev_enable(struct boot_params *bp)
+{
+       /*
+        * bp->cc_blob_address should only be set by boot/compressed kernel.
+        * Initialize it to 0 unconditionally (thus here in this stub too) to
+        * ensure that uninitialized values from buggy bootloaders aren't
+        * propagated.
+        */
+       if (bp)
+               bp->cc_blob_address = 0;
+}
 static inline void sev_es_shutdown_ghcb(void) { }
 static inline bool sev_es_check_ghcb_fault(unsigned long address)
 {
index 52f989f6acc281f95815bc76e0976348b5b8f635..c93930d5ccbd0f1b4894012e09c885e20ee1ba0a 100644 (file)
@@ -276,6 +276,14 @@ void sev_enable(struct boot_params *bp)
        struct msr m;
        bool snp;
 
+       /*
+        * bp->cc_blob_address should only be set by boot/compressed kernel.
+        * Initialize it to 0 to ensure that uninitialized values from
+        * buggy bootloaders aren't propagated.
+        */
+       if (bp)
+               bp->cc_blob_address = 0;
+
        /*
         * Setup/preliminary detection of SNP. This will be sanity-checked
         * against CPUID/MSR values later.
index d9fc7139fd46edc708565fa27135ee6f11ca56a3..581296255b39e403246d4ad076786d10cb86dc60 100644 (file)
@@ -14,7 +14,6 @@ CONFIG_CPU_FREQ=y
 
 # x86 xen specific config options
 CONFIG_XEN_PVH=y
-CONFIG_XEN_MAX_DOMAIN_MEMORY=500
 CONFIG_XEN_SAVE_RESTORE=y
 # CONFIG_XEN_DEBUG_FS is not set
 CONFIG_XEN_MCE_LOG=y
index 682338e7e2a38dbedcdef57ac6cbb6078e8abe75..4dd19819053a5848d1ee0ff3ab3c44c0a09c283e 100644 (file)
@@ -311,7 +311,7 @@ SYM_CODE_START(entry_INT80_compat)
         * Interrupts are off on entry.
         */
        ASM_CLAC                        /* Do this early to minimize exposure */
-       SWAPGS
+       ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
 
        /*
         * User tracing code (ptrace or signal handlers) might assume that
index 2db93498ff7119518f8731b1a6efc88d95d57b20..cb98a05ee74379f8fcc1c075cd10a4f3f685ba41 100644 (file)
@@ -6291,10 +6291,8 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_aliases = NULL;
                x86_pmu.pebs_prec_dist = true;
                x86_pmu.pebs_block = true;
-               x86_pmu.pebs_capable = ~0ULL;
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-               x86_pmu.flags |= PMU_FL_PEBS_ALL;
                x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
                x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
 
@@ -6337,10 +6335,8 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_aliases = NULL;
                x86_pmu.pebs_prec_dist = true;
                x86_pmu.pebs_block = true;
-               x86_pmu.pebs_capable = ~0ULL;
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-               x86_pmu.flags |= PMU_FL_PEBS_ALL;
                x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
                x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
                x86_pmu.lbr_pt_coexist = true;
index ba60427caa6d36553672509c22a292ea7e2b7d1e..de1f55d517847b04e039eacbe9ff1765b563a1f6 100644 (file)
@@ -291,6 +291,7 @@ static u64 load_latency_data(struct perf_event *event, u64 status)
 static u64 store_latency_data(struct perf_event *event, u64 status)
 {
        union intel_x86_pebs_dse dse;
+       union perf_mem_data_src src;
        u64 val;
 
        dse.val = status;
@@ -304,7 +305,14 @@ static u64 store_latency_data(struct perf_event *event, u64 status)
 
        val |= P(BLK, NA);
 
-       return val;
+       /*
+        * the pebs_data_source table is only for loads
+        * so override the mem_op to say STORE instead
+        */
+       src.val = val;
+       src.mem_op = P(OP,STORE);
+
+       return src.val;
 }
 
 struct pebs_record_core {
@@ -822,7 +830,7 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
 
 struct event_constraint intel_grt_pebs_event_constraints[] = {
        /* Allow all events as PEBS with no flags */
-       INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0xf),
+       INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
        INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
        EVENT_CONSTRAINT_END
 };
@@ -2262,6 +2270,7 @@ void __init intel_ds_init(void)
                                        PERF_SAMPLE_BRANCH_STACK |
                                        PERF_SAMPLE_TIME;
                                x86_pmu.flags |= PMU_FL_PEBS_ALL;
+                               x86_pmu.pebs_capable = ~0ULL;
                                pebs_qual = "-baseline";
                                x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
                        } else {
index 4f70fb6c2c1eb78e35f717ffb3085ee9f459e145..47fca6a7a8bcdd571446c63e64b5f67e10b4f12c 100644 (file)
@@ -1097,6 +1097,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
 
        if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
                reg->config = mask;
+
+               /*
+                * The Arch LBR HW can retrieve the common branch types
+                * from the LBR_INFO. It doesn't require the high overhead
+                * SW disassemble.
+                * Enable the branch type by default for the Arch LBR.
+                */
+               reg->reg |= X86_BR_TYPE_SAVE;
                return 0;
        }
 
index ce440011cc4e411892ae6cd4009b4acba86df1f5..1ef4f7861e2ecb323bad131846cf9df92fc291f0 100644 (file)
@@ -841,6 +841,22 @@ int snb_pci2phy_map_init(int devid)
        return 0;
 }
 
+static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       /*
+        * SNB IMC counters are 32-bit and are laid out back to back
+        * in MMIO space. Therefore we must use a 32-bit accessor function
+        * using readq() from uncore_mmio_read_counter() causes problems
+        * because it is reading 64-bit at a time. This is okay for the
+        * uncore_perf_event_update() function because it drops the upper
+        * 32-bits but not okay for plain uncore_read_counter() as invoked
+        * in uncore_pmu_event_start().
+        */
+       return (u64)readl(box->io_addr + hwc->event_base);
+}
+
 static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
@@ -860,7 +876,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
        .disable_event  = snb_uncore_imc_disable_event,
        .enable_event   = snb_uncore_imc_enable_event,
        .hw_config      = snb_uncore_imc_hw_config,
-       .read_counter   = uncore_mmio_read_counter,
+       .read_counter   = snb_uncore_imc_read_counter,
 };
 
 static struct intel_uncore_type snb_uncore_imc = {
index 973c6bd17f98e31c65b38693c48f54f701ac32a9..0fe9de58af313153b962af9bfde0d77cae44d26e 100644 (file)
@@ -207,6 +207,20 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+       bool oldbit;
+
+       asm volatile("testb %2,%1"
+                    CC_SET(nz)
+                    : CC_OUT(nz) (oldbit)
+                    : "m" (((unsigned char *)addr)[nr >> 3]),
+                      "i" (1 << (nr & 7))
+                    :"memory");
+
+       return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
        bool oldbit;
@@ -226,6 +240,13 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
                                          variable_test_bit(nr, addr);
 }
 
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+       return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+                                         variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
index ea34cc31b0474f52a98e03d17763210f4a7b856b..1a85e1fb09226653bbf78fb82e3dbb4bfe2a7eb3 100644 (file)
@@ -155,20 +155,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
 
 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
 
-#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
-
-/*
- * Workaround for the sake of BPF compilation which utilizes kernel
- * headers, but clang does not support ASM GOTO and fails the build.
- */
-#ifndef __BPF_TRACING__
-#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
-#endif
-
-#define static_cpu_has(bit)            boot_cpu_has(bit)
-
-#else
-
 /*
  * Static testing of CPU features. Used the same as boot_cpu_has(). It
  * statically patches the target code for additional performance. Use
@@ -208,7 +194,6 @@ t_no:
                boot_cpu_has(bit) :                             \
                _static_cpu_has(bit)                            \
 )
-#endif
 
 #define cpu_has_bug(c, bit)            cpu_has(c, (bit))
 #define set_cpu_bug(c, bit)            set_cpu_cap(c, (bit))
index 235dc85c91c3e372980b8b428e9713869115ead4..ef4775c6db01c128ab112a3d741c1b38ef7cdf64 100644 (file)
 #define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA                X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_RETBLEED               X86_BUG(26) /* CPU is affected by RETBleed */
-#define X86_BUG_EIBRS_PBRSB            X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_MMIO_UNKNOWN           X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED               X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB            X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 50362262740042198ef779355a62b040539c295c..991e31cfde94cc16644bb5017c03f63d91c50c72 100644 (file)
@@ -64,4 +64,6 @@
 #define        EX_TYPE_UCOPY_LEN4              (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(4))
 #define        EX_TYPE_UCOPY_LEN8              (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(8))
 
+#define EX_TYPE_ZEROPAD                        20 /* longword load with zeropad on fault */
+
 #endif
index 689880eca9bab4eb79d2b414047ca501ce16458f..9b08082a5d9f564bf3a765e9e52b2b4db9866de5 100644 (file)
 
 #define __noendbr      __attribute__((nocf_check))
 
+/*
+ * Create a dummy function pointer reference to prevent objtool from marking
+ * the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
+ * apply_ibt_endbr()).
+ */
+#define IBT_NOSEAL(fname)                              \
+       ".pushsection .discard.ibt_endbr_noseal\n\t"    \
+       _ASM_PTR fname "\n\t"                           \
+       ".popsection\n\t"
+
 static inline __attribute_const__ u32 gen_endbr(void)
 {
        u32 endbr;
@@ -84,6 +94,7 @@ extern __noendbr void ibt_restore(u64 save);
 #ifndef __ASSEMBLY__
 
 #define ASM_ENDBR
+#define IBT_NOSEAL(name)
 
 #define __noendbr
 
index def6ca121111ce2873749ea418c43169caadb412..aeb38023a703915c9c94b99c244a2617b930e9a8 100644 (file)
@@ -27,6 +27,7 @@
  *             _X      - regular server parts
  *             _D      - micro server parts
  *             _N,_P   - other mobile parts
+ *             _S      - other client parts
  *
  *             Historical OPTDIFFs:
  *
 
 #define INTEL_FAM6_RAPTORLAKE          0xB7
 #define INTEL_FAM6_RAPTORLAKE_P                0xBA
+#define INTEL_FAM6_RAPTORLAKE_S                0xBF
 
 /* "Small Core" Processors (Atom) */
 
index 5ffa578cafe1c51b031386db2b0a940bed2e56fd..2c96c43c313a260659f82a822afe2a7f9c8a78ce 100644 (file)
@@ -53,7 +53,7 @@
 #define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
 
 /* memory slots that are not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 3
+#define KVM_INTERNAL_MEM_SLOTS 3
 
 #define KVM_HALT_POLL_NS_DEFAULT 200000
 
index e64fd20778b61a23b59c8d04eba6cd5efc8cde0f..c936ce9f0c47c896efc83fc1f9370db429be142b 100644 (file)
 #define RSB_CLEAR_LOOPS                32      /* To forcibly overwrite all entries */
 
 /*
+ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
+ */
+#define __FILL_RETURN_SLOT                     \
+       ANNOTATE_INTRA_FUNCTION_CALL;           \
+       call    772f;                           \
+       int3;                                   \
+772:
+
+/*
+ * Stuff the entire RSB.
+ *
  * Google experimented with loop-unrolling and this turned out to be
  * the optimal version - two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
-#define __FILL_RETURN_BUFFER(reg, nr, sp)      \
-       mov     $(nr/2), reg;                   \
-771:                                           \
-       ANNOTATE_INTRA_FUNCTION_CALL;           \
-       call    772f;                           \
-773:   /* speculation trap */                  \
-       UNWIND_HINT_EMPTY;                      \
-       pause;                                  \
-       lfence;                                 \
-       jmp     773b;                           \
-772:                                           \
-       ANNOTATE_INTRA_FUNCTION_CALL;           \
-       call    774f;                           \
-775:   /* speculation trap */                  \
-       UNWIND_HINT_EMPTY;                      \
-       pause;                                  \
-       lfence;                                 \
-       jmp     775b;                           \
-774:                                           \
-       add     $(BITS_PER_LONG/8) * 2, sp;     \
-       dec     reg;                            \
-       jnz     771b;                           \
-       /* barrier for jnz misprediction */     \
+#ifdef CONFIG_X86_64
+#define __FILL_RETURN_BUFFER(reg, nr)                  \
+       mov     $(nr/2), reg;                           \
+771:                                                   \
+       __FILL_RETURN_SLOT                              \
+       __FILL_RETURN_SLOT                              \
+       add     $(BITS_PER_LONG/8) * 2, %_ASM_SP;       \
+       dec     reg;                                    \
+       jnz     771b;                                   \
+       /* barrier for jnz misprediction */             \
+       lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, as such it can't
+ * do a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr)                  \
+       .rept nr;                                       \
+       __FILL_RETURN_SLOT;                             \
+       .endr;                                          \
+       add     $(BITS_PER_LONG/8) * nr, %_ASM_SP;
+#endif
+
+/*
+ * Stuff a single RSB slot.
+ *
+ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
+ * forced to retire before letting a RET instruction execute.
+ *
+ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
+ * before this point.
+ */
+#define __FILL_ONE_RETURN                              \
+       __FILL_RETURN_SLOT                              \
+       add     $(BITS_PER_LONG/8), %_ASM_SP;           \
        lfence;
 
 #ifdef __ASSEMBLY__
 #endif
 .endm
 
-.macro ISSUE_UNBALANCED_RET_GUARD
-       ANNOTATE_INTRA_FUNCTION_CALL
-       call .Lunbalanced_ret_guard_\@
-       int3
-.Lunbalanced_ret_guard_\@:
-       add $(BITS_PER_LONG/8), %_ASM_SP
-       lfence
-.endm
-
  /*
   * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
   * monstrosity above, manually.
   */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
-.ifb \ftr2
-       ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
-.else
-       ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
-.endif
-       __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
-.Lunbalanced_\@:
-       ISSUE_UNBALANCED_RET_GUARD
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
+       ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
+               __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
+               __stringify(__FILL_ONE_RETURN), \ftr2
+
 .Lskip_rsb_\@:
 .endm
 
index 8a9eba1915169b99a8b9b679110ca961a7b96fb2..7fa6112164172d3fff1a8b55dc6944f575d1b567 100644 (file)
@@ -11,7 +11,7 @@
 
 #define __CLOBBERS_MEM(clb...) "memory", ## clb
 
-#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
+#ifndef __GCC_ASM_FLAG_OUTPUTS__
 
 /* Use asm goto */
 
@@ -27,7 +27,7 @@ cc_label:     c = true;                                               \
        c;                                                              \
 })
 
-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */
 
 /* Use flags output or a set instruction */
 
@@ -40,7 +40,7 @@ cc_label:     c = true;                                               \
        c;                                                              \
 })
 
-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */
 
 #define GEN_UNARY_RMWcc_4(op, var, cc, arg0)                           \
        __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
index 4a23e52fe0ee1632eb64fb1ed6ab183c5b7bb81f..ebc271bb6d8ed1d74e07194604b25244646ffc0a 100644 (file)
@@ -195,7 +195,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
 void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
 void snp_set_wakeup_secondary_cpu(void);
 bool snp_init(struct boot_params *bp);
-void snp_abort(void);
+void __init __noreturn snp_abort(void);
 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
index 8338b0432b50e3c2d6bebfbb1ea9bbec1027ff69..46b4f1f7f3545285bcad688348941d3bf0c6589e 100644 (file)
@@ -77,58 +77,18 @@ static inline unsigned long find_zero(unsigned long mask)
  * and the next page not being mapped, take the exception and
  * return zeroes in the non-existing part.
  */
-#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-       unsigned long offset, data;
        unsigned long ret;
 
-       asm_volatile_goto(
+       asm volatile(
                "1:     mov %[mem], %[ret]\n"
-
-               _ASM_EXTABLE(1b, %l[do_exception])
-
-               : [ret] "=r" (ret)
-               : [mem] "m" (*(unsigned long *)addr)
-               : : do_exception);
-
-       return ret;
-
-do_exception:
-       offset = (unsigned long)addr & (sizeof(long) - 1);
-       addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
-       data = *(unsigned long *)addr;
-       ret = data >> offset * 8;
-
-       return ret;
-}
-
-#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
-
-static inline unsigned long load_unaligned_zeropad(const void *addr)
-{
-       unsigned long offset, data;
-       unsigned long ret, err = 0;
-
-       asm(    "1:     mov %[mem], %[ret]\n"
                "2:\n"
-
-               _ASM_EXTABLE_FAULT(1b, 2b)
-
-               : [ret] "=&r" (ret), "+a" (err)
+               _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_ZEROPAD)
+               : [ret] "=r" (ret)
                : [mem] "m" (*(unsigned long *)addr));
 
-       if (unlikely(err)) {
-               offset = (unsigned long)addr & (sizeof(long) - 1);
-               addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
-               data = *(unsigned long *)addr;
-               ret = data >> offset * 8;
-       }
-
        return ret;
 }
 
-#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
-
 #endif /* _ASM_WORD_AT_A_TIME_H */
index 510d85261132b06dc4825ec8e562ac89273043bb..da7c361f47e0d9ab39ec4b0f63eb326baa44a842 100644 (file)
@@ -433,7 +433,8 @@ static void __init mmio_select_mitigation(void)
        u64 ia32_cap;
 
        if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
-           cpu_mitigations_off()) {
+            boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+            cpu_mitigations_off()) {
                mmio_mitigation = MMIO_MITIGATION_OFF;
                return;
        }
@@ -538,6 +539,8 @@ out:
                pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
        if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+       else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+               pr_info("MMIO Stale Data: Unknown: No mitigations\n");
 }
 
 static void __init md_clear_select_mitigation(void)
@@ -2275,6 +2278,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
 
 static ssize_t mmio_stale_data_show_state(char *buf)
 {
+       if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+               return sysfs_emit(buf, "Unknown: No mitigations\n");
+
        if (mmio_mitigation == MMIO_MITIGATION_OFF)
                return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
 
@@ -2421,6 +2427,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                return srbds_show_state(buf);
 
        case X86_BUG_MMIO_STALE_DATA:
+       case X86_BUG_MMIO_UNKNOWN:
                return mmio_stale_data_show_state(buf);
 
        case X86_BUG_RETBLEED:
@@ -2480,7 +2487,10 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
 
 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+       if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+               return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
+       else
+               return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
 }
 
 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
index 64a73f415f036432e9e6cc66e05259e3fcf68d38..3e508f23909830fd78fbd6b168ad04aaeb57ef18 100644 (file)
@@ -1135,7 +1135,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_SWAPGS              BIT(6)
 #define NO_ITLB_MULTIHIT       BIT(7)
 #define NO_SPECTRE_V2          BIT(8)
-#define NO_EIBRS_PBRSB         BIT(9)
+#define NO_MMIO                        BIT(9)
+#define NO_EIBRS_PBRSB         BIT(10)
 
 #define VULNWL(vendor, family, model, whitelist)       \
        X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
@@ -1158,6 +1159,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL(VORTEX,  6, X86_MODEL_ANY,       NO_SPECULATION),
 
        /* Intel Family 6 */
+       VULNWL_INTEL(TIGERLAKE,                 NO_MMIO),
+       VULNWL_INTEL(TIGERLAKE_L,               NO_MMIO),
+       VULNWL_INTEL(ALDERLAKE,                 NO_MMIO),
+       VULNWL_INTEL(ALDERLAKE_L,               NO_MMIO),
+
        VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
@@ -1176,9 +1182,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_AIRMONT_NP,           NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
-       VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
-       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
 
        /*
         * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1193,18 +1199,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL_INTEL(ATOM_TREMONT_D,            NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
 
        /* AMD Family 0xf - 0x12 */
-       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
 
        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-       VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
 
        /* Zhaoxin Family 7 */
-       VULNWL(CENTAUR, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS),
-       VULNWL(ZHAOXIN, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS),
+       VULNWL(CENTAUR, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+       VULNWL(ZHAOXIN, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
        {}
 };
 
@@ -1358,10 +1364,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * Affected CPU list is generally enough to enumerate the vulnerability,
         * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
         * not want the guest to enumerate the bug.
+        *
+        * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+        * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
         */
-       if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
-           !arch_cap_mmio_immune(ia32_cap))
-               setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+       if (!arch_cap_mmio_immune(ia32_cap)) {
+               if (cpu_matches(cpu_vuln_blacklist, MMIO))
+                       setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+               else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+                       setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
+       }
 
        if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
                if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
index 74167dc5f55ec4f6056626b25a6fbceb0903efa4..4c3c27b6aea3b56b2ffe9b8c2375c0353f9ce206 100644 (file)
@@ -505,7 +505,7 @@ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
                match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
                        ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
                if (p->ainsn.jcc.type >= 0xe)
-                       match = match && (regs->flags & X86_EFLAGS_ZF);
+                       match = match || (regs->flags & X86_EFLAGS_ZF);
        }
        __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
 }
index 63dc626627a03ed1a4d0df7b5fb19fa9021b10b0..a428c62330d371a6742ada0a2e9e578dd3f1def6 100644 (file)
@@ -701,7 +701,13 @@ e_term:
 void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
                                         unsigned int npages)
 {
-       if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+       /*
+        * This can be invoked in early boot while running identity mapped, so
+        * use an open coded check for SNP instead of using cc_platform_has().
+        * This eliminates worries about jump tables or checking boot_cpu_data
+        * in the cc_platform_has() function.
+        */
+       if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
                return;
 
         /*
@@ -717,7 +723,13 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
                                        unsigned int npages)
 {
-       if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+       /*
+        * This can be invoked in early boot while running identity mapped, so
+        * use an open coded check for SNP instead of using cc_platform_has().
+        * This eliminates worries about jump tables or checking boot_cpu_data
+        * in the cc_platform_has() function.
+        */
+       if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
                return;
 
        /* Invalidate the memory pages before they are marked shared in the RMP table. */
@@ -2100,7 +2112,7 @@ bool __init snp_init(struct boot_params *bp)
        return true;
 }
 
-void __init snp_abort(void)
+void __init __noreturn snp_abort(void)
 {
        sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
 }
index 38185aedf7d1622103e83f7e771eb724a02196f2..0ea57da929407378221c29756fb6635596d444d9 100644 (file)
@@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip);
 static struct orc_entry *orc_ftrace_find(unsigned long ip)
 {
        struct ftrace_ops *ops;
-       unsigned long caller;
+       unsigned long tramp_addr, offset;
 
        ops = ftrace_ops_trampoline(ip);
        if (!ops)
                return NULL;
 
+       /* Set tramp_addr to the start of the code copied by the trampoline */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
-               caller = (unsigned long)ftrace_regs_call;
+               tramp_addr = (unsigned long)ftrace_regs_caller;
        else
-               caller = (unsigned long)ftrace_call;
+               tramp_addr = (unsigned long)ftrace_caller;
+
+       /* Now place tramp_addr to the location within the trampoline ip is at */
+       offset = ip - ops->trampoline;
+       tramp_addr += offset;
 
        /* Prevent unlikely recursion */
-       if (ip == caller)
+       if (ip == tramp_addr)
                return NULL;
 
-       return orc_find(caller);
+       return orc_find(tramp_addr);
 }
 #else
 static struct orc_entry *orc_ftrace_find(unsigned long ip)
index b4eeb7c75dfad641173618698d34174b3e1ca3de..d5ec3a2ed5a44f32e01a50684ae2dde6b658516d 100644 (file)
@@ -326,7 +326,8 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
        ".align " __stringify(FASTOP_SIZE) " \n\t" \
        ".type " name ", @function \n\t" \
        name ":\n\t" \
-       ASM_ENDBR
+       ASM_ENDBR \
+       IBT_NOSEAL(name)
 
 #define FOP_FUNC(name) \
        __FOP_FUNC(#name)
@@ -446,27 +447,12 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
        FOP_END
 
 /* Special case for SETcc - 1 instruction per cc */
-
-/*
- * Depending on .config the SETcc functions look like:
- *
- * ENDBR                       [4 bytes; CONFIG_X86_KERNEL_IBT]
- * SETcc %al                   [3 bytes]
- * RET | JMP __x86_return_thunk        [1,5 bytes; CONFIG_RETHUNK]
- * INT3                                [1 byte; CONFIG_SLS]
- */
-#define SETCC_ALIGN    16
-
 #define FOP_SETCC(op) \
-       ".align " __stringify(SETCC_ALIGN) " \n\t" \
-       ".type " #op ", @function \n\t" \
-       #op ": \n\t" \
-       ASM_ENDBR \
+       FOP_FUNC(op) \
        #op " %al \n\t" \
-       __FOP_RET(#op) \
-       ".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
+       FOP_RET(op)
 
-__FOP_START(setcc, SETCC_ALIGN)
+FOP_START(setcc)
 FOP_SETCC(seto)
 FOP_SETCC(setno)
 FOP_SETCC(setc)
@@ -493,7 +479,7 @@ FOP_END;
 
 /*
  * XXX: inoutclob user must know where the argument is being expanded.
- *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
+ *      Using asm goto would allow us to remove _fault.
  */
 #define asm_safe(insn, inoutclob...) \
 ({ \
@@ -1079,7 +1065,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
 {
        u8 rc;
-       void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);
+       void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);
 
        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; " CALL_NOSPEC
index eccddb1369542aefde946eec53cae92cdd9b900a..126fa9aec64cda2b4f80b9558426ffafa34cfb48 100644 (file)
@@ -2914,7 +2914,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
         * If addresses are being invalidated, skip prefetching to avoid
         * accidentally prefetching those addresses.
         */
-       if (unlikely(vcpu->kvm->mmu_notifier_count))
+       if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
                return;
 
        __direct_pte_prefetch(vcpu, sp, sptep);
@@ -2928,7 +2928,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
  *
  * There are several ways to safely use this helper:
  *
- * - Check mmu_notifier_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
  *   consuming it.  In this case, mmu_lock doesn't need to be held during the
  *   lookup, but it does need to be held while checking the MMU notifier.
  *
@@ -3056,7 +3056,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
                return;
 
        /*
-        * mmu_notifier_retry() was successful and mmu_lock is held, so
+        * mmu_invalidate_retry() was successful and mmu_lock is held, so
         * the pmd can't be split from under us.
         */
        fault->goal_level = fault->req_level;
@@ -4203,7 +4203,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
                return true;
 
        return fault->slot &&
-              mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+              mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -4227,7 +4227,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        if (r)
                return r;
 
-       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       mmu_seq = vcpu->kvm->mmu_invalidate_seq;
        smp_rmb();
 
        r = kvm_faultin_pfn(vcpu, fault);
@@ -6055,7 +6055,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
        write_lock(&kvm->mmu_lock);
 
-       kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+       kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);
 
        flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
 
@@ -6069,7 +6069,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
                kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
                                                   gfn_end - gfn_start);
 
-       kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
+       kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);
 
        write_unlock(&kvm->mmu_lock);
 }
index f5958071220c9aef44e1c011487b6cce47209fb3..39e0205e7300ac160da1fc547de73d2a738680e6 100644 (file)
@@ -589,7 +589,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
         * If addresses are being invalidated, skip prefetching to avoid
         * accidentally prefetching those addresses.
         */
-       if (unlikely(vcpu->kvm->mmu_notifier_count))
+       if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
                return;
 
        if (sp->role.direct)
@@ -838,7 +838,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        else
                fault->max_level = walker.level;
 
-       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       mmu_seq = vcpu->kvm->mmu_invalidate_seq;
        smp_rmb();
 
        r = kvm_faultin_pfn(vcpu, fault);
index 331310c2934920eab927e45f58d5f1ca7fc5dcac..60814e110a54ca92f7251c7d9932d08d8fc5f101 100644 (file)
@@ -41,6 +41,59 @@ static bool ex_handler_default(const struct exception_table_entry *e,
        return true;
 }
 
+/*
+ * This is the *very* rare case where we do a "load_unaligned_zeropad()"
+ * and it's a page crosser into a non-existent page.
+ *
+ * This happens when we optimistically load a pathname a word-at-a-time
+ * and the name is less than the full word and the  next page is not
+ * mapped. Typically that only happens for CONFIG_DEBUG_PAGEALLOC.
+ *
+ * NOTE! The faulting address is always a 'mov mem,reg' type instruction
+ * of size 'long', and the exception fixup must always point to right
+ * after the instruction.
+ */
+static bool ex_handler_zeropad(const struct exception_table_entry *e,
+                              struct pt_regs *regs,
+                              unsigned long fault_addr)
+{
+       struct insn insn;
+       const unsigned long mask = sizeof(long) - 1;
+       unsigned long offset, addr, next_ip, len;
+       unsigned long *reg;
+
+       next_ip = ex_fixup_addr(e);
+       len = next_ip - regs->ip;
+       if (len > MAX_INSN_SIZE)
+               return false;
+
+       if (insn_decode(&insn, (void *) regs->ip, len, INSN_MODE_KERN))
+               return false;
+       if (insn.length != len)
+               return false;
+
+       if (insn.opcode.bytes[0] != 0x8b)
+               return false;
+       if (insn.opnd_bytes != sizeof(long))
+               return false;
+
+       addr = (unsigned long) insn_get_addr_ref(&insn, regs);
+       if (addr == ~0ul)
+               return false;
+
+       offset = addr & mask;
+       addr = addr & ~mask;
+       if (fault_addr != addr + sizeof(long))
+               return false;
+
+       reg = insn_get_modrm_reg_ptr(&insn, regs);
+       if (!reg)
+               return false;
+
+       *reg = *(unsigned long *)addr >> (offset * 8);
+       return ex_handler_default(e, regs);
+}
+
 static bool ex_handler_fault(const struct exception_table_entry *fixup,
                             struct pt_regs *regs, int trapnr)
 {
@@ -217,6 +270,8 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
                return ex_handler_sgx(e, regs, trapnr);
        case EX_TYPE_UCOPY_LEN:
                return ex_handler_ucopy_len(e, regs, trapnr, reg, imm);
+       case EX_TYPE_ZEROPAD:
+               return ex_handler_zeropad(e, regs, fault_addr);
        }
        BUG();
 }
index 39c5246964a91592066fca256f5d7f590462ddbe..0fe690ebc269b529c5364d6148fd7e90e51c0a96 100644 (file)
@@ -645,7 +645,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
 
-                       prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
+                       prot = __pgprot(pgprot_val(prot) | _PAGE_PSE);
 
                        set_pte_init((pte_t *)pud,
                                     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
index d5ef64ddd35e9f38c2a533e22d79c016fd6fcc55..66a209f7eb86da01b9c27537cb7a4183f657162b 100644 (file)
@@ -62,6 +62,7 @@
 
 static bool __read_mostly pat_bp_initialized;
 static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __initdata pat_force_disabled = !IS_ENABLED(CONFIG_X86_PAT);
 static bool __read_mostly pat_bp_enabled;
 static bool __read_mostly pat_cm_initialized;
 
@@ -86,6 +87,7 @@ void pat_disable(const char *msg_reason)
 static int __init nopat(char *str)
 {
        pat_disable("PAT support disabled via boot option.");
+       pat_force_disabled = true;
        return 0;
 }
 early_param("nopat", nopat);
@@ -272,7 +274,7 @@ static void pat_ap_init(u64 pat)
        wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-void init_cache_modes(void)
+void __init init_cache_modes(void)
 {
        u64 pat = 0;
 
@@ -313,6 +315,12 @@ void init_cache_modes(void)
                 */
                pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
                      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+       } else if (!pat_force_disabled && cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) {
+               /*
+                * Clearly PAT is enabled underneath. Allow pat_enabled() to
+                * reflect this.
+                */
+               pat_bp_enabled = true;
        }
 
        __init_cache_modes(pat);
index 5ee62b95f3e5d040ff0112e4c37cc2ba567f0ee7..c96c8c4f751b31a77088f2024b35c2dc9e8f2658 100644 (file)
@@ -1931,7 +1931,8 @@ out:
        /* If we didn't flush the entire list, we could have told the driver
         * there was more coming, but that turned out to be a lie.
         */
-       if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
+       if ((!list_empty(list) || errors || needs_resource ||
+            ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
                q->mq_ops->commit_rqs(hctx);
        /*
         * Any items that need requeuing? Stuff them into hctx->dispatch,
@@ -2229,26 +2230,6 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
 
-/**
- * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
- * @q: request queue.
- *
- * The caller is responsible for serializing this function against
- * blk_mq_{start,stop}_hw_queue().
- */
-bool blk_mq_queue_stopped(struct request_queue *q)
-{
-       struct blk_mq_hw_ctx *hctx;
-       unsigned long i;
-
-       queue_for_each_hw_ctx(q, hctx, i)
-               if (blk_mq_hctx_stopped(hctx))
-                       return true;
-
-       return false;
-}
-EXPORT_SYMBOL(blk_mq_queue_stopped);
-
 /*
  * This function is often used for pausing .queue_rq() by driver when
  * there isn't enough resource or some conditions aren't satisfied, and
@@ -2570,7 +2551,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
                        break;
                case BLK_STS_RESOURCE:
                case BLK_STS_DEV_RESOURCE:
-                       blk_mq_request_bypass_insert(rq, false, last);
+                       blk_mq_request_bypass_insert(rq, false, true);
                        blk_mq_commit_rqs(hctx, &queued, from_schedule);
                        return;
                default:
@@ -2680,6 +2661,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                list_del_init(&rq->queuelist);
                ret = blk_mq_request_issue_directly(rq, list_empty(list));
                if (ret != BLK_STS_OK) {
+                       errors++;
                        if (ret == BLK_STS_RESOURCE ||
                                        ret == BLK_STS_DEV_RESOURCE) {
                                blk_mq_request_bypass_insert(rq, false,
@@ -2687,7 +2669,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                break;
                        }
                        blk_mq_end_request(rq, ret);
-                       errors++;
                } else
                        queued++;
        }
index db6ac540e924a75f51b41ba7fd35922d0dfa1f58..e534fd49a67e50877cc9ecd2672720fceed467f5 100644 (file)
@@ -151,7 +151,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
        unsigned int cpu;
 
        for_each_cpu(cpu, policy->related_cpus) {
-               struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+               struct acpi_processor *pr = per_cpu(processors, cpu);
 
                if (pr)
                        freq_qos_remove_request(&pr->thermal_req);
index 7b3ad8ed2f4e6c46ade0eb87ffa7754a0694c0eb..d4c168ce428ca6556c48642fd507aabb8cf34b95 100644 (file)
@@ -370,7 +370,7 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
                bool ret;
 
                status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
-               if (ACPI_FAILURE(status)) {
+               if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
                        acpi_handle_err(dn->handle, "Can't tag data node\n");
                        return false;
                }
@@ -1043,11 +1043,10 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
                                break;                                  \
                        }                                               \
                        if (__items[i].integer.value > _Generic(__val,  \
-                                                               u8: U8_MAX, \
-                                                               u16: U16_MAX, \
-                                                               u32: U32_MAX, \
-                                                               u64: U64_MAX, \
-                                                               default: 0U)) { \
+                                                               u8 *: U8_MAX, \
+                                                               u16 *: U16_MAX, \
+                                                               u32 *: U32_MAX, \
+                                                               u64 *: U64_MAX)) { \
                                ret = -EOVERFLOW;                       \
                                break;                                  \
                        }                                               \
index 1014beb128025706ac6d98d7e09a6030acb5c23f..51f4e1c5cd0199bbd34d886dd3e08c4f6664507e 100644 (file)
@@ -402,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        size_t size, data_offsets_size;
        int ret;
 
+       mmap_read_lock(alloc->vma_vm_mm);
        if (!binder_alloc_get_vma(alloc)) {
+               mmap_read_unlock(alloc->vma_vm_mm);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }
+       mmap_read_unlock(alloc->vma_vm_mm);
 
        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));
@@ -929,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
         * Make sure the binder_alloc is fully initialized, otherwise we might
         * read inconsistent state.
         */
-       if (binder_alloc_get_vma(alloc) != NULL) {
-               for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-                       page = &alloc->pages[i];
-                       if (!page->page_ptr)
-                               free++;
-                       else if (list_empty(&page->lru))
-                               active++;
-                       else
-                               lru++;
-               }
+
+       mmap_read_lock(alloc->vma_vm_mm);
+       if (binder_alloc_get_vma(alloc) == NULL) {
+               mmap_read_unlock(alloc->vma_vm_mm);
+               goto uninitialized;
        }
+
+       mmap_read_unlock(alloc->vma_vm_mm);
+       for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+               page = &alloc->pages[i];
+               if (!page->page_ptr)
+                       free++;
+               else if (list_empty(&page->lru))
+                       active++;
+               else
+                       lru++;
+       }
+
+uninitialized:
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
index ef4508d72c023ee6ae8d97935a27f6d0e2275a04..7c128c89b45461ac51959293932c0158fa0d0698 100644 (file)
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
                { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
                { ATA_CMD_FPDMA_READ,           "READ FPDMA QUEUED" },
                { ATA_CMD_FPDMA_WRITE,          "WRITE FPDMA QUEUED" },
+               { ATA_CMD_NCQ_NON_DATA,         "NCQ NON-DATA" },
                { ATA_CMD_FPDMA_SEND,           "SEND FPDMA QUEUED" },
                { ATA_CMD_FPDMA_RECV,           "RECEIVE FPDMA QUEUED" },
                { ATA_CMD_PIO_READ,             "READ SECTOR(S)" },
index e3c0ba93c1a34cd4969d5e9235ded7382b11d113..ad92192c7d617368194fa22de2f9c3a2845f8e62 100644 (file)
@@ -979,6 +979,11 @@ loop_set_status_from_info(struct loop_device *lo,
 
        lo->lo_offset = info->lo_offset;
        lo->lo_sizelimit = info->lo_sizelimit;
+
+       /* loff_t vars have been assigned __u64 */
+       if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+               return -EOVERFLOW;
+
        memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
        lo->lo_file_name[LO_NAME_SIZE-1] = 0;
        lo->lo_flags = info->lo_flags;
index 2b7d1db5c4a7ba1626bdd0a5fd695f8d6dc597f1..6a4a94b4cdf42fa1d56d79d28d73c53aef941391 100644 (file)
@@ -555,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
        return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
 }
 
-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 {
        return ubq->ubq_daemon->flags & PF_EXITING;
 }
@@ -605,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
 }
 
 /*
- * __ublk_fail_req() may be called from abort context or ->ubq_daemon
- * context during exiting, so lock is required.
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So lock is unnecessary.
  *
  * Also aborting may not be started yet, keep in mind that one failed
  * request may be issued by block layer again.
@@ -644,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
        struct ublk_device *ub = ubq->dev;
        int tag = req->tag;
        struct ublk_io *io = &ubq->ios[tag];
-       bool task_exiting = current != ubq->ubq_daemon ||
-               (current->flags & PF_EXITING);
+       bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
        unsigned int mapped_bytes;
 
        pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -680,6 +680,11 @@ static inline void __ublk_rq_task_work(struct request *req)
                 * do the copy work.
                 */
                io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+               /* update iod->addr because ublksrv may have passed a new io buffer */
+               ublk_get_iod(ubq, req->tag)->addr = io->addr;
+               pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+                               __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+                               ublk_get_iod(ubq, req->tag)->addr);
        }
 
        mapped_bytes = ublk_map_io(ubq, req, io);
@@ -751,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
                if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
                        goto fail;
        } else {
-               struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+               struct ublk_io *io = &ubq->ios[rq->tag];
+               struct io_uring_cmd *cmd = io->cmd;
                struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 
+               /*
+                * If the check pass, we know that this is a re-issued request aborted
+                * previously in monitor_work because the ubq_daemon(cmd's task) is
+                * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+                * because this ioucmd's io_uring context may be freed now if no inflight
+                * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+                *
+                * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
+                * the tag). Then the request is re-started(allocating the tag) and we are here.
+                * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+                * guarantees that here is a re-issued request aborted previously.
+                */
+               if ((io->flags & UBLK_IO_FLAG_ABORTED))
+                       goto fail;
+
                pdu->req = rq;
                io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
        }
index 92cb929a45b79ae26090233cb83c300b5c937655..226ea76cc81978f9de919821a95bf270949f9e35 100644 (file)
@@ -1146,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
 static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       int version = 2;
+       int version = 1;
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;
 
        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
-                       "version: %d\n%8llu\n",
+                       "version: %d\n%8llu %8llu\n",
                        version,
+                       (u64)atomic64_read(&zram->stats.writestall),
                        (u64)atomic64_read(&zram->stats.miss_free));
        up_read(&zram->init_lock);
 
@@ -1351,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 {
        int ret = 0;
        unsigned long alloced_pages;
-       unsigned long handle = 0;
+       unsigned long handle = -ENOMEM;
        unsigned int comp_len = 0;
        void *src, *dst, *mem;
        struct zcomp_strm *zstrm;
@@ -1369,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
        }
        kunmap_atomic(mem);
 
+compress_again:
        zstrm = zcomp_stream_get(zram->comp);
        src = kmap_atomic(page);
        ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,20 +1379,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
        if (unlikely(ret)) {
                zcomp_stream_put(zram->comp);
                pr_err("Compression failed! err=%d\n", ret);
+               zs_free(zram->mem_pool, handle);
                return ret;
        }
 
        if (comp_len >= huge_class_size)
                comp_len = PAGE_SIZE;
-
-       handle = zs_malloc(zram->mem_pool, comp_len,
-                       __GFP_KSWAPD_RECLAIM |
-                       __GFP_NOWARN |
-                       __GFP_HIGHMEM |
-                       __GFP_MOVABLE);
-
+       /*
+        * handle allocation has 2 paths:
+        * a) fast path is executed with preemption disabled (for
+        *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+        *  since we can't sleep;
+        * b) slow path enables preemption and attempts to allocate
+        *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
+        *  put per-cpu compression stream and, thus, to re-do
+        *  the compression once handle is allocated.
+        *
+        * if we have a 'non-null' handle here then we are coming
+        * from the slow path and handle has already been allocated.
+        */
+       if (IS_ERR((void *)handle))
+               handle = zs_malloc(zram->mem_pool, comp_len,
+                               __GFP_KSWAPD_RECLAIM |
+                               __GFP_NOWARN |
+                               __GFP_HIGHMEM |
+                               __GFP_MOVABLE);
        if (IS_ERR((void *)handle)) {
                zcomp_stream_put(zram->comp);
+               atomic64_inc(&zram->stats.writestall);
+               handle = zs_malloc(zram->mem_pool, comp_len,
+                               GFP_NOIO | __GFP_HIGHMEM |
+                               __GFP_MOVABLE);
+               if (!IS_ERR((void *)handle))
+                       goto compress_again;
                return PTR_ERR((void *)handle);
        }
 
@@ -1948,6 +1969,7 @@ static int zram_add(void)
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
 
+       blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
        ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
        if (ret)
                goto out_cleanup_disk;
index 158c91e5485018e6a56fcea418fc953ba5c455d5..80c3b43b4828fa553a1daaf0403e2c4776ddc3f3 100644 (file)
@@ -81,6 +81,7 @@ struct zram_stats {
        atomic64_t huge_pages_since;    /* no. of huge pages since zram set up */
        atomic64_t pages_stored;        /* no. of pages currently stored */
        atomic_long_t max_used_pages;   /* no. of maximum pages stored */
+       atomic64_t writestall;          /* no. of write slow paths */
        atomic64_t miss_free;           /* no. of missed free */
 #ifdef CONFIG_ZRAM_WRITEBACK
        atomic64_t bd_count;            /* no. of pages in backing device */
index 7820c4e7428934ddd9a31e4fa5e9117cb31574f2..69b3d61852ac61268453f7144a74b552943444ee 100644 (file)
@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 
        target_freq = clamp_val(target_freq, policy->min, policy->max);
 
-       if (!cpufreq_driver->target_index)
+       if (!policy->freq_table)
                return target_freq;
 
        idx = cpufreq_frequency_table_target(policy, target_freq, relation);
index 3ed7ae0d6781e560a69ed79935acabb1c8266235..96060bf90a24abd63c36a9f702e6c633c01083a1 100644 (file)
@@ -450,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
 static const struct scmi_clock_info *
 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
+       struct scmi_clock_info *clk;
        struct clock_info *ci = ph->get_priv(ph);
-       struct scmi_clock_info *clk = ci->clk + clk_id;
 
+       if (clk_id >= ci->num_clocks)
+               return NULL;
+
+       clk = ci->clk + clk_id;
        if (!clk->name[0])
                return NULL;
 
index 8abace56b95885da4ccb9d52488083996c234e9e..f42dad997ac9a50a947191f55d19a05a1c3d08ba 100644 (file)
@@ -106,6 +106,7 @@ enum scmi_optee_pta_cmd {
  * @channel_id: OP-TEE channel ID used for this transport
  * @tee_session: TEE session identifier
  * @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
  * @mu: Mutex protection on channel access
  * @cinfo: SCMI channel information
  * @shmem: Virtual base address of the shared memory
index 673f3eb498f43483c1ece184c98d8c4b3ce5aef0..e9afa8cab730949ae65b1199283e94cbbe205f78 100644 (file)
@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
        struct scmi_xfer *t;
        struct scmi_msg_reset_domain_reset *dom;
        struct scmi_reset_info *pi = ph->get_priv(ph);
-       struct reset_dom_info *rdom = pi->dom_info + domain;
+       struct reset_dom_info *rdom;
 
-       if (rdom->async_reset)
+       if (domain >= pi->num_domains)
+               return -EINVAL;
+
+       rdom = pi->dom_info + domain;
+       if (rdom->async_reset && flags & AUTONOMOUS_RESET)
                flags |= ASYNCHRONOUS_RESET;
 
        ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
        dom->flags = cpu_to_le32(flags);
        dom->reset_state = cpu_to_le32(state);
 
-       if (rdom->async_reset)
+       if (flags & ASYNCHRONOUS_RESET)
                ret = ph->xops->do_xfer_with_response(ph, t);
        else
                ret = ph->xops->do_xfer(ph, t);
index 581d34c9576954d0b4563090e2d7e08f5ec550da..4e27c3d66a837d9dad46fa2d03adc7ded2f8ab10 100644 (file)
@@ -138,9 +138,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
        scmi_pd_data->domains = domains;
        scmi_pd_data->num_domains = num_domains;
 
+       dev_set_drvdata(dev, scmi_pd_data);
+
        return of_genpd_add_provider_onecell(np, scmi_pd_data);
 }
 
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+       int i;
+       struct genpd_onecell_data *scmi_pd_data;
+       struct device *dev = &sdev->dev;
+       struct device_node *np = dev->of_node;
+
+       of_genpd_del_provider(np);
+
+       scmi_pd_data = dev_get_drvdata(dev);
+       for (i = 0; i < scmi_pd_data->num_domains; i++) {
+               if (!scmi_pd_data->domains[i])
+                       continue;
+               pm_genpd_remove(scmi_pd_data->domains[i]);
+       }
+}
+
 static const struct scmi_device_id scmi_id_table[] = {
        { SCMI_PROTOCOL_POWER, "genpd" },
        { },
@@ -150,6 +169,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
 static struct scmi_driver scmi_power_domain_driver = {
        .name = "scmi-power-domain",
        .probe = scmi_pm_domain_probe,
+       .remove = scmi_pm_domain_remove,
        .id_table = scmi_id_table,
 };
 module_scmi_driver(scmi_power_domain_driver);
index 7288c61178380813cb63fb83e588180c3308ab93..0b5853fa9d874f2d0800a5fa2aa20a45904e0f66 100644 (file)
@@ -762,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
 {
        int ret;
        struct scmi_xfer *t;
+       struct sensors_info *si = ph->get_priv(ph);
+
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
 
        ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
                                      sizeof(__le32), sizeof(__le32), &t);
@@ -771,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
        put_unaligned_le32(sensor_id, t->tx.buf);
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
-               struct sensors_info *si = ph->get_priv(ph);
                struct scmi_sensor_info *s = si->sensors + sensor_id;
 
                *sensor_config = get_unaligned_le64(t->rx.buf);
@@ -788,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_sensor_config_set *msg;
+       struct sensors_info *si = ph->get_priv(ph);
+
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
 
        ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
                                      sizeof(*msg), 0, &t);
@@ -800,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
 
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
-               struct sensors_info *si = ph->get_priv(ph);
                struct scmi_sensor_info *s = si->sensors + sensor_id;
 
                s->sensor_config = sensor_config;
@@ -831,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_sensor_reading_get *sensor;
+       struct scmi_sensor_info *s;
        struct sensors_info *si = ph->get_priv(ph);
-       struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
 
        ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
                                      sizeof(*sensor), 0, &t);
@@ -841,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
 
        sensor = t->tx.buf;
        sensor->id = cpu_to_le32(sensor_id);
+       s = si->sensors + sensor_id;
        if (s->async) {
                sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
                ret = ph->xops->do_xfer_with_response(ph, t);
@@ -895,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_sensor_reading_get *sensor;
+       struct scmi_sensor_info *s;
        struct sensors_info *si = ph->get_priv(ph);
-       struct scmi_sensor_info *s = si->sensors + sensor_id;
 
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
+
+       s = si->sensors + sensor_id;
        if (!count || !readings ||
            (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
                return -EINVAL;
@@ -948,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
 {
        struct sensors_info *si = ph->get_priv(ph);
 
+       if (sensor_id >= si->num_sensors)
+               return NULL;
+
        return si->sensors + sensor_id;
 }
 
index f191a1f901ac70cd7924ec7e62db054f9ff84e85..0eb6b617f709a4ebc181b145d913fc195ecb618f 100644 (file)
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
 {
        if (memcmp(buf, "_SM3_", 5) == 0 &&
            buf[6] < 32 && dmi_checksum(buf, buf[6])) {
-               dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+               dmi_ver = get_unaligned_be24(buf + 7);
                dmi_num = 0;                    /* No longer specified */
                dmi_len = get_unaligned_le32(buf + 12);
                dmi_base = get_unaligned_le64(buf + 16);
index c6cc493a548665e60f900cfc5d060283684fb389..2b97b8a96fb4944963093dc9ff6f569399af12d4 100644 (file)
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
                              struct amdgpu_reset_context *reset_context)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+       struct list_head *reset_device_list = reset_context->reset_device_list;
        struct amdgpu_device *tmp_adev = NULL;
-       struct list_head reset_device_list;
        int r = 0;
 
        dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+       if (reset_device_list == NULL)
+               return -EINVAL;
+
        if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
            reset_context->hive == NULL) {
                /* Wrong context, return error */
                return -EINVAL;
        }
 
-       INIT_LIST_HEAD(&reset_device_list);
-       if (reset_context->hive) {
-               list_for_each_entry (tmp_adev,
-                                    &reset_context->hive->device_list,
-                                    gmc.xgmi.head)
-                       list_add_tail(&tmp_adev->reset_list,
-                                     &reset_device_list);
-       } else {
-               list_add_tail(&reset_context->reset_req_dev->reset_list,
-                             &reset_device_list);
-       }
-
-       list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+       list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
                mutex_lock(&tmp_adev->reset_cntl->reset_lock);
                tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
        }
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
         * Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
         * them together so that they can be completed asynchronously on multiple nodes
         */
-       list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+       list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
                /* For XGMI run all resets in parallel to speed up the process */
                if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
                        if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
 
        /* For XGMI wait for all resets to complete before proceed */
        if (!r) {
-               list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+               list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
                        if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
                                flush_work(&tmp_adev->reset_cntl->reset_work);
                                r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
                }
        }
 
-       list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+       list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
                mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
                tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
        }
@@ -339,10 +331,13 @@ static int
 aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
                                  struct amdgpu_reset_context *reset_context)
 {
+       struct list_head *reset_device_list = reset_context->reset_device_list;
        struct amdgpu_device *tmp_adev = NULL;
-       struct list_head reset_device_list;
        int r;
 
+       if (reset_device_list == NULL)
+               return -EINVAL;
+
        if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
                    IP_VERSION(13, 0, 2) &&
            reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
                return -EINVAL;
        }
 
-       INIT_LIST_HEAD(&reset_device_list);
-       if (reset_context->hive) {
-               list_for_each_entry (tmp_adev,
-                                    &reset_context->hive->device_list,
-                                    gmc.xgmi.head)
-                       list_add_tail(&tmp_adev->reset_list,
-                                     &reset_device_list);
-       } else {
-               list_add_tail(&reset_context->reset_req_dev->reset_list,
-                             &reset_device_list);
-       }
-
-       list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+       list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
                dev_info(tmp_adev->dev,
                         "GPU reset succeeded, trying to resume\n");
                r = aldebaran_mode2_restore_ip(tmp_adev);
index e146810c700ba7846c926ef205f6480418b3f415..d597e2656c475da6642e88156c8a9d6e056986da 100644 (file)
@@ -317,7 +317,7 @@ enum amdgpu_kiq_irq {
        AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
        AMDGPU_CP_KIQ_IRQ_LAST
 };
-
+#define SRIOV_USEC_TIMEOUT  1200000 /* wait 12 * 100ms for SRIOV */
 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
 #define MAX_KIQ_REG_TRY 1000
index 3c09dcc0986ee96ee8350378a671c4638cae2011..647220a8762dc591cbf83fba338235ca9798ac9e 100644 (file)
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
 struct amdgpu_kfd_dev {
        struct kfd_dev *dev;
        uint64_t vram_used;
+       uint64_t vram_used_aligned;
        bool init_complete;
        struct work_struct reset_work;
 };
index a699134a1e8cf5dd8c92fd0ddfe2f1a449eb3b7b..cbd593f7d553f71e0b7b1ba80bf98f9384bcf889 100644 (file)
 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
 
 /*
- * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
  * BO chunk
  */
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
 
 /* Impose limit on how much memory KFD can use */
 static struct {
@@ -149,7 +149,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                 * to avoid fragmentation caused by 4K allocations in the tail
                 * 2M BO chunk.
                 */
-               vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+               vram_needed = size;
        } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
                system_mem_needed = size;
        } else if (!(alloc_flag &
@@ -182,8 +182,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
         */
        WARN_ONCE(vram_needed && !adev,
                  "adev reference can't be null when vram is used");
-       if (adev)
+       if (adev) {
                adev->kfd.vram_used += vram_needed;
+               adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+       }
        kfd_mem_limit.system_mem_used += system_mem_needed;
        kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
 
@@ -203,8 +205,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
        } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
                WARN_ONCE(!adev,
                          "adev reference can't be null when alloc mem flags vram is set");
-               if (adev)
-                       adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+               if (adev) {
+                       adev->kfd.vram_used -= size;
+                       adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+               }
        } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
                kfd_mem_limit.system_mem_used -= size;
        } else if (!(alloc_flag &
@@ -1608,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
        uint64_t reserved_for_pt =
                ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
        size_t available;
-
        spin_lock(&kfd_mem_limit.mem_limit_lock);
        available = adev->gmc.real_vram_size
-               - adev->kfd.vram_used
+               - adev->kfd.vram_used_aligned
                - atomic64_read(&adev->vram_pin_size)
                - reserved_for_pt;
        spin_unlock(&kfd_mem_limit.mem_limit_lock);
 
-       return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+       return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
 }
 
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
index fd8f3731758edec3e9e0c35f3d52d8768e067d92..b81b77a9efa6157bcf562454a47b11aa7c557634 100644 (file)
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
                                        mem_channel_number = vram_info->v30.channel_num;
                                        mem_channel_width = vram_info->v30.channel_width;
                                        if (vram_width)
-                                               *vram_width = mem_channel_number * mem_channel_width;
+                                               *vram_width = mem_channel_number * (1 << mem_channel_width);
                                        break;
                                default:
                                        return -EINVAL;
index d8f1335bc68f416154b6ee3aae6f5a4028f08cbf..b7bae833c804b02b05478fc509fa6d2eb3807341 100644 (file)
@@ -837,16 +837,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        continue;
 
                r = amdgpu_vm_bo_update(adev, bo_va, false);
-               if (r) {
-                       mutex_unlock(&p->bo_list->bo_list_mutex);
+               if (r)
                        return r;
-               }
 
                r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
-               if (r) {
-                       mutex_unlock(&p->bo_list->bo_list_mutex);
+               if (r)
                        return r;
-               }
        }
 
        r = amdgpu_vm_handle_moved(adev, vm);
index e2eec985adb3a4434ec27eb5d07589497281c126..cb00c7d6f50bec79ba9f2d2bc2c723d733d7dfd5 100644 (file)
@@ -1705,7 +1705,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        char reg_offset[11];
-       uint32_t *new, *tmp = NULL;
+       uint32_t *new = NULL, *tmp = NULL;
        int ret, i = 0, len = 0;
 
        do {
@@ -1747,7 +1747,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
        ret = size;
 
 error_free:
-       kfree(tmp);
+       if (tmp != new)
+               kfree(tmp);
        kfree(new);
        return ret;
 }
index c4a6fe3070b6e6e34bae181abcfb6a86c421f171..f095a2513affc2a58150e9213400e92ab237dadc 100644 (file)
@@ -2456,12 +2456,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                        if (!hive->reset_domain ||
                            !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
                                r = -ENOENT;
+                               amdgpu_put_xgmi_hive(hive);
                                goto init_failed;
                        }
 
                        /* Drop the early temporary reset domain we created for device */
                        amdgpu_reset_put_reset_domain(adev->reset_domain);
                        adev->reset_domain = hive->reset_domain;
+                       amdgpu_put_xgmi_hive(hive);
                }
        }
 
@@ -4413,8 +4415,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 retry:
        amdgpu_amdkfd_pre_reset(adev);
 
-       amdgpu_amdkfd_pre_reset(adev);
-
        if (from_hypervisor)
                r = amdgpu_virt_request_full_gpu(adev, true);
        else
@@ -4742,6 +4742,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
        tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
                                    reset_list);
        amdgpu_reset_reg_dumps(tmp_adev);
+
+       reset_context->reset_device_list = device_list_handle;
        r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
        /* If reset handler not implemented, continue; otherwise return */
        if (r == -ENOSYS)
index 5071b96be9824629caec9a1a886a65fc15d5ffb2..b1099ee79c50b0bce57a031683dafa7912ff6532 100644 (file)
@@ -272,10 +272,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
        /* Signal all jobs not yet scheduled */
        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                struct drm_sched_rq *rq = &sched->sched_rq[i];
-
-               if (!rq)
-                       continue;
-
                spin_lock(&rq->lock);
                list_for_each_entry(s_entity, &rq->entities, list) {
                        while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
index b067ce45d2264aa107a5d268f07963c329400dbb..1036446abc3089f9937107efdec7d409c9c1910b 100644 (file)
@@ -2641,6 +2641,9 @@ static int psp_hw_fini(void *handle)
                psp_rap_terminate(psp);
                psp_dtm_terminate(psp);
                psp_hdcp_terminate(psp);
+
+               if (adev->gmc.xgmi.num_physical_nodes > 1)
+                       psp_xgmi_terminate(psp);
        }
 
        psp_asd_terminate(psp);
index 9e55a5d7a825334230d09190488585ee650b8a9e..ffda1560c6481d6476fe0ee081224f2ba879bc6c 100644 (file)
@@ -37,6 +37,7 @@ struct amdgpu_reset_context {
        struct amdgpu_device *reset_req_dev;
        struct amdgpu_job *job;
        struct amdgpu_hive_info *hive;
+       struct list_head *reset_device_list;
        unsigned long flags;
 };
 
index 3b4c19412625dd1395adf8f00bd3dbb85ec37940..134575a3893c535cdfd77bd5de2b902c05ba8254 100644 (file)
@@ -637,6 +637,8 @@ struct amdgpu_ttm_tt {
 #endif
 };
 
+#define ttm_to_amdgpu_ttm_tt(ptr)      container_of(ptr, struct amdgpu_ttm_tt, ttm)
+
 #ifdef CONFIG_DRM_AMDGPU_USERPTR
 /*
  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -648,7 +650,7 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 {
        struct ttm_tt *ttm = bo->tbo.ttm;
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        unsigned long start = gtt->userptr;
        struct vm_area_struct *vma;
        struct mm_struct *mm;
@@ -702,7 +704,7 @@ out_unlock:
  */
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        bool r = false;
 
        if (!gtt || !gtt->userptr)
@@ -751,7 +753,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
                                     struct ttm_tt *ttm)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -788,7 +790,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
                                        struct ttm_tt *ttm)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -822,7 +824,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
        struct ttm_tt *ttm = tbo->ttm;
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 
        if (amdgpu_bo_encrypted(abo))
                flags |= AMDGPU_PTE_TMZ;
@@ -860,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
                                   struct ttm_resource *bo_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       struct amdgpu_ttm_tt *gtt = (void*)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        uint64_t flags;
        int r;
 
@@ -927,7 +929,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct ttm_operation_ctx ctx = { false, false };
-       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
        struct ttm_placement placement;
        struct ttm_place placements;
        struct ttm_resource *tmp;
@@ -998,7 +1000,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                                      struct ttm_tt *ttm)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr) {
@@ -1025,7 +1027,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
                                       struct ttm_tt *ttm)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 
        if (gtt->usertask)
                put_task_struct(gtt->usertask);
@@ -1079,7 +1081,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
                                  struct ttm_operation_ctx *ctx)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        pgoff_t i;
        int ret;
 
@@ -1113,7 +1115,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
                                     struct ttm_tt *ttm)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        struct amdgpu_device *adev;
        pgoff_t i;
 
@@ -1182,7 +1184,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
        /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
        bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
 
-       gtt = (void *)bo->ttm;
+       gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
        gtt->userptr = addr;
        gtt->userflags = flags;
 
@@ -1199,7 +1201,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
  */
 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 
        if (gtt == NULL)
                return NULL;
@@ -1218,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                  unsigned long end, unsigned long *userptr)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
        unsigned long size;
 
        if (gtt == NULL || !gtt->userptr)
@@ -1241,7 +1243,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
  */
 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 
        if (gtt == NULL || !gtt->userptr)
                return false;
@@ -1254,7 +1256,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
  */
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 
        if (gtt == NULL)
                return false;
index 108e8e8a1a367e670eeab174d968cad612fbf8fe..576849e9529642a033d6ab768c1560dbb02aaf6c 100644 (file)
@@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       /* disable prefer shadow for now due to hibernation issues */
-       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
index 1b108d03e78593732b051eb13d87d5fd014127ac..f2aebbf3fbe3820c88edc79955a41185be078f0f 100644 (file)
@@ -742,7 +742,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
                amdgpu_put_xgmi_hive(hive);
        }
 
-       return psp_xgmi_terminate(&adev->psp);
+       return 0;
 }
 
 static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
index 33a8a7365aef9642a560faad85d267de815565ba..f0e235f98afb299d6647020c5177214a0f94632f 100644 (file)
 #include "navi10_enum.h"
 #include "soc15_common.h"
 
+#define regATHUB_MISC_CNTL_V3_0_1                      0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX             0
+
+
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+       uint32_t data;
+
+       switch (adev->ip_versions[ATHUB_HWIP][0]) {
+       case IP_VERSION(3, 0, 1):
+               data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+               break;
+       default:
+               data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+               break;
+       }
+       return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+       switch (adev->ip_versions[ATHUB_HWIP][0]) {
+       case IP_VERSION(3, 0, 1):
+               WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+               break;
+       default:
+               WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+               break;
+       }
+}
+
 static void
 athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                            bool enable)
 {
        uint32_t def, data;
 
-       def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+       def = data = athub_v3_0_get_cg_cntl(adev);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
                data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
 
        if (def != data)
-               WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+               athub_v3_0_set_cg_cntl(adev, data);
 }
 
 static void
@@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
 {
        uint32_t def, data;
 
-       def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+       def = data = athub_v3_0_get_cg_cntl(adev);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
                data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
 
        if (def != data)
-               WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+               athub_v3_0_set_cg_cntl(adev, data);
 }
 
 int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
 
        switch (adev->ip_versions[ATHUB_HWIP][0]) {
        case IP_VERSION(3, 0, 0):
+       case IP_VERSION(3, 0, 1):
        case IP_VERSION(3, 0, 2):
                athub_v3_0_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
@@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
        int data;
 
        /* AMD_CG_SUPPORT_ATHUB_MGCG */
-       data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+       data = athub_v3_0_get_cg_cntl(adev);
        if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
 
index 9c964cd3b5d4e24fec07595e172bc6e8bf1aaa59..288fce7dc0ed178305b443d93fe72906e603bbf6 100644 (file)
@@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       /* disable prefer shadow for now due to hibernation issues */
-       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 
index e0ad9f27dc3f943dbe02d7bbbd491e241e202cad..cbe5250b31cb4e33ac7460a323690df56f955f70 100644 (file)
@@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       /* disable prefer shadow for now due to hibernation issues */
-       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 
index 77f5e998a1202bb4666a8bac794d29592917aceb..b1c44fab074f32806266b054d4aa163816cd9c46 100644 (file)
@@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_width = 16384;
        adev_to_drm(adev)->mode_config.max_height = 16384;
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       /* disable prefer shadow for now due to hibernation issues */
-       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
index 802e5c753271cd590af370b9bd2e25a585a6b666..a22b45c9279227a2a28adf8aabebf2718c4dd1ba 100644 (file)
@@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       /* disable prefer shadow for now due to hibernation issues */
-       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       if (adev->asic_type == CHIP_HAWAII)
+               /* disable prefer shadow for now due to hibernation issues */
+               adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       else
+               adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 
index fafbad3cf08d8592f13c151f2aeea88aa757e8c5..a2a4dc1844c0adc0c54ab850fa026e1f9ca62aec 100644 (file)
@@ -4846,7 +4846,7 @@ static int gfx_v10_0_sw_init(void *handle)
        case IP_VERSION(10, 3, 3):
        case IP_VERSION(10, 3, 7):
                adev->gfx.me.num_me = 1;
-               adev->gfx.me.num_pipe_per_me = 2;
+               adev->gfx.me.num_pipe_per_me = 1;
                adev->gfx.me.num_queue_per_pipe = 1;
                adev->gfx.mec.num_mec = 2;
                adev->gfx.mec.num_pipe_per_mec = 4;
index 6fd71cb10e54a0f65f3245595c465b2a4f60a2b0..f6b1bb40e5036e1dfe88d5554983f29105a6a734 100644 (file)
@@ -53,6 +53,7 @@
 #define GFX11_MEC_HPD_SIZE     2048
 
 #define RLCG_UCODE_LOADING_START_ADDRESS       0x00002000L
+#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1       0x1388
 
 #define regCGTT_WD_CLK_CTRL            0x5086
 #define regCGTT_WD_CLK_CTRL_BASE_IDX   1
@@ -130,6 +131,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
                                           bool all_hub, uint8_t dst_sel);
 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
+                                     bool enable);
 
 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -1138,6 +1141,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
        .read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
        .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
        .init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
+       .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
 };
 
 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5181,9 +5185,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
                data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
                WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
 
-               data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
-               data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
-               WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+               /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
+               if (adev->sdma.num_instances > 1) {
+                       data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+                       data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
+                       WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+               }
        } else {
                /* Program RLC_CGCG_CGLS_CTRL */
                def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5212,9 +5219,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
                data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
                WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
 
-               data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
-               data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
-               WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+               /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
+               if (adev->sdma.num_instances > 1) {
+                       data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+                       data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+                       WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+               }
        }
 }
 
@@ -5279,6 +5289,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
        .update_spm_vmid = gfx_v11_0_update_spm_vmid,
 };
 
+static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+       u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
+
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+               data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+       WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
+
+       // Program RLC_PG_DELAY3 for CGPG hysteresis
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+               switch (adev->ip_versions[GC_HWIP][0]) {
+               case IP_VERSION(11, 0, 1):
+                       WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
+static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+       gfx_v11_cntl_power_gating(adev, enable);
+
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
 static int gfx_v11_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
 {
@@ -5293,6 +5335,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
        case IP_VERSION(11, 0, 2):
                amdgpu_gfx_off_ctrl(adev, enable);
                break;
+       case IP_VERSION(11, 0, 1):
+               gfx_v11_cntl_pg(adev, enable);
+               amdgpu_gfx_off_ctrl(adev, enable);
+               break;
        default:
                break;
        }
@@ -5310,6 +5356,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
 
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
+       case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 2):
                gfx_v11_0_update_gfx_clock_gating(adev,
                                state ==  AMD_CG_STATE_GATE);
index c6e0f9313a7f79124db898639935bad6d6ae13c5..fc9c1043244cb3f1b9e507bf8c7279f7835cab5c 100644 (file)
@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 
        gfx_v9_0_tiling_mode_table_init(adev);
 
-       gfx_v9_0_setup_rb(adev);
+       if (adev->gfx.num_gfx_rings)
+               gfx_v9_0_setup_rb(adev);
        gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
        adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
 
index 9ae8cdaa033ee391cdbfea5761879ac87af71862..f513e2c2e964f0c9b3c8d8d522e96692eb55259b 100644 (file)
@@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
+       u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
@@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 
                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
-               r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+               r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        return -ETIME;
index 22761a3bb8181e076611ac4eef57eb4182a908c7..4603653916f5a551854a784c75da1817731f4bf9 100644 (file)
@@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
+       u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
@@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 
                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
-               r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+               r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        up_read(&adev->reset_domain->sem);
@@ -1624,12 +1625,15 @@ static int gmc_v9_0_sw_init(void *handle)
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
                else
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+                       adev->gmc.translate_further = adev->vm_manager.num_level > 1;
                break;
        case IP_VERSION(9, 4, 1):
                adev->num_vmhubs = 3;
 
                /* Keep the vm size same with Vega20 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+               adev->gmc.translate_further = adev->vm_manager.num_level > 1;
                break;
        default:
                break;
index 39a696cd45b5e37b6970309714e1b5e6142d9ad7..29c3484ae1f1660a43c4e668f85ba23612db5039 100644 (file)
@@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
                        0);
 }
 
+static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+                                            bool enable)
+{
+       uint32_t hdp_clk_cntl;
+       uint32_t hdp_mem_pwr_cntl;
+
+       if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+                               AMD_CG_SUPPORT_HDP_DS |
+                               AMD_CG_SUPPORT_HDP_SD)))
+               return;
+
+       hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+       hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+       /* Before doing clock/power mode switch, forced on MEM clock */
+       hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+                                    ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
+       hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+                                    RC_MEM_CLK_SOFT_OVERRIDE, 1);
+       WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+       /* disable clock and power gating before any changing */
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        ATOMIC_MEM_POWER_CTRL_EN, 0);
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        ATOMIC_MEM_POWER_LS_EN, 0);
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        ATOMIC_MEM_POWER_DS_EN, 0);
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        ATOMIC_MEM_POWER_SD_EN, 0);
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        RC_MEM_POWER_CTRL_EN, 0);
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        RC_MEM_POWER_LS_EN, 0);
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        RC_MEM_POWER_DS_EN, 0);
+       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                        RC_MEM_POWER_SD_EN, 0);
+       WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+       /* Already disabled above. The actions below are for "enabled" only */
+       if (enable) {
+               /* only one clock gating mode (LS/DS/SD) can be enabled */
+               if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+                                                        HDP_MEM_POWER_CTRL,
+                                                        ATOMIC_MEM_POWER_SD_EN, 1);
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+                                                        HDP_MEM_POWER_CTRL,
+                                                        RC_MEM_POWER_SD_EN, 1);
+               } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+                                                        HDP_MEM_POWER_CTRL,
+                                                        ATOMIC_MEM_POWER_LS_EN, 1);
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+                                                        HDP_MEM_POWER_CTRL,
+                                                        RC_MEM_POWER_LS_EN, 1);
+               } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+                                                        HDP_MEM_POWER_CTRL,
+                                                        ATOMIC_MEM_POWER_DS_EN, 1);
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+                                                        HDP_MEM_POWER_CTRL,
+                                                        RC_MEM_POWER_DS_EN, 1);
+               }
+
+               /* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
+               if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+                                     AMD_CG_SUPPORT_HDP_SD)) {
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                                        ATOMIC_MEM_POWER_CTRL_EN, 1);
+                       hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+                                                        RC_MEM_POWER_CTRL_EN, 1);
+                       WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+               }
+       }
+
+       /* disable MEM clock override after clock/power mode changing */
+       hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+                                    ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
+       hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+                                    RC_MEM_CLK_SOFT_OVERRIDE, 0);
+       WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+                                                     bool enable)
+{
+       uint32_t hdp_clk_cntl;
+
+       if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+               return;
+
+       hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+
+       if (enable) {
+               hdp_clk_cntl &=
+                       ~(uint32_t)
+                       (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+                        HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+                        HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+                        HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+                        HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+                        HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+       } else {
+               hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+                       HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+                       HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+                       HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+                       HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+                       HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+       }
+
+       WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
+                                          u64 *flags)
+{
+       uint32_t tmp;
+
+       /* AMD_CG_SUPPORT_HDP_MGCG */
+       tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+       if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+                    HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+                    HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+                    HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+                    HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+                    HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+               *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+       /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+       tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+       if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+               *flags |= AMD_CG_SUPPORT_HDP_LS;
+       else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+               *flags |= AMD_CG_SUPPORT_HDP_DS;
+       else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+               *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
+                                             bool enable)
+{
+       hdp_v5_2_update_mem_power_gating(adev, enable);
+       hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
+}
+
 const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
        .flush_hdp = hdp_v5_2_flush_hdp,
+       .update_clock_gating = hdp_v5_2_update_clock_gating,
+       .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
 };
index 92dc60a9d2094df3d7f3be42fbd8aa410ddaf340..085e613f3646d945e9d1dd668d037f1cc08dad2e 100644 (file)
@@ -727,6 +727,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
 static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
        .get_wptr = ih_v6_0_get_wptr,
        .decode_iv = amdgpu_ih_decode_iv_helper,
+       .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
        .set_rptr = ih_v6_0_set_rptr
 };
 
index 3f44a099c52a419acd877edd40c6ffff5878dccd..3e51e773f92be0efbb661edd4a6271aabb8515f4 100644 (file)
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
 
+       tmp = mmVM_L2_CNTL3_DEFAULT;
        if (adev->gmc.translate_further) {
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
index cac72ced94c852e155ac11a280aeb20a7109774e..e8058edc1d1083969381374daf0ebe11751c15c4 100644 (file)
@@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
 static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                          bool enable)
 {
-       //TODO
+       uint32_t def, data;
+
+       def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+       if (enable)
+               data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
+       else
+               data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
+
+       if (def != data)
+               WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
 }
 
 static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                         bool enable)
 {
-       //TODO
+       uint32_t def, data;
+
+       def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+       if (enable)
+               data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+       else
+               data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+       if (def != data)
+               WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
 }
 
 static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
                                        enum amd_clockgating_state state)
 {
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
                        state == AMD_CG_STATE_GATE);
        mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
 
 static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
 {
-       //TODO
+       int data;
+
+       if (amdgpu_sriov_vf(adev))
+               *flags = 0;
+
+       data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+       /* AMD_CG_SUPPORT_MC_MGCG */
+       if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
+               *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+       /* AMD_CG_SUPPORT_MC_LS */
+       if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+               *flags |= AMD_CG_SUPPORT_MC_LS;
 }
 
 const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {
index 6e0145b2b408a07dda1c485ba602a5e0961f2c4a..445cb06b9d264bf3023b18a9e353d5f251996652 100644 (file)
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
 static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+       unsigned int num_level, block_size;
        uint32_t tmp;
        int i;
 
+       num_level = adev->vm_manager.num_level;
+       block_size = adev->vm_manager.block_size;
+       if (adev->gmc.translate_further)
+               num_level -= 1;
+       else
+               block_size -= 9;
+
        for (i = 0; i <= 14; i++) {
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
                                hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
                                    ENABLE_CONTEXT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    PAGE_TABLE_DEPTH,
-                                   adev->vm_manager.num_level);
+                                   num_level);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
                                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    PAGE_TABLE_BLOCK_SIZE,
-                                   adev->vm_manager.block_size - 9);
+                                   block_size);
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
index 4b5396d3e60f668985b2487d4d946af7b1555af5..eec13cb5bf75828e45c88c7715b0afb157d7605d 100644 (file)
@@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
        u32 wptr, tmp;
        struct amdgpu_ih_regs *ih_regs;
 
-       if (ih == &adev->irq.ih) {
+       if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
                /* Only ring0 supports writeback. On other rings fall back
                 * to register-based code with overflow checking below.
+                * ih_soft ring doesn't have any backing hardware registers,
+                * update wptr and return.
                 */
                wptr = le32_to_cpu(*ih->wptr_cpu);
 
@@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
 {
        struct amdgpu_ih_regs *ih_regs;
 
+       if (ih == &adev->irq.ih_soft)
+               return;
+
        if (ih->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                *ih->rptr_cpu = ih->rptr;
index 01e8288d09a8f21e7df4152c13a2c1d4a6481613..1dc95ef21da6afbd38c966f6e221cc6d5ced551d 100644 (file)
@@ -247,6 +247,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
 
 }
 
+static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+                                                      bool enable)
+{
+       uint32_t def, data;
+
+       if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+               return;
+
+       def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+       if (enable) {
+               data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+                        BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+                        BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+                        BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+                        BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+                        BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+       } else {
+               data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+                         BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+                         BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+                         BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+                         BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+                         BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+       }
+
+       if (def != data)
+               WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
+}
+
+static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+                                                     bool enable)
+{
+       uint32_t def, data;
+
+       if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+               return;
+
+       def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+       if (enable)
+               data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+       else
+               data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+
+       if (def != data)
+               WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
+
+       def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
+       if (enable) {
+               data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+                       BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+       } else {
+               data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+                       BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+       }
+
+       if (def != data)
+               WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
+}
+
+static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
+                                           u64 *flags)
+{
+       uint32_t data;
+
+       /* AMD_CG_SUPPORT_BIF_MGCG */
+       data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+       if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+               *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+       /* AMD_CG_SUPPORT_BIF_LS */
+       data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+       if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+               *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
 const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
        .get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -262,6 +337,9 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
        .enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
        .enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
        .ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+       .update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
+       .update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
+       .get_clockgating_state = nbio_v7_7_get_clockgating_state,
        .ih_control = nbio_v7_7_ih_control,
        .init_registers = nbio_v7_7_init_registers,
 };
index a2588200ea580919786074c936b643900aa95c60..0b2ac418e4ac4f79dcc1f2fe109851104603b4fe 100644 (file)
@@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
                adev->psp.dtm_context.context.bin_desc.start_addr =
                        (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
                        le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+               if (adev->apu_flags & AMD_APU_IS_RENOIR) {
+                       adev->psp.securedisplay_context.context.bin_desc.fw_version =
+                               le32_to_cpu(ta_hdr->securedisplay.fw_version);
+                       adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+                               le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+                       adev->psp.securedisplay_context.context.bin_desc.start_addr =
+                               (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+                               le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+               }
        }
 
        return 0;
index 726a5bba40b2023694bafbc5826e4729d6ee6cfa..a75a286e1ecf37e0927ec05cdfcf7a15f7a3d582 100644 (file)
@@ -20,7 +20,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include <linux/dev_printk.h>
 #include <drm/drm_drv.h>
 #include <linux/vmalloc.h>
 #include "amdgpu.h"
index 52816de5e17bf77af187c259d592476fa7e98cd6..55284b24f113934bcf9470be8ab8c03805dbd3b8 100644 (file)
@@ -494,6 +494,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
 {
 }
 
+static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
+                                         bool enter)
+{
+       if (enter)
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
+       else
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+       if (adev->gfx.funcs->update_perfmon_mgcg)
+               adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
+
+       return 0;
+}
+
 static const struct amdgpu_asic_funcs soc21_asic_funcs =
 {
        .read_disabled_bios = &soc21_read_disabled_bios,
@@ -513,6 +527,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
        .supports_baco = &amdgpu_dpm_is_baco_supported,
        .pre_asic_init = &soc21_pre_asic_init,
        .query_video_codecs = &soc21_query_video_codecs,
+       .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
 };
 
 static int soc21_common_early_init(void *handle)
@@ -546,8 +561,10 @@ static int soc21_common_early_init(void *handle)
        case IP_VERSION(11, 0, 0):
                adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
+#if 0
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
+#endif
                        AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_REPEATER_FGCG |
                        AMD_CG_SUPPORT_GFX_FGCG |
@@ -575,7 +592,9 @@ static int soc21_common_early_init(void *handle)
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
-                       AMD_CG_SUPPORT_ATHUB_LS;
+                       AMD_CG_SUPPORT_ATHUB_LS |
+                       AMD_CG_SUPPORT_IH_CG |
+                       AMD_CG_SUPPORT_HDP_SD;
                adev->pg_flags =
                        AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
@@ -586,9 +605,25 @@ static int soc21_common_early_init(void *handle)
                break;
        case IP_VERSION(11, 0, 1):
                adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_FGCG |
+                       AMD_CG_SUPPORT_REPEATER_FGCG |
+                       AMD_CG_SUPPORT_GFX_PERF_CLK |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_ATHUB_MGCG |
+                       AMD_CG_SUPPORT_ATHUB_LS |
+                       AMD_CG_SUPPORT_IH_CG |
+                       AMD_CG_SUPPORT_BIF_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags =
+                       AMD_PG_SUPPORT_GFX_PG |
                        AMD_PG_SUPPORT_JPEG;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
@@ -683,6 +718,8 @@ static int soc21_common_set_clockgating_state(void *handle,
 
        switch (adev->ip_versions[NBIO_HWIP][0]) {
        case IP_VERSION(4, 3, 0):
+       case IP_VERSION(4, 3, 1):
+       case IP_VERSION(7, 7, 0):
                adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                adev->nbio.funcs->update_medium_grain_light_sleep(adev,
index ca14c3ef742ecd27881baa1912bd772ce038fc49..fb2d74f3044814522958adda6e6435cad0ea9617 100644 (file)
@@ -1115,7 +1115,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
  *
  * Stop VCN block with dpg mode
  */
-static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 {
        uint32_t tmp;
 
@@ -1133,7 +1133,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
        /* disable dynamic power gating mode */
        WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
                ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
-       return 0;
 }
 
 /**
@@ -1154,7 +1153,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
                fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
 
                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
-                       r = vcn_v4_0_stop_dpg_mode(adev, i);
+                       vcn_v4_0_stop_dpg_mode(adev, i);
                        continue;
                }
 
index cdd599a081258c304d880f1fc6d9ffd7294d8bc5..03b7066471f9ad251d4337350ced36882a10f582 100644 (file)
@@ -334,9 +334,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
        u32 wptr, tmp;
        struct amdgpu_ih_regs *ih_regs;
 
-       if (ih == &adev->irq.ih) {
+       if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
                /* Only ring0 supports writeback. On other rings fall back
                 * to register-based code with overflow checking below.
+                * ih_soft ring doesn't have any backing hardware registers,
+                * update wptr and return.
                 */
                wptr = le32_to_cpu(*ih->wptr_cpu);
 
@@ -409,6 +411,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
 {
        struct amdgpu_ih_regs *ih_regs;
 
+       if (ih == &adev->irq.ih_soft)
+               return;
+
        if (ih->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                *ih->rptr_cpu = ih->rptr;
index 3b4eb8285943c1c4091d54c06eea8fb6d2966d5c..2022ffbb8dba55e6522e56e689d582c87dc6e543 100644 (file)
@@ -385,9 +385,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
        u32 wptr, tmp;
        struct amdgpu_ih_regs *ih_regs;
 
-       if (ih == &adev->irq.ih) {
+       if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
                /* Only ring0 supports writeback. On other rings fall back
                 * to register-based code with overflow checking below.
+                * ih_soft ring doesn't have any backing hardware registers,
+                * update wptr and return.
                 */
                wptr = le32_to_cpu(*ih->wptr_cpu);
 
@@ -461,6 +463,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
 {
        struct amdgpu_ih_regs *ih_regs;
 
+       if (ih == &adev->irq.ih_soft)
+               return;
+
        if (ih->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                *ih->rptr_cpu = ih->rptr;
index 2b3d8bc8f0aaeb2e243c43f75f40675fb3f039bb..dc774ddf34456461a0818c4cb0955efadfdc566c 100644 (file)
@@ -874,7 +874,7 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
        err = kfd_wait_on_events(p, args->num_events,
                        (void __user *)args->events_ptr,
                        (args->wait_for_all != 0),
-                       args->timeout, &args->wait_result);
+                       &args->timeout, &args->wait_result);
 
        return err;
 }
index f5853835f03a23c0f6857c3c037a41218a0ec9d8..22c0929d410b6ba5ce50f6e297c22d95b01168c1 100644 (file)
@@ -102,13 +102,18 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
 
        switch (sdma_version) {
        case IP_VERSION(6, 0, 0):
-       case IP_VERSION(6, 0, 1):
        case IP_VERSION(6, 0, 2):
                /* Reserve 1 for paging and 1 for gfx */
                kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
                /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
                kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
                break;
+       case IP_VERSION(6, 0, 1):
+               /* Reserve 1 for paging and 1 for gfx */
+               kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
+               /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
+               kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+               break;
        default:
                break;
        }
@@ -377,12 +382,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
                                f2g = &gfx_v10_3_kfd2kgd;
                        break;
                case IP_VERSION(10, 3, 6):
-                       gfx_target_version = 100306;
-                       if (!vf)
-                               f2g = &gfx_v10_3_kfd2kgd;
-                       break;
                case IP_VERSION(10, 3, 7):
-                       gfx_target_version = 100307;
+                       gfx_target_version = 100306;
                        if (!vf)
                                f2g = &gfx_v10_3_kfd2kgd;
                        break;
index 3942a56c28bbbcce5a2f90e86b522ac7db4ce1b2..83e3ce9f604911b554f5e1a600e1dee49db02b3a 100644 (file)
@@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
        return msecs_to_jiffies(user_timeout_ms) + 1;
 }
 
-static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
+                        bool undo_auto_reset)
 {
        uint32_t i;
 
@@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
                        spin_lock(&waiters[i].event->lock);
                        remove_wait_queue(&waiters[i].event->wq,
                                          &waiters[i].wait);
+                       if (undo_auto_reset && waiters[i].activated &&
+                           waiters[i].event && waiters[i].event->auto_reset)
+                               set_event(waiters[i].event);
                        spin_unlock(&waiters[i].event->lock);
                }
 
@@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
 
 int kfd_wait_on_events(struct kfd_process *p,
                       uint32_t num_events, void __user *data,
-                      bool all, uint32_t user_timeout_ms,
+                      bool all, uint32_t *user_timeout_ms,
                       uint32_t *wait_result)
 {
        struct kfd_event_data __user *events =
@@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
        int ret = 0;
 
        struct kfd_event_waiter *event_waiters = NULL;
-       long timeout = user_timeout_to_jiffies(user_timeout_ms);
+       long timeout = user_timeout_to_jiffies(*user_timeout_ms);
 
        event_waiters = alloc_event_waiters(num_events);
        if (!event_waiters) {
@@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
                }
 
                if (signal_pending(current)) {
-                       /*
-                        * This is wrong when a nonzero, non-infinite timeout
-                        * is specified. We need to use
-                        * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
-                        * contains a union with data for each user and it's
-                        * in generic kernel code that I don't want to
-                        * touch yet.
-                        */
                        ret = -ERESTARTSYS;
+                       if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
+                           *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
+                               *user_timeout_ms = jiffies_to_msecs(
+                                       max(0l, timeout-1));
                        break;
                }
 
@@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
                                               event_waiters, events);
 
 out_unlock:
-       free_waiters(num_events, event_waiters);
+       free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
        mutex_unlock(&p->event_mutex);
 out:
        if (ret)
index d03a3b9c9c5d66cb532f4576e40c9ad7d1d5088b..bf610e3b683bbaf23212de15103c5c68e744b09b 100644 (file)
@@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
 int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
 int kfd_wait_on_events(struct kfd_process *p,
                       uint32_t num_events, void __user *data,
-                      bool all, uint32_t user_timeout_ms,
+                      bool all, uint32_t *user_timeout_ms,
                       uint32_t *wait_result);
 void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
                                uint32_t valid_id_bits);
index a67ba8879a56730226cc0ebbdd21a35cce8d68ba..11074cc8c333b274484929dddb0752725e4af24b 100644 (file)
@@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
                kfree(svm_bo);
                return -ESRCH;
        }
-       svm_bo->svms = prange->svms;
        svm_bo->eviction_fence =
                amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
                                           mm,
@@ -3273,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
 {
        struct svm_range_bo *svm_bo;
-       struct kfd_process *p;
        struct mm_struct *mm;
        int r = 0;
 
@@ -3281,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
        if (!svm_bo_ref_unless_zero(svm_bo))
                return; /* svm_bo was freed while eviction was pending */
 
-       /* svm_range_bo_release destroys this worker thread. So during
-        * the lifetime of this thread, kfd_process and mm will be valid.
-        */
-       p = container_of(svm_bo->svms, struct kfd_process, svms);
-       mm = p->mm;
-       if (!mm)
+       if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+               mm = svm_bo->eviction_fence->mm;
+       } else {
+               svm_range_bo_unref(svm_bo);
                return;
+       }
 
        mmap_read_lock(mm);
        spin_lock(&svm_bo->list_lock);
@@ -3305,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
 
                mutex_lock(&prange->migrate_mutex);
                do {
-                       r = svm_migrate_vram_to_ram(prange,
-                                               svm_bo->eviction_fence->mm,
+                       r = svm_migrate_vram_to_ram(prange, mm,
                                                KFD_MIGRATE_TRIGGER_TTM_EVICTION);
                } while (!r && prange->actual_loc && --retries);
 
@@ -3324,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
        }
        spin_unlock(&svm_bo->list_lock);
        mmap_read_unlock(mm);
+       mmput(mm);
 
        dma_fence_signal(&svm_bo->eviction_fence->base);
 
index 9156b041ef17519db0094cbc05d8868fda0885e1..cfac13ad06ef0f70e2983bceeb85b59c0427e2fb 100644 (file)
@@ -46,7 +46,6 @@ struct svm_range_bo {
        spinlock_t                      list_lock;
        struct amdgpu_amdkfd_fence      *eviction_fence;
        struct work_struct              eviction_work;
-       struct svm_range_list           *svms;
        uint32_t                        evicting;
        struct work_struct              release_work;
 };
index 25990bec600d08fde12f59d96b16a6cdfb394069..3f0a4a415907d425b113f251684822f90ed4c495 100644 (file)
@@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
 
 static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
 {
+       struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
        struct kfd_iolink_properties *props = NULL, *props2 = NULL;
-       struct kfd_iolink_properties *gpu_link, *cpu_link;
        struct kfd_topology_device *cpu_dev;
        int ret = 0;
        int i, num_cpu;
@@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
                        continue;
 
                /* find CPU <-->  CPU links */
+               cpu_link = NULL;
                cpu_dev = kfd_topology_device_by_proximity_domain(i);
                if (cpu_dev) {
-                       list_for_each_entry(cpu_link,
+                       list_for_each_entry(tmp_link,
                                        &cpu_dev->io_link_props, list) {
-                               if (cpu_link->node_to == gpu_link->node_to)
+                               if (tmp_link->node_to == gpu_link->node_to) {
+                                       cpu_link = tmp_link;
                                        break;
+                               }
                        }
                }
 
-               if (cpu_link->node_to != gpu_link->node_to)
+               if (!cpu_link)
                        return -ENOMEM;
 
                /* CPU <--> CPU <--> GPU, GPU node*/
index 8660d93cc40551add54b57a549421640940b5036..5140d9c2bf3b40b689134fa9017cf9953aedecd6 100644 (file)
@@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       /* disable prefer shadow for now due to hibernation issues */
-       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       if (adev->asic_type == CHIP_HAWAII)
+               /* disable prefer shadow for now due to hibernation issues */
+               adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+       else
+               adev_to_drm(adev)->mode_config.prefer_shadow = 1;
        /* indicates support for immediate flip */
        adev_to_drm(adev)->mode_config.async_page_flip = true;
 
@@ -4135,6 +4138,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
        }
 }
 
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
 
 /*
  * In this architecture, the association
@@ -4326,6 +4330,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                                        adev_to_drm(adev)->vblank_disable_immediate = false;
                        }
                }
+               amdgpu_set_panel_orientation(&aconnector->base);
        }
 
        /* Software is initialized. Now we can register interrupt handlers. */
@@ -6684,6 +6689,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
            connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
                return;
 
+       mutex_lock(&connector->dev->mode_config.mutex);
+       amdgpu_dm_connector_get_modes(connector);
+       mutex_unlock(&connector->dev->mode_config.mutex);
+
        encoder = amdgpu_dm_connector_to_encoder(connector);
        if (!encoder)
                return;
@@ -6728,8 +6737,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                 * restored here.
                 */
                amdgpu_dm_update_freesync_caps(connector, edid);
-
-               amdgpu_set_panel_orientation(connector);
        } else {
                amdgpu_dm_connector->num_modes = 0;
        }
index b841b8b0a9d82074a3699f523ba8013a1277631c..987bde4dca3db7cc4f3f387c298313731c08f158 100644 (file)
@@ -34,6 +34,7 @@
 #include "dal_asic_id.h"
 #include "amdgpu_display.h"
 #include "amdgpu_dm_trace.h"
+#include "amdgpu_dm_plane.h"
 #include "gc/gc_11_0_0_offset.h"
 #include "gc/gc_11_0_0_sh_mask.h"
 
@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
        *size += 1;
 }
 
-bool modifier_has_dcc(uint64_t modifier)
+static bool modifier_has_dcc(uint64_t modifier)
 {
        return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
 }
 
-unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
 {
        if (modifier == DRM_FORMAT_MOD_LINEAR)
                return 0;
@@ -660,7 +661,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
                        add_gfx10_1_modifiers(adev, mods, &size, &capacity);
                break;
        case AMDGPU_FAMILY_GC_11_0_0:
-       case AMDGPU_FAMILY_GC_11_0_2:
+       case AMDGPU_FAMILY_GC_11_0_1:
                add_gfx11_modifiers(adev, mods, &size, &capacity);
                break;
        }
@@ -1412,7 +1413,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
                }
                break;
        case AMDGPU_FAMILY_GC_11_0_0:
-       case AMDGPU_FAMILY_GC_11_0_2:
+       case AMDGPU_FAMILY_GC_11_0_1:
                switch (AMD_FMT_MOD_GET(TILE, modifier)) {
                case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
                case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
index 95168c2cfa6fa8ea510341cb68323fd173ac9f1f..286981a2dd403b547aa95179f183fdb5c99c7349 100644 (file)
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
                         const struct drm_plane_state *state,
                         struct dc_scaling_info *scaling_info);
 
-void get_min_max_dc_plane_scaling(struct drm_device *dev,
-                                 struct drm_framebuffer *fb,
-                                 int *min_downscale, int *max_upscale);
-
 int dm_plane_helper_check_state(struct drm_plane_state *state,
                                struct drm_crtc_state *new_crtc_state);
 
-bool modifier_has_dcc(uint64_t modifier);
-
-unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);
-
 int fill_plane_buffer_attributes(struct amdgpu_device *adev,
                                 const struct amdgpu_framebuffer *afb,
                                 const enum surface_pixel_format format,
index 6767fab55c260d4869095c3baaf9517c5b831dd3..352e9afb85c6d67354eb47204b36c8dbc4e9da83 100644 (file)
@@ -100,3 +100,24 @@ void convert_float_matrix(
                matrix[i] = (uint16_t)reg_value;
        }
 }
+
+static uint32_t find_gcd(uint32_t a, uint32_t b)
+{
+       uint32_t remainder = 0;
+       while (b != 0) {
+               remainder = a % b;
+               a = b;
+               b = remainder;
+       }
+       return a;
+}
+
+void reduce_fraction(uint32_t num, uint32_t den,
+               uint32_t *out_num, uint32_t *out_den)
+{
+       uint32_t gcd = 0;
+
+       gcd = find_gcd(num, den);
+       *out_num = num / gcd;
+       *out_den = den / gcd;
+}
index ade785c4fdc7dc1fbd347d9dbb22bb9bb8336464..81da4e6f7a1acb074c02d719b068661b1c06db81 100644 (file)
@@ -38,6 +38,9 @@ void convert_float_matrix(
        struct fixed31_32 *flt,
        uint32_t buffer_size);
 
+void reduce_fraction(uint32_t num, uint32_t den,
+               uint32_t *out_num, uint32_t *out_den);
+
 static inline unsigned int log_2(unsigned int num)
 {
        return ilog2(num);
index 4c76091fd1f21af087cfb75532678d51ca7ce719..f276abb63bcd7ce4ea2a0e3d7e6beea1581b9d98 100644 (file)
@@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
            break;
        }
 
-       case AMDGPU_FAMILY_GC_11_0_2: {
+       case AMDGPU_FAMILY_GC_11_0_1: {
                struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
 
                if (clk_mgr == NULL) {
@@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
                dcn32_clk_mgr_destroy(clk_mgr);
                break;
 
-       case AMDGPU_FAMILY_GC_11_0_2:
+       case AMDGPU_FAMILY_GC_11_0_1:
                dcn314_clk_mgr_destroy(clk_mgr);
                break;
 
index 0202dc682682b1cd7975b27e64dd550a6dd44a6e..ca6dfd2d7561fab9378ddfa7a0fd3781e7872df9 100644 (file)
  */
 
 #include "dccg.h"
-#include "clk_mgr_internal.h"
+#include "rn_clk_mgr.h"
 
 #include "dcn20/dcn20_clk_mgr.h"
-#include "rn_clk_mgr.h"
 #include "dml/dcn20/dcn20_fpu.h"
 
 #include "dce100/dce_clk_mgr.h"
index 2e088c5171b28b89f51e49d1660c42752566fea8..f1319957e400af37a0450c17821519006f5899a7 100644 (file)
@@ -28,6 +28,7 @@
 
 #include "clk_mgr.h"
 #include "dm_pp_smu.h"
+#include "clk_mgr_internal.h"
 
 extern struct wm_table ddr4_wm_table_gs;
 extern struct wm_table lpddr4_wm_table_gs;
index ee99974b3b62bb3aea7a43b10e6009e0717de67d..beb025cd3dc29671a5917a5d0f01ef2c46410d63 100644 (file)
@@ -307,16 +307,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
        dcn314_smu_enable_pme_wa(clk_mgr);
 }
 
-void dcn314_init_clocks(struct clk_mgr *clk_mgr)
-{
-       memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
-       // Assumption is that boot state always supports pstate
-       clk_mgr->clks.p_state_change_support = true;
-       clk_mgr->clks.prev_p_state_change_support = true;
-       clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
-       clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
-
 bool dcn314_are_clock_states_equal(struct dc_clocks *a,
                struct dc_clocks *b)
 {
@@ -425,7 +415,7 @@ static struct wm_table lpddr5_wm_table = {
        }
 };
 
-static DpmClocks_t dummy_clocks;
+static DpmClocks314_t dummy_clocks;
 
 static struct dcn314_watermarks dummy_wms = { 0 };
 
@@ -510,7 +500,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
 static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
                struct dcn314_smu_dpm_clks *smu_dpm_clks)
 {
-       DpmClocks_t *table = smu_dpm_clks->dpm_clks;
+       DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
 
        if (!clk_mgr->smu_ver)
                return;
@@ -527,6 +517,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
        dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
 }
 
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+       return clock_value > 1 && clock_value < 100000;
+}
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+       switch (wck_ratio) {
+       case WCK_RATIO_1_2:
+               return 2;
+
+       case WCK_RATIO_1_4:
+               return 4;
+
+       default:
+               break;
+       }
+       return 1;
+}
+
 static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
 {
        uint32_t max = 0;
@@ -540,89 +550,127 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
        return max;
 }
 
-static unsigned int find_clk_for_voltage(
-               const DpmClocks_t *clock_table,
-               const uint32_t clocks[],
-               unsigned int voltage)
-{
-       int i;
-       int max_voltage = 0;
-       int clock = 0;
-
-       for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
-               if (clock_table->SocVoltage[i] == voltage) {
-                       return clocks[i];
-               } else if (clock_table->SocVoltage[i] >= max_voltage &&
-                               clock_table->SocVoltage[i] < voltage) {
-                       max_voltage = clock_table->SocVoltage[i];
-                       clock = clocks[i];
-               }
-       }
-
-       ASSERT(clock);
-       return clock;
-}
-
 static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
                                                    struct integrated_info *bios_info,
-                                                   const DpmClocks_t *clock_table)
+                                                   const DpmClocks314_t *clock_table)
 {
-       int i, j;
        struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
-       uint32_t max_dispclk = 0, max_dppclk = 0;
-
-       j = -1;
-
-       ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
-       /* Find lowest DPM, FCLK is filled in reverse order*/
+       struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+       uint32_t max_pstate = 0,  max_fclk = 0,  min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+       int i;
 
-       for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
-               if (clock_table->DfPstateTable[i].FClk != 0) {
-                       j = i;
-                       break;
+       /* Find highest valid fclk pstate */
+       for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+               if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
+                   clock_table->DfPstateTable[i].FClk > max_fclk) {
+                       max_fclk = clock_table->DfPstateTable[i].FClk;
+                       max_pstate = i;
                }
        }
 
-       if (j == -1) {
-               /* clock table is all 0s, just use our own hardcode */
-               ASSERT(0);
-               return;
-       }
-
-       bw_params->clk_table.num_entries = j + 1;
+       /* We expect the table to contain at least one valid fclk entry. */
+       ASSERT(is_valid_clock_value(max_fclk));
 
-       /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+       /* Dispclk and dppclk can be max at any voltage, same number of levels for both */
        if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
            clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
                max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
                max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
        } else {
+               /* Invalid number of entries in the table from PMFW. */
                ASSERT(0);
        }
 
-       for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
-               bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
-               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
-               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
-               switch (clock_table->DfPstateTable[j].WckRatio) {
-               case WCK_RATIO_1_2:
-                       bw_params->clk_table.entries[i].wck_ratio = 2;
-                       break;
-               case WCK_RATIO_1_4:
-                       bw_params->clk_table.entries[i].wck_ratio = 4;
-                       break;
-               default:
-                       bw_params->clk_table.entries[i].wck_ratio = 1;
+       /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+       for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+               uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
+               int j;
+
+               for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+                       if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
+                           clock_table->DfPstateTable[j].FClk < min_fclk &&
+                           clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
+                               min_fclk = clock_table->DfPstateTable[j].FClk;
+                               min_pstate = j;
+                       }
                }
-               bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
-               bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
+
+               /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+               for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+                       if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+                               break;
+
+               bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+               bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+               bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+               /* Now update clocks we do read */
+               bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+               bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+               bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
                bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
                bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+               bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+                       clock_table->DfPstateTable[min_pstate].WckRatio);
+       };
+
+       /* Make sure to include at least one entry at highest pstate */
+       if (max_pstate != min_pstate || i == 0) {
+               if (i > MAX_NUM_DPM_LVL - 1)
+                       i = MAX_NUM_DPM_LVL - 1;
+
+               bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+               bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+               bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+               bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+               bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+               bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+                       clock_table->DfPstateTable[max_pstate].WckRatio);
+               i++;
        }
+       bw_params->clk_table.num_entries = i--;
+
+       /* Make sure all highest clocks are included*/
+       bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+       bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+       bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+       ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+       bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+       bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+       bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
 
+       /*
+        * Set any 0 clocks to max default setting. Not an issue for
+        * power since we aren't doing switching in such case anyway
+        */
+       for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+               if (!bw_params->clk_table.entries[i].fclk_mhz) {
+                       bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+                       bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+                       bw_params->clk_table.entries[i].voltage = def_max.voltage;
+               }
+               if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+                       bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+               if (!bw_params->clk_table.entries[i].socclk_mhz)
+                       bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+               if (!bw_params->clk_table.entries[i].dispclk_mhz)
+                       bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+               if (!bw_params->clk_table.entries[i].dppclk_mhz)
+                       bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+               if (!bw_params->clk_table.entries[i].phyclk_mhz)
+                       bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+               if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+                       bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+               if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+                       bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+       }
+       ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
        bw_params->vram_type = bios_info->memory_type;
-       bw_params->num_channels = bios_info->ma_channel_number;
+       bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
 
        for (i = 0; i < WM_SET_COUNT; i++) {
                bw_params->wm_table.entries[i].wm_inst = i;
@@ -641,7 +689,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
        .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
        .update_clocks = dcn314_update_clocks,
-       .init_clocks = dcn314_init_clocks,
+       .init_clocks = dcn31_init_clocks,
        .enable_pme_wa = dcn314_enable_pme_wa,
        .are_clock_states_equal = dcn314_are_clock_states_equal,
        .notify_wm_ranges = dcn314_notify_wm_ranges
@@ -681,10 +729,10 @@ void dcn314_clk_mgr_construct(
        }
        ASSERT(clk_mgr->smu_wm_set.wm_set);
 
-       smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
+       smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
                                clk_mgr->base.base.ctx,
                                DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
-                               sizeof(DpmClocks_t),
+                               sizeof(DpmClocks314_t),
                                &smu_dpm_clks.mc_address.quad_part);
 
        if (smu_dpm_clks.dpm_clks == NULL) {
@@ -729,7 +777,7 @@ void dcn314_clk_mgr_construct(
        if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
                dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
 
-               if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+               if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
                        dcn314_clk_mgr_helper_populate_bw_params(
                                        &clk_mgr->base,
                                        ctx->dc_bios->integrated_info,
index c695a4498c50fc51b6e7f52d755b6a002c5509c7..171f84340eb2fb1d532776ac348cc1fbfad858f5 100644 (file)
@@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
 
 bool dcn314_are_clock_states_equal(struct dc_clocks *a,
                struct dc_clocks *b);
-void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
 void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
                        struct dc_state *context,
                        bool safe_to_lower);
index a7958dc965810bb96c09830937aad95171aca2b8..047d19ea919c78ff84386c43fbfeeb77dfb82c28 100644 (file)
@@ -36,6 +36,37 @@ typedef enum {
        WCK_RATIO_MAX
 } WCK_RATIO_e;
 
+typedef struct {
+  uint32_t FClk;
+  uint32_t MemClk;
+  uint32_t Voltage;
+  uint8_t  WckRatio;
+  uint8_t  Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+  uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+  uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+  uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+  uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+  uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+  uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+  uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+  DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+  uint8_t  NumDcfClkLevelsEnabled;
+  uint8_t  NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+  uint8_t  NumSocClkLevelsEnabled;
+  uint8_t  VcnClkLevelsEnabled;     //Applies to both Vclk and Dclk
+  uint8_t  NumDfPstatesEnabled;
+  uint8_t  spare[3];
+
+  uint32_t MinGfxClk;
+  uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
 struct dcn314_watermarks {
        // Watermarks
        WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
 };
 
 struct dcn314_smu_dpm_clks {
-       DpmClocks_t *dpm_clks;
+       DpmClocks314_t *dpm_clks;
        union large_integer mc_address;
 };
 
index e42f44fc1c08d50bffe551703c1a75df2d62d958..aeecca68dea73b25b64c7de1d920444381a7e1ec 100644 (file)
@@ -1074,8 +1074,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
-               bool pipe_split_change =
-                       context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+               bool pipe_split_change = false;
+
+               if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+                       (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+                       pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+                               dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+               else
+                       pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+                               dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
 
                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
@@ -3229,7 +3236,7 @@ static void commit_planes_for_stream(struct dc *dc,
                                odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
        }
 
-       if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+       if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
                if (top_pipe_to_program &&
                        top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
                        if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3254,6 @@ static void commit_planes_for_stream(struct dc *dc,
                                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
                                                top_pipe_to_program->stream_res.tg);
                }
-       }
 
        if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
                if (dc->hwss.subvp_pipe_control_lock)
@@ -3466,7 +3472,7 @@ static void commit_planes_for_stream(struct dc *dc,
                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
        }
 
-       if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+       if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
                        top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
                                top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3499,19 @@ static void commit_planes_for_stream(struct dc *dc,
                                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
                                        top_pipe_to_program->stream_res.tg);
                }
-       }
 
-       if (update_type != UPDATE_TYPE_FAST) {
+       if (update_type != UPDATE_TYPE_FAST)
                dc->hwss.post_unlock_program_front_end(dc, context);
 
-               /* Since phantom pipe programming is moved to post_unlock_program_front_end,
-                * move the SubVP lock to after the phantom pipes have been setup
-                */
-               if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
-                       if (dc->hwss.subvp_pipe_control_lock)
-                               dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
-               } else {
-                       if (dc->hwss.subvp_pipe_control_lock)
-                               dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
-               }
+       /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+        * move the SubVP lock to after the phantom pipes have been setup
+        */
+       if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+               if (dc->hwss.subvp_pipe_control_lock)
+                       dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+       } else {
+               if (dc->hwss.subvp_pipe_control_lock)
+                       dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
        }
 
        // Fire manual trigger only when bottom plane is flipped
@@ -4292,7 +4296,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
            !dc->debug.dpia_debug.bits.disable_dpia)
                return true;
 
-       if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+       if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
            !dc->debug.dpia_debug.bits.disable_dpia)
                return true;
 
@@ -4340,6 +4344,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
        struct dc_context *dc_ctx = dc->ctx;
 
        dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+       DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
 }
 
 /**
index 9e51338441d079db5fb0cfba982405fb2f4f4080..66d2ae7aacf5eee8888d319c5b3b072a4a1ad938 100644 (file)
@@ -3372,7 +3372,7 @@ bool dc_link_setup_psr(struct dc_link *link,
                switch(link->ctx->asic_id.chip_family) {
                case FAMILY_YELLOW_CARP:
                case AMDGPU_FAMILY_GC_10_3_6:
-               case AMDGPU_FAMILY_GC_11_0_2:
+               case AMDGPU_FAMILY_GC_11_0_1:
                        if(!dc->debug.disable_z10)
                                psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
                        break;
index ffc0f1c0ea93b524c6b62bc5399946942c3331fa..7dbab15bfa68fc0d8cd1ccfc1e242d7901803959 100644 (file)
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
                if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
                        dc_version = DCN_VERSION_3_21;
                break;
-       case AMDGPU_FAMILY_GC_11_0_2:
+       case AMDGPU_FAMILY_GC_11_0_1:
                dc_version = DCN_VERSION_3_14;
                break;
        default:
index 8e1e40083ec8372113d0a8a79e5137a66205a874..5908b60db313964c9a888b470926a5cc1d478a4c 100644 (file)
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.196"
+#define DC_VER "3.2.198"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -213,6 +213,7 @@ struct dc_caps {
        uint32_t cache_num_ways;
        uint16_t subvp_fw_processing_delay_us;
        uint16_t subvp_prefetch_end_to_mall_start_us;
+       uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
        uint16_t subvp_pstate_allow_width_us;
        uint16_t subvp_vertical_int_margin_us;
        bool seamless_odm;
@@ -352,6 +353,7 @@ struct dc_config {
        bool use_pipe_ctx_sync_logic;
        bool ignore_dpref_ss;
        bool enable_mipi_converter_optimization;
+       bool use_default_clock_table;
 };
 
 enum visual_confirm {
@@ -609,6 +611,7 @@ struct dc_bounding_box_overrides {
        int percent_of_ideal_drambw;
        int dram_clock_change_latency_ns;
        int dummy_clock_change_latency_ns;
+       int fclk_clock_change_latency_ns;
        /* This forces a hard min on the DCFCLK we use
         * for DML.  Unlike the debug option for forcing
         * DCFCLK, this override affects watermark calculations
@@ -751,6 +754,7 @@ struct dc_debug_options {
        uint32_t mst_start_top_delay;
        uint8_t psr_power_use_phy_fsm;
        enum dml_hostvm_override_opts dml_hostvm_override;
+       bool dml_disallow_alternate_prefetch_modes;
        bool use_legacy_soc_bb_mechanism;
        bool exit_idle_opt_for_cursor_updates;
        bool enable_single_display_2to1_odm_policy;
index 2d61c2a91cee269642bf7500b15bb9731518bd9e..09b304507badb6bc5ec6b800e53a771888edeb61 100644 (file)
@@ -29,6 +29,7 @@
 #include "dm_helpers.h"
 #include "dc_hw_types.h"
 #include "core_types.h"
+#include "../basics/conversion.h"
 
 #define CTX dc_dmub_srv->ctx
 #define DC_LOGGER CTX->logger
@@ -275,8 +276,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
        union dmub_rb_cmd cmd = { 0 };
 
        cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
-       // TODO: Uncomment once FW headers are promoted
-       //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
+       cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
        cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
 
        cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
@@ -601,6 +601,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
                        &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
        struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
        struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+       uint32_t out_num, out_den;
 
        pipe_data->mode = SUBVP;
        pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -612,6 +613,16 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
                        main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
        pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
        pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+       pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
+
+       /* Calculate the scaling factor from the src and dst height.
+        * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2.
+        * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
+        */
+       reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, &out_num, &out_den);
+       // TODO: Uncomment below lines once DMCUB include headers are promoted
+       //pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+       //pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
 
        // Prefetch lines is equal to VACTIVE + BP + VSYNC
        pipe_data->pipe_config.subvp_data.prefetch_lines =
index a0af0f6afeef858fcbc74085708c608b4e2026e9..9544abf75e846eab97013154b00e8437be8a399f 100644 (file)
@@ -344,6 +344,7 @@ enum dc_detect_reason {
        DETECT_REASON_HPDRX,
        DETECT_REASON_FALLBACK,
        DETECT_REASON_RETRAIN,
+       DETECT_REASON_TDR,
 };
 
 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
index 213de8cabfadb39e48a36037ab89e3cde03afdbc..165392380842adbfb8d01f5bb9dbf41c7d551eba 100644 (file)
@@ -543,9 +543,11 @@ static void dce112_get_pix_clk_dividers_helper (
                switch (pix_clk_params->color_depth) {
                case COLOR_DEPTH_101010:
                        actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+                       actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
                        break;
                case COLOR_DEPTH_121212:
                        actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+                       actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
                        break;
                case COLOR_DEPTH_161616:
                        actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
index d4a6504dfe0004865a37b331ff113b13e5a7bc17..db7ca4b0cdb9dd4165f9b8d1d53e9be2608ccc89 100644 (file)
@@ -361,8 +361,6 @@ void dpp1_cnv_setup (
                select = INPUT_CSC_SELECT_ICSC;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               pixel_format = 22;
-               break;
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                pixel_format = 26; /* ARGB16161616_UNORM */
                break;
index b54c1240032377b6036347af961044b2e3d44935..564e061ccb589da01bf3e31bd8896521102712c1 100644 (file)
@@ -278,9 +278,6 @@ void hubp1_program_pixel_format(
                                SURFACE_PIXEL_FORMAT, 10);
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               REG_UPDATE(DCSURF_SURFACE_CONFIG,
-                               SURFACE_PIXEL_FORMAT, 22);
-               break;
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
                REG_UPDATE(DCSURF_SURFACE_CONFIG,
                                SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
index bed783747f169b522e7ff561204b8bf4d32cd4e9..5b5d952b2b8cd72d3c8c143d12c3cc6822211354 100644 (file)
@@ -110,6 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
                 */
                if (pipe_ctx->top_pipe ||
                    !pipe_ctx->stream ||
+                   !pipe_ctx->plane_state ||
                    !tg->funcs->is_tg_enabled(tg))
                        continue;
 
index 769974375b4b350f33ca19649aa89eb2161ba3a8..8e9384094f6d6b3d8482983baa3440c7d119d49f 100644 (file)
@@ -131,6 +131,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
        while (tmp_mpcc != NULL) {
                if (tmp_mpcc->dpp_id == dpp_id)
                        return tmp_mpcc;
+
+               /* avoid circular linked list */
+               ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+               if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+                       break;
+
                tmp_mpcc = tmp_mpcc->mpcc_bot;
        }
        return NULL;
index e1a9a45b03b65e32eb824e75cfb761e457953888..3fc300cd1ce9516ab21aa630161e8d5e6e4b7e95 100644 (file)
@@ -465,6 +465,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
                                OTG_CLOCK_ON, 1,
                                1, 1000);
        } else  {
+
+               //last chance to clear underflow, otherwise, it will always there due to clock is off.
+               if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+                       optc->funcs->clear_optc_underflow(optc);
+
                REG_UPDATE_2(OTG_CLOCK_CONTROL,
                                OTG_CLOCK_GATE_DIS, 0,
                                OTG_CLOCK_EN, 0);
index ea1f14af0db7565fdef5f057fcfdeaf26192d026..eaa7032f0f1a3c11f71e99d5dfc1526f8861eb94 100644 (file)
@@ -166,8 +166,6 @@ static void dpp2_cnv_setup (
                select = DCN2_ICSC_SELECT_ICSC_A;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               pixel_format = 22;
-               break;
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                pixel_format = 26; /* ARGB16161616_UNORM */
                break;
index 936af65381ef725fc433ad36df87be5626895a5e..9570c2118ccc73ae4ce3ffc32f7064c31cfa49a1 100644 (file)
@@ -463,9 +463,6 @@ void hubp2_program_pixel_format(
                                SURFACE_PIXEL_FORMAT, 10);
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               REG_UPDATE(DCSURF_SURFACE_CONFIG,
-                               SURFACE_PIXEL_FORMAT, 22);
-               break;
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
                REG_UPDATE(DCSURF_SURFACE_CONFIG,
                                SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
index 3d307dd58e9af70877f296db247b3787b3ce9676..116f67a0b989deb45eca4f1114b509df8dcc6371 100644 (file)
@@ -531,6 +531,12 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
        while (tmp_mpcc != NULL) {
                if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
                        return tmp_mpcc;
+
+               /* avoid circular linked list */
+               ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+               if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+                       break;
+
                tmp_mpcc = tmp_mpcc->mpcc_bot;
        }
        return NULL;
index c5e200d09038fba2cf7cfc1eb3ceba438ed33fa9..5752271f22dfedda223a7feabdbaeb0b37047505 100644 (file)
@@ -67,9 +67,15 @@ static uint32_t convert_and_clamp(
 void dcn21_dchvm_init(struct hubbub *hubbub)
 {
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
-       uint32_t riommu_active;
+       uint32_t riommu_active, prefetch_done;
        int i;
 
+       REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
+
+       if (prefetch_done) {
+               hubbub->riommu_active = true;
+               return;
+       }
        //Init DCHVM block
        REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
 
index 77b00f86c2165d0d30fc12cb1f12eff7cc8ea89c..4a668d6563dfd6aff3dd329d0bcb27f36bf6c9a3 100644 (file)
@@ -244,8 +244,6 @@ void dpp3_cnv_setup (
                select = INPUT_CSC_SELECT_ICSC;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               pixel_format = 22;
-               break;
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                pixel_format = 26; /* ARGB16161616_UNORM */
                break;
index 6a4dcafb9bba5c3c9f0cb87a2b001c1edcd9c010..dc3e8df706b347a435c77165271c30c5119d101c 100644 (file)
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
                        VMID, address->vmid);
 
        if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
-               REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+               REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
                REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
 
        } else {
index 0a67f8a5656decee3fdff57a9d9ad446a8874424..d97076648acba46ea4c53ddb170a59ff43601d7d 100644 (file)
@@ -372,7 +372,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
        int afmt_inst;
 
        /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
-       if (eng_id <= ENGINE_ID_DIGE) {
+       if (eng_id <= ENGINE_ID_DIGB) {
                vpg_inst = eng_id;
                afmt_inst = eng_id;
        } else
index 7c77c71591a08219341a3134b94f0f7606ceed94..82c3b3ac1f0d01459e18cbb27218542c4283f19e 100644 (file)
        SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
        SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
        SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
-       SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+       SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+       SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
 
 
 #define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
index 468a893ff7854e451c1588c8b579ceabc0914269..aedff18aff563328b9791d55c984dd4ea5609ce0 100644 (file)
@@ -2153,7 +2153,7 @@ static bool dcn31_resource_construct(
                pool->base.usb4_dpia_count = 4;
        }
 
-       if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2)
+       if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
                pool->base.usb4_dpia_count = 4;
 
        /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
index 41f8ec99da6b386800e5bc1845e13d0c9796f1c2..901436591ed45c29556c2f447ac22c0632128519 100644 (file)
@@ -32,7 +32,6 @@
        container_of(pool, struct dcn31_resource_pool, base)
 
 extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
 
 struct dcn31_resource_pool {
        struct resource_pool base;
index e3b5a95e03b19ace16176db0a5a9269e31ba0a12..702c28c2560eb2d7e73c8cec30aee7b14c256434 100644 (file)
 DCN314 = dcn314_resource.o dcn314_hwseq.o dcn314_init.o \
                dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
 
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -msse2
-endif
-endif
-
 AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
index 755c715ad8dceebaf61bdf6c03f2107d59cdcd1f..39931d48f3851cebb7c18a9a73f9ba05f6b00ae1 100644 (file)
@@ -343,7 +343,10 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
 {
        struct dc_stream_state *stream = pipe_ctx->stream;
        unsigned int odm_combine_factor = 0;
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       bool two_pix_per_container = false;
 
+       two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
        odm_combine_factor = get_odm_config(pipe_ctx, NULL);
 
        if (is_dp_128b_132b_signal(pipe_ctx)) {
@@ -355,16 +358,13 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
                else
                        *k2_div = PIXEL_RATE_DIV_BY_4;
        } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
-               if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+               if (two_pix_per_container) {
                        *k1_div = PIXEL_RATE_DIV_BY_1;
                        *k2_div = PIXEL_RATE_DIV_BY_2;
-               } else if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
-                       *k1_div = PIXEL_RATE_DIV_BY_2;
-                       *k2_div = PIXEL_RATE_DIV_BY_2;
                } else {
-                       if (odm_combine_factor == 1)
-                               *k2_div = PIXEL_RATE_DIV_BY_4;
-                       else if (odm_combine_factor == 2)
+                       *k1_div = PIXEL_RATE_DIV_BY_1;
+                       *k2_div = PIXEL_RATE_DIV_BY_4;
+                       if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
                                *k2_div = PIXEL_RATE_DIV_BY_2;
                }
        }
@@ -374,3 +374,31 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
 
        return odm_combine_factor;
 }
+
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+{
+       uint32_t pix_per_cycle = 1;
+       uint32_t odm_combine_factor = 1;
+
+       if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
+               return;
+
+       odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+       if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
+               || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+               pix_per_cycle = 2;
+
+       if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
+               pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+                               pix_per_cycle);
+}
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+       if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+               dc->debug.enable_dp_dig_pixel_rate_div_policy)
+               return true;
+       return false;
+}
index be0f5e4d48e13b1fd5d794cb651c759a45aa6ca0..d014580592aca6aa8286beeb42e0d7a70f5e5211 100644 (file)
@@ -39,4 +39,8 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
 
 unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
 
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
 #endif /* __DC_HWSS_DCN314_H__ */
index b9debeb081fdf1a1352c5b40d822756dcd525767..fcf67eb3478f07e60e361bdaaee35098c4ad145f 100644 (file)
@@ -145,6 +145,8 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
        .set_shaper_3dlut = dcn20_set_shaper_3dlut,
        .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
        .calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
+       .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
+       .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
 };
 
 void dcn314_hw_sequencer_construct(struct dc *dc)
index 63861cdfb09f2be608d02214c9359cf6fa2112a2..3a9e3870b3a95f223b18563d126befc657d28d6a 100644 (file)
@@ -70,6 +70,7 @@
 #include "dce110/dce110_resource.h"
 #include "dml/display_mode_vba.h"
 #include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn314/dcn314_fpu.h"
 #include "dcn314/dcn314_dccg.h"
 #include "dcn10/dcn10_resource.h"
 #include "dcn31/dcn31_panel_cntl.h"
@@ -132,155 +133,6 @@ static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C
 
 #define DC_LOGGER_INIT(logger)
 
-#define DCN3_14_DEFAULT_DET_SIZE 384
-#define DCN3_14_MAX_DET_SIZE 384
-#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
-#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_14_ip = {
-       .VBlankNomDefaultUS = 668,
-       .gpuvm_enable = 1,
-       .gpuvm_max_page_table_levels = 1,
-       .hostvm_enable = 1,
-       .hostvm_max_page_table_levels = 2,
-       .rob_buffer_size_kbytes = 64,
-       .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
-       .config_return_buffer_size_in_kbytes = 1792,
-       .compressed_buffer_segment_size_in_kbytes = 64,
-       .meta_fifo_size_in_kentries = 32,
-       .zero_size_buffer_entries = 512,
-       .compbuf_reserved_space_64b = 256,
-       .compbuf_reserved_space_zs = 64,
-       .dpp_output_buffer_pixels = 2560,
-       .opp_output_buffer_lines = 1,
-       .pixel_chunk_size_kbytes = 8,
-       .meta_chunk_size_kbytes = 2,
-       .min_meta_chunk_size_bytes = 256,
-       .writeback_chunk_size_kbytes = 8,
-       .ptoi_supported = false,
-       .num_dsc = 4,
-       .maximum_dsc_bits_per_component = 10,
-       .dsc422_native_support = false,
-       .is_line_buffer_bpp_fixed = true,
-       .line_buffer_fixed_bpp = 48,
-       .line_buffer_size_bits = 789504,
-       .max_line_buffer_lines = 12,
-       .writeback_interface_buffer_size_kbytes = 90,
-       .max_num_dpp = 4,
-       .max_num_otg = 4,
-       .max_num_hdmi_frl_outputs = 1,
-       .max_num_wb = 1,
-       .max_dchub_pscl_bw_pix_per_clk = 4,
-       .max_pscl_lb_bw_pix_per_clk = 2,
-       .max_lb_vscl_bw_pix_per_clk = 4,
-       .max_vscl_hscl_bw_pix_per_clk = 4,
-       .max_hscl_ratio = 6,
-       .max_vscl_ratio = 6,
-       .max_hscl_taps = 8,
-       .max_vscl_taps = 8,
-       .dpte_buffer_size_in_pte_reqs_luma = 64,
-       .dpte_buffer_size_in_pte_reqs_chroma = 34,
-       .dispclk_ramp_margin_percent = 1,
-       .max_inter_dcn_tile_repeaters = 8,
-       .cursor_buffer_size = 16,
-       .cursor_chunk_size = 2,
-       .writeback_line_buffer_buffer_size = 0,
-       .writeback_min_hscl_ratio = 1,
-       .writeback_min_vscl_ratio = 1,
-       .writeback_max_hscl_ratio = 1,
-       .writeback_max_vscl_ratio = 1,
-       .writeback_max_hscl_taps = 1,
-       .writeback_max_vscl_taps = 1,
-       .dppclk_delay_subtotal = 46,
-       .dppclk_delay_scl = 50,
-       .dppclk_delay_scl_lb_only = 16,
-       .dppclk_delay_cnvc_formatter = 27,
-       .dppclk_delay_cnvc_cursor = 6,
-       .dispclk_delay_subtotal = 119,
-       .dynamic_metadata_vm_enabled = false,
-       .odm_combine_4to1_supported = false,
-       .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
-               /*TODO: correct dispclk/dppclk voltage level determination*/
-       .clock_limits = {
-               {
-                       .state = 0,
-                       .dispclk_mhz = 1200.0,
-                       .dppclk_mhz = 1200.0,
-                       .phyclk_mhz = 600.0,
-                       .phyclk_d18_mhz = 667.0,
-                       .dscclk_mhz = 186.0,
-                       .dtbclk_mhz = 625.0,
-               },
-               {
-                       .state = 1,
-                       .dispclk_mhz = 1200.0,
-                       .dppclk_mhz = 1200.0,
-                       .phyclk_mhz = 810.0,
-                       .phyclk_d18_mhz = 667.0,
-                       .dscclk_mhz = 209.0,
-                       .dtbclk_mhz = 625.0,
-               },
-               {
-                       .state = 2,
-                       .dispclk_mhz = 1200.0,
-                       .dppclk_mhz = 1200.0,
-                       .phyclk_mhz = 810.0,
-                       .phyclk_d18_mhz = 667.0,
-                       .dscclk_mhz = 209.0,
-                       .dtbclk_mhz = 625.0,
-               },
-               {
-                       .state = 3,
-                       .dispclk_mhz = 1200.0,
-                       .dppclk_mhz = 1200.0,
-                       .phyclk_mhz = 810.0,
-                       .phyclk_d18_mhz = 667.0,
-                       .dscclk_mhz = 371.0,
-                       .dtbclk_mhz = 625.0,
-               },
-               {
-                       .state = 4,
-                       .dispclk_mhz = 1200.0,
-                       .dppclk_mhz = 1200.0,
-                       .phyclk_mhz = 810.0,
-                       .phyclk_d18_mhz = 667.0,
-                       .dscclk_mhz = 417.0,
-                       .dtbclk_mhz = 625.0,
-               },
-       },
-       .num_states = 5,
-       .sr_exit_time_us = 9.0,
-       .sr_enter_plus_exit_time_us = 11.0,
-       .sr_exit_z8_time_us = 442.0,
-       .sr_enter_plus_exit_z8_time_us = 560.0,
-       .writeback_latency_us = 12.0,
-       .dram_channel_width_bytes = 4,
-       .round_trip_ping_latency_dcfclk_cycles = 106,
-       .urgent_latency_pixel_data_only_us = 4.0,
-       .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
-       .urgent_latency_vm_data_only_us = 4.0,
-       .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
-       .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
-       .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-       .pct_ideal_sdp_bw_after_urgent = 80.0,
-       .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
-       .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
-       .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
-       .max_avg_sdp_bw_use_normal_percent = 60.0,
-       .max_avg_dram_bw_use_normal_percent = 60.0,
-       .fabric_datapath_to_dcn_data_return_bytes = 32,
-       .return_bus_width_bytes = 64,
-       .downspread_percent = 0.38,
-       .dcn_downspread_percent = 0.5,
-       .gpuvm_min_page_size_bytes = 4096,
-       .hostvm_min_page_size_bytes = 4096,
-       .do_urgent_latency_adjustment = false,
-       .urgent_latency_adjustment_fabric_clock_component_us = 0,
-       .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
 enum dcn31_clk_src_array_id {
        DCN31_CLK_SRC_PLL0,
        DCN31_CLK_SRC_PLL1,
@@ -1402,7 +1254,7 @@ static struct stream_encoder *dcn314_stream_encoder_create(
        int afmt_inst;
 
        /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
-       if (eng_id <= ENGINE_ID_DIGF) {
+       if (eng_id < ENGINE_ID_DIGF) {
                vpg_inst = eng_id;
                afmt_inst = eng_id;
        } else
@@ -1447,7 +1299,8 @@ static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
         * VPG[8] -> HPO_DP[2]
         * VPG[9] -> HPO_DP[3]
         */
-       vpg_inst = hpo_dp_inst + 6;
+       //Uses offset index 5-8, but actually maps to vpg_inst 6-9
+       vpg_inst = hpo_dp_inst + 5;
 
        /* Mapping of APG register blocks to HPO DP block instance:
         * APG[0] -> HPO_DP[0]
@@ -1793,109 +1646,16 @@ static struct clock_source *dcn31_clock_source_create(
        return NULL;
 }
 
-static bool is_dual_plane(enum surface_pixel_format format)
-{
-       return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
 static int dcn314_populate_dml_pipes_from_context(
        struct dc *dc, struct dc_state *context,
        display_e2e_pipe_params_st *pipes,
        bool fast_validate)
 {
-       int i, pipe_cnt;
-       struct resource_context *res_ctx = &context->res_ctx;
-       struct pipe_ctx *pipe;
-       bool upscaled = false;
-
-       dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
-       for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
-               struct dc_crtc_timing *timing;
-
-               if (!res_ctx->pipe_ctx[i].stream)
-                       continue;
-               pipe = &res_ctx->pipe_ctx[i];
-               timing = &pipe->stream->timing;
-
-               if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
-                       && pipe->stream->adjust.v_total_min > timing->v_total)
-                       pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-
-               if (pipe->plane_state &&
-                               (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
-                               pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
-                       upscaled = true;
-
-               /*
-                * Immediate flip can be set dynamically after enabling the plane.
-                * We need to require support for immediate flip or underflow can be
-                * intermittently experienced depending on peak b/w requirements.
-                */
-               pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
-               pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
-               pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
-               pipes[pipe_cnt].pipe.src.gpuvm = true;
-               pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
-               pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
-               pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
-               pipes[pipe_cnt].pipe.src.dcc_rate = 3;
-               pipes[pipe_cnt].dout.dsc_input_bpc = 0;
-
-               if (pipes[pipe_cnt].dout.dsc_enable) {
-                       switch (timing->display_color_depth) {
-                       case COLOR_DEPTH_888:
-                               pipes[pipe_cnt].dout.dsc_input_bpc = 8;
-                               break;
-                       case COLOR_DEPTH_101010:
-                               pipes[pipe_cnt].dout.dsc_input_bpc = 10;
-                               break;
-                       case COLOR_DEPTH_121212:
-                               pipes[pipe_cnt].dout.dsc_input_bpc = 12;
-                               break;
-                       default:
-                               ASSERT(0);
-                               break;
-                       }
-               }
-
-               pipe_cnt++;
-       }
-       context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
-
-       dc->config.enable_4to1MPC = false;
-       if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
-               if (is_dual_plane(pipe->plane_state->format)
-                               && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
-                       dc->config.enable_4to1MPC = true;
-               } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
-                       /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
-                       context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
-                       pipes[0].pipe.src.unbounded_req_mode = true;
-               }
-       } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
-                       && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
-               context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
-       } else if (context->stream_count >= 3 && upscaled) {
-               context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
-       }
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (!pipe->stream)
-                       continue;
+       int pipe_cnt;
 
-               if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
-                               pipe->stream->apply_seamless_boot_optimization) {
-
-                       if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
-                               context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
-                               break;
-                       }
-               }
-       }
+       DC_FP_START();
+       pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+       DC_FP_END();
 
        return pipe_cnt;
 }
@@ -1906,88 +1666,9 @@ static struct dc_cap_funcs cap_funcs = {
 
 static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
-       struct clk_limit_table *clk_table = &bw_params->clk_table;
-       struct _vcs_dpi_voltage_scaling_st *clock_tmp = dcn3_14_soc._clock_tmp;
-       unsigned int i, closest_clk_lvl;
-       int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
-       int j;
-
-       // Default clock levels are used for diags, which may lead to overclocking.
-       if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
-               dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
-               dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
-
-               if (bw_params->num_channels > 0)
-                       dcn3_14_soc.num_chans = bw_params->num_channels;
-
-               ASSERT(dcn3_14_soc.num_chans);
-               ASSERT(clk_table->num_entries);
-
-               /* Prepass to find max clocks independent of voltage level. */
-               for (i = 0; i < clk_table->num_entries; ++i) {
-                       if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
-                               max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
-                       if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
-                               max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
-               }
-
-               for (i = 0; i < clk_table->num_entries; i++) {
-                       /* loop backwards*/
-                       for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
-                               if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
-                                       closest_clk_lvl = j;
-                                       break;
-                               }
-                       }
-                       if (clk_table->num_entries == 1) {
-                               /*smu gives one DPM level, let's take the highest one*/
-                               closest_clk_lvl = dcn3_14_soc.num_states - 1;
-                       }
-
-                       clock_tmp[i].state = i;
-
-                       /* Clocks dependent on voltage level. */
-                       clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-                       if (clk_table->num_entries == 1 &&
-                               clock_tmp[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
-                               /*SMU fix not released yet*/
-                               clock_tmp[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
-                       }
-                       clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-                       clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-
-                       if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
-                               clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
-                       /* Clocks independent of voltage level. */
-                       clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
-                               dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
-                       clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
-                               dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
-                       clock_tmp[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
-                       clock_tmp[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
-                       clock_tmp[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
-                       clock_tmp[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
-                       clock_tmp[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
-               }
-               for (i = 0; i < clk_table->num_entries; i++)
-                       dcn3_14_soc.clock_limits[i] = clock_tmp[i];
-               if (clk_table->num_entries)
-                       dcn3_14_soc.num_states = clk_table->num_entries;
-       }
-
-       if (max_dispclk_mhz) {
-               dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
-               dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
-       }
-
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
-               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
-       else
-               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+       DC_FP_START();
+       dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+       DC_FP_END();
 }
 
 static struct resource_funcs dcn314_res_pool_funcs = {
@@ -2069,6 +1750,7 @@ static bool dcn314_resource_construct(
        dc->caps.post_blend_color_processing = true;
        dc->caps.force_dp_tps4_for_cp2520 = true;
        dc->caps.dp_hpo = true;
+       dc->caps.dp_hdmi21_pcon_support = true;
        dc->caps.edp_dsc_support = true;
        dc->caps.extended_aux_timeout_support = true;
        dc->caps.dmcub_support = true;
index c41108847ce08ea843d3e001d038289cc52b42e5..0dd3153aa5c17aaca0bcdf8229a0e788acae102a 100644 (file)
@@ -29,6 +29,9 @@
 
 #include "core_types.h"
 
+extern struct _vcs_dpi_ip_params_st dcn3_14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc;
+
 #define TO_DCN314_RES_POOL(pool)\
        container_of(pool, struct dcn314_resource_pool, base)
 
index 39929fa67a51020443f7887b1bd2790044be51ef..22849eaa6f243eb474789fd8a2874234bc1ea4cd 100644 (file)
@@ -32,7 +32,6 @@
        container_of(pool, struct dcn315_resource_pool, base)
 
 extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_15_soc;
 
 struct dcn315_resource_pool {
        struct resource_pool base;
index 0dc5a6c13ae7d46f353a5dc7e31dee5cf51fcbe5..aba6d634131b41988f30b26b1f4166a089eca8d2 100644 (file)
@@ -32,7 +32,6 @@
        container_of(pool, struct dcn316_resource_pool, base)
 
 extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_16_soc;
 
 struct dcn316_resource_pool {
        struct resource_pool base;
index d38341f68b1721f786ea2ae017b73248abd71a62..ebd3945c71f1b6ba3fad7e4423b8bc510dd57a71 100644 (file)
@@ -250,6 +250,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
        uint32_t total_lines = 0;
        uint32_t lines_per_way = 0;
        uint32_t num_ways = 0;
+       uint32_t prev_addr_low = 0;
 
        for (i = 0; i < ctx->stream_count; i++) {
                stream = ctx->streams[i];
@@ -267,10 +268,20 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
                        plane = ctx->stream_status[i].plane_states[j];
 
                        // Calculate total surface size
-                       surface_size = plane->plane_size.surface_pitch *
+                       if (prev_addr_low != plane->address.grph.addr.u.low_part) {
+                               /* if plane address are different from prev FB, then userspace allocated separate FBs*/
+                               surface_size += plane->plane_size.surface_pitch *
                                        plane->plane_size.surface_size.height *
                                        (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
 
+                               prev_addr_low = plane->address.grph.addr.u.low_part;
+                       } else {
+                               /* We have the same fb for all the planes.
+                                * Xorg always creates one giant fb that holds all surfaces,
+                                * so allocating it once is sufficient.
+                                * */
+                               continue;
+                       }
                        // Convert surface size + starting address to number of cache lines required
                        // (alignment accounted for)
                        cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
@@ -320,7 +331,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
 bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
 {
        union dmub_rb_cmd cmd;
-       uint8_t ways;
+       uint8_t ways, i;
+       int j;
+       bool stereo_in_use = false;
+       struct dc_plane_state *plane = NULL;
 
        if (!dc->ctx->dmub_srv)
                return false;
@@ -349,7 +363,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
                         * and configure HUBP's to fetch from MALL
                         */
                        ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
-                       if (ways <= dc->caps.cache_num_ways) {
+
+                       /* MALL not supported with Stereo3D. If any plane is using stereo,
+                        * don't try to enter MALL.
+                        */
+                       for (i = 0; i < dc->current_state->stream_count; i++) {
+                               for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+                                       plane = dc->current_state->stream_status[i].plane_states[j];
+
+                                       if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
+                                               stereo_in_use = true;
+                                               break;
+                                       }
+                               }
+                               if (stereo_in_use)
+                                       break;
+                       }
+                       if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
                                memset(&cmd, 0, sizeof(cmd));
                                cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
                                cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -683,9 +713,11 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
                        if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
                                        hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
                        } else {
+                               // MALL not supported with Stereo3D
                                hubp->funcs->hubp_update_mall_sel(hubp,
                                        num_ways <= dc->caps.cache_num_ways &&
-                                       pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+                                       pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+                                       pipe->plane_state->address.type !=  PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
                                                        cache_cursor);
                        }
                }
index eff1f4e17689c64428d9a9a88c39cfba96d533f9..1fad7b48bd5beb51d42459856d72ee5385fd064e 100644 (file)
@@ -281,7 +281,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
                .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
                .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
                .enable_optc_clock = optc1_enable_optc_clock,
-               .set_drr = optc31_set_drr, // TODO: Update to optc32_set_drr once FW headers are promoted
+               .set_drr = optc32_set_drr,
                .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
                .set_vtotal_min_max = optc3_set_vtotal_min_max,
                .set_static_screen_control = optc1_set_static_screen_control,
index 9a26d24b579f739c769aed46c508448508ca4e70..8b887b552f2c764a92816fe8eb4525c7eb7c6760 100644 (file)
@@ -867,7 +867,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                }
        },
        .use_max_lb = true,
-       .force_disable_subvp = true,
+       .force_disable_subvp = false,
        .exit_idle_opt_for_cursor_updates = true,
        .enable_single_display_2to1_odm_policy = true,
        .enable_dp_dig_pixel_rate_div_policy = 1,
@@ -2051,6 +2051,7 @@ static bool dcn32_resource_construct(
        dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
        dc->caps.subvp_fw_processing_delay_us = 15;
        dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+       dc->caps.subvp_swath_height_margin_lines = 16;
        dc->caps.subvp_pstate_allow_width_us = 20;
        dc->caps.subvp_vertical_int_margin_us = 30;
 
index b3f8503cea9c593b5185c72ff4ecaaaea9bb6a5d..955f52e6064df67ac1b0f9c5831d785e7a0b153d 100644 (file)
@@ -63,7 +63,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
                if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
                                pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
                        bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
-                       mall_region_pixels = pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;
+                       mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
 
                        // For bytes required in MALL, calculate based on number of MBlks required
                        num_mblks = (mall_region_pixels * bytes_per_pixel +
index 8157e40d2c7efb9a129f770b4395ef9e63b0e646..c8b7d6ff38f4fa1887aad87bb8cc6a4bb9bcf0cb 100644 (file)
@@ -868,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                }
        },
        .use_max_lb = true,
-       .force_disable_subvp = true,
+       .force_disable_subvp = false,
        .exit_idle_opt_for_cursor_updates = true,
        .enable_single_display_2to1_odm_policy = true,
        .enable_dp_dig_pixel_rate_div_policy = 1,
@@ -1662,8 +1662,9 @@ static bool dcn321_resource_construct(
        dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
        dc->caps.subvp_fw_processing_delay_us = 15;
        dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+       dc->caps.subvp_swath_height_margin_lines = 16;
        dc->caps.subvp_pstate_allow_width_us = 20;
-
+       dc->caps.subvp_vertical_int_margin_us = 30;
        dc->caps.max_slave_planes = 1;
        dc->caps.max_slave_yuv_planes = 1;
        dc->caps.max_slave_rgb_planes = 1;
index 359f6e9a1da04fd2207853f9f548b2c576316bf1..86a3b5bfd699b2c9b5a15ea048be1b2fbe6f9990 100644 (file)
@@ -61,7 +61,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
@@ -71,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
@@ -82,7 +82,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
@@ -131,6 +130,7 @@ DML += dcn321/dcn321_fpu.o
 DML += dcn301/dcn301_fpu.o
 DML += dcn302/dcn302_fpu.o
 DML += dcn303/dcn303_fpu.o
+DML += dcn314/dcn314_fpu.o
 DML += dsc/rc_calc_fpu.o
 DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
 endif
index ca44df4fca747bc3cf16b0a1e17a2e9079516792..d34e0f1314d9141c4b26fe36f7f1b7d7850a6159 100644 (file)
@@ -30,6 +30,7 @@
 #include "dchubbub.h"
 #include "dcn20/dcn20_resource.h"
 #include "dcn21/dcn21_resource.h"
+#include "clk_mgr/dcn21/rn_clk_mgr.h"
 
 #include "dcn20_fpu.h"
 
index 7ef66e511ec8ef428c09f0cca9238f7b7f6ff7e6..d211cf6d234c7c46bfa342475dab74e1b54ddf88 100644 (file)
@@ -26,6 +26,7 @@
 #include "clk_mgr.h"
 #include "dcn20/dcn20_resource.h"
 #include "dcn301/dcn301_resource.h"
+#include "clk_mgr/dcn301/vg_clk_mgr.h"
 
 #include "dml/dcn20/dcn20_fpu.h"
 #include "dcn301_fpu.h"
index e36cfa5985ea9c6e7b0a7156d3267cbdc52d5672..149a1b17cdf3f34fa26c13fd78ab253da3630dd3 100644 (file)
@@ -25,6 +25,9 @@
 
 #include "resource.h"
 #include "clk_mgr.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn315/dcn315_resource.h"
+#include "dcn316/dcn316_resource.h"
 
 #include "dml/dcn20/dcn20_fpu.h"
 #include "dcn31_fpu.h"
@@ -114,7 +117,7 @@ struct _vcs_dpi_ip_params_st dcn3_1_ip = {
        .dcc_supported = true,
 };
 
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
                /*TODO: correct dispclk/dppclk voltage level determination*/
        .clock_limits = {
                {
@@ -259,7 +262,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
        .dcc_supported = true,
 };
 
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
        .sr_exit_time_us = 9.0,
        .sr_enter_plus_exit_time_us = 11.0,
        .sr_exit_z8_time_us = 50.0,
@@ -355,7 +358,7 @@ struct _vcs_dpi_ip_params_st dcn3_16_ip = {
        .dcc_supported = true,
 };
 
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
                /*TODO: correct dispclk/dppclk voltage level determination*/
        .clock_limits = {
                {
index 3fab19134480d3784dc237b14349bcb0a1377d19..d63b4209b14c080538fb2905129e18354f163dba 100644 (file)
@@ -26,7 +26,7 @@
 #include "dc.h"
 #include "dc_link.h"
 #include "../display_mode_lib.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
 #include "display_mode_vba_31.h"
 #include "../dml_inline_defs.h"
 
index 66b82e4f05c6e8127c11d8fab35b9e6e9f787444..35d10b4d018bf0507a59cb0089125eed5e0b272b 100644 (file)
@@ -27,7 +27,7 @@
 #include "../display_mode_vba.h"
 #include "../dml_inline_defs.h"
 #include "display_rq_dlg_calc_31.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
 
 static bool is_dual_plane(enum source_format_class source_format)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
new file mode 100644 (file)
index 0000000..34a5d0f
--- /dev/null
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr.h"
+#include "resource.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn314_fpu.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/display_mode_vba.h"
+
+struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+       .VBlankNomDefaultUS = 668,
+       .gpuvm_enable = 1,
+       .gpuvm_max_page_table_levels = 1,
+       .hostvm_enable = 1,
+       .hostvm_max_page_table_levels = 2,
+       .rob_buffer_size_kbytes = 64,
+       .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
+       .config_return_buffer_size_in_kbytes = 1792,
+       .compressed_buffer_segment_size_in_kbytes = 64,
+       .meta_fifo_size_in_kentries = 32,
+       .zero_size_buffer_entries = 512,
+       .compbuf_reserved_space_64b = 256,
+       .compbuf_reserved_space_zs = 64,
+       .dpp_output_buffer_pixels = 2560,
+       .opp_output_buffer_lines = 1,
+       .pixel_chunk_size_kbytes = 8,
+       .meta_chunk_size_kbytes = 2,
+       .min_meta_chunk_size_bytes = 256,
+       .writeback_chunk_size_kbytes = 8,
+       .ptoi_supported = false,
+       .num_dsc = 4,
+       .maximum_dsc_bits_per_component = 10,
+       .dsc422_native_support = false,
+       .is_line_buffer_bpp_fixed = true,
+       .line_buffer_fixed_bpp = 48,
+       .line_buffer_size_bits = 789504,
+       .max_line_buffer_lines = 12,
+       .writeback_interface_buffer_size_kbytes = 90,
+       .max_num_dpp = 4,
+       .max_num_otg = 4,
+       .max_num_hdmi_frl_outputs = 1,
+       .max_num_wb = 1,
+       .max_dchub_pscl_bw_pix_per_clk = 4,
+       .max_pscl_lb_bw_pix_per_clk = 2,
+       .max_lb_vscl_bw_pix_per_clk = 4,
+       .max_vscl_hscl_bw_pix_per_clk = 4,
+       .max_hscl_ratio = 6,
+       .max_vscl_ratio = 6,
+       .max_hscl_taps = 8,
+       .max_vscl_taps = 8,
+       .dpte_buffer_size_in_pte_reqs_luma = 64,
+       .dpte_buffer_size_in_pte_reqs_chroma = 34,
+       .dispclk_ramp_margin_percent = 1,
+       .max_inter_dcn_tile_repeaters = 8,
+       .cursor_buffer_size = 16,
+       .cursor_chunk_size = 2,
+       .writeback_line_buffer_buffer_size = 0,
+       .writeback_min_hscl_ratio = 1,
+       .writeback_min_vscl_ratio = 1,
+       .writeback_max_hscl_ratio = 1,
+       .writeback_max_vscl_ratio = 1,
+       .writeback_max_hscl_taps = 1,
+       .writeback_max_vscl_taps = 1,
+       .dppclk_delay_subtotal = 46,
+       .dppclk_delay_scl = 50,
+       .dppclk_delay_scl_lb_only = 16,
+       .dppclk_delay_cnvc_formatter = 27,
+       .dppclk_delay_cnvc_cursor = 6,
+       .dispclk_delay_subtotal = 119,
+       .dynamic_metadata_vm_enabled = false,
+       .odm_combine_4to1_supported = false,
+       .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+               /*TODO: correct dispclk/dppclk voltage level determination*/
+       .clock_limits = {
+               {
+                       .state = 0,
+                       .dispclk_mhz = 1200.0,
+                       .dppclk_mhz = 1200.0,
+                       .phyclk_mhz = 600.0,
+                       .phyclk_d18_mhz = 667.0,
+                       .dscclk_mhz = 186.0,
+                       .dtbclk_mhz = 600.0,
+               },
+               {
+                       .state = 1,
+                       .dispclk_mhz = 1200.0,
+                       .dppclk_mhz = 1200.0,
+                       .phyclk_mhz = 810.0,
+                       .phyclk_d18_mhz = 667.0,
+                       .dscclk_mhz = 209.0,
+                       .dtbclk_mhz = 600.0,
+               },
+               {
+                       .state = 2,
+                       .dispclk_mhz = 1200.0,
+                       .dppclk_mhz = 1200.0,
+                       .phyclk_mhz = 810.0,
+                       .phyclk_d18_mhz = 667.0,
+                       .dscclk_mhz = 209.0,
+                       .dtbclk_mhz = 600.0,
+               },
+               {
+                       .state = 3,
+                       .dispclk_mhz = 1200.0,
+                       .dppclk_mhz = 1200.0,
+                       .phyclk_mhz = 810.0,
+                       .phyclk_d18_mhz = 667.0,
+                       .dscclk_mhz = 371.0,
+                       .dtbclk_mhz = 600.0,
+               },
+               {
+                       .state = 4,
+                       .dispclk_mhz = 1200.0,
+                       .dppclk_mhz = 1200.0,
+                       .phyclk_mhz = 810.0,
+                       .phyclk_d18_mhz = 667.0,
+                       .dscclk_mhz = 417.0,
+                       .dtbclk_mhz = 600.0,
+               },
+       },
+       .num_states = 5,
+       .sr_exit_time_us = 9.0,
+       .sr_enter_plus_exit_time_us = 11.0,
+       .sr_exit_z8_time_us = 442.0,
+       .sr_enter_plus_exit_z8_time_us = 560.0,
+       .writeback_latency_us = 12.0,
+       .dram_channel_width_bytes = 4,
+       .round_trip_ping_latency_dcfclk_cycles = 106,
+       .urgent_latency_pixel_data_only_us = 4.0,
+       .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+       .urgent_latency_vm_data_only_us = 4.0,
+       .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+       .pct_ideal_sdp_bw_after_urgent = 80.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+       .max_avg_sdp_bw_use_normal_percent = 60.0,
+       .max_avg_dram_bw_use_normal_percent = 60.0,
+       .fabric_datapath_to_dcn_data_return_bytes = 32,
+       .return_bus_width_bytes = 64,
+       .downspread_percent = 0.38,
+       .dcn_downspread_percent = 0.5,
+       .gpuvm_min_page_size_bytes = 4096,
+       .hostvm_min_page_size_bytes = 4096,
+       .do_urgent_latency_adjustment = false,
+       .urgent_latency_adjustment_fabric_clock_component_us = 0,
+       .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
+{
+       struct clk_limit_table *clk_table = &bw_params->clk_table;
+       struct _vcs_dpi_voltage_scaling_st *clock_limits =
+               dcn3_14_soc.clock_limits;
+       unsigned int i, closest_clk_lvl;
+       int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+       int j;
+
+       dc_assert_fp_enabled();
+
+       // Default clock levels are used for diags, which may lead to overclocking.
+       if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
+
+               dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+               dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+
+               if (bw_params->num_channels > 0)
+                       dcn3_14_soc.num_chans = bw_params->num_channels;
+
+               ASSERT(dcn3_14_soc.num_chans);
+               ASSERT(clk_table->num_entries);
+
+               /* Prepass to find max clocks independent of voltage level. */
+               for (i = 0; i < clk_table->num_entries; ++i) {
+                       if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+                               max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+                       if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+                               max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+               }
+
+               for (i = 0; i < clk_table->num_entries; i++) {
+                       /* loop backwards*/
+                       for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
+                               if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+                                       closest_clk_lvl = j;
+                                       break;
+                               }
+                       }
+                       if (clk_table->num_entries == 1) {
+                               /*smu gives one DPM level, let's take the highest one*/
+                               closest_clk_lvl = dcn3_14_soc.num_states - 1;
+                       }
+
+                       clock_limits[i].state = i;
+
+                       /* Clocks dependent on voltage level. */
+                       clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+                       if (clk_table->num_entries == 1 &&
+                               clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+                               /*SMU fix not released yet*/
+                               clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+                       }
+                       clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+                       clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+
+                       if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
+                               clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+                       /* Clocks independent of voltage level. */
+                       clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+                               dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+                       clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+                               dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+                       clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+                       clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+                       clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+                       clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+                       clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+               }
+               for (i = 0; i < clk_table->num_entries; i++)
+                       dcn3_14_soc.clock_limits[i] = clock_limits[i];
+               if (clk_table->num_entries) {
+                       dcn3_14_soc.num_states = clk_table->num_entries;
+               }
+       }
+
+       if (max_dispclk_mhz) {
+               dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+               dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+       }
+
+       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+       else
+               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+}
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+       return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+                                              display_e2e_pipe_params_st *pipes,
+                                              bool fast_validate)
+{
+       int i, pipe_cnt;
+       struct resource_context *res_ctx = &context->res_ctx;
+       struct pipe_ctx *pipe;
+       bool upscaled = false;
+
+       dc_assert_fp_enabled();
+
+       dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+       for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+               struct dc_crtc_timing *timing;
+
+               if (!res_ctx->pipe_ctx[i].stream)
+                       continue;
+               pipe = &res_ctx->pipe_ctx[i];
+               timing = &pipe->stream->timing;
+
+               if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+                       && pipe->stream->adjust.v_total_min > timing->v_total)
+                       pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+
+               if (pipe->plane_state &&
+                               (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+                               pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+                       upscaled = true;
+
+               /*
+                * Immediate flip can be set dynamically after enabling the plane.
+                * We need to require support for immediate flip or underflow can be
+                * intermittently experienced depending on peak b/w requirements.
+                */
+               pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+               pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+               pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+               pipes[pipe_cnt].pipe.src.gpuvm = true;
+               pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+               pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+               pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+               pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+               pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+
+               if (pipes[pipe_cnt].dout.dsc_enable) {
+                       switch (timing->display_color_depth) {
+                       case COLOR_DEPTH_888:
+                               pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+                               break;
+                       case COLOR_DEPTH_101010:
+                               pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+                               break;
+                       case COLOR_DEPTH_121212:
+                               pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+                               break;
+                       default:
+                               ASSERT(0);
+                               break;
+                       }
+               }
+
+               pipe_cnt++;
+       }
+       context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
+
+       dc->config.enable_4to1MPC = false;
+       if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+               if (is_dual_plane(pipe->plane_state->format)
+                               && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+                       dc->config.enable_4to1MPC = true;
+               } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+                       /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
+                       context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+                       pipes[0].pipe.src.unbounded_req_mode = true;
+               }
+       } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+                       && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+               context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+       } else if (context->stream_count >= 3 && upscaled) {
+               context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+       }
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+               if (!pipe->stream)
+                       continue;
+
+               if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
+                               pipe->stream->apply_seamless_boot_optimization) {
+
+                       if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
+                               context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
+                               break;
+                       }
+               }
+       }
+
+       return pipe_cnt;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
new file mode 100644 (file)
index 0000000..d32c5bb
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN314_FPU_H__
+#define __DCN314_FPU_H__
+
+#define DCN3_14_DEFAULT_DET_SIZE 384
+#define DCN3_14_MAX_DET_SIZE 384
+#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+                                              display_e2e_pipe_params_st *pipes,
+                                              bool fast_validate);
+
+#endif
index 66453546e24fe9b42a6966fe06f57e81dedb6d9c..8118cfc5b405672b84fe8cce9daa812b572277a0 100644 (file)
@@ -473,8 +473,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
 
        // DML calculation for MALL region doesn't take into account FW delay
        // and required pstate allow width for multi-display cases
+       /* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
+        * to 2 swaths (i.e. 16 lines)
+        */
        phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
-                               pstate_width_fw_delay_lines;
+                               pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
 
        // For backporch of phantom pipe, use vstartup of the main pipe
        phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -490,6 +493,7 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
                                                phantom_stream->timing.v_front_porch +
                                                phantom_stream->timing.v_sync_width +
                                                phantom_bp;
+       phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
 }
 
 /**
@@ -983,9 +987,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
         * DML favors voltage over p-state, but we're more interested in
         * supporting p-state over voltage. We can't support p-state in
         * prefetch mode > 0 so try capping the prefetch mode to start.
+        * Override present for testing.
         */
-       context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+       if (dc->debug.dml_disallow_alternate_prefetch_modes)
+               context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
                        dm_prefetch_support_uclk_fclk_and_stutter;
+       else
+               context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+                       dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
        *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
        /* This may adjust vlevel and maxMpcComb */
        if (*vlevel < context->bw_ctx.dml.soc.num_states)
@@ -1014,7 +1024,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
                         * will not allow for switch in VBLANK. The DRR display must have it's VBLANK stretched
                         * enough to support MCLK switching.
                         */
-                       if (*vlevel == context->bw_ctx.dml.soc.num_states) {
+                       if (*vlevel == context->bw_ctx.dml.soc.num_states &&
+                               context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
+                                       dm_prefetch_support_uclk_fclk_and_stutter) {
                                context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
                                                                dm_prefetch_support_stutter;
                                /* There are params (such as FabricClock) that need to be recalculated
@@ -1344,7 +1356,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
        int split[MAX_PIPES] = { 0 };
        bool merge[MAX_PIPES] = { false };
        bool newly_split[MAX_PIPES] = { false };
-       int pipe_cnt, i, pipe_idx, vlevel;
+       int pipe_cnt, i, pipe_idx;
+       int vlevel = context->bw_ctx.dml.soc.num_states;
        struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
 
        dc_assert_fp_enabled();
@@ -1373,17 +1386,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
                DC_FP_END();
        }
 
-       if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
-                       vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+       if (fast_validate ||
+                       (dc->debug.dml_disallow_alternate_prefetch_modes &&
+                       (vlevel == context->bw_ctx.dml.soc.num_states ||
+                               vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
                /*
-                * If mode is unsupported or there's still no p-state support then
-                * fall back to favoring voltage.
+                * If dml_disallow_alternate_prefetch_modes is false, then we have already
+                * tried alternate prefetch modes during full validation.
+                *
+                * If mode is unsupported or there is no p-state support, then
+                * fall back to favouring voltage.
                 *
-                * If Prefetch mode 0 failed for this config, or passed with Max UCLK, try if
-                * supported with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+                * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+                * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
                 */
                context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
-                               dm_prefetch_support_fclk_and_stutter;
+                       dm_prefetch_support_fclk_and_stutter;
 
                vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 
@@ -2098,6 +2116,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
                                dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
                }
 
+               if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+                               != dc->bb_overrides.fclk_clock_change_latency_ns
+                               && dc->bb_overrides.fclk_clock_change_latency_ns) {
+                       dcn3_2_soc.fclk_change_latency_us =
+                               dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+               }
+
                if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
                                != dc->bb_overrides.dummy_clock_change_latency_ns
                                && dc->bb_overrides.dummy_clock_change_latency_ns) {
index 890612db08dc4224bb4ac960ea5ba881fc3a9714..cb2025771646b916d6d0d23224889e6d9d3921a2 100644 (file)
@@ -221,7 +221,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                // VBA_DELTA
                // Calculate DET size, swath height
                dml32_CalculateSwathAndDETConfiguration(
-                               &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
                                mode_lib->vba.DETSizeOverride,
                                mode_lib->vba.UsesMALLForPStateChange,
                                mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -461,7 +460,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
        {
 
                dml32_CalculateVMRowAndSwath(
-                               &v->dummy_vars.dml32_CalculateVMRowAndSwath,
                                mode_lib->vba.NumberOfActiveSurfaces,
                                v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
                                v->SurfaceSizeInMALL,
@@ -757,9 +755,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
-                       v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
-                                       &v->dummy_vars.dml32_CalculatePrefetchSchedule,
-                                       v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+                       v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
                                        &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
                                        mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
                                        mode_lib->vba.DPPCLKDelaySCL,
@@ -1167,7 +1163,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
 
                dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-                       &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
                        mode_lib->vba.USRRetrainingRequiredFinal,
                        mode_lib->vba.UsesMALLForPStateChange,
                        mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
@@ -1952,7 +1947,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
        }
 
        dml32_CalculateSwathAndDETConfiguration(
-                       &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
                        mode_lib->vba.DETSizeOverride,
                        mode_lib->vba.UsesMALLForPStateChange,
                        mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2549,7 +2543,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                        }
 
                        dml32_CalculateSwathAndDETConfiguration(
-                                       &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
                                        mode_lib->vba.DETSizeOverride,
                                        mode_lib->vba.UsesMALLForPStateChange,
                                        mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2749,7 +2742,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                        {
                                dml32_CalculateVMRowAndSwath(
-                                               &v->dummy_vars.dml32_CalculateVMRowAndSwath,
                                                mode_lib->vba.NumberOfActiveSurfaces,
                                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
                                                mode_lib->vba.SurfaceSizeInMALL,
@@ -3266,7 +3258,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                                        mode_lib->vba.NoTimeForPrefetch[i][j][k] =
                                                dml32_CalculatePrefetchSchedule(
-                                                       &v->dummy_vars.dml32_CalculatePrefetchSchedule,
                                                        v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
                                                        mode_lib->vba.DSCDelayPerState[i][k],
@@ -3566,7 +3557,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                        {
                                dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-                                               &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
                                                mode_lib->vba.USRRetrainingRequiredFinal,
                                                mode_lib->vba.UsesMALLForPStateChange,
                                                mode_lib->vba.PrefetchModePerState[i][j],
index 07f8f3b8626b2a6b7c67ca04145600d3e497c252..05fc14a47fba91b86cc5579c823da689bc719a59 100644 (file)
@@ -391,7 +391,6 @@ void dml32_CalculateBytePerPixelAndBlockSizes(
 } // CalculateBytePerPixelAndBlockSizes
 
 void dml32_CalculateSwathAndDETConfiguration(
-               struct dml32_CalculateSwathAndDETConfiguration *st_vars,
                unsigned int DETSizeOverride[],
                enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
                unsigned int ConfigReturnBufferSizeInKByte,
@@ -456,10 +455,18 @@ void dml32_CalculateSwathAndDETConfiguration(
                bool ViewportSizeSupportPerSurface[],
                bool *ViewportSizeSupport)
 {
+       unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
+       unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
+       unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
+       unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
+       unsigned int RoundedUpSwathSizeBytesY;
+       unsigned int RoundedUpSwathSizeBytesC;
+       double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
+       double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
        unsigned int k;
-
-       st_vars->TotalActiveDPP = 0;
-       st_vars->NoChromaSurfaces = true;
+       unsigned int TotalActiveDPP = 0;
+       bool NoChromaSurfaces = true;
+       unsigned int DETBufferSizeInKByteForSwathCalculation;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -494,43 +501,43 @@ void dml32_CalculateSwathAndDETConfiguration(
                        DPPPerSurface,
 
                        /* Output */
-                       st_vars->SwathWidthdoubleDPP,
-                       st_vars->SwathWidthdoubleDPPChroma,
+                       SwathWidthdoubleDPP,
+                       SwathWidthdoubleDPPChroma,
                        SwathWidth,
                        SwathWidthChroma,
-                       st_vars->MaximumSwathHeightY,
-                       st_vars->MaximumSwathHeightC,
+                       MaximumSwathHeightY,
+                       MaximumSwathHeightC,
                        swath_width_luma_ub,
                        swath_width_chroma_ub);
 
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               st_vars->RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * st_vars->MaximumSwathHeightY[k];
-               st_vars->RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * st_vars->MaximumSwathHeightC[k];
+               RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
+               RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
 #ifdef __DML_VBA_DEBUG__
                dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
                dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
                dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
-               dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, st_vars->MaximumSwathHeightY[k]);
+               dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
                dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
-                               st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+                               RoundedUpMaxSwathSizeBytesY[k]);
                dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
                dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
-               dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, st_vars->MaximumSwathHeightC[k]);
+               dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
                dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
-                               st_vars->RoundedUpMaxSwathSizeBytesC[k]);
+                               RoundedUpMaxSwathSizeBytesC[k]);
 #endif
 
                if (SourcePixelFormat[k] == dm_420_10) {
-                       st_vars->RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesY[k], 256);
-                       st_vars->RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesC[k], 256);
+                       RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
+                       RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
                }
        }
 
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               st_vars->TotalActiveDPP = st_vars->TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
+               TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
                if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
                                SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
-                       st_vars->NoChromaSurfaces = false;
+                       NoChromaSurfaces = false;
                }
        }
 
@@ -540,10 +547,10 @@ void dml32_CalculateSwathAndDETConfiguration(
        // if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
        // - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
        // - assume for "narrow" vp case in which the ROB can fit 8 swaths, the DET should be big enough to do full size req
-       *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (st_vars->RoundedUpMaxSwathSizeBytesY[0]/512);
+       *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
 
        if (*CompBufReservedSpaceNeedAdjustment == 1) {
-               *CompBufReservedSpaceKBytes = ROBSizeKBytes - st_vars->RoundedUpMaxSwathSizeBytesY[0]/512;
+               *CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
        }
 
        #ifdef __DML_VBA_DEBUG__
@@ -551,7 +558,7 @@ void dml32_CalculateSwathAndDETConfiguration(
                dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment  = %d\n",  __func__, *CompBufReservedSpaceNeedAdjustment);
        #endif
 
-       *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, st_vars->TotalActiveDPP, st_vars->NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
+       *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
 
        dml32_CalculateDETBufferSize(DETSizeOverride,
                        UseMALLForPStateChange,
@@ -566,8 +573,8 @@ void dml32_CalculateSwathAndDETConfiguration(
                        SourcePixelFormat,
                        ReadBandwidthLuma,
                        ReadBandwidthChroma,
-                       st_vars->RoundedUpMaxSwathSizeBytesY,
-                       st_vars->RoundedUpMaxSwathSizeBytesC,
+                       RoundedUpMaxSwathSizeBytesY,
+                       RoundedUpMaxSwathSizeBytesC,
                        DPPPerSurface,
 
                        /* Output */
@@ -575,7 +582,7 @@ void dml32_CalculateSwathAndDETConfiguration(
                        CompressedBufferSizeInkByte);
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, st_vars->TotalActiveDPP);
+       dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
        dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
        dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
        dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
@@ -586,42 +593,42 @@ void dml32_CalculateSwathAndDETConfiguration(
        *ViewportSizeSupport = true;
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
 
-               st_vars->DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
+               DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
                                dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
 #ifdef __DML_VBA_DEBUG__
                dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
-                               st_vars->DETBufferSizeInKByteForSwathCalculation);
+                               DETBufferSizeInKByteForSwathCalculation);
 #endif
 
-               if (st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
-                               st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
-                       SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
-                       SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
-                       st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
-                       st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
-               } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
-                               st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
-                               st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
-                       SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
-                       SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
-                       st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
-                       st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
-               } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] < 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
-                               st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 <=
-                               st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
-                       SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
-                       SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
-                       st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
-                       st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+               if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
+                               DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+                       SwathHeightY[k] = MaximumSwathHeightY[k];
+                       SwathHeightC[k] = MaximumSwathHeightC[k];
+                       RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+                       RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+               } else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+                               RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
+                               DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+                       SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+                       SwathHeightC[k] = MaximumSwathHeightC[k];
+                       RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+                       RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+               } else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+                               RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
+                               DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+                       SwathHeightY[k] = MaximumSwathHeightY[k];
+                       SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+                       RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+                       RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
                } else {
-                       SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
-                       SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
-                       st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
-                       st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+                       SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+                       SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+                       RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+                       RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
                }
 
-               if ((st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 >
-                               st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
+               if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
+                               DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
                                || SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
                                                SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
                        *ViewportSizeSupport = false;
@@ -636,7 +643,7 @@ void dml32_CalculateSwathAndDETConfiguration(
 #endif
                        DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
                        DETBufferSizeC[k] = 0;
-               } else if (st_vars->RoundedUpSwathSizeBytesY <= 1.5 * st_vars->RoundedUpSwathSizeBytesC) {
+               } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
 #endif
@@ -654,11 +661,11 @@ void dml32_CalculateSwathAndDETConfiguration(
                dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
                dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
                dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
-                               k, st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+                               k, RoundedUpMaxSwathSizeBytesY[k]);
                dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
-                               k, st_vars->RoundedUpMaxSwathSizeBytesC[k]);
-               dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesY);
-               dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesC);
+                               k, RoundedUpMaxSwathSizeBytesC[k]);
+               dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
+               dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
                dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
                dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
                dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
@@ -1867,7 +1874,6 @@ void dml32_CalculateSurfaceSizeInMall(
 } // CalculateSurfaceSizeInMall
 
 void dml32_CalculateVMRowAndSwath(
-               struct dml32_CalculateVMRowAndSwath *st_vars,
                unsigned int NumberOfActiveSurfaces,
                DmlPipe myPipe[],
                unsigned int SurfaceSizeInMALL[],
@@ -1933,6 +1939,21 @@ void dml32_CalculateVMRowAndSwath(
                unsigned int BIGK_FRAGMENT_SIZE[])
 {
        unsigned int k;
+       unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
+       unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
+       unsigned int PDEAndMetaPTEBytesFrameY;
+       unsigned int PDEAndMetaPTEBytesFrameC;
+       unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
+       unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+       unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
+       unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
+       unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
+       unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
+       unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+       unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
+       unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+       unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
+       bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
 
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
                if (HostVMEnable == true) {
@@ -1954,15 +1975,15 @@ void dml32_CalculateVMRowAndSwath(
                                myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
                        if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
                                        !IsVertical(myPipe[k].SourceRotation)) {
-                               st_vars->PTEBufferSizeInRequestsForLuma[k] =
+                               PTEBufferSizeInRequestsForLuma[k] =
                                                (PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
-                               st_vars->PTEBufferSizeInRequestsForChroma[k] = st_vars->PTEBufferSizeInRequestsForLuma[k];
+                               PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
                        } else {
-                               st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
-                               st_vars->PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
+                               PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
+                               PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
                        }
 
-                       st_vars->PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
+                       PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
                                        myPipe[k].ViewportStationary,
                                        myPipe[k].DCCEnable,
                                        myPipe[k].DPPPerSurface,
@@ -1982,21 +2003,21 @@ void dml32_CalculateVMRowAndSwath(
                                        GPUVMMaxPageTableLevels,
                                        GPUVMMinPageSizeKBytes[k],
                                        HostVMMinPageSize,
-                                       st_vars->PTEBufferSizeInRequestsForChroma[k],
+                                       PTEBufferSizeInRequestsForChroma[k],
                                        myPipe[k].PitchC,
                                        myPipe[k].DCCMetaPitchC,
                                        myPipe[k].BlockWidthC,
                                        myPipe[k].BlockHeightC,
 
                                        /* Output */
-                                       &st_vars->MetaRowByteC[k],
-                                       &st_vars->PixelPTEBytesPerRowC[k],
+                                       &MetaRowByteC[k],
+                                       &PixelPTEBytesPerRowC[k],
                                        &dpte_row_width_chroma_ub[k],
                                        &dpte_row_height_chroma[k],
                                        &dpte_row_height_linear_chroma[k],
-                                       &st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k],
-                                       &st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k],
-                                       &st_vars->dpte_row_height_chroma_one_row_per_frame[k],
+                                       &PixelPTEBytesPerRowC_one_row_per_frame[k],
+                                       &dpte_row_width_chroma_ub_one_row_per_frame[k],
+                                       &dpte_row_height_chroma_one_row_per_frame[k],
                                        &meta_req_width_chroma[k],
                                        &meta_req_height_chroma[k],
                                        &meta_row_width_chroma[k],
@@ -2024,19 +2045,19 @@ void dml32_CalculateVMRowAndSwath(
                                        &VInitPreFillC[k],
                                        &MaxNumSwathC[k]);
                } else {
-                       st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
-                       st_vars->PTEBufferSizeInRequestsForChroma[k] = 0;
-                       st_vars->PixelPTEBytesPerRowC[k] = 0;
-                       st_vars->PDEAndMetaPTEBytesFrameC = 0;
-                       st_vars->MetaRowByteC[k] = 0;
+                       PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
+                       PTEBufferSizeInRequestsForChroma[k] = 0;
+                       PixelPTEBytesPerRowC[k] = 0;
+                       PDEAndMetaPTEBytesFrameC = 0;
+                       MetaRowByteC[k] = 0;
                        MaxNumSwathC[k] = 0;
                        PrefetchSourceLinesC[k] = 0;
-                       st_vars->dpte_row_height_chroma_one_row_per_frame[k] = 0;
-                       st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
-                       st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
+                       dpte_row_height_chroma_one_row_per_frame[k] = 0;
+                       dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
+                       PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
                }
 
-               st_vars->PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
+               PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
                                myPipe[k].ViewportStationary,
                                myPipe[k].DCCEnable,
                                myPipe[k].DPPPerSurface,
@@ -2056,21 +2077,21 @@ void dml32_CalculateVMRowAndSwath(
                                GPUVMMaxPageTableLevels,
                                GPUVMMinPageSizeKBytes[k],
                                HostVMMinPageSize,
-                               st_vars->PTEBufferSizeInRequestsForLuma[k],
+                               PTEBufferSizeInRequestsForLuma[k],
                                myPipe[k].PitchY,
                                myPipe[k].DCCMetaPitchY,
                                myPipe[k].BlockWidthY,
                                myPipe[k].BlockHeightY,
 
                                /* Output */
-                               &st_vars->MetaRowByteY[k],
-                               &st_vars->PixelPTEBytesPerRowY[k],
+                               &MetaRowByteY[k],
+                               &PixelPTEBytesPerRowY[k],
                                &dpte_row_width_luma_ub[k],
                                &dpte_row_height_luma[k],
                                &dpte_row_height_linear_luma[k],
-                               &st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k],
-                               &st_vars->dpte_row_width_luma_ub_one_row_per_frame[k],
-                               &st_vars->dpte_row_height_luma_one_row_per_frame[k],
+                               &PixelPTEBytesPerRowY_one_row_per_frame[k],
+                               &dpte_row_width_luma_ub_one_row_per_frame[k],
+                               &dpte_row_height_luma_one_row_per_frame[k],
                                &meta_req_width[k],
                                &meta_req_height[k],
                                &meta_row_width[k],
@@ -2098,19 +2119,19 @@ void dml32_CalculateVMRowAndSwath(
                                &VInitPreFillY[k],
                                &MaxNumSwathY[k]);
 
-               PDEAndMetaPTEBytesFrame[k] = st_vars->PDEAndMetaPTEBytesFrameY + st_vars->PDEAndMetaPTEBytesFrameC;
-               MetaRowByte[k] = st_vars->MetaRowByteY[k] + st_vars->MetaRowByteC[k];
+               PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
+               MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
 
-               if (st_vars->PixelPTEBytesPerRowY[k] <= 64 * st_vars->PTEBufferSizeInRequestsForLuma[k] &&
-                               st_vars->PixelPTEBytesPerRowC[k] <= 64 * st_vars->PTEBufferSizeInRequestsForChroma[k]) {
+               if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
+                               PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
                        PTEBufferSizeNotExceeded[k] = true;
                } else {
                        PTEBufferSizeNotExceeded[k] = false;
                }
 
-               st_vars->one_row_per_frame_fits_in_buffer[k] = (st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
-                       st_vars->PTEBufferSizeInRequestsForLuma[k] &&
-                       st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * st_vars->PTEBufferSizeInRequestsForChroma[k]);
+               one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
+                       PTEBufferSizeInRequestsForLuma[k] &&
+                       PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
        }
 
        dml32_CalculateMALLUseForStaticScreen(
@@ -2118,7 +2139,7 @@ void dml32_CalculateVMRowAndSwath(
                        MALLAllocatedForDCN,
                        UseMALLForStaticScreen,   // mode
                        SurfaceSizeInMALL,
-                       st_vars->one_row_per_frame_fits_in_buffer,
+                       one_row_per_frame_fits_in_buffer,
                        /* Output */
                        UsesMALLForStaticScreen); // boolen
 
@@ -2144,13 +2165,13 @@ void dml32_CalculateVMRowAndSwath(
                                !(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
 
                if (use_one_row_for_frame[k]) {
-                       dpte_row_height_luma[k] = st_vars->dpte_row_height_luma_one_row_per_frame[k];
-                       dpte_row_width_luma_ub[k] = st_vars->dpte_row_width_luma_ub_one_row_per_frame[k];
-                       st_vars->PixelPTEBytesPerRowY[k] = st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k];
-                       dpte_row_height_chroma[k] = st_vars->dpte_row_height_chroma_one_row_per_frame[k];
-                       dpte_row_width_chroma_ub[k] = st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k];
-                       st_vars->PixelPTEBytesPerRowC[k] = st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k];
-                       PTEBufferSizeNotExceeded[k] = st_vars->one_row_per_frame_fits_in_buffer[k];
+                       dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
+                       dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
+                       PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
+                       dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
+                       dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
+                       PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
+                       PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
                }
 
                if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
@@ -2158,7 +2179,7 @@ void dml32_CalculateVMRowAndSwath(
                else
                        DCCMetaBufferSizeNotExceeded[k] = false;
 
-               PixelPTEBytesPerRow[k] = st_vars->PixelPTEBytesPerRowY[k] + st_vars->PixelPTEBytesPerRowC[k];
+               PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
                if (use_one_row_for_frame[k])
                        PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
 
@@ -2169,11 +2190,11 @@ void dml32_CalculateVMRowAndSwath(
                                myPipe[k].VRatioChroma,
                                myPipe[k].DCCEnable,
                                myPipe[k].HTotal / myPipe[k].PixelClock,
-                               st_vars->MetaRowByteY[k], st_vars->MetaRowByteC[k],
+                               MetaRowByteY[k], MetaRowByteC[k],
                                meta_row_height[k],
                                meta_row_height_chroma[k],
-                               st_vars->PixelPTEBytesPerRowY[k],
-                               st_vars->PixelPTEBytesPerRowC[k],
+                               PixelPTEBytesPerRowY[k],
+                               PixelPTEBytesPerRowC[k],
                                dpte_row_height_luma[k],
                                dpte_row_height_chroma[k],
 
@@ -2189,12 +2210,12 @@ void dml32_CalculateVMRowAndSwath(
                dml_print("DML::%s: k=%d, dpte_row_height_luma         = %d\n",  __func__, k, dpte_row_height_luma[k]);
                dml_print("DML::%s: k=%d, dpte_row_width_luma_ub       = %d\n",
                                __func__, k, dpte_row_width_luma_ub[k]);
-               dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY         = %d\n",  __func__, k, st_vars->PixelPTEBytesPerRowY[k]);
+               dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY         = %d\n",  __func__, k, PixelPTEBytesPerRowY[k]);
                dml_print("DML::%s: k=%d, dpte_row_height_chroma       = %d\n",
                                __func__, k, dpte_row_height_chroma[k]);
                dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub     = %d\n",
                                __func__, k, dpte_row_width_chroma_ub[k]);
-               dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC         = %d\n",  __func__, k, st_vars->PixelPTEBytesPerRowC[k]);
+               dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC         = %d\n",  __func__, k, PixelPTEBytesPerRowC[k]);
                dml_print("DML::%s: k=%d, PixelPTEBytesPerRow          = %d\n",  __func__, k, PixelPTEBytesPerRow[k]);
                dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded     = %d\n",
                                __func__, k, PTEBufferSizeNotExceeded[k]);
@@ -3342,7 +3363,6 @@ double dml32_CalculateExtraLatency(
 } // CalculateExtraLatency
 
 bool dml32_CalculatePrefetchSchedule(
-               struct dml32_CalculatePrefetchSchedule *st_vars,
                double HostVMInefficiencyFactor,
                DmlPipe *myPipe,
                unsigned int DSCDelay,
@@ -3406,18 +3426,45 @@ bool dml32_CalculatePrefetchSchedule(
                double   *VReadyOffsetPix)
 {
        bool MyError = false;
-
-       st_vars->TimeForFetchingMetaPTE = 0;
-       st_vars->TimeForFetchingRowInVBlank = 0;
-       st_vars->LinesToRequestPrefetchPixelData = 0;
-       st_vars->max_vratio_pre = __DML_MAX_VRATIO_PRE__;
-       st_vars->Tsw_est1 = 0;
-       st_vars->Tsw_est3 = 0;
+       unsigned int DPPCycles, DISPCLKCycles;
+       double DSTTotalPixelsAfterScaler;
+       double LineTime;
+       double dst_y_prefetch_equ;
+       double prefetch_bw_oto;
+       double Tvm_oto;
+       double Tr0_oto;
+       double Tvm_oto_lines;
+       double Tr0_oto_lines;
+       double dst_y_prefetch_oto;
+       double TimeForFetchingMetaPTE = 0;
+       double TimeForFetchingRowInVBlank = 0;
+       double LinesToRequestPrefetchPixelData = 0;
+       unsigned int HostVMDynamicLevelsTrips;
+       double  trip_to_mem;
+       double  Tvm_trips;
+       double  Tr0_trips;
+       double  Tvm_trips_rounded;
+       double  Tr0_trips_rounded;
+       double  Lsw_oto;
+       double  Tpre_rounded;
+       double  prefetch_bw_equ;
+       double  Tvm_equ;
+       double  Tr0_equ;
+       double  Tdmbf;
+       double  Tdmec;
+       double  Tdmsks;
+       double  prefetch_sw_bytes;
+       double  bytes_pp;
+       double  dep_bytes;
+       unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+       double  min_Lsw;
+       double  Tsw_est1 = 0;
+       double  Tsw_est3 = 0;
 
        if (GPUVMEnable == true && HostVMEnable == true)
-               st_vars->HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+               HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
        else
-               st_vars->HostVMDynamicLevelsTrips = 0;
+               HostVMDynamicLevelsTrips = 0;
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
        dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
@@ -3440,19 +3487,19 @@ bool dml32_CalculatePrefetchSchedule(
                        TSetup,
 
                        /* output */
-                       &st_vars->Tdmbf,
-                       &st_vars->Tdmec,
-                       &st_vars->Tdmsks,
+                       &Tdmbf,
+                       &Tdmec,
+                       &Tdmsks,
                        VUpdateOffsetPix,
                        VUpdateWidthPix,
                        VReadyOffsetPix);
 
-       st_vars->LineTime = myPipe->HTotal / myPipe->PixelClock;
-       st_vars->trip_to_mem = UrgentLatency;
-       st_vars->Tvm_trips = UrgentExtraLatency + st_vars->trip_to_mem * (GPUVMPageTableLevels * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+       LineTime = myPipe->HTotal / myPipe->PixelClock;
+       trip_to_mem = UrgentLatency;
+       Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
 
        if (DynamicMetadataVMEnabled == true)
-               *Tdmdl = TWait + st_vars->Tvm_trips + st_vars->trip_to_mem;
+               *Tdmdl = TWait + Tvm_trips + trip_to_mem;
        else
                *Tdmdl = TWait + UrgentExtraLatency;
 
@@ -3462,15 +3509,15 @@ bool dml32_CalculatePrefetchSchedule(
 #endif
 
        if (DynamicMetadataEnable == true) {
-               if (VStartup * st_vars->LineTime < *TSetup + *Tdmdl + st_vars->Tdmbf + st_vars->Tdmec + st_vars->Tdmsks) {
+               if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
                        *NotEnoughTimeForDynamicMetadata = true;
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
                        dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
-                                       __func__, st_vars->Tdmbf);
-                       dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+                                       __func__, Tdmbf);
+                       dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
                        dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
-                                       __func__, st_vars->Tdmsks);
+                                       __func__, Tdmsks);
                        dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
                                        __func__, *Tdmdl);
 #endif
@@ -3482,21 +3529,21 @@ bool dml32_CalculatePrefetchSchedule(
        }
 
        *Tdmdl_vm =  (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
-                       GPUVMEnable == true ? TWait + st_vars->Tvm_trips : 0);
+                       GPUVMEnable == true ? TWait + Tvm_trips : 0);
 
        if (myPipe->ScalerEnabled)
-               st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
        else
-               st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
 
-       st_vars->DPPCycles = st_vars->DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+       DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
 
-       st_vars->DISPCLKCycles = DISPCLKDelaySubtotal;
+       DISPCLKCycles = DISPCLKDelaySubtotal;
 
        if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
                return true;
 
-       *DSTXAfterScaler = st_vars->DPPCycles * myPipe->PixelClock / myPipe->Dppclk + st_vars->DISPCLKCycles *
+       *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
                        myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
 
        *DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
@@ -3506,10 +3553,10 @@ bool dml32_CalculatePrefetchSchedule(
                        + ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: DPPCycles: %d\n", __func__, st_vars->DPPCycles);
+       dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
        dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
        dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
-       dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, st_vars->DISPCLKCycles);
+       dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
        dml_print("DML::%s: DISPCLK: %f\n", __func__,  myPipe->Dispclk);
        dml_print("DML::%s: DSCDelay: %d\n", __func__,  DSCDelay);
        dml_print("DML::%s: ODMMode: %d\n", __func__,  myPipe->ODMMode);
@@ -3522,9 +3569,9 @@ bool dml32_CalculatePrefetchSchedule(
        else
                *DSTYAfterScaler = 0;
 
-       st_vars->DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
-       *DSTYAfterScaler = dml_floor(st_vars->DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
-       *DSTXAfterScaler = st_vars->DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+       DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+       *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+       *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__,  *DSTXAfterScaler);
        dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
@@ -3532,132 +3579,132 @@ bool dml32_CalculatePrefetchSchedule(
 
        MyError = false;
 
-       st_vars->Tr0_trips = st_vars->trip_to_mem * (st_vars->HostVMDynamicLevelsTrips + 1);
+       Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
 
        if (GPUVMEnable == true) {
-               st_vars->Tvm_trips_rounded = dml_ceil(4.0 * st_vars->Tvm_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
-               st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+               Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
+               Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
                if (GPUVMPageTableLevels >= 3) {
-                       *Tno_bw = UrgentExtraLatency + st_vars->trip_to_mem *
-                                       (double) ((GPUVMPageTableLevels - 2) * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+                       *Tno_bw = UrgentExtraLatency + trip_to_mem *
+                                       (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
                } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
-                       st_vars->Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / st_vars->LineTime, 1.0) /
-                                       4.0 * st_vars->LineTime; // VBA_ERROR
+                       Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
+                                       4.0 * LineTime; // VBA_ERROR
                        *Tno_bw = UrgentExtraLatency;
                } else {
                        *Tno_bw = 0;
                }
        } else if (myPipe->DCCEnable == true) {
-               st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
-               st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+               Tvm_trips_rounded = LineTime / 4.0;
+               Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
                *Tno_bw = 0;
        } else {
-               st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
-               st_vars->Tr0_trips_rounded = st_vars->LineTime / 2.0;
+               Tvm_trips_rounded = LineTime / 4.0;
+               Tr0_trips_rounded = LineTime / 2.0;
                *Tno_bw = 0;
        }
-       st_vars->Tvm_trips_rounded = dml_max(st_vars->Tvm_trips_rounded, st_vars->LineTime / 4.0);
-       st_vars->Tr0_trips_rounded = dml_max(st_vars->Tr0_trips_rounded, st_vars->LineTime / 4.0);
+       Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
+       Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
 
        if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
                        || myPipe->SourcePixelFormat == dm_420_12) {
-               st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
+               bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
        } else {
-               st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
+               bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
        }
 
-       st_vars->prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+       prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
                        + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
-       st_vars->prefetch_bw_oto = dml_max(st_vars->bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
-                       st_vars->prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * st_vars->LineTime));
+       prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
+                       prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
 
-       st_vars->min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / st_vars->max_vratio_pre;
-       st_vars->min_Lsw = dml_max(st_vars->min_Lsw, 1.0);
-       st_vars->Lsw_oto = dml_ceil(4.0 * dml_max(st_vars->prefetch_sw_bytes / st_vars->prefetch_bw_oto / st_vars->LineTime, st_vars->min_Lsw), 1.0) / 4.0;
+       min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+       min_Lsw = dml_max(min_Lsw, 1.0);
+       Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
 
        if (GPUVMEnable == true) {
-               st_vars->Tvm_oto = dml_max3(
-                               st_vars->Tvm_trips,
-                               *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / st_vars->prefetch_bw_oto,
-                               st_vars->LineTime / 4.0);
+               Tvm_oto = dml_max3(
+                               Tvm_trips,
+                               *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+                               LineTime / 4.0);
        } else
-               st_vars->Tvm_oto = st_vars->LineTime / 4.0;
+               Tvm_oto = LineTime / 4.0;
 
        if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
-               st_vars->Tr0_oto = dml_max4(
-                               st_vars->Tr0_trips,
-                               (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto,
-                               (st_vars->LineTime - st_vars->Tvm_oto)/2.0,
-                               st_vars->LineTime / 4.0);
+               Tr0_oto = dml_max4(
+                               Tr0_trips,
+                               (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+                               (LineTime - Tvm_oto)/2.0,
+                               LineTime / 4.0);
 #ifdef __DML_VBA_DEBUG__
                dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
-                               (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto);
-               dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, st_vars->Tr0_trips);
-               dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, st_vars->LineTime - st_vars->Tvm_oto);
-               dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, st_vars->LineTime / 4);
+                               (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
+               dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
+               dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
+               dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
 #endif
        } else
-               st_vars->Tr0_oto = (st_vars->LineTime - st_vars->Tvm_oto) / 2.0;
+               Tr0_oto = (LineTime - Tvm_oto) / 2.0;
 
-       st_vars->Tvm_oto_lines = dml_ceil(4.0 * st_vars->Tvm_oto / st_vars->LineTime, 1) / 4.0;
-       st_vars->Tr0_oto_lines = dml_ceil(4.0 * st_vars->Tr0_oto / st_vars->LineTime, 1) / 4.0;
-       st_vars->dst_y_prefetch_oto = st_vars->Tvm_oto_lines + 2 * st_vars->Tr0_oto_lines + st_vars->Lsw_oto;
+       Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+       Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+       dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
 
-       st_vars->dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / st_vars->LineTime -
+       dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
                        (*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
-       dml_print("DML::%s: min_Lsw = %f\n", __func__, st_vars->min_Lsw);
+       dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
        dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
        dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
-       dml_print("DML::%s: trip_to_mem = %f\n", __func__, st_vars->trip_to_mem);
+       dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
        dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
        dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
        dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
        dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
        dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
        dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
-       dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, st_vars->prefetch_sw_bytes);
-       dml_print("DML::%s: bytes_pp = %f\n", __func__, st_vars->bytes_pp);
+       dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
+       dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
        dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
        dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
        dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
        dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
-       dml_print("DML::%s: Tvm_trips = %f\n", __func__, st_vars->Tvm_trips);
-       dml_print("DML::%s: Tr0_trips = %f\n", __func__, st_vars->Tr0_trips);
-       dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, st_vars->prefetch_bw_oto);
-       dml_print("DML::%s: Tr0_oto = %f\n", __func__, st_vars->Tr0_oto);
-       dml_print("DML::%s: Tvm_oto = %f\n", __func__, st_vars->Tvm_oto);
-       dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, st_vars->Tvm_oto_lines);
-       dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, st_vars->Tr0_oto_lines);
-       dml_print("DML::%s: Lsw_oto = %f\n", __func__, st_vars->Lsw_oto);
-       dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, st_vars->dst_y_prefetch_oto);
-       dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, st_vars->dst_y_prefetch_equ);
+       dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
+       dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
+       dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
+       dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
+       dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
+       dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
+       dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
+       dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
+       dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
+       dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
 #endif
 
-       st_vars->dst_y_prefetch_equ = dml_floor(4.0 * (st_vars->dst_y_prefetch_equ + 0.125), 1) / 4.0;
-       st_vars->Tpre_rounded = st_vars->dst_y_prefetch_equ * st_vars->LineTime;
+       dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+       Tpre_rounded = dst_y_prefetch_equ * LineTime;
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, st_vars->dst_y_prefetch_equ);
-       dml_print("DML::%s: LineTime: %f\n", __func__, st_vars->LineTime);
+       dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
+       dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
        dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
        dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
-                       __func__, VStartup * st_vars->LineTime);
+                       __func__, VStartup * LineTime);
        dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
        dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
-       dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, st_vars->Tdmbf);
-       dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+       dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
+       dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
        dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
        dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
        dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
                        __func__, *DSTYAfterScaler);
 #endif
-       st_vars->dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
+       dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
                        MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
 
-       if (st_vars->prefetch_sw_bytes < st_vars->dep_bytes)
-               st_vars->prefetch_sw_bytes = 2 * st_vars->dep_bytes;
+       if (prefetch_sw_bytes < dep_bytes)
+               prefetch_sw_bytes = 2 * dep_bytes;
 
        *PrefetchBandwidth = 0;
        *DestinationLinesToRequestVMInVBlank = 0;
@@ -3665,61 +3712,61 @@ bool dml32_CalculatePrefetchSchedule(
        *VRatioPrefetchY = 0;
        *VRatioPrefetchC = 0;
        *RequiredPrefetchPixDataBWLuma = 0;
-       if (st_vars->dst_y_prefetch_equ > 1) {
+       if (dst_y_prefetch_equ > 1) {
                double PrefetchBandwidth1;
                double PrefetchBandwidth2;
                double PrefetchBandwidth3;
                double PrefetchBandwidth4;
 
-               if (st_vars->Tpre_rounded - *Tno_bw > 0) {
+               if (Tpre_rounded - *Tno_bw > 0) {
                        PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
                                        + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
-                                       + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - *Tno_bw);
-                       st_vars->Tsw_est1 = st_vars->prefetch_sw_bytes / PrefetchBandwidth1;
+                                       + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
+                       Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
                } else
                        PrefetchBandwidth1 = 0;
 
-               if (VStartup == MaxVStartup && (st_vars->Tsw_est1 / st_vars->LineTime < st_vars->min_Lsw)
-                               && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw > 0) {
+               if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
+                               && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
                        PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
                                        + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
-                                       / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw);
+                                       / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
                }
 
-               if (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded > 0)
-                       PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + st_vars->prefetch_sw_bytes) /
-                       (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded);
+               if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+                       PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
+                       (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
                else
                        PrefetchBandwidth2 = 0;
 
-               if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded > 0) {
+               if (Tpre_rounded - Tvm_trips_rounded > 0) {
                        PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
-                                       + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded);
-                       st_vars->Tsw_est3 = st_vars->prefetch_sw_bytes / PrefetchBandwidth3;
+                                       + prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
+                       Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
                } else
                        PrefetchBandwidth3 = 0;
 
 
                if (VStartup == MaxVStartup &&
-                               (st_vars->Tsw_est3 / st_vars->LineTime < st_vars->min_Lsw) && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 *
-                               st_vars->LineTime - st_vars->Tvm_trips_rounded > 0) {
+                               (Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
+                               LineTime - Tvm_trips_rounded > 0) {
                        PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
-                                       / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - st_vars->Tvm_trips_rounded);
+                                       / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
                }
 
-               if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded > 0) {
-                       PrefetchBandwidth4 = st_vars->prefetch_sw_bytes /
-                                       (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded);
+               if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
+                       PrefetchBandwidth4 = prefetch_sw_bytes /
+                                       (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
                } else {
                        PrefetchBandwidth4 = 0;
                }
 
 #ifdef __DML_VBA_DEBUG__
-               dml_print("DML::%s: Tpre_rounded: %f\n", __func__, st_vars->Tpre_rounded);
+               dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
                dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
-               dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, st_vars->Tvm_trips_rounded);
-               dml_print("DML::%s: Tsw_est1: %f\n", __func__, st_vars->Tsw_est1);
-               dml_print("DML::%s: Tsw_est3: %f\n", __func__, st_vars->Tsw_est3);
+               dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
+               dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
+               dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
                dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
                dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
                dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
@@ -3732,9 +3779,9 @@ bool dml32_CalculatePrefetchSchedule(
 
                        if (PrefetchBandwidth1 > 0) {
                                if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
-                                               >= st_vars->Tvm_trips_rounded
+                                               >= Tvm_trips_rounded
                                                && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
-                                                               / PrefetchBandwidth1 >= st_vars->Tr0_trips_rounded) {
+                                                               / PrefetchBandwidth1 >= Tr0_trips_rounded) {
                                        Case1OK = true;
                                } else {
                                        Case1OK = false;
@@ -3745,9 +3792,9 @@ bool dml32_CalculatePrefetchSchedule(
 
                        if (PrefetchBandwidth2 > 0) {
                                if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
-                                               >= st_vars->Tvm_trips_rounded
+                                               >= Tvm_trips_rounded
                                                && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
-                                               / PrefetchBandwidth2 < st_vars->Tr0_trips_rounded) {
+                                               / PrefetchBandwidth2 < Tr0_trips_rounded) {
                                        Case2OK = true;
                                } else {
                                        Case2OK = false;
@@ -3758,9 +3805,9 @@ bool dml32_CalculatePrefetchSchedule(
 
                        if (PrefetchBandwidth3 > 0) {
                                if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
-                                               st_vars->Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
+                                               Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
                                                                HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
-                                                               st_vars->Tr0_trips_rounded) {
+                                                               Tr0_trips_rounded) {
                                        Case3OK = true;
                                } else {
                                        Case3OK = false;
@@ -3770,80 +3817,80 @@ bool dml32_CalculatePrefetchSchedule(
                        }
 
                        if (Case1OK)
-                               st_vars->prefetch_bw_equ = PrefetchBandwidth1;
+                               prefetch_bw_equ = PrefetchBandwidth1;
                        else if (Case2OK)
-                               st_vars->prefetch_bw_equ = PrefetchBandwidth2;
+                               prefetch_bw_equ = PrefetchBandwidth2;
                        else if (Case3OK)
-                               st_vars->prefetch_bw_equ = PrefetchBandwidth3;
+                               prefetch_bw_equ = PrefetchBandwidth3;
                        else
-                               st_vars->prefetch_bw_equ = PrefetchBandwidth4;
+                               prefetch_bw_equ = PrefetchBandwidth4;
 
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
                        dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
                        dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
-                       dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, st_vars->prefetch_bw_equ);
+                       dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
 #endif
 
-                       if (st_vars->prefetch_bw_equ > 0) {
+                       if (prefetch_bw_equ > 0) {
                                if (GPUVMEnable == true) {
-                                       st_vars->Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
-                                                       HostVMInefficiencyFactor / st_vars->prefetch_bw_equ,
-                                                       st_vars->Tvm_trips, st_vars->LineTime / 4);
+                                       Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
+                                                       HostVMInefficiencyFactor / prefetch_bw_equ,
+                                                       Tvm_trips, LineTime / 4);
                                } else {
-                                       st_vars->Tvm_equ = st_vars->LineTime / 4;
+                                       Tvm_equ = LineTime / 4;
                                }
 
                                if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
-                                       st_vars->Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
-                                                       HostVMInefficiencyFactor) / st_vars->prefetch_bw_equ, st_vars->Tr0_trips,
-                                                       (st_vars->LineTime - st_vars->Tvm_equ) / 2, st_vars->LineTime / 4);
+                                       Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
+                                                       HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
+                                                       (LineTime - Tvm_equ) / 2, LineTime / 4);
                                } else {
-                                       st_vars->Tr0_equ = (st_vars->LineTime - st_vars->Tvm_equ) / 2;
+                                       Tr0_equ = (LineTime - Tvm_equ) / 2;
                                }
                        } else {
-                               st_vars->Tvm_equ = 0;
-                               st_vars->Tr0_equ = 0;
+                               Tvm_equ = 0;
+                               Tr0_equ = 0;
 #ifdef __DML_VBA_DEBUG__
                                dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
 #endif
                        }
                }
 
-               if (st_vars->dst_y_prefetch_oto < st_vars->dst_y_prefetch_equ) {
-                       *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_oto;
-                       st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_oto;
-                       st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_oto;
-                       *PrefetchBandwidth = st_vars->prefetch_bw_oto;
+               if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+                       *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+                       TimeForFetchingMetaPTE = Tvm_oto;
+                       TimeForFetchingRowInVBlank = Tr0_oto;
+                       *PrefetchBandwidth = prefetch_bw_oto;
                } else {
-                       *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_equ;
-                       st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_equ;
-                       st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_equ;
-                       *PrefetchBandwidth = st_vars->prefetch_bw_equ;
+                       *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+                       TimeForFetchingMetaPTE = Tvm_equ;
+                       TimeForFetchingRowInVBlank = Tr0_equ;
+                       *PrefetchBandwidth = prefetch_bw_equ;
                }
 
-               *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * st_vars->TimeForFetchingMetaPTE / st_vars->LineTime, 1.0) / 4.0;
+               *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
 
                *DestinationLinesToRequestRowInVBlank =
-                               dml_ceil(4.0 * st_vars->TimeForFetchingRowInVBlank / st_vars->LineTime, 1.0) / 4.0;
+                               dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
 
-               st_vars->LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+               LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
                                *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
 
 #ifdef __DML_VBA_DEBUG__
                dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
                dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
                                __func__, *DestinationLinesToRequestVMInVBlank);
-               dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, st_vars->TimeForFetchingRowInVBlank);
-               dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+               dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
+               dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
                dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
                                __func__, *DestinationLinesToRequestRowInVBlank);
                dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
-               dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, st_vars->LinesToRequestPrefetchPixelData);
+               dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
 #endif
 
-               if (st_vars->LinesToRequestPrefetchPixelData >= 1 && st_vars->prefetch_bw_equ > 0) {
-                       *VRatioPrefetchY = (double) PrefetchSourceLinesY / st_vars->LinesToRequestPrefetchPixelData;
+               if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
+                       *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
                        *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
@@ -3851,12 +3898,12 @@ bool dml32_CalculatePrefetchSchedule(
                        dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
 #endif
                        if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
-                               if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+                               if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
                                        *VRatioPrefetchY =
                                                        dml_max((double) PrefetchSourceLinesY /
-                                                                       st_vars->LinesToRequestPrefetchPixelData,
+                                                                       LinesToRequestPrefetchPixelData,
                                                                        (double) MaxNumSwathY * SwathHeightY /
-                                                                       (st_vars->LinesToRequestPrefetchPixelData -
+                                                                       (LinesToRequestPrefetchPixelData -
                                                                        (VInitPreFillY - 3.0) / 2.0));
                                        *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
                                } else {
@@ -3870,7 +3917,7 @@ bool dml32_CalculatePrefetchSchedule(
 #endif
                        }
 
-                       *VRatioPrefetchC = (double) PrefetchSourceLinesC / st_vars->LinesToRequestPrefetchPixelData;
+                       *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
                        *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
 
 #ifdef __DML_VBA_DEBUG__
@@ -3879,11 +3926,11 @@ bool dml32_CalculatePrefetchSchedule(
                        dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
 #endif
                        if ((SwathHeightC > 4)) {
-                               if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+                               if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
                                        *VRatioPrefetchC =
                                                dml_max(*VRatioPrefetchC,
                                                        (double) MaxNumSwathC * SwathHeightC /
-                                                       (st_vars->LinesToRequestPrefetchPixelData -
+                                                       (LinesToRequestPrefetchPixelData -
                                                        (VInitPreFillC - 3.0) / 2.0));
                                        *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
                                } else {
@@ -3898,25 +3945,25 @@ bool dml32_CalculatePrefetchSchedule(
                        }
 
                        *RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
-                                       / st_vars->LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
-                                       / st_vars->LineTime;
+                                       / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
+                                       / LineTime;
 
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
                        dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
-                       dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+                       dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
                        dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
                                        __func__, *RequiredPrefetchPixDataBWLuma);
 #endif
                        *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
-                                       st_vars->LinesToRequestPrefetchPixelData
+                                       LinesToRequestPrefetchPixelData
                                        * myPipe->BytePerPixelC
-                                       * swath_width_chroma_ub / st_vars->LineTime;
+                                       * swath_width_chroma_ub / LineTime;
                } else {
                        MyError = true;
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
-                                       __func__, st_vars->LinesToRequestPrefetchPixelData);
+                                       __func__, LinesToRequestPrefetchPixelData);
 #endif
                        *VRatioPrefetchY = 0;
                        *VRatioPrefetchC = 0;
@@ -3925,15 +3972,15 @@ bool dml32_CalculatePrefetchSchedule(
                }
 #ifdef __DML_VBA_DEBUG__
                dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
-                       (double)st_vars->LinesToRequestPrefetchPixelData * st_vars->LineTime +
-                       2.0*st_vars->TimeForFetchingRowInVBlank + st_vars->TimeForFetchingMetaPTE);
-               dml_print("DML:  Tvm: %fus - time to fetch page tables for meta surface\n", st_vars->TimeForFetchingMetaPTE);
+                       (double)LinesToRequestPrefetchPixelData * LineTime +
+                       2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+               dml_print("DML:  Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
                dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
-                       (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime);
+                       (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
                dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
-               dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * st_vars->LineTime -
-                       st_vars->TimeForFetchingMetaPTE - 2*st_vars->TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
-                       ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime - TWait - TCalc - *TSetup);
+               dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
+                       TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
+                       ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
                dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
                                PixelPTEBytesPerRow);
 #endif
@@ -3941,7 +3988,7 @@ bool dml32_CalculatePrefetchSchedule(
                MyError = true;
 #ifdef __DML_VBA_DEBUG__
                dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
-                               __func__, st_vars->dst_y_prefetch_equ);
+                               __func__, dst_y_prefetch_equ);
 #endif
        }
 
@@ -3957,10 +4004,10 @@ bool dml32_CalculatePrefetchSchedule(
                        dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
                        dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
                                        __func__, *DestinationLinesToRequestVMInVBlank);
-                       dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+                       dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
 #endif
                        prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
-                                       (*DestinationLinesToRequestVMInVBlank * st_vars->LineTime);
+                                       (*DestinationLinesToRequestVMInVBlank * LineTime);
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
 #endif
@@ -3977,7 +4024,7 @@ bool dml32_CalculatePrefetchSchedule(
                        prefetch_row_bw = 0;
                } else if (*DestinationLinesToRequestRowInVBlank > 0) {
                        prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
-                                       (*DestinationLinesToRequestRowInVBlank * st_vars->LineTime);
+                                       (*DestinationLinesToRequestRowInVBlank * LineTime);
 
 #ifdef __DML_VBA_DEBUG__
                        dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
@@ -4000,12 +4047,12 @@ bool dml32_CalculatePrefetchSchedule(
 
        if (MyError) {
                *PrefetchBandwidth = 0;
-               st_vars->TimeForFetchingMetaPTE = 0;
-               st_vars->TimeForFetchingRowInVBlank = 0;
+               TimeForFetchingMetaPTE = 0;
+               TimeForFetchingRowInVBlank = 0;
                *DestinationLinesToRequestVMInVBlank = 0;
                *DestinationLinesToRequestRowInVBlank = 0;
                *DestinationLinesForPrefetch = 0;
-               st_vars->LinesToRequestPrefetchPixelData = 0;
+               LinesToRequestPrefetchPixelData = 0;
                *VRatioPrefetchY = 0;
                *VRatioPrefetchC = 0;
                *RequiredPrefetchPixDataBWLuma = 0;
@@ -4159,7 +4206,6 @@ void dml32_CalculateFlipSchedule(
 } // CalculateFlipSchedule
 
 void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-               struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
                bool USRRetrainingRequiredFinal,
                enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
                unsigned int PrefetchMode,
@@ -4221,15 +4267,37 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                double ActiveDRAMClockChangeLatencyMargin[])
 {
        unsigned int i, j, k;
-
-       st_vars->SurfaceWithMinActiveFCLKChangeMargin = 0;
-       st_vars->DRAMClockChangeSupportNumber = 0;
-       st_vars->DRAMClockChangeMethod = 0;
-       st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
-       st_vars->MinActiveFCLKChangeMargin = 0.;
-       st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
-       st_vars->TotalPixelBW = 0.0;
-       st_vars->TotalActiveWriteback = 0;
+       unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
+       unsigned int DRAMClockChangeSupportNumber = 0;
+       unsigned int LastSurfaceWithoutMargin;
+       unsigned int DRAMClockChangeMethod = 0;
+       bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
+       double MinActiveFCLKChangeMargin = 0.;
+       double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
+       double ActiveClockChangeLatencyHidingY;
+       double ActiveClockChangeLatencyHidingC;
+       double ActiveClockChangeLatencyHiding;
+    double EffectiveDETBufferSizeY;
+       double     ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
+       double     USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
+       double TotalPixelBW = 0.0;
+       bool    SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
+       double     EffectiveLBLatencyHidingY;
+       double     EffectiveLBLatencyHidingC;
+       double     LinesInDETY[DC__NUM_DPP__MAX];
+       double     LinesInDETC[DC__NUM_DPP__MAX];
+       unsigned int    LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+       unsigned int    LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
+       double     FullDETBufferingTimeY;
+       double     FullDETBufferingTimeC;
+       double     WritebackDRAMClockChangeLatencyMargin;
+       double     WritebackFCLKChangeLatencyMargin;
+       double     WritebackLatencyHiding;
+       bool    SameTimingForFCLKChange;
+
+       unsigned int    TotalActiveWriteback = 0;
+       unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
+       unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
 
        Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
        Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
@@ -4261,13 +4329,13 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
 #endif
 
 
-       st_vars->TotalActiveWriteback = 0;
+       TotalActiveWriteback = 0;
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
                if (WritebackEnable[k] == true)
-                       st_vars->TotalActiveWriteback = st_vars->TotalActiveWriteback + 1;
+                       TotalActiveWriteback = TotalActiveWriteback + 1;
        }
 
-       if (st_vars->TotalActiveWriteback <= 1) {
+       if (TotalActiveWriteback <= 1) {
                Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
        } else {
                Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
@@ -4277,7 +4345,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
                                + mmSOCParameters.USRRetrainingLatency;
 
-       if (st_vars->TotalActiveWriteback <= 1) {
+       if (TotalActiveWriteback <= 1) {
                Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
                                + mmSOCParameters.WritebackLatency;
                Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
@@ -4307,14 +4375,14 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
 #endif
 
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               st_vars->TotalPixelBW = st_vars->TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
+               TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
                                SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
        }
 
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
 
-               st_vars->LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
-               st_vars->LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+               LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
+               LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
 
 
 #ifdef __DML_VBA_DEBUG__
@@ -4325,72 +4393,72 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                dml_print("DML::%s: k=%d, VTaps              = %d\n", __func__, k, VTaps[k]);
 #endif
 
-               st_vars->EffectiveLBLatencyHidingY = st_vars->LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
-               st_vars->EffectiveLBLatencyHidingC = st_vars->LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
-               st_vars->EffectiveDETBufferSizeY = DETBufferSizeY[k];
+               EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveDETBufferSizeY = DETBufferSizeY[k];
 
                if (UnboundedRequestEnabled) {
-                       st_vars->EffectiveDETBufferSizeY = st_vars->EffectiveDETBufferSizeY
+                       EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
                                        + CompressedBufferSizeInkByte * 1024
                                                        * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
-                                                       / (HTotal[k] / PixelClock[k]) / st_vars->TotalPixelBW;
+                                                       / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
                }
 
-               st_vars->LinesInDETY[k] = (double) st_vars->EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
-               st_vars->LinesInDETYRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETY[k], SwathHeightY[k]);
-               st_vars->FullDETBufferingTimeY = st_vars->LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+               LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+               LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
 
-               st_vars->ActiveClockChangeLatencyHidingY = st_vars->EffectiveLBLatencyHidingY + st_vars->FullDETBufferingTimeY
+               ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
                                - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
 
                if (NumberOfActiveSurfaces > 1) {
-                       st_vars->ActiveClockChangeLatencyHidingY = st_vars->ActiveClockChangeLatencyHidingY
+                       ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
                                        - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
                                                        / PixelClock[k] / VRatio[k];
                }
 
                if (BytePerPixelDETC[k] > 0) {
-                       st_vars->LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
-                       st_vars->LinesInDETCRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETC[k], SwathHeightC[k]);
-                       st_vars->FullDETBufferingTimeC = st_vars->LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
+                       LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+                       LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
+                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
                                        / VRatioChroma[k];
-                       st_vars->ActiveClockChangeLatencyHidingC = st_vars->EffectiveLBLatencyHidingC + st_vars->FullDETBufferingTimeC
+                       ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
                                        - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
                                                        / PixelClock[k];
                        if (NumberOfActiveSurfaces > 1) {
-                               st_vars->ActiveClockChangeLatencyHidingC = st_vars->ActiveClockChangeLatencyHidingC
+                               ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
                                                - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
                                                                / PixelClock[k] / VRatioChroma[k];
                        }
-                       st_vars->ActiveClockChangeLatencyHiding = dml_min(st_vars->ActiveClockChangeLatencyHidingY,
-                                       st_vars->ActiveClockChangeLatencyHidingC);
+                       ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
+                                       ActiveClockChangeLatencyHidingC);
                } else {
-                       st_vars->ActiveClockChangeLatencyHiding = st_vars->ActiveClockChangeLatencyHidingY;
+                       ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
                }
 
-               ActiveDRAMClockChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+               ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
                                - Watermark->DRAMClockChangeWatermark;
-               st_vars->ActiveFCLKChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+               ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
                                - Watermark->FCLKChangeWatermark;
-               st_vars->USRRetrainingLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
+               USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
 
                if (WritebackEnable[k]) {
-                       st_vars->WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
+                       WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
                                        / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
                                                        / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
                        if (WritebackPixelFormat[k] == dm_444_64)
-                               st_vars->WritebackLatencyHiding = st_vars->WritebackLatencyHiding / 2;
+                               WritebackLatencyHiding = WritebackLatencyHiding / 2;
 
-                       st_vars->WritebackDRAMClockChangeLatencyMargin = st_vars->WritebackLatencyHiding
+                       WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
                                        - Watermark->WritebackDRAMClockChangeWatermark;
 
-                       st_vars->WritebackFCLKChangeLatencyMargin = st_vars->WritebackLatencyHiding
+                       WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
                                        - Watermark->WritebackFCLKChangeWatermark;
 
                        ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
-                                       st_vars->WritebackFCLKChangeLatencyMargin);
-                       st_vars->ActiveFCLKChangeLatencyMargin[k] = dml_min(st_vars->ActiveFCLKChangeLatencyMargin[k],
-                                       st_vars->WritebackDRAMClockChangeLatencyMargin);
+                                       WritebackFCLKChangeLatencyMargin);
+                       ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
+                                       WritebackDRAMClockChangeLatencyMargin);
                }
                MaxActiveDRAMClockChangeLatencySupported[k] =
                                (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
@@ -4409,41 +4477,41 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                                        HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
                                        VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
                                        (DRRDisplay[i] || DRRDisplay[j]))) {
-                               st_vars->SynchronizedSurfaces[i][j] = true;
+                               SynchronizedSurfaces[i][j] = true;
                        } else {
-                               st_vars->SynchronizedSurfaces[i][j] = false;
+                               SynchronizedSurfaces[i][j] = false;
                        }
                }
        }
 
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
                if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
-                               (!st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
-                               st_vars->ActiveFCLKChangeLatencyMargin[k] < st_vars->MinActiveFCLKChangeMargin)) {
-                       st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
-                       st_vars->MinActiveFCLKChangeMargin = st_vars->ActiveFCLKChangeLatencyMargin[k];
-                       st_vars->SurfaceWithMinActiveFCLKChangeMargin = k;
+                               (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
+                               ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
+                       FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
+                       MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
+                       SurfaceWithMinActiveFCLKChangeMargin = k;
                }
        }
 
-       *MinActiveFCLKChangeLatencySupported = st_vars->MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
+       *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
 
-       st_vars->SameTimingForFCLKChange = true;
+       SameTimingForFCLKChange = true;
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if (!st_vars->SynchronizedSurfaces[k][st_vars->SurfaceWithMinActiveFCLKChangeMargin]) {
+               if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
                        if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
-                                       (st_vars->SameTimingForFCLKChange ||
-                                       st_vars->ActiveFCLKChangeLatencyMargin[k] <
-                                       st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
-                               st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = st_vars->ActiveFCLKChangeLatencyMargin[k];
+                                       (SameTimingForFCLKChange ||
+                                       ActiveFCLKChangeLatencyMargin[k] <
+                                       SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
+                               SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
                        }
-                       st_vars->SameTimingForFCLKChange = false;
+                       SameTimingForFCLKChange = false;
                }
        }
 
-       if (st_vars->MinActiveFCLKChangeMargin > 0) {
+       if (MinActiveFCLKChangeMargin > 0) {
                *FCLKChangeSupport = dm_fclock_change_vactive;
-       } else if ((st_vars->SameTimingForFCLKChange || st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
+       } else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
                        (PrefetchMode <= 1)) {
                *FCLKChangeSupport = dm_fclock_change_vblank;
        } else {
@@ -4453,7 +4521,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
        *USRRetrainingSupport = true;
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
                if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
-                               (st_vars->USRRetrainingLatencyMargin[k] < 0)) {
+                               (USRRetrainingLatencyMargin[k] < 0)) {
                        *USRRetrainingSupport = false;
                }
        }
@@ -4464,42 +4532,42 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                                UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
                                ActiveDRAMClockChangeLatencyMargin[k] < 0) {
                        if (PrefetchMode > 0) {
-                               st_vars->DRAMClockChangeSupportNumber = 2;
-                       } else if (st_vars->DRAMClockChangeSupportNumber == 0) {
-                               st_vars->DRAMClockChangeSupportNumber = 1;
-                               st_vars->LastSurfaceWithoutMargin = k;
-                       } else if (st_vars->DRAMClockChangeSupportNumber == 1 &&
-                                       !st_vars->SynchronizedSurfaces[st_vars->LastSurfaceWithoutMargin][k]) {
-                               st_vars->DRAMClockChangeSupportNumber = 2;
+                               DRAMClockChangeSupportNumber = 2;
+                       } else if (DRAMClockChangeSupportNumber == 0) {
+                               DRAMClockChangeSupportNumber = 1;
+                               LastSurfaceWithoutMargin = k;
+                       } else if (DRAMClockChangeSupportNumber == 1 &&
+                                       !SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
+                               DRAMClockChangeSupportNumber = 2;
                        }
                }
        }
 
        for (k = 0; k < NumberOfActiveSurfaces; ++k) {
                if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
-                       st_vars->DRAMClockChangeMethod = 1;
+                       DRAMClockChangeMethod = 1;
                else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
-                       st_vars->DRAMClockChangeMethod = 2;
+                       DRAMClockChangeMethod = 2;
        }
 
-       if (st_vars->DRAMClockChangeMethod == 0) {
-               if (st_vars->DRAMClockChangeSupportNumber == 0)
+       if (DRAMClockChangeMethod == 0) {
+               if (DRAMClockChangeSupportNumber == 0)
                        *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
-               else if (st_vars->DRAMClockChangeSupportNumber == 1)
+               else if (DRAMClockChangeSupportNumber == 1)
                        *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
                else
                        *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
-       } else if (st_vars->DRAMClockChangeMethod == 1) {
-               if (st_vars->DRAMClockChangeSupportNumber == 0)
+       } else if (DRAMClockChangeMethod == 1) {
+               if (DRAMClockChangeSupportNumber == 0)
                        *DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
-               else if (st_vars->DRAMClockChangeSupportNumber == 1)
+               else if (DRAMClockChangeSupportNumber == 1)
                        *DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
                else
                        *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
        } else {
-               if (st_vars->DRAMClockChangeSupportNumber == 0)
+               if (DRAMClockChangeSupportNumber == 0)
                        *DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
-               else if (st_vars->DRAMClockChangeSupportNumber == 1)
+               else if (DRAMClockChangeSupportNumber == 1)
                        *DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
                else
                        *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
@@ -4513,7 +4581,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
 
                dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
                src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
-               src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + st_vars->LBLatencyHidingSourceLinesY[k];
+               src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
                sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
 
 #ifdef __DML_VBA_DEBUG__
@@ -4521,7 +4589,7 @@ dml_print("DML::%s: k=%d, DETBufferSizeY               = %d\n", __func__, k, DET
 dml_print("DML::%s: k=%d, BytePerPixelDETY             = %f\n", __func__, k, BytePerPixelDETY[k]);
 dml_print("DML::%s: k=%d, SwathWidthY                  = %d\n", __func__, k, SwathWidthY[k]);
 dml_print("DML::%s: k=%d, SwathHeightY                 = %d\n", __func__, k, SwathHeightY[k]);
-dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY  = %d\n", __func__, k, st_vars->LBLatencyHidingSourceLinesY[k]);
+dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY  = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
 dml_print("DML::%s: k=%d, dst_y_pstate      = %d\n", __func__, k, dst_y_pstate);
 dml_print("DML::%s: k=%d, src_y_pstate_l    = %d\n", __func__, k, src_y_pstate_l);
 dml_print("DML::%s: k=%d, src_y_ahead_l     = %d\n", __func__, k, src_y_ahead_l);
@@ -4532,7 +4600,7 @@ dml_print("DML::%s: k=%d, sub_vp_lines_l    = %d\n", __func__, k, sub_vp_lines_l
 
                if (BytePerPixelDETC[k] > 0) {
                        src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
-                       src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + st_vars->LBLatencyHidingSourceLinesC[k];
+                       src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
                        sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
                        SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
 
index 37a314ce284b24d019f73a5356c06738ae99e55b..d293856ba906b4032b5e461884a37392ae01fa18 100644 (file)
@@ -30,7 +30,6 @@
 #include "os_types.h"
 #include "../dc_features.h"
 #include "../display_mode_structs.h"
-#include "dml/display_mode_vba.h"
 
 unsigned int dml32_dscceComputeDelay(
                unsigned int bpc,
@@ -82,7 +81,6 @@ void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
                double *DPPCLKUsingSingleDPP);
 
 void dml32_CalculateSwathAndDETConfiguration(
-               struct dml32_CalculateSwathAndDETConfiguration *st_vars,
                unsigned int DETSizeOverride[],
                enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
                unsigned int ConfigReturnBufferSizeInKByte,
@@ -362,7 +360,6 @@ void dml32_CalculateSurfaceSizeInMall(
                bool *ExceededMALLSize);
 
 void dml32_CalculateVMRowAndSwath(
-               struct dml32_CalculateVMRowAndSwath *st_vars,
                unsigned int NumberOfActiveSurfaces,
                DmlPipe myPipe[],
                unsigned int SurfaceSizeInMALL[],
@@ -715,7 +712,6 @@ double dml32_CalculateExtraLatency(
                unsigned int HostVMMaxNonCachedPageTableLevels);
 
 bool dml32_CalculatePrefetchSchedule(
-               struct dml32_CalculatePrefetchSchedule *st_vars,
                double HostVMInefficiencyFactor,
                DmlPipe *myPipe,
                unsigned int DSCDelay,
@@ -811,7 +807,6 @@ void dml32_CalculateFlipSchedule(
                bool *ImmediateFlipSupportedForPipe);
 
 void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-               struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
                bool USRRetrainingRequiredFinal,
                enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
                unsigned int PrefetchMode,
index 84b4b00f29cbdda318d9aff98afcb5ca16cb66ae..c87091683b5dce2d8e7f1322e25473e8d2f3fbb7 100644 (file)
@@ -498,6 +498,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
                                dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
                }
 
+               if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+                               != dc->bb_overrides.fclk_clock_change_latency_ns
+                               && dc->bb_overrides.fclk_clock_change_latency_ns) {
+                       dcn3_21_soc.fclk_change_latency_us =
+                               dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+               }
+
                if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
                                != dc->bb_overrides.dummy_clock_change_latency_ns
                                && dc->bb_overrides.dummy_clock_change_latency_ns) {
index 8460aefe7b6d8205364059d32e1770dbb727694e..492aec634b685815a40c8cb304ae45427d98f323 100644 (file)
@@ -182,108 +182,6 @@ void Calculate256BBlockSizes(
                unsigned int *BlockWidth256BytesY,
                unsigned int *BlockWidth256BytesC);
 
-struct dml32_CalculateSwathAndDETConfiguration {
-       unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
-       unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
-       unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
-       unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
-       unsigned int RoundedUpSwathSizeBytesY;
-       unsigned int RoundedUpSwathSizeBytesC;
-       double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
-       double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
-       unsigned int TotalActiveDPP;
-       bool NoChromaSurfaces;
-       unsigned int DETBufferSizeInKByteForSwathCalculation;
-};
-
-struct dml32_CalculateVMRowAndSwath {
-       unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
-       unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
-       unsigned int PDEAndMetaPTEBytesFrameY;
-       unsigned int PDEAndMetaPTEBytesFrameC;
-       unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
-       unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
-       unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
-       unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
-       unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
-       unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
-       unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
-       unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
-       unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
-       unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
-       bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport {
-       unsigned int SurfaceWithMinActiveFCLKChangeMargin;
-       unsigned int DRAMClockChangeSupportNumber;
-       unsigned int LastSurfaceWithoutMargin;
-       unsigned int DRAMClockChangeMethod;
-       bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin;
-       double MinActiveFCLKChangeMargin;
-       double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank;
-       double ActiveClockChangeLatencyHidingY;
-       double ActiveClockChangeLatencyHidingC;
-       double ActiveClockChangeLatencyHiding;
-       double EffectiveDETBufferSizeY;
-       double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
-       double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
-       double TotalPixelBW;
-       bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
-       double EffectiveLBLatencyHidingY;
-       double EffectiveLBLatencyHidingC;
-       double LinesInDETY[DC__NUM_DPP__MAX];
-       double LinesInDETC[DC__NUM_DPP__MAX];
-       unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
-       unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
-       double FullDETBufferingTimeY;
-       double FullDETBufferingTimeC;
-       double WritebackDRAMClockChangeLatencyMargin;
-       double WritebackFCLKChangeLatencyMargin;
-       double WritebackLatencyHiding;
-       bool SameTimingForFCLKChange;
-       unsigned int TotalActiveWriteback;
-       unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
-       unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculatePrefetchSchedule {
-       unsigned int DPPCycles, DISPCLKCycles;
-       double DSTTotalPixelsAfterScaler;
-       double LineTime;
-       double dst_y_prefetch_equ;
-       double prefetch_bw_oto;
-       double Tvm_oto;
-       double Tr0_oto;
-       double Tvm_oto_lines;
-       double Tr0_oto_lines;
-       double dst_y_prefetch_oto;
-       double TimeForFetchingMetaPTE;
-       double TimeForFetchingRowInVBlank;
-       double LinesToRequestPrefetchPixelData;
-       unsigned int HostVMDynamicLevelsTrips;
-       double trip_to_mem;
-       double Tvm_trips;
-       double Tr0_trips;
-       double Tvm_trips_rounded;
-       double Tr0_trips_rounded;
-       double Lsw_oto;
-       double Tpre_rounded;
-       double prefetch_bw_equ;
-       double Tvm_equ;
-       double Tr0_equ;
-       double Tdmbf;
-       double Tdmec;
-       double Tdmsks;
-       double prefetch_sw_bytes;
-       double bytes_pp;
-       double dep_bytes;
-       unsigned int max_vratio_pre;
-       double min_Lsw;
-       double Tsw_est1;
-       double Tsw_est3;
-};
-
 struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation {
        unsigned int dummy_integer_array[2][DC__NUM_DPP__MAX];
        double dummy_single_array[2][DC__NUM_DPP__MAX];
@@ -355,10 +253,6 @@ struct dummy_vars {
        struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
        DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation;
        struct dml32_ModeSupportAndSystemConfigurationFull dml32_ModeSupportAndSystemConfigurationFull;
-       struct dml32_CalculateSwathAndDETConfiguration dml32_CalculateSwathAndDETConfiguration;
-       struct dml32_CalculateVMRowAndSwath dml32_CalculateVMRowAndSwath;
-       struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport;
-       struct dml32_CalculatePrefetchSchedule dml32_CalculatePrefetchSchedule;
 };
 
 struct vba_vars_st {
index ab06c7fc74524c0f3dba85aa4afc6cfe0dd2ada4..9f3558c0ef110c4a524058f36116a9fb759a585c 100644 (file)
@@ -244,13 +244,15 @@ enum {
 #define ASICREV_IS_GC_10_3_7(eChipRev) ((eChipRev >= GC_10_3_7_A0) && (eChipRev < GC_10_3_7_UNKNOWN))
 
 #define AMDGPU_FAMILY_GC_11_0_0 145
-#define AMDGPU_FAMILY_GC_11_0_2 148
+#define AMDGPU_FAMILY_GC_11_0_1 148
 #define GC_11_0_0_A0 0x1
 #define GC_11_0_2_A0 0x10
+#define GC_11_0_3_A0 0x20
 #define GC_11_UNKNOWN 0xFF
 
 #define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
-#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
+#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
 
 /*
  * ASIC chip ID
index f093b49c5e6e6143cf0d658514d78fd75f7e6cd0..3bf08a60c45c6e79dc893f1bb1b6dcb6f4d0a9ec 100644 (file)
@@ -119,13 +119,15 @@ enum dc_log_type {
        LOG_HDMI_RETIMER_REDRIVER,
        LOG_DSC,
        LOG_SMU_MSG,
+       LOG_DC2RESERVED4,
+       LOG_DC2RESERVED5,
        LOG_DWB,
        LOG_GAMMA_DEBUG,
        LOG_MAX_HW_POINTS,
        LOG_ALL_TF_CHANNELS,
        LOG_SAMPLE_1DLUT,
        LOG_DP2,
-       LOG_SECTION_TOTAL_COUNT
+       LOG_DC2RESERVED12,
 };
 
 #define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
index da09ba7589f7316e0dec6b00df3d6e10527dab42..0f39ab9dc5b418d32e8d8d259fd5219fe318cdbc 100644 (file)
@@ -613,10 +613,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
         * Note: We should never go above the field rate of the mode timing set.
         */
        infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
-
-       /* FreeSync HDR */
-       infopacket->sb[9] = 0;
-       infopacket->sb[10] = 0;
 }
 
 static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
@@ -684,10 +680,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
 
        /* PB16 : Reserved bits 7:1, FixedRate bit 0 */
        infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
-
-       //FreeSync HDR
-       infopacket->sb[9] = 0;
-       infopacket->sb[10] = 0;
 }
 
 static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
@@ -772,8 +764,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
                /* HB2  = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
                infopacket->hb2 = 0x09;
 
-               *payload_size = 0x0A;
-
+               *payload_size = 0x09;
        } else if (dc_is_dp_signal(signal)) {
 
                /* HEADER */
@@ -822,9 +813,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal,
                infopacket->hb1 = version;
 
                /* HB2  = [Bits 7:5 = 0] [Bits 4:0 = Length] */
-               *payload_size = 0x10;
-               infopacket->hb2 = *payload_size - 1; //-1 for checksum
+               infopacket->hb2 = 0x10;
 
+               *payload_size = 0x10;
        } else if (dc_is_dp_signal(signal)) {
 
                /* HEADER */
index 2ed95790a6006305b7c3d54260be8917efdf27de..cf8d60c4df1beebda83372da65a857a597eb30ad 100644 (file)
 #define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX                                                       5
 #define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS                                                            0x420186
 #define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX                                                   5
+#define regBIF0_PCIE_TX_POWER_CTRL_1                                                                    0x420187
+#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX                                                           5
 #define regBIF0_PCIE_TX_CTRL_4                                                                          0x42018b
 #define regBIF0_PCIE_TX_CTRL_4_BASE_IDX                                                                 5
 #define regBIF0_PCIE_TX_STATUS                                                                          0x420194
index eb62a18fcc48077ca255f4e3996cfab2f0849b6f..3d60c9e92548126e10e7b84cbed1b7d56ee2b742 100644 (file)
 #define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK                                              0x0000000EL
 #define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK                                           0x00007F00L
 #define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK                                      0x00008000L
+//BIF0_PCIE_TX_POWER_CTRL_1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT                                                       0x0
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT                                                       0x1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT                                                       0x2
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT                                                    0x3
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT                                                    0x4
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT                                                    0x5
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK                                                         0x00000001L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK                                                         0x00000002L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK                                                         0x00000004L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK                                                      0x00000008L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK                                                      0x00000010L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK                                                      0x00000020L
 //BIF0_PCIE_TX_CTRL_4
 #define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT                                                 0x0
 #define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK                                                   0x0000000FL
index 78620b0bd27904be22bf5ddf90b1cdad4bc796a1..f745cd8f1ab76e29709c30c2985a21e01324cce5 100644 (file)
 #ifndef SMU13_DRIVER_IF_V13_0_0_H
 #define SMU13_DRIVER_IF_V13_0_0_H
 
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION  0x23
-
 //Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x22
 
 #define NUM_GFXCLK_DPM_LEVELS    16
 #define NUM_SOCCLK_DPM_LEVELS    8
@@ -1193,8 +1189,17 @@ typedef struct {
   // SECTION: Advanced Options
   uint32_t          DebugOverrides;
 
+  // Section: Total Board Power idle vs active coefficients
+  uint8_t     TotalBoardPowerSupport;
+  uint8_t     TotalBoardPowerPadding[3];
+
+  int16_t     TotalIdleBoardPowerM;
+  int16_t     TotalIdleBoardPowerB;
+  int16_t     TotalBoardPowerM;
+  int16_t     TotalBoardPowerB;
+
   // SECTION: Sku Reserved
-  uint32_t         Spare[64];
+  uint32_t         Spare[61];
 
   // Padding for MMHUB - do not modify this
   uint32_t     MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
   // SECTION: Clock Spread Spectrum
 
   // UCLK Spread Spectrum
-  uint16_t     UclkSpreadPadding;
+  uint8_t      UclkTrainingModeSpreadPercent;
+  uint8_t      UclkSpreadPadding;
   uint16_t     UclkSpreadFreq;      // kHz
 
   // UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {
 
   // Section: Memory Config
   uint8_t      DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
-  uint8_t      PaddingMem1[3];
-
-  // Section: Total Board Power
-  uint16_t     TotalBoardPower;     //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
-  uint16_t     BoardPowerPadding;
+  uint8_t      PaddingMem1[7];
 
   // SECTION: UMC feature flags
   uint8_t      HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
   uint16_t Vcn1ActivityPercentage  ;
 
   uint32_t EnergyAccumulator;
-  uint16_t AverageSocketPower    ;
+  uint16_t AverageSocketPower;
+  uint16_t AverageTotalBoardPower;
+
   uint16_t AvgTemperature[TEMP_COUNT];
+  uint16_t TempPadding;
 
   uint8_t  PcieRate               ;
   uint8_t  PcieWidth              ;
index 76f695a1d0658a2cdf9eb90c0642a9fdff5d6361..ae2d337158f3b0b3d8993245273ba6f050b2ebee 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if
 // any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
 
 typedef struct {
   int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
 
   uint16_t SkinTemp;
   uint16_t DeviceState;
+  uint16_t CurTemp;                     //[centi-Celsius]
+  uint16_t spare2;
 } SmuMetrics_t;
 
 typedef struct {
index c02e5e576728231d0842b14a7cc4716356227eaf..ac308e72241a5b53542f0a01d1c435581942b7ea 100644 (file)
@@ -28,9 +28,9 @@
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
 
 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
index fa520d79ef67fc698a930285c1a4011fb4765f9f..6db67f082d91758eece57c919e14e705e0148354 100644 (file)
@@ -4283,6 +4283,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .dump_pptable = sienna_cichlid_dump_pptable,
        .init_microcode = smu_v11_0_init_microcode,
        .load_microcode = smu_v11_0_load_microcode,
+       .fini_microcode = smu_v11_0_fini_microcode,
        .init_smc_tables = sienna_cichlid_init_smc_tables,
        .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .init_power = smu_v11_0_init_power,
index e8fe84f806d172f98b56b5c411f566c26e742b56..18ee3b5e64c50fe5fa5450aba56749c50c66fe68 100644 (file)
@@ -212,6 +212,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
        if (!adev->scpm_enabled)
                return 0;
 
+       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+               return 0;
+
        /* override pptable_id from driver parameter */
        if (amdgpu_smu_pptable_id >= 0) {
                pptable_id = amdgpu_smu_pptable_id;
@@ -219,16 +222,10 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
        } else {
                pptable_id = smu->smu_table.boot_values.pp_table_id;
 
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
-                       pptable_id == 3667)
-                       pptable_id = 36671;
-
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
-                       pptable_id == 3688)
-                       pptable_id = 36881;
                /*
                 * Temporary solution for SMU V13.0.0 with SCPM enabled:
                 *   - use 36831 signed pptable when pp_table_id is 3683
+                *   - use 37151 signed pptable when pp_table_id is 3715
                 *   - use 36641 signed pptable when pp_table_id is 3664 or 0
                 * TODO: drop these when the pptable carried in vbios is ready.
                 */
@@ -241,6 +238,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
                        case 3683:
                                pptable_id = 36831;
                                break;
+                       case 3715:
+                               pptable_id = 37151;
+                               break;
                        default:
                                dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
                                return -EINVAL;
@@ -478,7 +478,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
 
                /*
                 * Temporary solution for SMU V13.0.0 with SCPM disabled:
-                *   - use 3664 or 3683 on request
+                *   - use 3664, 3683 or 3715 on request
                 *   - use 3664 when pptable_id is 0
                 * TODO: drop these when the pptable carried in vbios is ready.
                 */
@@ -489,6 +489,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
                                break;
                        case 3664:
                        case 3683:
+                       case 3715:
                                break;
                        default:
                                dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
@@ -2344,8 +2345,8 @@ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
 
        index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
                                               SMU_MSG_EnableGfxImu);
-
-       return smu_cmn_send_msg_without_waiting(smu, index, 0);
+       /* Param 1 to tell PMFW to enable GFXOFF feature */
+       return smu_cmn_send_msg_without_waiting(smu, index, 1);
 }
 
 int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
index 1bbeceeb9e3cbd67f2df9fdb68c160f3ad7b8694..df4a47acd72472353625ea41a3fadb499fa6743a 100644 (file)
@@ -1792,7 +1792,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .dump_pptable = smu_v13_0_0_dump_pptable,
        .init_microcode = smu_v13_0_init_microcode,
        .load_microcode = smu_v13_0_load_microcode,
+       .fini_microcode = smu_v13_0_fini_microcode,
        .init_smc_tables = smu_v13_0_0_init_smc_tables,
+       .fini_smc_tables = smu_v13_0_fini_smc_tables,
        .init_power = smu_v13_0_init_power,
        .fini_power = smu_v13_0_fini_power,
        .check_fw_status = smu_v13_0_check_fw_status,
index 82d3718d83244f2e8c2fbbc4789d0ea73cc28bb9..97e1d55dcaad5149d6c7ef9acd4df42cbbc4a76d 100644 (file)
@@ -71,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  1),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetPmfwVersion,               1),
        MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,           1),
-       MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,                 1),
        MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,                  1),
        MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,               1),
        MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,                 1),
@@ -199,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
        kfree(smu_table->watermarks_table);
        smu_table->watermarks_table = NULL;
 
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+
        return 0;
 }
 
@@ -226,18 +228,6 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
        return ret;
 }
 
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
-       struct amdgpu_device *adev = smu->adev;
-       int ret = 0;
-
-       /* allow message will be sent after enable message */
-       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
-       if (ret)
-               dev_err(adev->dev, "Failed to Enable GfxOff!\n");
-       return ret;
-}
-
 static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
                                           void **table)
 {
@@ -1026,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
        .set_driver_table_location = smu_v13_0_set_driver_table_location,
        .gfx_off_control = smu_v13_0_gfx_off_control,
-       .post_init = smu_v13_0_4_post_smu_init,
        .mode2_reset = smu_v13_0_4_mode2_reset,
        .get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
        .od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
index 47360ef5c17589d1ef4ceee33b49d6b0064ca39b..66445964efbd1e5a94c7cfa3d2bbfca7b76e8c89 100644 (file)
@@ -176,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
        kfree(smu_table->watermarks_table);
        smu_table->watermarks_table = NULL;
 
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+
        return 0;
 }
 
index 9dd56e73218be8b37c613ba7610cce412d5a1a18..1016d1c216d8c7e5576f95ce97268a777513959b 100644 (file)
@@ -1567,6 +1567,16 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
        return ret;
 }
 
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       /* SRIOV does not support SMU mode1 reset */
+       if (amdgpu_sriov_vf(adev))
+               return false;
+
+       return true;
+}
 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1574,7 +1584,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .dump_pptable = smu_v13_0_7_dump_pptable,
        .init_microcode = smu_v13_0_init_microcode,
        .load_microcode = smu_v13_0_load_microcode,
+       .fini_microcode = smu_v13_0_fini_microcode,
        .init_smc_tables = smu_v13_0_7_init_smc_tables,
+       .fini_smc_tables = smu_v13_0_fini_smc_tables,
        .init_power = smu_v13_0_init_power,
        .fini_power = smu_v13_0_fini_power,
        .check_fw_status = smu_v13_0_7_check_fw_status,
@@ -1624,6 +1636,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .baco_set_state = smu_v13_0_baco_set_state,
        .baco_enter = smu_v13_0_baco_enter,
        .baco_exit = smu_v13_0_baco_exit,
+       .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+       .mode1_reset = smu_v13_0_mode1_reset,
        .set_mp1_state = smu_v13_0_7_set_mp1_state,
 };
 
index 702ea803a743c43ce71786889c11c90ede80c1ae..39e7004de720056b37cc2eb1c9f70dff2d8ec796 100644 (file)
@@ -180,7 +180,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
                of_node_put(bus_node);
                if (ret == -ENODEV) {
                        dev_warn(dev, "missing 'data-mapping' DT property\n");
-               } else if (ret) {
+               } else if (ret < 0) {
                        dev_err(dev, "invalid 'data-mapping' DT property\n");
                        return ret;
                } else {
index 86d670c712867163d8329c19c93ca8d7e1a64a92..ad068865ba2061a7d4a7778607c5666ea193fa71 100644 (file)
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
-       /*
-        * Note: obj->dma_buf can't disappear as long as we still hold a
-        * handle reference in obj->handle_count.
-        */
-       mutex_lock(&filp->prime.lock);
-       if (obj->dma_buf) {
-               drm_prime_remove_buf_handle_locked(&filp->prime,
-                                                  obj->dma_buf);
-       }
-       mutex_unlock(&filp->prime.lock);
-}
-
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
        if (obj->funcs->close)
                obj->funcs->close(obj, file_priv);
 
-       drm_gem_remove_prime_handles(obj, file_priv);
+       drm_prime_remove_buf_handle(&file_priv->prime, id);
        drm_vma_node_revoke(&obj->vma_node, file_priv);
 
        drm_gem_object_handle_put_unlocked(obj);
index 1fbbc19f1ac097b040e6ebcbff34b2fdeba8c9d1..7bb98e6a446d08311e9714536f7397bfc7511c7b 100644 (file)
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
-                                       struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+                                uint32_t handle);
 
 /* drm_drv.c */
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
index a3f180653b8bb5a0fe70e96dd3c2aa500a95f2b9..eb09e86044c6d4df1cade4dfe0d63099d9f9ab94 100644 (file)
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
        return -ENOENT;
 }
 
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
-                                       struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+                                uint32_t handle)
 {
        struct rb_node *rb;
 
-       rb = prime_fpriv->dmabufs.rb_node;
+       mutex_lock(&prime_fpriv->lock);
+
+       rb = prime_fpriv->handles.rb_node;
        while (rb) {
                struct drm_prime_member *member;
 
-               member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
-               if (member->dma_buf == dma_buf) {
+               member = rb_entry(rb, struct drm_prime_member, handle_rb);
+               if (member->handle == handle) {
                        rb_erase(&member->handle_rb, &prime_fpriv->handles);
                        rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
 
-                       dma_buf_put(dma_buf);
+                       dma_buf_put(member->dma_buf);
                        kfree(member);
-                       return;
-               } else if (member->dma_buf < dma_buf) {
+                       break;
+               } else if (member->handle < handle) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }
+
+       mutex_unlock(&prime_fpriv->lock);
 }
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
index ccec4055fde3edd325b8b0075feb4de32b5c4e88..389e9f157ca5efcdcd719f910044058509e2dad0 100644 (file)
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
  */
 void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
 {
-       assert_object_held(obj);
+       assert_object_held_shared(obj);
 
        if (!list_empty(&obj->vma.list)) {
                struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                        continue;
                }
 
-               if (!i915_gem_object_trylock(obj, NULL)) {
-                       /* busy, toss it back to the pile */
-                       if (llist_add(&obj->freed, &i915->mm.free_list))
-                               queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
-                       continue;
-               }
-
                __i915_gem_object_pages_fini(obj);
-               i915_gem_object_unlock(obj);
                __i915_gem_free_object(obj);
 
                /* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 static void __i915_gem_free_work(struct work_struct *work)
 {
        struct drm_i915_private *i915 =
-               container_of(work, struct drm_i915_private, mm.free_work.work);
+               container_of(work, struct drm_i915_private, mm.free_work);
 
        i915_gem_flush_free_objects(i915);
 }
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
         */
 
        if (llist_add(&obj->freed, &i915->mm.free_list))
-               queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+               queue_work(i915->wq, &i915->mm.free_work);
 }
 
 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -745,7 +737,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
 
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
-       INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+       INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
 void i915_objects_module_exit(void)
index 5cf36a130061d96d780fc526b0dd3fc01210209a..9f6b14ec189a2e33d62be44958a494724ae7b189 100644 (file)
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
 #define I915_BO_READONLY          BIT(7)
 #define I915_TILING_QUIRK_BIT     8 /* unknown swizzling; do not release! */
 #define I915_BO_PROTECTED         BIT(9)
-#define I915_BO_WAS_BOUND_BIT     10
        /**
         * @mem_flags - Mutable placement-related flags
         *
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
                 * pages were last acquired.
                 */
                bool dirty:1;
+
+               u32 tlb;
        } mm;
 
        struct {
index 97c820eee115adc91641bce392f1a052dce9eba7..8357dbdcab5cb0e406e2bba97a8f7f528fb85ba1 100644 (file)
@@ -6,14 +6,15 @@
 
 #include <drm/drm_cache.h>
 
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_gem_lmem.h"
 #include "i915_gem_mman.h"
 
-#include "gt/intel_gt.h"
-
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
                vunmap(ptr);
 }
 
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct intel_gt *gt = to_gt(i915);
+
+       if (!obj->mm.tlb)
+               return;
+
+       intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+       obj->mm.tlb = 0;
+}
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
        __i915_gem_object_reset_page_iter(obj);
        obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
 
-       if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
-               struct drm_i915_private *i915 = to_i915(obj->base.dev);
-               intel_wakeref_t wakeref;
-
-               with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
-                       intel_gt_invalidate_tlbs(to_gt(i915));
-       }
+       flush_tlb_invalidate(obj);
 
        return pages;
 }
index 68c2b0d8f18761d4866aa665316b71f631bf863f..f435e06125aab0b99d79a842b857c6cffdfab068 100644 (file)
@@ -11,7 +11,9 @@
 #include "pxp/intel_pxp.h"
 
 #include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
 #include "intel_context.h"
+#include "intel_engine_pm.h"
 #include "intel_engine_regs.h"
 #include "intel_ggtt_gmch.h"
 #include "intel_gt.h"
@@ -36,8 +38,6 @@ static void __intel_gt_init_early(struct intel_gt *gt)
 {
        spin_lock_init(&gt->irq_lock);
 
-       mutex_init(&gt->tlb_invalidate_lock);
-
        INIT_LIST_HEAD(&gt->closed_vma);
        spin_lock_init(&gt->closed_lock);
 
@@ -48,6 +48,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
        intel_gt_init_reset(gt);
        intel_gt_init_requests(gt);
        intel_gt_init_timelines(gt);
+       mutex_init(&gt->tlb.invalidate_lock);
+       seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
        intel_gt_pm_init_early(gt);
 
        intel_uc_init_early(&gt->uc);
@@ -768,6 +770,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
                intel_gt_fini_requests(gt);
                intel_gt_fini_reset(gt);
                intel_gt_fini_timelines(gt);
+               mutex_destroy(&gt->tlb.invalidate_lock);
                intel_engines_free(gt);
        }
 }
@@ -906,7 +909,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
        return rb;
 }
 
-void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+static void mmio_invalidate_full(struct intel_gt *gt)
 {
        static const i915_reg_t gen8_regs[] = {
                [RENDER_CLASS]                  = GEN8_RTCR,
@@ -924,13 +927,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        struct intel_engine_cs *engine;
+       intel_engine_mask_t awake, tmp;
        enum intel_engine_id id;
        const i915_reg_t *regs;
        unsigned int num = 0;
 
-       if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
-               return;
-
        if (GRAPHICS_VER(i915) == 12) {
                regs = gen12_regs;
                num = ARRAY_SIZE(gen12_regs);
@@ -945,28 +946,41 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
                          "Platform does not implement TLB invalidation!"))
                return;
 
-       GEM_TRACE("\n");
-
-       assert_rpm_wakelock_held(&i915->runtime_pm);
-
-       mutex_lock(&gt->tlb_invalidate_lock);
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
        spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
 
+       awake = 0;
        for_each_engine(engine, gt, id) {
                struct reg_and_bit rb;
 
+               if (!intel_engine_pm_is_awake(engine))
+                       continue;
+
                rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
                if (!i915_mmio_reg_offset(rb.reg))
                        continue;
 
                intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+               awake |= engine->mask;
        }
 
+       GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+       /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+       if (awake &&
+           (IS_TIGERLAKE(i915) ||
+            IS_DG1(i915) ||
+            IS_ROCKETLAKE(i915) ||
+            IS_ALDERLAKE_S(i915) ||
+            IS_ALDERLAKE_P(i915)))
+               intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
        spin_unlock_irq(&uncore->lock);
 
-       for_each_engine(engine, gt, id) {
+       for_each_engine_masked(engine, gt, awake, tmp) {
+               struct reg_and_bit rb;
+
                /*
                 * HW architecture suggest typical invalidation time at 40us,
                 * with pessimistic cases up to 100us and a recommendation to
@@ -974,12 +988,8 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
                 */
                const unsigned int timeout_us = 100;
                const unsigned int timeout_ms = 4;
-               struct reg_and_bit rb;
 
                rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
-               if (!i915_mmio_reg_offset(rb.reg))
-                       continue;
-
                if (__intel_wait_for_register_fw(uncore,
                                                 rb.reg, rb.bit, 0,
                                                 timeout_us, timeout_ms,
@@ -996,5 +1006,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
         * transitions.
         */
        intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
-       mutex_unlock(&gt->tlb_invalidate_lock);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+       u32 cur = intel_gt_tlb_seqno(gt);
+
+       /* Only skip if a *full* TLB invalidate barrier has passed */
+       return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+       intel_wakeref_t wakeref;
+
+       if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+               return;
+
+       if (intel_gt_is_wedged(gt))
+               return;
+
+       if (tlb_seqno_passed(gt, seqno))
+               return;
+
+       with_intel_gt_pm_if_awake(gt, wakeref) {
+               mutex_lock(&gt->tlb.invalidate_lock);
+               if (tlb_seqno_passed(gt, seqno))
+                       goto unlock;
+
+               mmio_invalidate_full(gt);
+
+               write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+               mutex_unlock(&gt->tlb.invalidate_lock);
+       }
 }
index 82d6f248d876256f1831369951665ec526a826aa..40b06adf509a28c7a3c44f32e0c8ed03e798ebad 100644 (file)
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,
 
 void intel_gt_watchdog_work(struct work_struct *work);
 
-void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+       return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+       return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
 
 #endif /* __INTEL_GT_H__ */
index bc898df7a48ccfe930f6e32fd5a1a59d736fbd17..a334787a4939f76ef0e5d6e25ea259d56dde24f0 100644 (file)
@@ -55,6 +55,9 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
        for (tmp = 1, intel_gt_pm_get(gt); tmp; \
             intel_gt_pm_put(gt), tmp = 0)
 
+#define with_intel_gt_pm_if_awake(gt, wf) \
+       for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+
 static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
 {
        return intel_wakeref_wait_for_idle(&gt->wakeref);
index df708802889dfc00e17863295b11bba5bf7d2e90..3804a583382bad51ab01ef0579eac61b240cabf7 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/llist.h>
 #include <linux/mutex.h>
 #include <linux/notifier.h>
+#include <linux/seqlock.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
@@ -83,7 +84,22 @@ struct intel_gt {
        struct intel_uc uc;
        struct intel_gsc gsc;
 
-       struct mutex tlb_invalidate_lock;
+       struct {
+               /* Serialize global tlb invalidations */
+               struct mutex invalidate_lock;
+
+               /*
+                * Batch TLB invalidations
+                *
+                * After unbinding the PTE, we need to ensure the TLB
+                * are invalidated prior to releasing the physical pages.
+                * But we only need one such invalidation for all unbinds,
+                * so we track how many TLB invalidations have been
+                * performed since unbind the PTE and only emit an extra
+                * invalidate if no full barrier has been passed.
+                */
+               seqcount_mutex_t seqno;
+       } tlb;
 
        struct i915_wa_list wa_list;
 
index 2c35324b5f68c995cc7ad31d5245ffd39bab2475..2b10b96b17b5bda2f31d259fdf8b64a77270bcf8 100644 (file)
@@ -708,7 +708,7 @@ intel_context_migrate_copy(struct intel_context *ce,
        u8 src_access, dst_access;
        struct i915_request *rq;
        int src_sz, dst_sz;
-       bool ccs_is_src;
+       bool ccs_is_src, overwrite_ccs;
        int err;
 
        GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +749,8 @@ intel_context_migrate_copy(struct intel_context *ce,
                        get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
        }
 
+       overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
        src_offset = 0;
        dst_offset = CHUNK_SZ;
        if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -852,6 +854,25 @@ intel_context_migrate_copy(struct intel_context *ce,
                        if (err)
                                goto out_rq;
                        ccs_bytes_to_cpy -= ccs_sz;
+               } else if (overwrite_ccs) {
+                       err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+                       if (err)
+                               goto out_rq;
+
+                       /*
+                        * While we can't always restore/manage the CCS state,
+                        * we still need to ensure we don't leak the CCS state
+                        * from the previous user, so make sure we overwrite it
+                        * with something.
+                        */
+                       err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+                                           dst_offset, DIRECT_ACCESS, len);
+                       if (err)
+                               goto out_rq;
+
+                       err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+                       if (err)
+                               goto out_rq;
                }
 
                /* Arbitration is re-enabled between requests. */
index d8b94d6385598aa8f43fd133294ecd8ced74d637..6ee8d11270168fe7af5ae557a1ad06bb9d386488 100644 (file)
@@ -206,8 +206,12 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
 void ppgtt_unbind_vma(struct i915_address_space *vm,
                      struct i915_vma_resource *vma_res)
 {
-       if (vma_res->allocated)
-               vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+       if (!vma_res->allocated)
+               return;
+
+       vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+       if (vma_res->tlb)
+               vma_invalidate_tlb(vm, vma_res->tlb);
 }
 
 static unsigned long pd_count(u64 size, int shift)
index 6e90032e12e9b790e7f1f3bc1b6d228d8e017faa..aa6aed8371947b116851244b134e1b4763cd3eb6 100644 (file)
@@ -15,6 +15,7 @@
 #include "gt/intel_gt_mcr.h"
 #include "gt/intel_gt_regs.h"
 
+#ifdef CONFIG_64BIT
 static void _release_bars(struct pci_dev *pdev)
 {
        int resno;
@@ -111,6 +112,9 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
        pci_assign_unassigned_bus_resources(pdev->bus);
        pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
 }
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
 
 static int
 region_lmem_release(struct intel_memory_region *mem)
index d25647be25d18ba6a88cbbecc97f4d02ff662e52..086bbe8945d6cbd53bb4e368a291dd69079439c1 100644 (file)
@@ -247,7 +247,7 @@ struct i915_gem_mm {
         * List of objects which are pending destruction.
         */
        struct llist_head free_list;
-       struct delayed_work free_work;
+       struct work_struct free_work;
        /**
         * Count of objects pending destructions. Used to skip needlessly
         * waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1378,7 +1378,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
         * armed the work again.
         */
        while (atomic_read(&i915->mm.free_count)) {
-               flush_delayed_work(&i915->mm.free_work);
+               flush_work(&i915->mm.free_work);
                flush_delayed_work(&i915->bdev.wq);
                rcu_barrier();
        }
index ef3b04c7e15377c526dd6c89c0f887e040768cb2..26037171649006b3a6433e5541625e2fc2bb72c9 100644 (file)
@@ -538,8 +538,6 @@ int i915_vma_bind(struct i915_vma *vma,
                                   bind_flags);
        }
 
-       set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
-
        atomic_or(bind_flags, &vma->flags);
        return 0;
 }
@@ -1310,6 +1308,19 @@ err_unpin:
        return err;
 }
 
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+{
+       /*
+        * Before we release the pages that were bound by this vma, we
+        * must invalidate all the TLBs that may still have a reference
+        * back to our physical address. It only needs to be done once,
+        * so after updating the PTE to point away from the pages, record
+        * the most recent TLB invalidation seqno, and if we have not yet
+        * flushed the TLBs upon release, perform a full invalidation.
+        */
+       WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+}
+
 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
 {
        /* We allocate under vma_get_pages, so beware the shrinker */
@@ -1941,7 +1952,12 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
                vma->vm->skip_pte_rewrite;
        trace_i915_vma_unbind(vma);
 
-       unbind_fence = i915_vma_resource_unbind(vma_res);
+       if (async)
+               unbind_fence = i915_vma_resource_unbind(vma_res,
+                                                       &vma->obj->mm.tlb);
+       else
+               unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
+
        vma->resource = NULL;
 
        atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
@@ -1949,10 +1965,13 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 
        i915_vma_detach(vma);
 
-       if (!async && unbind_fence) {
-               dma_fence_wait(unbind_fence, false);
-               dma_fence_put(unbind_fence);
-               unbind_fence = NULL;
+       if (!async) {
+               if (unbind_fence) {
+                       dma_fence_wait(unbind_fence, false);
+                       dma_fence_put(unbind_fence);
+                       unbind_fence = NULL;
+               }
+               vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
        }
 
        /*
index 88ca0bd9c9003029caf2284ceffa59745074bda8..33a58f605d75cca26b2d23abe4dd4922705a613a 100644 (file)
@@ -213,6 +213,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
 struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
 int __i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
index 27c55027387a05c0ac4f984f6a05bc3ddce81d6d..5a67995ea5fe26f65fdf2a436652cfec3250e416 100644 (file)
@@ -223,10 +223,13 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
  * Return: A refcounted pointer to a dma-fence that signals when unbinding is
  * complete.
  */
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+                                          u32 *tlb)
 {
        struct i915_address_space *vm = vma_res->vm;
 
+       vma_res->tlb = tlb;
+
        /* Reference for the sw fence */
        i915_vma_resource_get(vma_res);
 
index 5d8427caa2ba23da59da8269b863e1d917fa346e..06923d1816e7e70ffcb5d6c8d9b1770abbfa3304 100644 (file)
@@ -67,6 +67,7 @@ struct i915_page_sizes {
  * taken when the unbind is scheduled.
  * @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
  * needs to be skipped for unbind.
+ * @tlb: pointer for obj->mm.tlb, if async unbind. Otherwise, NULL
  *
  * The lifetime of a struct i915_vma_resource is from a binding request to
  * the actual possible asynchronous unbind has completed.
@@ -119,6 +120,8 @@ struct i915_vma_resource {
        bool immediate_unbind:1;
        bool needs_wakeref:1;
        bool skip_pte_rewrite:1;
+
+       u32 *tlb;
 };
 
 bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
@@ -131,7 +134,8 @@ struct i915_vma_resource *i915_vma_resource_alloc(void);
 
 void i915_vma_resource_free(struct i915_vma_resource *vma_res);
 
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+                                          u32 *tlb);
 
 void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
 
index 9b84df34a6a12bbe8cd2f5b76948525749f90324..8cf3352d88582380e82e69cf9b10b1cc91829a2f 100644 (file)
@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
 
        drm_kms_helper_poll_init(drm);
 
-       drm_bridge_connector_enable_hpd(kms->connector);
-
        ret = drm_dev_register(drm, 0);
        if (ret)
                goto cleanup_crtc;
index 1b70938cfd2c43b327cc3df086a57a7efc8d07e4..bd4ca11d3ff536fbf8a297433dc35392cd560e7b 100644 (file)
@@ -115,8 +115,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
        for_each_endpoint_of_node(dev->of_node, ep) {
                /* If the endpoint node exists, consider it enabled */
                remote = of_graph_get_remote_port(ep);
-               if (remote)
+               if (remote) {
+                       of_node_put(remote);
+                       of_node_put(ep);
                        return true;
+               }
        }
 
        return false;
index 05076e530e7d443810ca4aaca149ddf6007b2b41..e29175e4b44ce32e6f6ac526ab07fed37f596d83 100644 (file)
@@ -820,6 +820,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
                if (ret == 0) {
                        ret = nouveau_fence_new(chan, false, &fence);
                        if (ret == 0) {
+                               /* TODO: figure out a better solution here
+                                *
+                                * wait on the fence here explicitly as going through
+                                * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+                                *
+                                * Without this the operation can timeout and we'll fallback to a
+                                * software copy, which might take several minutes to finish.
+                                */
+                               nouveau_fence_wait(fence, false, false);
                                ret = ttm_bo_move_accel_cleanup(bo,
                                                                &fence->base,
                                                                evict, false,
index 568182e68dd73bb7bb8268ed4fffc09ec5657b50..d8cf71fb0512813f14732bf5d142a17cb17cfe60 100644 (file)
@@ -2604,6 +2604,27 @@ nv172_chipset = {
        .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
+static const struct nvkm_device_chip
+nv173_chipset = {
+       .name = "GA103",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gpio     = { 0x00000001, ga102_gpio_new },
+       .i2c      = { 0x00000001, gm200_i2c_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mc       = { 0x00000001, ga100_mc_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .privring = { 0x00000001, gm200_privring_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .top      = { 0x00000001, ga100_top_new },
+       .disp     = { 0x00000001, ga102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+};
+
 static const struct nvkm_device_chip
 nv174_chipset = {
        .name = "GA104",
@@ -3067,6 +3088,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x167: device->chip = &nv167_chipset; break;
                case 0x168: device->chip = &nv168_chipset; break;
                case 0x172: device->chip = &nv172_chipset; break;
+               case 0x173: device->chip = &nv173_chipset; break;
                case 0x174: device->chip = &nv174_chipset; break;
                case 0x176: device->chip = &nv176_chipset; break;
                case 0x177: device->chip = &nv177_chipset; break;
index 2b12389f841ae73eabdab7479df1c9987da18eb3..ee0165687239b5b6b7a42618925dade63b0f5ea1 100644 (file)
@@ -1605,6 +1605,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
                if (r) {
                        /* delay GPU reset to resume */
                        radeon_fence_driver_force_completion(rdev, i);
+               } else {
+                       /* finish executing delayed work */
+                       flush_delayed_work(&rdev->fence_drv[i].lockup_work);
                }
        }
 
index b4dfa166eccdfa070b0c16082c0ea9f3b54c2893..34234a144e87dab393dd024577a6f5f21d415269 100644 (file)
@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
                                    struct drm_display_mode *mode)
 {
        struct mipi_dsi_device *device = dsi->device;
-       unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+       int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
        u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
        u32 basic_ctl = 0;
        size_t bytes;
@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
                 * (4 bytes). Its minimal size is therefore 10 bytes
                 */
 #define HSA_PACKET_OVERHEAD    10
-               hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+               hsa = max(HSA_PACKET_OVERHEAD,
                          (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
 
                /*
@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
                 * therefore 6 bytes
                 */
 #define HBP_PACKET_OVERHEAD    6
-               hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+               hbp = max(HBP_PACKET_OVERHEAD,
                          (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
 
                /*
@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
                 * 16 bytes
                 */
 #define HFP_PACKET_OVERHEAD    16
-               hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+               hfp = max(HFP_PACKET_OVERHEAD,
                          (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
 
                /*
@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
                 * bytes). Its minimal size is therefore 10 bytes.
                 */
 #define HBLK_PACKET_OVERHEAD   10
-               hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+               hblk = max(HBLK_PACKET_OVERHEAD,
                           (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
                           HBLK_PACKET_OVERHEAD);
 
index 0e210df65c305393477085e8cfcf6032587b2c8d..97184c33352662662bbd27de462eacb111045435 100644 (file)
@@ -912,7 +912,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
        /*
         * We might need to add a TTM.
         */
-       if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+       if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
                ret = ttm_tt_create(bo, true);
                if (ret)
                        return ret;
index 061be9a6619df4de18bfc257d31640be1bb48a36..b0f3117102ca5260e95a6e1bb25479d733e5f2d7 100644 (file)
@@ -8,6 +8,7 @@ config DRM_VC4
        depends on DRM
        depends on SND && SND_SOC
        depends on COMMON_CLK
+       depends on PM
        select DRM_DISPLAY_HDMI_HELPER
        select DRM_DISPLAY_HELPER
        select DRM_KMS_HELPER
index 592c3b5d03e6e1f296dace9bf27b0a39c8a42dd9..1e5f68704d7d8b73a3d16f70d0bfe300243b26ac 100644 (file)
@@ -2855,7 +2855,7 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
        return 0;
 }
 
-static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
+static int vc4_hdmi_runtime_suspend(struct device *dev)
 {
        struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
 
@@ -2972,17 +2972,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
                        vc4_hdmi->disable_4kp60 = true;
        }
 
+       pm_runtime_enable(dev);
+
        /*
-        * We need to have the device powered up at this point to call
-        * our reset hook and for the CEC init.
+        *  We need to have the device powered up at this point to call
+        *  our reset hook and for the CEC init.
         */
-       ret = vc4_hdmi_runtime_resume(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret)
-               goto err_put_ddc;
-
-       pm_runtime_get_noresume(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
+               goto err_disable_runtime_pm;
 
        if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
             of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -3028,6 +3026,7 @@ err_destroy_conn:
 err_destroy_encoder:
        drm_encoder_cleanup(encoder);
        pm_runtime_put_sync(dev);
+err_disable_runtime_pm:
        pm_runtime_disable(dev);
 err_put_ddc:
        put_device(&vc4_hdmi->ddc->dev);
index 78fb1a4274a6c3e04d4e697f8180179e0f616be3..e47fa34656717811f53909e7cd64964992f1cdd7 100644 (file)
@@ -1572,9 +1572,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
        struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
        int irq, ret;
 
-       ret = pm_runtime_resume_and_get(&pdev->dev);
-       if (ret < 0)
-               return ret;
+       ret = pm_runtime_get_sync(&pdev->dev);
 
        hrtimer_cancel(&i2c_imx->slave_timer);
 
@@ -1585,17 +1583,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
        if (i2c_imx->dma)
                i2c_imx_dma_free(i2c_imx);
 
-       /* setup chip registers to defaults */
-       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
-       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
-       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
-       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+       if (ret == 0) {
+               /* setup chip registers to defaults */
+               imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+               imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+               imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+               imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+               clk_disable(i2c_imx->clk);
+       }
 
        clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0)
                free_irq(irq, i2c_imx);
-       clk_disable_unprepare(i2c_imx->clk);
+
+       clk_unprepare(i2c_imx->clk);
 
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
index 79798fc7462adf14887533118483368747ed6e96..6746aa46d96c861330b1c99ec97d48733674de84 100644 (file)
@@ -30,7 +30,7 @@ struct acpi_smbus_cmi {
        u8 cap_info:1;
        u8 cap_read:1;
        u8 cap_write:1;
-       const struct smbus_methods_t *methods;
+       struct smbus_methods_t *methods;
 };
 
 static const struct smbus_methods_t smbus_methods = {
@@ -361,6 +361,7 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
 static int acpi_smbus_cmi_add(struct acpi_device *device)
 {
        struct acpi_smbus_cmi *smbus_cmi;
+       const struct acpi_device_id *id;
        int ret;
 
        smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
@@ -368,7 +369,6 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
                return -ENOMEM;
 
        smbus_cmi->handle = device->handle;
-       smbus_cmi->methods = device_get_match_data(&device->dev);
        strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
        device->driver_data = smbus_cmi;
@@ -376,6 +376,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
        smbus_cmi->cap_read = 0;
        smbus_cmi->cap_write = 0;
 
+       for (id = acpi_smbus_cmi_ids; id->id[0]; id++)
+               if (!strcmp(id->id, acpi_device_hid(device)))
+                       smbus_cmi->methods =
+                               (struct smbus_methods_t *) id->driver_data;
+
        acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
                            acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
 
index fce80a4a5147cd6e01cd1bb3a6c7e83432b80505..04c04e6d24c3582b7a973fabded51685bcde5a89 100644 (file)
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
        struct scatterlist *sg;
        unsigned long start, end, cur = 0;
        unsigned int nmap = 0;
+       long ret;
        int i;
 
        dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ wait_fence:
         * may be not up-to-date. Wait for the exporter to finish
         * the migration.
         */
-       return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+       ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
                                     DMA_RESV_USAGE_KERNEL,
                                     false, MAX_SCHEDULE_TIMEOUT);
+       if (ret < 0)
+               return ret;
+       if (ret == 0)
+               return -ETIMEDOUT;
+       return 0;
 }
 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
 
index c16017f6e8db2d80e4780110cff9ccaf56086df4..14392c942f4928945a91cdd10a15b8e9704299d1 100644 (file)
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                        opt2 |= CCTRL_ECN_V(1);
        }
 
-       skb_get(skb);
-       rpl = cplhdr(skb);
        if (!is_t4(adapter_type)) {
-               BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
-               skb_trim(skb, sizeof(*rpl5));
-               rpl5 = (void *)rpl;
-               INIT_TP_WR(rpl5, ep->hwtid);
-       } else {
-               skb_trim(skb, sizeof(*rpl));
-               INIT_TP_WR(rpl, ep->hwtid);
-       }
-       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-                                                   ep->hwtid));
-
-       if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
                u32 isn = (prandom_u32() & ~7UL) - 1;
+
+               skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+               rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+               rpl = (void *)rpl5;
+               INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
                opt2 |= T5_OPT_2_VALID_F;
                opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
                opt2 |= T5_ISS_F;
-               rpl5 = (void *)rpl;
-               memset_after(rpl5, 0, iss);
                if (peer2peer)
                        isn += 4;
                rpl5->iss = cpu_to_be32(isn);
                pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+       } else {
+               skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+               rpl = __skb_put_zero(skb, sizeof(*rpl));
+               INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
        }
 
        rpl->opt0 = cpu_to_be64(opt0);
index 72f08171a28a724aa56d013cf2b7180a926557de..bc3ec22a62c57217874f14d5787f764971997af0 100644 (file)
@@ -407,7 +407,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
                             to_erdma_access_flags(reg_wr(send_wr)->access);
                regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
                regmr_sge->length = cpu_to_le32(mr->ibmr.length);
-               regmr_sge->stag = cpu_to_le32(mr->ibmr.lkey);
+               regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
                attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
                        FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
                        FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
index a7a3d42e20167623ecd6db10fc1e6409d799b716..699bd3f59cd340fa597b37416880d2eede547442 100644 (file)
@@ -280,7 +280,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
        attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
        attr->vendor_part_id = dev->pdev->device;
        attr->hw_ver = dev->pdev->revision;
-       attr->max_qp = dev->attrs.max_qp;
+       attr->max_qp = dev->attrs.max_qp - 1;
        attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
        attr->max_qp_rd_atom = dev->attrs.max_ord;
        attr->max_qp_init_rd_atom = dev->attrs.max_ird;
@@ -291,7 +291,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
        attr->max_send_sge = dev->attrs.max_send_sge;
        attr->max_recv_sge = dev->attrs.max_recv_sge;
        attr->max_sge_rd = dev->attrs.max_sge_rd;
-       attr->max_cq = dev->attrs.max_cq;
+       attr->max_cq = dev->attrs.max_cq - 1;
        attr->max_cqe = dev->attrs.max_cqe;
        attr->max_mr = dev->attrs.max_mr;
        attr->max_pd = dev->attrs.max_pd;
index a174a0eee8dca37f634caa9289d1de8a127fc05d..fc94a1b25485d7ba65cada9af61496e1fe89d432 100644 (file)
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
        int err;
        int port;
 
-       for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
-               dev->port_caps[port - 1].has_smi = false;
-               if (MLX5_CAP_GEN(dev->mdev, port_type) ==
-                   MLX5_CAP_PORT_TYPE_IB) {
-                       if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
-                               err = mlx5_query_hca_vport_context(dev->mdev, 0,
-                                                                  port, 0,
-                                                                  &vport_ctx);
-                               if (err) {
-                                       mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
-                                                   port, err);
-                                       return err;
-                               }
-                               dev->port_caps[port - 1].has_smi =
-                                       vport_ctx.has_smi;
-                       } else {
-                               dev->port_caps[port - 1].has_smi = true;
-                       }
+       if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+               return 0;
+
+       for (port = 1; port <= dev->num_ports; port++) {
+               if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+                       dev->port_caps[port - 1].has_smi = true;
+                       continue;
                }
+               err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+                                                  &vport_ctx);
+               if (err) {
+                       mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+                                   port, err);
+                       return err;
+               }
+               dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
        }
+
        return 0;
 }
 
index bd5f3b5e17278bb3b5b29e75754a2de861375149..7b83f48f60c5ea76b387e1e101c6d5c66d085434 100644 (file)
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
        struct iscsi_hdr *hdr;
        char *data;
        int length;
+       bool full_feature_phase;
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
        hdr = desc->rsp + sizeof(struct iser_ctrl);
        data = desc->rsp + ISER_HEADERS_LEN;
        length = wc->byte_len - ISER_HEADERS_LEN;
+       full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+                             ISCSI_FULL_FEATURE_PHASE) &&
+                            (hdr->flags & ISCSI_FLAG_CMD_FINAL);
 
        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
                                      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                      DMA_FROM_DEVICE);
 
-       if (iser_conn->iscsi_conn->session->discovery_sess)
+       if (!full_feature_phase ||
+           iser_conn->iscsi_conn->session->discovery_sess)
                return;
 
        /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
index 51bd66a45a11ef95bbda3b7a91f4547b1532545a..e190bb8c225cd66acca5dbd5e5309844ab3aff19 100644 (file)
@@ -68,7 +68,6 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
 {
        struct irq_alloc_info *info = arg;
        struct irq_data *irq_data;
-       struct irq_desc *desc;
        int ret = 0;
 
        if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
@@ -90,8 +89,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
         * Hypver-V IO APIC irq affinity should be in the scope of
         * ioapic_max_cpumask because no irq remapping support.
         */
-       desc = irq_data_to_desc(irq_data);
-       cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+       irq_data_update_affinity(irq_data, &ioapic_max_cpumask);
 
        return 0;
 }
index 327f3ab62c0360fffd8a60be33cf5947c4a657ff..741612ba6a5209e5fdc3382439fd749d54106283 100644 (file)
@@ -129,7 +129,7 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
        clear_csr_ecfg(ECFG0_IM);
        clear_csr_estat(ESTATF_IP);
 
-       cpuintc_handle = irq_domain_alloc_fwnode(NULL);
+       cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
        irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
                                        &loongarch_cpu_intc_irq_domain_ops, NULL);
 
index 80d8ca6f2d462dd1b61f196f83dc7ad0ed251cde..16e9af8d8b1eabb3d9c21223927aefb06a9cc481 100644 (file)
@@ -111,11 +111,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
        regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
 
        /* Mask target vector */
-       csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
+       csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+                       0x0, priv->node * CORES_PER_EIO_NODE);
+
        /* Set route for target vector */
        eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
        /* Unmask target vector */
-       csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);
+       csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+                       0x0, priv->node * CORES_PER_EIO_NODE);
 
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -286,7 +290,7 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
        }
 }
 
-struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
 {
        int i;
 
@@ -344,7 +348,8 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
        if (!priv)
                return -ENOMEM;
 
-       priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
+       priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+                                                              acpi_eiointc->node);
        if (!priv->domain_handle) {
                pr_err("Unable to allocate domain handle\n");
                goto out_free_priv;
index c4f3c886ad6150dea1aca63311eba9662be87b5a..0da8716f8f24b8dc06f8a60378cc0b1c7f67a8c9 100644 (file)
@@ -207,7 +207,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
                                        "reg-names", core_reg_names[i]);
 
                        if (index < 0)
-                               return -EINVAL;
+                               goto out_iounmap;
 
                        priv->core_isr[i] = of_iomap(node, index);
                }
@@ -360,7 +360,7 @@ int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic
        parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
        parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
 
-       domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
+       domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
        if (!domain_handle) {
                pr_err("Unable to allocate domain handle\n");
                return -ENOMEM;
index d0e8551bebfab7bdb5b1962c6859b9eaaa20b88f..a72ede90ffc69a0b00cbf92e280d70cc30a57368 100644 (file)
@@ -282,7 +282,7 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
        int ret;
        struct fwnode_handle *domain_handle;
 
-       domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
+       domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
        ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
                                acpi_pchmsi->count, parent, domain_handle);
        if (ret < 0)
index b6f1392964b14668df4baf9f3cecb98c78f6cfd0..c01b9c2570053aff8e5642d36eae0ec8e864d133 100644 (file)
@@ -48,25 +48,6 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
 
 struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
 
-int find_pch_pic(u32 gsi)
-{
-       int i;
-
-       /* Find the PCH_PIC that manages this GSI. */
-       for (i = 0; i < MAX_IO_PICS; i++) {
-               struct pch_pic *priv = pch_pic_priv[i];
-
-               if (!priv)
-                       return -1;
-
-               if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
-                       return i;
-       }
-
-       pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
-       return -1;
-}
-
 static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
 {
        u32 reg;
@@ -325,6 +306,25 @@ IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
 #endif
 
 #ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+       int i;
+
+       /* Find the PCH_PIC that manages this GSI. */
+       for (i = 0; i < MAX_IO_PICS; i++) {
+               struct pch_pic *priv = pch_pic_priv[i];
+
+               if (!priv)
+                       return -1;
+
+               if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+                       return i;
+       }
+
+       pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+       return -1;
+}
+
 static int __init
 pch_lpc_parse_madt(union acpi_subtable_headers *header,
                       const unsigned long end)
@@ -349,7 +349,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
 
        vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
 
-       domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
+       domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
        if (!domain_handle) {
                pr_err("Unable to allocate domain handle\n");
                return -ENOMEM;
index afaf36b2f6ab8b32d53edc24e12ba17fa7a16884..729be2c5296c6c3544abaa7701e3d7b8fa7f9f43 100644 (file)
@@ -5620,6 +5620,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
         * removed (mddev_delayed_delete).
         */
        flush_workqueue(md_misc_wq);
+       flush_workqueue(md_rdev_misc_wq);
 
        mutex_lock(&disks_mutex);
        mddev = mddev_alloc(dev);
@@ -6238,11 +6239,11 @@ static void mddev_detach(struct mddev *mddev)
 static void __md_stop(struct mddev *mddev)
 {
        struct md_personality *pers = mddev->pers;
+       md_bitmap_destroy(mddev);
        mddev_detach(mddev);
        /* Ensure ->event_work is done */
        if (mddev->event_work.func)
                flush_workqueue(md_misc_wq);
-       md_bitmap_destroy(mddev);
        spin_lock(&mddev->lock);
        mddev->pers = NULL;
        spin_unlock(&mddev->lock);
@@ -6260,6 +6261,7 @@ void md_stop(struct mddev *mddev)
        /* stop the array and free an attached data structures.
         * This is called from dm-raid
         */
+       __md_stop_writes(mddev);
        __md_stop(mddev);
        bioset_exit(&mddev->bio_set);
        bioset_exit(&mddev->sync_set);
index 9117fcdee1be126b14235eaeac9984179f4b1578..64d6e4cd8a3a0f87724a5531d5ebc7c28f6bd784 100644 (file)
@@ -2639,18 +2639,18 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
 }
 
 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
-                           int sectors, struct page *page, int rw)
+                           int sectors, struct page *page, enum req_op op)
 {
        sector_t first_bad;
        int bad_sectors;
 
        if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
-           && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+           && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
                return -1;
-       if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+       if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
                /* success */
                return 1;
-       if (rw == WRITE) {
+       if (op == REQ_OP_WRITE) {
                set_bit(WriteErrorSeen, &rdev->flags);
                if (!test_and_set_bit(WantReplacement, &rdev->flags))
                        set_bit(MD_RECOVERY_NEEDED,
@@ -2780,7 +2780,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s, conf->tmppage, WRITE)
+                                            s, conf->tmppage, REQ_OP_WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
@@ -2814,8 +2814,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s, conf->tmppage,
-                                                READ)) {
+                                            s, conf->tmppage, REQ_OP_READ)) {
                        case 0:
                                /* Well, this device is dead */
                                pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
index 10c563999d3dab719e1c58f47399ed99909ba5f7..e63608834411a64d80168a96d441e927db4572be 100644 (file)
@@ -171,6 +171,7 @@ config MMC_SDHCI_OF_ASPEED
 config MMC_SDHCI_OF_ASPEED_TEST
        bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
        depends on MMC_SDHCI_OF_ASPEED && KUNIT
+       depends on (MMC_SDHCI_OF_ASPEED=m || KUNIT=y)
        default KUNIT_ALL_TESTS
        help
          Enable KUnit tests for the ASPEED SDHCI driver. Select this
index 2f08d442e5577a9731601909aee7fcf239bb4917..fc462995cf94a40201d6dfab17bfeb03b9efcd21 100644 (file)
@@ -1172,8 +1172,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
        }
 
        ret = device_reset_optional(&pdev->dev);
-       if (ret)
-               return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+       if (ret) {
+               dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+               goto free_host;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->regs = devm_ioremap_resource(&pdev->dev, res);
index 4ff73d1883ded4d55c047584994497ba203abdda..69d78604d1fc3f2d09ab507965ee9825de0a0789 100644 (file)
@@ -2446,6 +2446,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
        /* disable busy check */
        sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
 
+       val = readl(host->base + MSDC_INT);
+       writel(val, host->base + MSDC_INT);
+
        if (recovery) {
                sdr_set_field(host->base + MSDC_DMA_CTRL,
                              MSDC_DMA_CTRL_STOP, 1);
@@ -2932,11 +2935,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct msdc_host *host = mmc_priv(mmc);
        int ret;
+       u32 val;
 
        if (mmc->caps2 & MMC_CAP2_CQE) {
                ret = cqhci_suspend(mmc);
                if (ret)
                        return ret;
+               val = readl(host->base + MSDC_INT);
+               writel(val, host->base + MSDC_INT);
        }
 
        /*
index 0db9490dc6595a18ddea35717341259168a690f5..e4003f6058eb571fbfb87a7aee59afa10721823b 100644 (file)
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
        ret = pxamci_of_init(pdev, mmc);
        if (ret)
-               return ret;
+               goto out;
 
        host = mmc_priv(mmc);
        host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
        ret = pxamci_init_ocr(host);
        if (ret < 0)
-               return ret;
+               goto out;
 
        mmc->caps = 0;
        host->cmdat = 0;
index 4e904850973cd31c061866e0302387e271123e73..a7343d4bc50e75788313217142c8c1d70342c808 100644 (file)
@@ -349,6 +349,15 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
+#ifdef CONFIG_ACPI
+static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+       .ops = &sdhci_dwcmshc_ops,
+       .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+       .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+                  SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+#endif
+
 static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
        .ops = &sdhci_dwcmshc_rk35xx_ops,
        .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
@@ -431,7 +440,10 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
-       { .id = "MLNXBF30" },
+       {
+               .id = "MLNXBF30",
+               .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+       },
        {}
 };
 #endif
@@ -447,7 +459,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
        int err;
        u32 extra;
 
-       pltfm_data = of_device_get_match_data(&pdev->dev);
+       pltfm_data = device_get_match_data(&pdev->dev);
        if (!pltfm_data) {
                dev_err(&pdev->dev, "Error: No device match data found\n");
                return -ENODEV;
index d7fb33c078e81a1c8242d5af793b328a19b79bd2..184608bd89999cfc15f75ae0c259371d6758c7e3 100644 (file)
@@ -84,7 +84,8 @@ enum ad_link_speed_type {
 static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
        0, 0, 0, 0, 0, 0
 };
-static u16 ad_ticks_per_sec;
+
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
 static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
 
 static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
@@ -2001,36 +2002,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
 /**
  * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
  * @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
  *
  * Can be called only after the mac address of the bond is set.
  */
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
 {
-       /* check that the bond is not initialized yet */
-       if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
-                               bond->dev->dev_addr)) {
-
-               BOND_AD_INFO(bond).aggregator_identifier = 0;
-
-               BOND_AD_INFO(bond).system.sys_priority =
-                       bond->params.ad_actor_sys_prio;
-               if (is_zero_ether_addr(bond->params.ad_actor_system))
-                       BOND_AD_INFO(bond).system.sys_mac_addr =
-                           *((struct mac_addr *)bond->dev->dev_addr);
-               else
-                       BOND_AD_INFO(bond).system.sys_mac_addr =
-                           *((struct mac_addr *)bond->params.ad_actor_system);
-
-               /* initialize how many times this module is called in one
-                * second (should be about every 100ms)
-                */
-               ad_ticks_per_sec = tick_resolution;
+       BOND_AD_INFO(bond).aggregator_identifier = 0;
+       BOND_AD_INFO(bond).system.sys_priority =
+               bond->params.ad_actor_sys_prio;
+       if (is_zero_ether_addr(bond->params.ad_actor_system))
+               BOND_AD_INFO(bond).system.sys_mac_addr =
+                   *((struct mac_addr *)bond->dev->dev_addr);
+       else
+               BOND_AD_INFO(bond).system.sys_mac_addr =
+                   *((struct mac_addr *)bond->params.ad_actor_system);
 
-               bond_3ad_initiate_agg_selection(bond,
-                                               AD_AGGREGATOR_SELECTION_TIMER *
-                                               ad_ticks_per_sec);
-       }
+       bond_3ad_initiate_agg_selection(bond,
+                                       AD_AGGREGATOR_SELECTION_TIMER *
+                                       ad_ticks_per_sec);
 }
 
 /**
index 50e60843020ce2840e1019d5822ccee44c32f80f..2f4da2c13c0af98c857011e713523b7bbb57a7cc 100644 (file)
@@ -2081,7 +2081,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                        /* Initialize AD with the number of times that the AD timer is called in 1 second
                         * can be called only after the mac address of the bond is set
                         */
-                       bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+                       bond_3ad_initialize(bond);
                } else {
                        SLAVE_AD_INFO(new_slave)->id =
                                SLAVE_AD_INFO(prev_slave)->id + 1;
index 4b14d80d27ede3bc00f3be8fb2cb69761a61d96a..e4f446db0ca18da695633da40b4d88c3ba974ef4 100644 (file)
@@ -613,6 +613,9 @@ int ksz9477_fdb_dump(struct ksz_device *dev, int port,
                        goto exit;
                }
 
+               if (!(ksz_data & ALU_VALID))
+                       continue;
+
                /* read ALU table */
                ksz9477_read_table(dev, alu_table);
 
index ed7d137cba9942a35a238150b586fbbf36a7546e..6bd69a7e6809db013a2879cbd6eefa2dd15f1ae3 100644 (file)
@@ -803,9 +803,15 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
        if (dev->info->supports_rgmii[port])
                phy_interface_set_rgmii(config->supported_interfaces);
 
-       if (dev->info->internal_phy[port])
+       if (dev->info->internal_phy[port]) {
                __set_bit(PHY_INTERFACE_MODE_INTERNAL,
                          config->supported_interfaces);
+               /* Compatibility for phylib's default interface type when the
+                * phy-mode property is absent
+                */
+               __set_bit(PHY_INTERFACE_MODE_GMII,
+                         config->supported_interfaces);
+       }
 
        if (dev->dev_ops->get_caps)
                dev->dev_ops->get_caps(dev, port, config);
@@ -962,6 +968,7 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
 static int ksz_setup(struct dsa_switch *ds)
 {
        struct ksz_device *dev = ds->priv;
+       struct ksz_port *p;
        const u16 *regs;
        int ret;
 
@@ -1001,6 +1008,14 @@ static int ksz_setup(struct dsa_switch *ds)
                        return ret;
        }
 
+       /* Start with learning disabled on standalone user ports, and enabled
+        * on the CPU port. In lack of other finer mechanisms, learning on the
+        * CPU port will avoid flooding bridge local addresses on the network
+        * in some cases.
+        */
+       p = &dev->ports[dev->cpu_port];
+       p->learning = true;
+
        /* start switch */
        regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
                           SW_START, SW_START);
@@ -1277,6 +1292,8 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
        ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
        data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
 
+       p = &dev->ports[port];
+
        switch (state) {
        case BR_STATE_DISABLED:
                data |= PORT_LEARN_DISABLE;
@@ -1286,9 +1303,13 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
                break;
        case BR_STATE_LEARNING:
                data |= PORT_RX_ENABLE;
+               if (!p->learning)
+                       data |= PORT_LEARN_DISABLE;
                break;
        case BR_STATE_FORWARDING:
                data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+               if (!p->learning)
+                       data |= PORT_LEARN_DISABLE;
                break;
        case BR_STATE_BLOCKING:
                data |= PORT_LEARN_DISABLE;
@@ -1300,12 +1321,38 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 
        ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
 
-       p = &dev->ports[port];
        p->stp_state = state;
 
        ksz_update_port_member(dev, port);
 }
 
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+                                    struct switchdev_brport_flags flags,
+                                    struct netlink_ext_ack *extack)
+{
+       if (flags.mask & ~BR_LEARNING)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+                                struct switchdev_brport_flags flags,
+                                struct netlink_ext_ack *extack)
+{
+       struct ksz_device *dev = ds->priv;
+       struct ksz_port *p = &dev->ports[port];
+
+       if (flags.mask & BR_LEARNING) {
+               p->learning = !!(flags.val & BR_LEARNING);
+
+               /* Make the change take effect immediately */
+               ksz_port_stp_state_set(ds, port, p->stp_state);
+       }
+
+       return 0;
+}
+
 static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
                                                  int port,
                                                  enum dsa_tag_protocol mp)
@@ -1719,6 +1766,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
        .port_bridge_join       = ksz_port_bridge_join,
        .port_bridge_leave      = ksz_port_bridge_leave,
        .port_stp_state_set     = ksz_port_stp_state_set,
+       .port_pre_bridge_flags  = ksz_port_pre_bridge_flags,
+       .port_bridge_flags      = ksz_port_bridge_flags,
        .port_fast_age          = ksz_port_fast_age,
        .port_vlan_filtering    = ksz_port_vlan_filtering,
        .port_vlan_add          = ksz_port_vlan_add,
index 764ada3a0f42acac38f013a7aa87361ea256979f..0d9520dc6d2dbe2753c88e5e7e08879148607b8f 100644 (file)
@@ -65,6 +65,7 @@ struct ksz_chip_data {
 
 struct ksz_port {
        bool remove_tag;                /* Remove Tag flag set, for ksz8795 only */
+       bool learning;
        int stp_state;
        struct phy_device phydev;
 
index a4c6eb9a52d0df6fcbdce54f949d7702a06d28ed..83dca9179aa07a1d4c1a0b89c8e2305cb1b13f22 100644 (file)
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
        int addr = REG_PORT(p);
        int ret;
 
+       if (dsa_is_unused_port(priv->ds, p))
+               return 0;
+
        /* Do not force flow control, disable Ingress and Egress
         * Header tagging, disable VLAN tunneling, and set the port
         * state to Forwarding.  Additionally, if this is the CPU
index b4034b78c0ca7a25a04802b24316a065182d6ff0..1cdce8a98d1daa7ee5fc8d069c276c0fa029684d 100644 (file)
@@ -274,27 +274,98 @@ static const u32 vsc9959_rew_regmap[] = {
 
 static const u32 vsc9959_sys_regmap[] = {
        REG(SYS_COUNT_RX_OCTETS,                0x000000),
+       REG(SYS_COUNT_RX_UNICAST,               0x000004),
        REG(SYS_COUNT_RX_MULTICAST,             0x000008),
+       REG(SYS_COUNT_RX_BROADCAST,             0x00000c),
        REG(SYS_COUNT_RX_SHORTS,                0x000010),
        REG(SYS_COUNT_RX_FRAGMENTS,             0x000014),
        REG(SYS_COUNT_RX_JABBERS,               0x000018),
+       REG(SYS_COUNT_RX_CRC_ALIGN_ERRS,        0x00001c),
+       REG(SYS_COUNT_RX_SYM_ERRS,              0x000020),
        REG(SYS_COUNT_RX_64,                    0x000024),
        REG(SYS_COUNT_RX_65_127,                0x000028),
        REG(SYS_COUNT_RX_128_255,               0x00002c),
-       REG(SYS_COUNT_RX_256_1023,              0x000030),
-       REG(SYS_COUNT_RX_1024_1526,             0x000034),
-       REG(SYS_COUNT_RX_1527_MAX,              0x000038),
-       REG(SYS_COUNT_RX_LONGS,                 0x000044),
+       REG(SYS_COUNT_RX_256_511,               0x000030),
+       REG(SYS_COUNT_RX_512_1023,              0x000034),
+       REG(SYS_COUNT_RX_1024_1526,             0x000038),
+       REG(SYS_COUNT_RX_1527_MAX,              0x00003c),
+       REG(SYS_COUNT_RX_PAUSE,                 0x000040),
+       REG(SYS_COUNT_RX_CONTROL,               0x000044),
+       REG(SYS_COUNT_RX_LONGS,                 0x000048),
+       REG(SYS_COUNT_RX_CLASSIFIED_DROPS,      0x00004c),
+       REG(SYS_COUNT_RX_RED_PRIO_0,            0x000050),
+       REG(SYS_COUNT_RX_RED_PRIO_1,            0x000054),
+       REG(SYS_COUNT_RX_RED_PRIO_2,            0x000058),
+       REG(SYS_COUNT_RX_RED_PRIO_3,            0x00005c),
+       REG(SYS_COUNT_RX_RED_PRIO_4,            0x000060),
+       REG(SYS_COUNT_RX_RED_PRIO_5,            0x000064),
+       REG(SYS_COUNT_RX_RED_PRIO_6,            0x000068),
+       REG(SYS_COUNT_RX_RED_PRIO_7,            0x00006c),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_0,         0x000070),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_1,         0x000074),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_2,         0x000078),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_3,         0x00007c),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_4,         0x000080),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_5,         0x000084),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_6,         0x000088),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_7,         0x00008c),
+       REG(SYS_COUNT_RX_GREEN_PRIO_0,          0x000090),
+       REG(SYS_COUNT_RX_GREEN_PRIO_1,          0x000094),
+       REG(SYS_COUNT_RX_GREEN_PRIO_2,          0x000098),
+       REG(SYS_COUNT_RX_GREEN_PRIO_3,          0x00009c),
+       REG(SYS_COUNT_RX_GREEN_PRIO_4,          0x0000a0),
+       REG(SYS_COUNT_RX_GREEN_PRIO_5,          0x0000a4),
+       REG(SYS_COUNT_RX_GREEN_PRIO_6,          0x0000a8),
+       REG(SYS_COUNT_RX_GREEN_PRIO_7,          0x0000ac),
        REG(SYS_COUNT_TX_OCTETS,                0x000200),
+       REG(SYS_COUNT_TX_UNICAST,               0x000204),
+       REG(SYS_COUNT_TX_MULTICAST,             0x000208),
+       REG(SYS_COUNT_TX_BROADCAST,             0x00020c),
        REG(SYS_COUNT_TX_COLLISION,             0x000210),
        REG(SYS_COUNT_TX_DROPS,                 0x000214),
+       REG(SYS_COUNT_TX_PAUSE,                 0x000218),
        REG(SYS_COUNT_TX_64,                    0x00021c),
        REG(SYS_COUNT_TX_65_127,                0x000220),
-       REG(SYS_COUNT_TX_128_511,               0x000224),
-       REG(SYS_COUNT_TX_512_1023,              0x000228),
-       REG(SYS_COUNT_TX_1024_1526,             0x00022c),
-       REG(SYS_COUNT_TX_1527_MAX,              0x000230),
+       REG(SYS_COUNT_TX_128_255,               0x000224),
+       REG(SYS_COUNT_TX_256_511,               0x000228),
+       REG(SYS_COUNT_TX_512_1023,              0x00022c),
+       REG(SYS_COUNT_TX_1024_1526,             0x000230),
+       REG(SYS_COUNT_TX_1527_MAX,              0x000234),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_0,         0x000238),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_1,         0x00023c),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_2,         0x000240),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_3,         0x000244),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_4,         0x000248),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_5,         0x00024c),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_6,         0x000250),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_7,         0x000254),
+       REG(SYS_COUNT_TX_GREEN_PRIO_0,          0x000258),
+       REG(SYS_COUNT_TX_GREEN_PRIO_1,          0x00025c),
+       REG(SYS_COUNT_TX_GREEN_PRIO_2,          0x000260),
+       REG(SYS_COUNT_TX_GREEN_PRIO_3,          0x000264),
+       REG(SYS_COUNT_TX_GREEN_PRIO_4,          0x000268),
+       REG(SYS_COUNT_TX_GREEN_PRIO_5,          0x00026c),
+       REG(SYS_COUNT_TX_GREEN_PRIO_6,          0x000270),
+       REG(SYS_COUNT_TX_GREEN_PRIO_7,          0x000274),
        REG(SYS_COUNT_TX_AGING,                 0x000278),
+       REG(SYS_COUNT_DROP_LOCAL,               0x000400),
+       REG(SYS_COUNT_DROP_TAIL,                0x000404),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_0,       0x000408),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_1,       0x00040c),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_2,       0x000410),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_3,       0x000414),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_4,       0x000418),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_5,       0x00041c),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_6,       0x000420),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_7,       0x000424),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_0,        0x000428),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_1,        0x00042c),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_2,        0x000430),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_3,        0x000434),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_4,        0x000438),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_5,        0x00043c),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_6,        0x000440),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_7,        0x000444),
        REG(SYS_RESET_CFG,                      0x000e00),
        REG(SYS_SR_ETYPE_CFG,                   0x000e04),
        REG(SYS_VLAN_ETYPE_CFG,                 0x000e08),
@@ -547,100 +618,379 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
        [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
 };
 
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
-       { .offset = 0x00,       .name = "rx_octets", },
-       { .offset = 0x01,       .name = "rx_unicast", },
-       { .offset = 0x02,       .name = "rx_multicast", },
-       { .offset = 0x03,       .name = "rx_broadcast", },
-       { .offset = 0x04,       .name = "rx_shorts", },
-       { .offset = 0x05,       .name = "rx_fragments", },
-       { .offset = 0x06,       .name = "rx_jabbers", },
-       { .offset = 0x07,       .name = "rx_crc_align_errs", },
-       { .offset = 0x08,       .name = "rx_sym_errs", },
-       { .offset = 0x09,       .name = "rx_frames_below_65_octets", },
-       { .offset = 0x0A,       .name = "rx_frames_65_to_127_octets", },
-       { .offset = 0x0B,       .name = "rx_frames_128_to_255_octets", },
-       { .offset = 0x0C,       .name = "rx_frames_256_to_511_octets", },
-       { .offset = 0x0D,       .name = "rx_frames_512_to_1023_octets", },
-       { .offset = 0x0E,       .name = "rx_frames_1024_to_1526_octets", },
-       { .offset = 0x0F,       .name = "rx_frames_over_1526_octets", },
-       { .offset = 0x10,       .name = "rx_pause", },
-       { .offset = 0x11,       .name = "rx_control", },
-       { .offset = 0x12,       .name = "rx_longs", },
-       { .offset = 0x13,       .name = "rx_classified_drops", },
-       { .offset = 0x14,       .name = "rx_red_prio_0", },
-       { .offset = 0x15,       .name = "rx_red_prio_1", },
-       { .offset = 0x16,       .name = "rx_red_prio_2", },
-       { .offset = 0x17,       .name = "rx_red_prio_3", },
-       { .offset = 0x18,       .name = "rx_red_prio_4", },
-       { .offset = 0x19,       .name = "rx_red_prio_5", },
-       { .offset = 0x1A,       .name = "rx_red_prio_6", },
-       { .offset = 0x1B,       .name = "rx_red_prio_7", },
-       { .offset = 0x1C,       .name = "rx_yellow_prio_0", },
-       { .offset = 0x1D,       .name = "rx_yellow_prio_1", },
-       { .offset = 0x1E,       .name = "rx_yellow_prio_2", },
-       { .offset = 0x1F,       .name = "rx_yellow_prio_3", },
-       { .offset = 0x20,       .name = "rx_yellow_prio_4", },
-       { .offset = 0x21,       .name = "rx_yellow_prio_5", },
-       { .offset = 0x22,       .name = "rx_yellow_prio_6", },
-       { .offset = 0x23,       .name = "rx_yellow_prio_7", },
-       { .offset = 0x24,       .name = "rx_green_prio_0", },
-       { .offset = 0x25,       .name = "rx_green_prio_1", },
-       { .offset = 0x26,       .name = "rx_green_prio_2", },
-       { .offset = 0x27,       .name = "rx_green_prio_3", },
-       { .offset = 0x28,       .name = "rx_green_prio_4", },
-       { .offset = 0x29,       .name = "rx_green_prio_5", },
-       { .offset = 0x2A,       .name = "rx_green_prio_6", },
-       { .offset = 0x2B,       .name = "rx_green_prio_7", },
-       { .offset = 0x80,       .name = "tx_octets", },
-       { .offset = 0x81,       .name = "tx_unicast", },
-       { .offset = 0x82,       .name = "tx_multicast", },
-       { .offset = 0x83,       .name = "tx_broadcast", },
-       { .offset = 0x84,       .name = "tx_collision", },
-       { .offset = 0x85,       .name = "tx_drops", },
-       { .offset = 0x86,       .name = "tx_pause", },
-       { .offset = 0x87,       .name = "tx_frames_below_65_octets", },
-       { .offset = 0x88,       .name = "tx_frames_65_to_127_octets", },
-       { .offset = 0x89,       .name = "tx_frames_128_255_octets", },
-       { .offset = 0x8B,       .name = "tx_frames_256_511_octets", },
-       { .offset = 0x8C,       .name = "tx_frames_1024_1526_octets", },
-       { .offset = 0x8D,       .name = "tx_frames_over_1526_octets", },
-       { .offset = 0x8E,       .name = "tx_yellow_prio_0", },
-       { .offset = 0x8F,       .name = "tx_yellow_prio_1", },
-       { .offset = 0x90,       .name = "tx_yellow_prio_2", },
-       { .offset = 0x91,       .name = "tx_yellow_prio_3", },
-       { .offset = 0x92,       .name = "tx_yellow_prio_4", },
-       { .offset = 0x93,       .name = "tx_yellow_prio_5", },
-       { .offset = 0x94,       .name = "tx_yellow_prio_6", },
-       { .offset = 0x95,       .name = "tx_yellow_prio_7", },
-       { .offset = 0x96,       .name = "tx_green_prio_0", },
-       { .offset = 0x97,       .name = "tx_green_prio_1", },
-       { .offset = 0x98,       .name = "tx_green_prio_2", },
-       { .offset = 0x99,       .name = "tx_green_prio_3", },
-       { .offset = 0x9A,       .name = "tx_green_prio_4", },
-       { .offset = 0x9B,       .name = "tx_green_prio_5", },
-       { .offset = 0x9C,       .name = "tx_green_prio_6", },
-       { .offset = 0x9D,       .name = "tx_green_prio_7", },
-       { .offset = 0x9E,       .name = "tx_aged", },
-       { .offset = 0x100,      .name = "drop_local", },
-       { .offset = 0x101,      .name = "drop_tail", },
-       { .offset = 0x102,      .name = "drop_yellow_prio_0", },
-       { .offset = 0x103,      .name = "drop_yellow_prio_1", },
-       { .offset = 0x104,      .name = "drop_yellow_prio_2", },
-       { .offset = 0x105,      .name = "drop_yellow_prio_3", },
-       { .offset = 0x106,      .name = "drop_yellow_prio_4", },
-       { .offset = 0x107,      .name = "drop_yellow_prio_5", },
-       { .offset = 0x108,      .name = "drop_yellow_prio_6", },
-       { .offset = 0x109,      .name = "drop_yellow_prio_7", },
-       { .offset = 0x10A,      .name = "drop_green_prio_0", },
-       { .offset = 0x10B,      .name = "drop_green_prio_1", },
-       { .offset = 0x10C,      .name = "drop_green_prio_2", },
-       { .offset = 0x10D,      .name = "drop_green_prio_3", },
-       { .offset = 0x10E,      .name = "drop_green_prio_4", },
-       { .offset = 0x10F,      .name = "drop_green_prio_5", },
-       { .offset = 0x110,      .name = "drop_green_prio_6", },
-       { .offset = 0x111,      .name = "drop_green_prio_7", },
-       OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+       [OCELOT_STAT_RX_OCTETS] = {
+               .name = "rx_octets",
+               .reg = SYS_COUNT_RX_OCTETS,
+       },
+       [OCELOT_STAT_RX_UNICAST] = {
+               .name = "rx_unicast",
+               .reg = SYS_COUNT_RX_UNICAST,
+       },
+       [OCELOT_STAT_RX_MULTICAST] = {
+               .name = "rx_multicast",
+               .reg = SYS_COUNT_RX_MULTICAST,
+       },
+       [OCELOT_STAT_RX_BROADCAST] = {
+               .name = "rx_broadcast",
+               .reg = SYS_COUNT_RX_BROADCAST,
+       },
+       [OCELOT_STAT_RX_SHORTS] = {
+               .name = "rx_shorts",
+               .reg = SYS_COUNT_RX_SHORTS,
+       },
+       [OCELOT_STAT_RX_FRAGMENTS] = {
+               .name = "rx_fragments",
+               .reg = SYS_COUNT_RX_FRAGMENTS,
+       },
+       [OCELOT_STAT_RX_JABBERS] = {
+               .name = "rx_jabbers",
+               .reg = SYS_COUNT_RX_JABBERS,
+       },
+       [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+               .name = "rx_crc_align_errs",
+               .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+       },
+       [OCELOT_STAT_RX_SYM_ERRS] = {
+               .name = "rx_sym_errs",
+               .reg = SYS_COUNT_RX_SYM_ERRS,
+       },
+       [OCELOT_STAT_RX_64] = {
+               .name = "rx_frames_below_65_octets",
+               .reg = SYS_COUNT_RX_64,
+       },
+       [OCELOT_STAT_RX_65_127] = {
+               .name = "rx_frames_65_to_127_octets",
+               .reg = SYS_COUNT_RX_65_127,
+       },
+       [OCELOT_STAT_RX_128_255] = {
+               .name = "rx_frames_128_to_255_octets",
+               .reg = SYS_COUNT_RX_128_255,
+       },
+       [OCELOT_STAT_RX_256_511] = {
+               .name = "rx_frames_256_to_511_octets",
+               .reg = SYS_COUNT_RX_256_511,
+       },
+       [OCELOT_STAT_RX_512_1023] = {
+               .name = "rx_frames_512_to_1023_octets",
+               .reg = SYS_COUNT_RX_512_1023,
+       },
+       [OCELOT_STAT_RX_1024_1526] = {
+               .name = "rx_frames_1024_to_1526_octets",
+               .reg = SYS_COUNT_RX_1024_1526,
+       },
+       [OCELOT_STAT_RX_1527_MAX] = {
+               .name = "rx_frames_over_1526_octets",
+               .reg = SYS_COUNT_RX_1527_MAX,
+       },
+       [OCELOT_STAT_RX_PAUSE] = {
+               .name = "rx_pause",
+               .reg = SYS_COUNT_RX_PAUSE,
+       },
+       [OCELOT_STAT_RX_CONTROL] = {
+               .name = "rx_control",
+               .reg = SYS_COUNT_RX_CONTROL,
+       },
+       [OCELOT_STAT_RX_LONGS] = {
+               .name = "rx_longs",
+               .reg = SYS_COUNT_RX_LONGS,
+       },
+       [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+               .name = "rx_classified_drops",
+               .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_0] = {
+               .name = "rx_red_prio_0",
+               .reg = SYS_COUNT_RX_RED_PRIO_0,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_1] = {
+               .name = "rx_red_prio_1",
+               .reg = SYS_COUNT_RX_RED_PRIO_1,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_2] = {
+               .name = "rx_red_prio_2",
+               .reg = SYS_COUNT_RX_RED_PRIO_2,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_3] = {
+               .name = "rx_red_prio_3",
+               .reg = SYS_COUNT_RX_RED_PRIO_3,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_4] = {
+               .name = "rx_red_prio_4",
+               .reg = SYS_COUNT_RX_RED_PRIO_4,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_5] = {
+               .name = "rx_red_prio_5",
+               .reg = SYS_COUNT_RX_RED_PRIO_5,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_6] = {
+               .name = "rx_red_prio_6",
+               .reg = SYS_COUNT_RX_RED_PRIO_6,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_7] = {
+               .name = "rx_red_prio_7",
+               .reg = SYS_COUNT_RX_RED_PRIO_7,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+               .name = "rx_yellow_prio_0",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+               .name = "rx_yellow_prio_1",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+               .name = "rx_yellow_prio_2",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+               .name = "rx_yellow_prio_3",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+               .name = "rx_yellow_prio_4",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+               .name = "rx_yellow_prio_5",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+               .name = "rx_yellow_prio_6",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+               .name = "rx_yellow_prio_7",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+               .name = "rx_green_prio_0",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+               .name = "rx_green_prio_1",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+               .name = "rx_green_prio_2",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+               .name = "rx_green_prio_3",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+               .name = "rx_green_prio_4",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+               .name = "rx_green_prio_5",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+               .name = "rx_green_prio_6",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+               .name = "rx_green_prio_7",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+       },
+       [OCELOT_STAT_TX_OCTETS] = {
+               .name = "tx_octets",
+               .reg = SYS_COUNT_TX_OCTETS,
+       },
+       [OCELOT_STAT_TX_UNICAST] = {
+               .name = "tx_unicast",
+               .reg = SYS_COUNT_TX_UNICAST,
+       },
+       [OCELOT_STAT_TX_MULTICAST] = {
+               .name = "tx_multicast",
+               .reg = SYS_COUNT_TX_MULTICAST,
+       },
+       [OCELOT_STAT_TX_BROADCAST] = {
+               .name = "tx_broadcast",
+               .reg = SYS_COUNT_TX_BROADCAST,
+       },
+       [OCELOT_STAT_TX_COLLISION] = {
+               .name = "tx_collision",
+               .reg = SYS_COUNT_TX_COLLISION,
+       },
+       [OCELOT_STAT_TX_DROPS] = {
+               .name = "tx_drops",
+               .reg = SYS_COUNT_TX_DROPS,
+       },
+       [OCELOT_STAT_TX_PAUSE] = {
+               .name = "tx_pause",
+               .reg = SYS_COUNT_TX_PAUSE,
+       },
+       [OCELOT_STAT_TX_64] = {
+               .name = "tx_frames_below_65_octets",
+               .reg = SYS_COUNT_TX_64,
+       },
+       [OCELOT_STAT_TX_65_127] = {
+               .name = "tx_frames_65_to_127_octets",
+               .reg = SYS_COUNT_TX_65_127,
+       },
+       [OCELOT_STAT_TX_128_255] = {
+               .name = "tx_frames_128_255_octets",
+               .reg = SYS_COUNT_TX_128_255,
+       },
+       [OCELOT_STAT_TX_256_511] = {
+               .name = "tx_frames_256_511_octets",
+               .reg = SYS_COUNT_TX_256_511,
+       },
+       [OCELOT_STAT_TX_512_1023] = {
+               .name = "tx_frames_512_1023_octets",
+               .reg = SYS_COUNT_TX_512_1023,
+       },
+       [OCELOT_STAT_TX_1024_1526] = {
+               .name = "tx_frames_1024_1526_octets",
+               .reg = SYS_COUNT_TX_1024_1526,
+       },
+       [OCELOT_STAT_TX_1527_MAX] = {
+               .name = "tx_frames_over_1526_octets",
+               .reg = SYS_COUNT_TX_1527_MAX,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+               .name = "tx_yellow_prio_0",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+               .name = "tx_yellow_prio_1",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+               .name = "tx_yellow_prio_2",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+               .name = "tx_yellow_prio_3",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+               .name = "tx_yellow_prio_4",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+               .name = "tx_yellow_prio_5",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+               .name = "tx_yellow_prio_6",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+               .name = "tx_yellow_prio_7",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+               .name = "tx_green_prio_0",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+               .name = "tx_green_prio_1",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+               .name = "tx_green_prio_2",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+               .name = "tx_green_prio_3",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+               .name = "tx_green_prio_4",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+               .name = "tx_green_prio_5",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+               .name = "tx_green_prio_6",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+               .name = "tx_green_prio_7",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+       },
+       [OCELOT_STAT_TX_AGED] = {
+               .name = "tx_aged",
+               .reg = SYS_COUNT_TX_AGING,
+       },
+       [OCELOT_STAT_DROP_LOCAL] = {
+               .name = "drop_local",
+               .reg = SYS_COUNT_DROP_LOCAL,
+       },
+       [OCELOT_STAT_DROP_TAIL] = {
+               .name = "drop_tail",
+               .reg = SYS_COUNT_DROP_TAIL,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+               .name = "drop_yellow_prio_0",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+               .name = "drop_yellow_prio_1",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+               .name = "drop_yellow_prio_2",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+               .name = "drop_yellow_prio_3",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+               .name = "drop_yellow_prio_4",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+               .name = "drop_yellow_prio_5",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+               .name = "drop_yellow_prio_6",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+               .name = "drop_yellow_prio_7",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+               .name = "drop_green_prio_0",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+               .name = "drop_green_prio_1",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+               .name = "drop_green_prio_2",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+               .name = "drop_green_prio_3",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+               .name = "drop_green_prio_4",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+               .name = "drop_green_prio_5",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+               .name = "drop_green_prio_6",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+               .name = "drop_green_prio_7",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+       },
 };
 
 static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -2166,7 +2516,7 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
 static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
                                      struct felix_stream_filter_counters *counters)
 {
-       mutex_lock(&ocelot->stats_lock);
+       spin_lock(&ocelot->stats_lock);
 
        ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
                   SYS_STAT_CFG_STAT_VIEW_M,
@@ -2183,7 +2533,7 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
                     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
                     SYS_STAT_CFG);
 
-       mutex_unlock(&ocelot->stats_lock);
+       spin_unlock(&ocelot->stats_lock);
 }
 
 static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
index ea0649211356882c8aced655710cce6a190b6c5f..b34f4cdfe814c52ea3ffd8999d2866aee5da69a0 100644 (file)
@@ -270,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
 
 static const u32 vsc9953_sys_regmap[] = {
        REG(SYS_COUNT_RX_OCTETS,                0x000000),
+       REG(SYS_COUNT_RX_UNICAST,               0x000004),
        REG(SYS_COUNT_RX_MULTICAST,             0x000008),
+       REG(SYS_COUNT_RX_BROADCAST,             0x00000c),
        REG(SYS_COUNT_RX_SHORTS,                0x000010),
        REG(SYS_COUNT_RX_FRAGMENTS,             0x000014),
        REG(SYS_COUNT_RX_JABBERS,               0x000018),
+       REG(SYS_COUNT_RX_CRC_ALIGN_ERRS,        0x00001c),
+       REG(SYS_COUNT_RX_SYM_ERRS,              0x000020),
        REG(SYS_COUNT_RX_64,                    0x000024),
        REG(SYS_COUNT_RX_65_127,                0x000028),
        REG(SYS_COUNT_RX_128_255,               0x00002c),
-       REG(SYS_COUNT_RX_256_1023,              0x000030),
-       REG(SYS_COUNT_RX_1024_1526,             0x000034),
-       REG(SYS_COUNT_RX_1527_MAX,              0x000038),
+       REG(SYS_COUNT_RX_256_511,               0x000030),
+       REG(SYS_COUNT_RX_512_1023,              0x000034),
+       REG(SYS_COUNT_RX_1024_1526,             0x000038),
+       REG(SYS_COUNT_RX_1527_MAX,              0x00003c),
+       REG(SYS_COUNT_RX_PAUSE,                 0x000040),
+       REG(SYS_COUNT_RX_CONTROL,               0x000044),
        REG(SYS_COUNT_RX_LONGS,                 0x000048),
+       REG(SYS_COUNT_RX_CLASSIFIED_DROPS,      0x00004c),
+       REG(SYS_COUNT_RX_RED_PRIO_0,            0x000050),
+       REG(SYS_COUNT_RX_RED_PRIO_1,            0x000054),
+       REG(SYS_COUNT_RX_RED_PRIO_2,            0x000058),
+       REG(SYS_COUNT_RX_RED_PRIO_3,            0x00005c),
+       REG(SYS_COUNT_RX_RED_PRIO_4,            0x000060),
+       REG(SYS_COUNT_RX_RED_PRIO_5,            0x000064),
+       REG(SYS_COUNT_RX_RED_PRIO_6,            0x000068),
+       REG(SYS_COUNT_RX_RED_PRIO_7,            0x00006c),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_0,         0x000070),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_1,         0x000074),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_2,         0x000078),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_3,         0x00007c),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_4,         0x000080),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_5,         0x000084),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_6,         0x000088),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_7,         0x00008c),
+       REG(SYS_COUNT_RX_GREEN_PRIO_0,          0x000090),
+       REG(SYS_COUNT_RX_GREEN_PRIO_1,          0x000094),
+       REG(SYS_COUNT_RX_GREEN_PRIO_2,          0x000098),
+       REG(SYS_COUNT_RX_GREEN_PRIO_3,          0x00009c),
+       REG(SYS_COUNT_RX_GREEN_PRIO_4,          0x0000a0),
+       REG(SYS_COUNT_RX_GREEN_PRIO_5,          0x0000a4),
+       REG(SYS_COUNT_RX_GREEN_PRIO_6,          0x0000a8),
+       REG(SYS_COUNT_RX_GREEN_PRIO_7,          0x0000ac),
        REG(SYS_COUNT_TX_OCTETS,                0x000100),
+       REG(SYS_COUNT_TX_UNICAST,               0x000104),
+       REG(SYS_COUNT_TX_MULTICAST,             0x000108),
+       REG(SYS_COUNT_TX_BROADCAST,             0x00010c),
        REG(SYS_COUNT_TX_COLLISION,             0x000110),
        REG(SYS_COUNT_TX_DROPS,                 0x000114),
+       REG(SYS_COUNT_TX_PAUSE,                 0x000118),
        REG(SYS_COUNT_TX_64,                    0x00011c),
        REG(SYS_COUNT_TX_65_127,                0x000120),
-       REG(SYS_COUNT_TX_128_511,               0x000124),
-       REG(SYS_COUNT_TX_512_1023,              0x000128),
-       REG(SYS_COUNT_TX_1024_1526,             0x00012c),
-       REG(SYS_COUNT_TX_1527_MAX,              0x000130),
+       REG(SYS_COUNT_TX_128_255,               0x000124),
+       REG(SYS_COUNT_TX_256_511,               0x000128),
+       REG(SYS_COUNT_TX_512_1023,              0x00012c),
+       REG(SYS_COUNT_TX_1024_1526,             0x000130),
+       REG(SYS_COUNT_TX_1527_MAX,              0x000134),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_0,         0x000138),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_1,         0x00013c),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_2,         0x000140),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_3,         0x000144),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_4,         0x000148),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_5,         0x00014c),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_6,         0x000150),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_7,         0x000154),
+       REG(SYS_COUNT_TX_GREEN_PRIO_0,          0x000158),
+       REG(SYS_COUNT_TX_GREEN_PRIO_1,          0x00015c),
+       REG(SYS_COUNT_TX_GREEN_PRIO_2,          0x000160),
+       REG(SYS_COUNT_TX_GREEN_PRIO_3,          0x000164),
+       REG(SYS_COUNT_TX_GREEN_PRIO_4,          0x000168),
+       REG(SYS_COUNT_TX_GREEN_PRIO_5,          0x00016c),
+       REG(SYS_COUNT_TX_GREEN_PRIO_6,          0x000170),
+       REG(SYS_COUNT_TX_GREEN_PRIO_7,          0x000174),
        REG(SYS_COUNT_TX_AGING,                 0x000178),
+       REG(SYS_COUNT_DROP_LOCAL,               0x000200),
+       REG(SYS_COUNT_DROP_TAIL,                0x000204),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_0,       0x000208),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_1,       0x00020c),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_2,       0x000210),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_3,       0x000214),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_4,       0x000218),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_5,       0x00021c),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_6,       0x000220),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_7,       0x000224),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_0,        0x000228),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_1,        0x00022c),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_2,        0x000230),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_3,        0x000234),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_4,        0x000238),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_5,        0x00023c),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_6,        0x000240),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_7,        0x000244),
        REG(SYS_RESET_CFG,                      0x000318),
        REG_RESERVED(SYS_SR_ETYPE_CFG),
        REG(SYS_VLAN_ETYPE_CFG,                 0x000320),
@@ -543,101 +614,379 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
        [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
 };
 
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
-       { .offset = 0x00,       .name = "rx_octets", },
-       { .offset = 0x01,       .name = "rx_unicast", },
-       { .offset = 0x02,       .name = "rx_multicast", },
-       { .offset = 0x03,       .name = "rx_broadcast", },
-       { .offset = 0x04,       .name = "rx_shorts", },
-       { .offset = 0x05,       .name = "rx_fragments", },
-       { .offset = 0x06,       .name = "rx_jabbers", },
-       { .offset = 0x07,       .name = "rx_crc_align_errs", },
-       { .offset = 0x08,       .name = "rx_sym_errs", },
-       { .offset = 0x09,       .name = "rx_frames_below_65_octets", },
-       { .offset = 0x0A,       .name = "rx_frames_65_to_127_octets", },
-       { .offset = 0x0B,       .name = "rx_frames_128_to_255_octets", },
-       { .offset = 0x0C,       .name = "rx_frames_256_to_511_octets", },
-       { .offset = 0x0D,       .name = "rx_frames_512_to_1023_octets", },
-       { .offset = 0x0E,       .name = "rx_frames_1024_to_1526_octets", },
-       { .offset = 0x0F,       .name = "rx_frames_over_1526_octets", },
-       { .offset = 0x10,       .name = "rx_pause", },
-       { .offset = 0x11,       .name = "rx_control", },
-       { .offset = 0x12,       .name = "rx_longs", },
-       { .offset = 0x13,       .name = "rx_classified_drops", },
-       { .offset = 0x14,       .name = "rx_red_prio_0", },
-       { .offset = 0x15,       .name = "rx_red_prio_1", },
-       { .offset = 0x16,       .name = "rx_red_prio_2", },
-       { .offset = 0x17,       .name = "rx_red_prio_3", },
-       { .offset = 0x18,       .name = "rx_red_prio_4", },
-       { .offset = 0x19,       .name = "rx_red_prio_5", },
-       { .offset = 0x1A,       .name = "rx_red_prio_6", },
-       { .offset = 0x1B,       .name = "rx_red_prio_7", },
-       { .offset = 0x1C,       .name = "rx_yellow_prio_0", },
-       { .offset = 0x1D,       .name = "rx_yellow_prio_1", },
-       { .offset = 0x1E,       .name = "rx_yellow_prio_2", },
-       { .offset = 0x1F,       .name = "rx_yellow_prio_3", },
-       { .offset = 0x20,       .name = "rx_yellow_prio_4", },
-       { .offset = 0x21,       .name = "rx_yellow_prio_5", },
-       { .offset = 0x22,       .name = "rx_yellow_prio_6", },
-       { .offset = 0x23,       .name = "rx_yellow_prio_7", },
-       { .offset = 0x24,       .name = "rx_green_prio_0", },
-       { .offset = 0x25,       .name = "rx_green_prio_1", },
-       { .offset = 0x26,       .name = "rx_green_prio_2", },
-       { .offset = 0x27,       .name = "rx_green_prio_3", },
-       { .offset = 0x28,       .name = "rx_green_prio_4", },
-       { .offset = 0x29,       .name = "rx_green_prio_5", },
-       { .offset = 0x2A,       .name = "rx_green_prio_6", },
-       { .offset = 0x2B,       .name = "rx_green_prio_7", },
-       { .offset = 0x40,       .name = "tx_octets", },
-       { .offset = 0x41,       .name = "tx_unicast", },
-       { .offset = 0x42,       .name = "tx_multicast", },
-       { .offset = 0x43,       .name = "tx_broadcast", },
-       { .offset = 0x44,       .name = "tx_collision", },
-       { .offset = 0x45,       .name = "tx_drops", },
-       { .offset = 0x46,       .name = "tx_pause", },
-       { .offset = 0x47,       .name = "tx_frames_below_65_octets", },
-       { .offset = 0x48,       .name = "tx_frames_65_to_127_octets", },
-       { .offset = 0x49,       .name = "tx_frames_128_255_octets", },
-       { .offset = 0x4A,       .name = "tx_frames_256_511_octets", },
-       { .offset = 0x4B,       .name = "tx_frames_512_1023_octets", },
-       { .offset = 0x4C,       .name = "tx_frames_1024_1526_octets", },
-       { .offset = 0x4D,       .name = "tx_frames_over_1526_octets", },
-       { .offset = 0x4E,       .name = "tx_yellow_prio_0", },
-       { .offset = 0x4F,       .name = "tx_yellow_prio_1", },
-       { .offset = 0x50,       .name = "tx_yellow_prio_2", },
-       { .offset = 0x51,       .name = "tx_yellow_prio_3", },
-       { .offset = 0x52,       .name = "tx_yellow_prio_4", },
-       { .offset = 0x53,       .name = "tx_yellow_prio_5", },
-       { .offset = 0x54,       .name = "tx_yellow_prio_6", },
-       { .offset = 0x55,       .name = "tx_yellow_prio_7", },
-       { .offset = 0x56,       .name = "tx_green_prio_0", },
-       { .offset = 0x57,       .name = "tx_green_prio_1", },
-       { .offset = 0x58,       .name = "tx_green_prio_2", },
-       { .offset = 0x59,       .name = "tx_green_prio_3", },
-       { .offset = 0x5A,       .name = "tx_green_prio_4", },
-       { .offset = 0x5B,       .name = "tx_green_prio_5", },
-       { .offset = 0x5C,       .name = "tx_green_prio_6", },
-       { .offset = 0x5D,       .name = "tx_green_prio_7", },
-       { .offset = 0x5E,       .name = "tx_aged", },
-       { .offset = 0x80,       .name = "drop_local", },
-       { .offset = 0x81,       .name = "drop_tail", },
-       { .offset = 0x82,       .name = "drop_yellow_prio_0", },
-       { .offset = 0x83,       .name = "drop_yellow_prio_1", },
-       { .offset = 0x84,       .name = "drop_yellow_prio_2", },
-       { .offset = 0x85,       .name = "drop_yellow_prio_3", },
-       { .offset = 0x86,       .name = "drop_yellow_prio_4", },
-       { .offset = 0x87,       .name = "drop_yellow_prio_5", },
-       { .offset = 0x88,       .name = "drop_yellow_prio_6", },
-       { .offset = 0x89,       .name = "drop_yellow_prio_7", },
-       { .offset = 0x8A,       .name = "drop_green_prio_0", },
-       { .offset = 0x8B,       .name = "drop_green_prio_1", },
-       { .offset = 0x8C,       .name = "drop_green_prio_2", },
-       { .offset = 0x8D,       .name = "drop_green_prio_3", },
-       { .offset = 0x8E,       .name = "drop_green_prio_4", },
-       { .offset = 0x8F,       .name = "drop_green_prio_5", },
-       { .offset = 0x90,       .name = "drop_green_prio_6", },
-       { .offset = 0x91,       .name = "drop_green_prio_7", },
-       OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+       [OCELOT_STAT_RX_OCTETS] = {
+               .name = "rx_octets",
+               .reg = SYS_COUNT_RX_OCTETS,
+       },
+       [OCELOT_STAT_RX_UNICAST] = {
+               .name = "rx_unicast",
+               .reg = SYS_COUNT_RX_UNICAST,
+       },
+       [OCELOT_STAT_RX_MULTICAST] = {
+               .name = "rx_multicast",
+               .reg = SYS_COUNT_RX_MULTICAST,
+       },
+       [OCELOT_STAT_RX_BROADCAST] = {
+               .name = "rx_broadcast",
+               .reg = SYS_COUNT_RX_BROADCAST,
+       },
+       [OCELOT_STAT_RX_SHORTS] = {
+               .name = "rx_shorts",
+               .reg = SYS_COUNT_RX_SHORTS,
+       },
+       [OCELOT_STAT_RX_FRAGMENTS] = {
+               .name = "rx_fragments",
+               .reg = SYS_COUNT_RX_FRAGMENTS,
+       },
+       [OCELOT_STAT_RX_JABBERS] = {
+               .name = "rx_jabbers",
+               .reg = SYS_COUNT_RX_JABBERS,
+       },
+       [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+               .name = "rx_crc_align_errs",
+               .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+       },
+       [OCELOT_STAT_RX_SYM_ERRS] = {
+               .name = "rx_sym_errs",
+               .reg = SYS_COUNT_RX_SYM_ERRS,
+       },
+       [OCELOT_STAT_RX_64] = {
+               .name = "rx_frames_below_65_octets",
+               .reg = SYS_COUNT_RX_64,
+       },
+       [OCELOT_STAT_RX_65_127] = {
+               .name = "rx_frames_65_to_127_octets",
+               .reg = SYS_COUNT_RX_65_127,
+       },
+       [OCELOT_STAT_RX_128_255] = {
+               .name = "rx_frames_128_to_255_octets",
+               .reg = SYS_COUNT_RX_128_255,
+       },
+       [OCELOT_STAT_RX_256_511] = {
+               .name = "rx_frames_256_to_511_octets",
+               .reg = SYS_COUNT_RX_256_511,
+       },
+       [OCELOT_STAT_RX_512_1023] = {
+               .name = "rx_frames_512_to_1023_octets",
+               .reg = SYS_COUNT_RX_512_1023,
+       },
+       [OCELOT_STAT_RX_1024_1526] = {
+               .name = "rx_frames_1024_to_1526_octets",
+               .reg = SYS_COUNT_RX_1024_1526,
+       },
+       [OCELOT_STAT_RX_1527_MAX] = {
+               .name = "rx_frames_over_1526_octets",
+               .reg = SYS_COUNT_RX_1527_MAX,
+       },
+       [OCELOT_STAT_RX_PAUSE] = {
+               .name = "rx_pause",
+               .reg = SYS_COUNT_RX_PAUSE,
+       },
+       [OCELOT_STAT_RX_CONTROL] = {
+               .name = "rx_control",
+               .reg = SYS_COUNT_RX_CONTROL,
+       },
+       [OCELOT_STAT_RX_LONGS] = {
+               .name = "rx_longs",
+               .reg = SYS_COUNT_RX_LONGS,
+       },
+       [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+               .name = "rx_classified_drops",
+               .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_0] = {
+               .name = "rx_red_prio_0",
+               .reg = SYS_COUNT_RX_RED_PRIO_0,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_1] = {
+               .name = "rx_red_prio_1",
+               .reg = SYS_COUNT_RX_RED_PRIO_1,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_2] = {
+               .name = "rx_red_prio_2",
+               .reg = SYS_COUNT_RX_RED_PRIO_2,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_3] = {
+               .name = "rx_red_prio_3",
+               .reg = SYS_COUNT_RX_RED_PRIO_3,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_4] = {
+               .name = "rx_red_prio_4",
+               .reg = SYS_COUNT_RX_RED_PRIO_4,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_5] = {
+               .name = "rx_red_prio_5",
+               .reg = SYS_COUNT_RX_RED_PRIO_5,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_6] = {
+               .name = "rx_red_prio_6",
+               .reg = SYS_COUNT_RX_RED_PRIO_6,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_7] = {
+               .name = "rx_red_prio_7",
+               .reg = SYS_COUNT_RX_RED_PRIO_7,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+               .name = "rx_yellow_prio_0",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+               .name = "rx_yellow_prio_1",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+               .name = "rx_yellow_prio_2",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+               .name = "rx_yellow_prio_3",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+               .name = "rx_yellow_prio_4",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+               .name = "rx_yellow_prio_5",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+               .name = "rx_yellow_prio_6",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+               .name = "rx_yellow_prio_7",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+               .name = "rx_green_prio_0",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+               .name = "rx_green_prio_1",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+               .name = "rx_green_prio_2",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+               .name = "rx_green_prio_3",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+               .name = "rx_green_prio_4",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+               .name = "rx_green_prio_5",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+               .name = "rx_green_prio_6",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+               .name = "rx_green_prio_7",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+       },
+       [OCELOT_STAT_TX_OCTETS] = {
+               .name = "tx_octets",
+               .reg = SYS_COUNT_TX_OCTETS,
+       },
+       [OCELOT_STAT_TX_UNICAST] = {
+               .name = "tx_unicast",
+               .reg = SYS_COUNT_TX_UNICAST,
+       },
+       [OCELOT_STAT_TX_MULTICAST] = {
+               .name = "tx_multicast",
+               .reg = SYS_COUNT_TX_MULTICAST,
+       },
+       [OCELOT_STAT_TX_BROADCAST] = {
+               .name = "tx_broadcast",
+               .reg = SYS_COUNT_TX_BROADCAST,
+       },
+       [OCELOT_STAT_TX_COLLISION] = {
+               .name = "tx_collision",
+               .reg = SYS_COUNT_TX_COLLISION,
+       },
+       [OCELOT_STAT_TX_DROPS] = {
+               .name = "tx_drops",
+               .reg = SYS_COUNT_TX_DROPS,
+       },
+       [OCELOT_STAT_TX_PAUSE] = {
+               .name = "tx_pause",
+               .reg = SYS_COUNT_TX_PAUSE,
+       },
+       [OCELOT_STAT_TX_64] = {
+               .name = "tx_frames_below_65_octets",
+               .reg = SYS_COUNT_TX_64,
+       },
+       [OCELOT_STAT_TX_65_127] = {
+               .name = "tx_frames_65_to_127_octets",
+               .reg = SYS_COUNT_TX_65_127,
+       },
+       [OCELOT_STAT_TX_128_255] = {
+               .name = "tx_frames_128_255_octets",
+               .reg = SYS_COUNT_TX_128_255,
+       },
+       [OCELOT_STAT_TX_256_511] = {
+               .name = "tx_frames_256_511_octets",
+               .reg = SYS_COUNT_TX_256_511,
+       },
+       [OCELOT_STAT_TX_512_1023] = {
+               .name = "tx_frames_512_1023_octets",
+               .reg = SYS_COUNT_TX_512_1023,
+       },
+       [OCELOT_STAT_TX_1024_1526] = {
+               .name = "tx_frames_1024_1526_octets",
+               .reg = SYS_COUNT_TX_1024_1526,
+       },
+       [OCELOT_STAT_TX_1527_MAX] = {
+               .name = "tx_frames_over_1526_octets",
+               .reg = SYS_COUNT_TX_1527_MAX,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+               .name = "tx_yellow_prio_0",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+               .name = "tx_yellow_prio_1",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+               .name = "tx_yellow_prio_2",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+               .name = "tx_yellow_prio_3",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+               .name = "tx_yellow_prio_4",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+               .name = "tx_yellow_prio_5",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+               .name = "tx_yellow_prio_6",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+               .name = "tx_yellow_prio_7",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+               .name = "tx_green_prio_0",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+               .name = "tx_green_prio_1",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+               .name = "tx_green_prio_2",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+               .name = "tx_green_prio_3",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+               .name = "tx_green_prio_4",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+               .name = "tx_green_prio_5",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+               .name = "tx_green_prio_6",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+               .name = "tx_green_prio_7",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+       },
+       [OCELOT_STAT_TX_AGED] = {
+               .name = "tx_aged",
+               .reg = SYS_COUNT_TX_AGING,
+       },
+       [OCELOT_STAT_DROP_LOCAL] = {
+               .name = "drop_local",
+               .reg = SYS_COUNT_DROP_LOCAL,
+       },
+       [OCELOT_STAT_DROP_TAIL] = {
+               .name = "drop_tail",
+               .reg = SYS_COUNT_DROP_TAIL,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+               .name = "drop_yellow_prio_0",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+               .name = "drop_yellow_prio_1",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+               .name = "drop_yellow_prio_2",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+               .name = "drop_yellow_prio_3",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+               .name = "drop_yellow_prio_4",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+               .name = "drop_yellow_prio_5",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+               .name = "drop_yellow_prio_6",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+               .name = "drop_yellow_prio_7",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+               .name = "drop_green_prio_0",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+               .name = "drop_green_prio_1",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+               .name = "drop_green_prio_2",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+               .name = "drop_green_prio_3",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+               .name = "drop_green_prio_4",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+               .name = "drop_green_prio_5",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+               .name = "drop_green_prio_6",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+               .name = "drop_green_prio_7",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+       },
 };
 
 static const struct vcap_field vsc9953_vcap_es0_keys[] = {
index 0569ff066634dee718bfbc7e1b2b40c45bc080aa..10c6fea1227fa698fe8c5a261fb678eb88165f16 100644 (file)
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
 
                region = dsa_devlink_region_create(ds, ops, 1, size);
                if (IS_ERR(region)) {
-                       while (i-- >= 0)
+                       while (--i >= 0)
                                dsa_devlink_region_destroy(priv->regions[i]);
                        return PTR_ERR(region);
                }
index 7071604f9984cb62db7690605fdf13bc3f5892c2..02808513ffe45b7b09c4fee82e6e79f4849544c8 100644 (file)
@@ -13844,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
        /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
         * Since some switches tend to reinit the AN process and clear the
-        * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+        * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
         * and recovered many times
         */
        if (vars->check_kr2_recovery_cnt > 0) {
index ba0f1ffac507126179af73feeb42ba581569bd40..f46eefb5a029252e87c156f8f2f4b77ceae0003e 100644 (file)
@@ -11178,10 +11178,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
        if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
 
-       if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
-               features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
-
-       if (!(bp->flags & BNXT_FLAG_TPA))
+       if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
                features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 
        if (!(features & NETIF_F_GRO))
index 075c6206325ce387052629a00a1da74b0c0c5462..b1b17f911300636983455eb11c6f50116c3c583b 100644 (file)
@@ -2130,6 +2130,7 @@ struct bnxt {
 #define BNXT_DUMP_CRASH                1
 
        struct bpf_prog         *xdp_prog;
+       u8                      xdp_has_frags;
 
        struct bnxt_ptp_cfg     *ptp_cfg;
        u8                      ptp_all_rx_tstamp;
index 059f96f7a96f6495f691118b7ffdf5cb9675db96..a36803e79e92e1b78147bb308020aba27f4384ab 100644 (file)
@@ -1306,6 +1306,7 @@ int bnxt_dl_register(struct bnxt *bp)
        if (rc)
                goto err_dl_port_unreg;
 
+       devlink_set_features(dl, DEVLINK_F_RELOAD);
 out:
        devlink_register(dl);
        return 0;
index 730febd19330afaa74b25dc2d845a52d79a53bd0..a4cba7cb2783ea70cbcf4b5b40c161b40aa19739 100644 (file)
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
                hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
-                       hw_resc->max_irqs -= vf_msix * n;
+                       hw_resc->max_nqs -= vf_msix;
 
                rc = pf->active_vfs;
        }
index f53387ed0167bb93dbba6659c42c0ff5c2b63cfb..c3065ec0a47981064c133ec9bff25bf44e3eefd0 100644 (file)
@@ -181,6 +181,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                        struct xdp_buff *xdp)
 {
        struct bnxt_sw_rx_bd *rx_buf;
+       u32 buflen = PAGE_SIZE;
        struct pci_dev *pdev;
        dma_addr_t mapping;
        u32 offset;
@@ -192,7 +193,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        mapping = rx_buf->mapping - bp->rx_dma_offset;
        dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
 
-       xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+       if (bp->xdp_has_frags)
+               buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+       xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
        xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
 }
 
@@ -397,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
                netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
                return -EOPNOTSUPP;
        }
-       if (prog)
+       if (prog) {
                tx_xdp = bp->rx_nr_rings;
+               bp->xdp_has_frags = prog->aux->xdp_has_frags;
+       }
 
        tc = netdev_get_num_tc(dev);
        if (!tc)
index 84604aff53ce73481f0eeafe699e5dc431671115..89256b86684032c658985be80dffe076312d6732 100644 (file)
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
 
                /*
                 * on rx, the iscsi pdu has to be < rx page size and the
-                * the max rx data length programmed in TP
+                * max rx data length programmed in TP
                 */
                val = min(adapter->params.tp.rx_pg_size,
                          ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
index 26433a62d7f0d92ea44bf62768a3f819de578047..fed5f93bf620abfc7e6faddf4abc88ae4aca45ea 100644 (file)
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
        __be32 opt2;
        __be64 opt0;
        __be32 iss;
-       __be32 rsvd[3];
+       __be32 rsvd;
 };
 
 struct cpl_act_open_req {
index 45634579adb67814dc3c228f778cf2fda2028695..a770bab4d1ed2c6b19c37913e0c491abc6bd6382 100644 (file)
@@ -2886,6 +2886,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
 
 /* The Aquantia PHYs are capable of performing rate adaptation */
 #define PHY_VEND_AQUANTIA      0x03a1b400
+#define PHY_VEND_AQUANTIA2     0x31c31c00
 
 static int dpaa_phy_init(struct net_device *net_dev)
 {
@@ -2893,6 +2894,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
        struct mac_device *mac_dev;
        struct phy_device *phy_dev;
        struct dpaa_priv *priv;
+       u32 phy_vendor;
 
        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;
@@ -2905,9 +2907,11 @@ static int dpaa_phy_init(struct net_device *net_dev)
                return -ENODEV;
        }
 
+       phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
        /* Unless the PHY is capable of rate adaptation */
        if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
-           ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+           (phy_vendor != PHY_VEND_AQUANTIA &&
+            phy_vendor != PHY_VEND_AQUANTIA2)) {
                /* remove any features not supported by the controller */
                ethtool_convert_legacy_u32_to_link_mode(mask,
                                                        mac_dev->if_support);
index ed7301b6916941268d1f6ad54adab7c1d3615ca5..0cebe4b63adb76d76f0fe158207c062cf6c692d4 100644 (file)
@@ -634,6 +634,13 @@ struct fec_enet_private {
        int pps_enable;
        unsigned int next_counter;
 
+       struct {
+               struct timespec64 ts_phc;
+               u64 ns_sys;
+               u32 at_corr;
+               u8 at_inc_corr;
+       } ptp_saved_state;
+
        u64 ethtool_stats[];
 };
 
@@ -644,5 +651,8 @@ void fec_ptp_disable_hwts(struct net_device *ndev);
 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
 int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
 
+void fec_ptp_save_state(struct fec_enet_private *fep);
+int fec_ptp_restore_state(struct fec_enet_private *fep);
+
 /****************************************************************************/
 #endif /* FEC_H */
index e8e2aa1e7f01b783f6e48564fd1c34b698375ef6..b0d60f898249b5ada84e498bca352fc571eb9fd4 100644 (file)
@@ -285,8 +285,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define FEC_MMFR_TA            (2 << 16)
 #define FEC_MMFR_DATA(v)       (v & 0xffff)
 /* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN                (1 << 2)
-#define FEC_ECR_SLEEP          (1 << 3)
+#define FEC_ECR_RESET   BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP   BIT(3)
+#define FEC_ECR_EN1588  BIT(4)
 
 #define FEC_MII_TIMEOUT                30000 /* us */
 
@@ -982,6 +985,9 @@ fec_restart(struct net_device *ndev)
        u32 temp_mac[2];
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */
+       struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+
+       fec_ptp_save_state(fep);
 
        /* Whack a reset.  We should wait for this.
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1135,7 +1141,7 @@ fec_restart(struct net_device *ndev)
        }
 
        if (fep->bufdesc_ex)
-               ecntl |= (1 << 4);
+               ecntl |= FEC_ECR_EN1588;
 
        if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
            fep->rgmii_txc_dly)
@@ -1156,6 +1162,14 @@ fec_restart(struct net_device *ndev)
        if (fep->bufdesc_ex)
                fec_ptp_start_cyclecounter(ndev);
 
+       /* Restart PPS if needed */
+       if (fep->pps_enable) {
+               /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+               fep->pps_enable = 0;
+               fec_ptp_restore_state(fep);
+               fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+       }
+
        /* Enable interrupts we wish to service */
        if (fep->link)
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1206,6 +1220,8 @@ fec_stop(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
        u32 val;
+       struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+       u32 ecntl = 0;
 
        /* We cannot expect a graceful transmit stop without link !!! */
        if (fep->link) {
@@ -1215,6 +1231,8 @@ fec_stop(struct net_device *ndev)
                        netdev_err(ndev, "Graceful transmit stop did not complete!\n");
        }
 
+       fec_ptp_save_state(fep);
+
        /* Whack a reset.  We should wait for this.
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
         * instead of reset MAC itself.
@@ -1234,12 +1252,28 @@ fec_stop(struct net_device *ndev)
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
+       if (fep->bufdesc_ex)
+               ecntl |= FEC_ECR_EN1588;
+
        /* We have to keep ENET enabled to have MII interrupt stay working */
        if (fep->quirks & FEC_QUIRK_ENET_MAC &&
                !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
-               writel(2, fep->hwp + FEC_ECNTRL);
+               ecntl |= FEC_ECR_ETHEREN;
                writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
        }
+
+       writel(ecntl, fep->hwp + FEC_ECNTRL);
+
+       if (fep->bufdesc_ex)
+               fec_ptp_start_cyclecounter(ndev);
+
+       /* Restart PPS if needed */
+       if (fep->pps_enable) {
+               /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+               fep->pps_enable = 0;
+               fec_ptp_restore_state(fep);
+               fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+       }
 }
 
 
index 7d49c28215f315de3b78f0355e7cdcf3c449cdbd..c74d04f4b2fd2d1ee9eab1a211a242a1c021a603 100644 (file)
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
                 * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
                 * to current timer would be next second.
                 */
-               tempval = readl(fep->hwp + FEC_ATIME_CTRL);
-               tempval |= FEC_T_CTRL_CAPTURE;
-               writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
-               tempval = readl(fep->hwp + FEC_ATIME);
+               tempval = fep->cc.read(&fep->cc);
                /* Convert the ptp local counter to 1588 timestamp */
                ns = timecounter_cyc2time(&fep->tc, tempval);
                ts = ns_to_timespec64(ns);
@@ -637,7 +633,36 @@ void fec_ptp_stop(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       if (fep->pps_enable)
+               fec_ptp_enable_pps(fep, 0);
+
        cancel_delayed_work_sync(&fep->time_keep);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
 }
+
+void fec_ptp_save_state(struct fec_enet_private *fep)
+{
+       u32 atime_inc_corr;
+
+       fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+       fep->ptp_saved_state.ns_sys = ktime_get_ns();
+
+       fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
+       atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
+       fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
+}
+
+int fec_ptp_restore_state(struct fec_enet_private *fep)
+{
+       u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+       u64 ns_sys;
+
+       writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
+       atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
+       writel(atime_inc, fep->hwp + FEC_ATIME_INC);
+
+       ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
+       timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
+       return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+}
index 156e92c437803b30638174b4e54a08abf6f5b081..e9cd0fa6a0d2f2d62df9b7334b6f7e407561880f 100644 (file)
@@ -4485,7 +4485,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
                                    (struct in6_addr *)&ipv6_full_mask))
                        new_mask |= I40E_L3_V6_DST_MASK;
                else if (ipv6_addr_any((struct in6_addr *)
-                                      &usr_ip6_spec->ip6src))
+                                      &usr_ip6_spec->ip6dst))
                        new_mask &= ~I40E_L3_V6_DST_MASK;
                else
                        return -EOPNOTSUPP;
index b36bf9c3e1e49a2dde1f7bde27b16590d8fe5c80..9f1d5de7bf16191dec2c7a939f794666fd4dc776 100644 (file)
@@ -384,7 +384,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
                set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
                break;
        default:
-               netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+               netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+               set_bit(__I40E_DOWN_REQUESTED, pf->state);
+               set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
                break;
        }
 
index f6ba97a0166eb43670654caa593fd254ff1581e0..d4226161a3efc408b96e181d427e46abaa078093 100644 (file)
@@ -3203,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 
        protocol = vlan_get_protocol(skb);
 
-       if (eth_p_mpls(protocol))
+       if (eth_p_mpls(protocol)) {
                ip.hdr = skb_inner_network_header(skb);
-       else
+               l4.hdr = skb_checksum_start(skb);
+       } else {
                ip.hdr = skb_network_header(skb);
-       l4.hdr = skb_checksum_start(skb);
+               l4.hdr = skb_transport_header(skb);
+       }
 
        /* set the tx_flags to indicate the IP protocol type. this is
         * required so that checksum header computation below is accurate.
index cd4e6a22d0f9fb9d2aa835580334d3d40d44238e..9ffbd24d83cb67572aaed3a4ecedda944945a4a5 100644 (file)
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
 static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
 {
        enum iavf_status ret_code = 0;
+       int i;
 
        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
        /* initialize base registers */
        ret_code = iavf_config_asq_regs(hw);
        if (ret_code)
-               goto init_adminq_free_rings;
+               goto init_free_asq_bufs;
 
        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
 
+init_free_asq_bufs:
+       for (i = 0; i < hw->aq.num_asq_entries; i++)
+               iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+       iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
 init_adminq_free_rings:
        iavf_free_adminq_asq(hw);
 
@@ -383,6 +389,7 @@ init_adminq_exit:
 static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
 {
        enum iavf_status ret_code = 0;
+       int i;
 
        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
        /* initialize base registers */
        ret_code = iavf_config_arq_regs(hw);
        if (ret_code)
-               goto init_adminq_free_rings;
+               goto init_free_arq_bufs;
 
        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
 
+init_free_arq_bufs:
+       for (i = 0; i < hw->aq.num_arq_entries; i++)
+               iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+       iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
 init_adminq_free_rings:
        iavf_free_adminq_arq(hw);
 
index 45d097a164adc2fd60839674b3b8c6d652a171df..f39440ad5c50d6c07e055c145a4ec66e2a293e2f 100644 (file)
@@ -2367,7 +2367,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
        err = iavf_get_vf_config(adapter);
        if (err == -EALREADY) {
                err = iavf_send_vf_config_msg(adapter);
-               goto err_alloc;
+               goto err;
        } else if (err == -EINVAL) {
                /* We only get -EINVAL if the device is in a very bad
                 * state or if we've been disabled for previous bad
@@ -3086,12 +3086,15 @@ continue_reset:
 
        return;
 reset_err:
+       if (running) {
+               set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+               iavf_free_traffic_irqs(adapter);
+       }
+       iavf_disable_vf(adapter);
+
        mutex_unlock(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
-       if (running)
-               iavf_change_state(adapter, __IAVF_RUNNING);
        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
-       iavf_close(netdev);
 }
 
 /**
@@ -4085,8 +4088,17 @@ static int iavf_open(struct net_device *netdev)
                return -EIO;
        }
 
-       while (!mutex_trylock(&adapter->crit_lock))
+       while (!mutex_trylock(&adapter->crit_lock)) {
+               /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
+                * is already taken and iavf_open is called from an upper
+                * device's notifier reacting on NETDEV_REGISTER event.
+                * We have to leave here to avoid dead lock.
+                */
+               if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+                       return -EBUSY;
+
                usleep_range(500, 1000);
+       }
 
        if (adapter->state != __IAVF_DOWN) {
                err = -EBUSY;
index cc5b85afd437e70b8a43a910cccda29bd16e7603..841fa149c4076c6c26fe46d84b8baa32b6a3d1da 100644 (file)
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
  *
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 {
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 }
 
 /**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
  *
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets XSK buff pool pointer on XDP ring.
+ *
+ * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
+ * queue id. Reason for doing so is that queue vectors might have assigned more
+ * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
+ * carries a pointer to one of these XDP rings for its own purposes, such as
+ * handling XDP_TX action, therefore we can piggyback here on the
+ * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
  */
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 {
-       struct ice_vsi *vsi = ring->vsi;
-       u16 qid;
+       struct ice_tx_ring *ring;
 
-       qid = ring->q_index - vsi->alloc_txq;
+       ring = vsi->rx_rings[qid]->xdp_ring;
+       if (!ring)
+               return;
 
-       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-               return NULL;
+       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+               ring->xsk_pool = NULL;
+               return;
+       }
 
-       return xsk_get_pool_from_qid(vsi->netdev, qid);
+       ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
 }
 
 /**
index 85a94483c2edca248436ed2bbc0674478c0ddc88..40e678cfb50784aacb3329c46e1a91e550ad0bad 100644 (file)
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
        int result;
 
        result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
-       if (result)
+       if (result && result != -EEXIST)
                dev_err(ice_pf_to_dev(pf),
                        "Error setting promisc mode on VSI %i (rc=%d)\n",
                        vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
        int result;
 
        result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
-       if (result)
+       if (result && result != -EEXIST)
                dev_err(ice_pf_to_dev(pf),
                        "Error clearing promisc mode on VSI %i (rc=%d)\n",
                        vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
        int result;
 
        result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
-       if (result)
+       if (result && result != -EEXIST)
                dev_err(ice_pf_to_dev(pf),
                        "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
                        ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
        int result;
 
        result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
-       if (result)
+       if (result && result != -EEXIST)
                dev_err(ice_pf_to_dev(pf),
                        "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
                        ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
index a830f7f9aed050fb29939c40f26515ec58266940..0c4ec92640710541b0a1220f47c3477c9aa347f3 100644 (file)
@@ -1986,8 +1986,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
        if (ret)
                return ret;
 
-       ice_for_each_xdp_txq(vsi, i)
-               vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+       ice_for_each_rxq(vsi, i)
+               ice_tx_xsk_pool(vsi, i);
 
        return ret;
 }
@@ -3181,7 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 
        pf = vsi->back;
        vtype = vsi->type;
-       if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+       if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
                return -EINVAL;
 
        ice_vsi_init_vlan_ops(vsi);
@@ -4062,7 +4062,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
        if (err && err != -EEXIST)
                return err;
 
-       return 0;
+       /* when deleting the last VLAN filter, make sure to disable the VLAN
+        * promisc mode so the filter isn't left by accident
+        */
+       return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                   ICE_MCAST_VLAN_PROMISC_BITS, 0);
 }
 
 /**
index eb40526ee179fdb68cdb3fecde962df9b3d64470..173fe6c313418fb63a74e02eb152e4a40cc24cfc 100644 (file)
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
                status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
                                                  promisc_m, 0);
        }
+       if (status && status != -EEXIST)
+               return status;
 
-       return status;
+       return 0;
 }
 
 /**
@@ -2579,7 +2581,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
                if (ice_setup_tx_ring(xdp_ring))
                        goto free_xdp_rings;
                ice_set_ring_xdp(xdp_ring);
-               xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
                spin_lock_init(&xdp_ring->tx_lock);
                for (j = 0; j < xdp_ring->count; j++) {
                        tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2587,13 +2588,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
                }
        }
 
-       ice_for_each_rxq(vsi, i) {
-               if (static_key_enabled(&ice_xdp_locking_key))
-                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-               else
-                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
-       }
-
        return 0;
 
 free_xdp_rings:
@@ -2683,6 +2677,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
                xdp_rings_rem -= xdp_rings_per_v;
        }
 
+       ice_for_each_rxq(vsi, i) {
+               if (static_key_enabled(&ice_xdp_locking_key)) {
+                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+               } else {
+                       struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+                       struct ice_tx_ring *ring;
+
+                       ice_for_each_tx_ring(ring, q_vector->tx) {
+                               if (ice_ring_is_xdp(ring)) {
+                                       vsi->rx_rings[i]->xdp_ring = ring;
+                                       break;
+                               }
+                       }
+               }
+               ice_tx_xsk_pool(vsi, i);
+       }
+
        /* omit the scheduler update if in reset path; XDP queues will be
         * taken into account at the end of ice_vsi_rebuild, where
         * ice_cfg_vsi_lan is being called
@@ -3573,6 +3584,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
                usleep_range(1000, 2000);
 
+       ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                   ICE_MCAST_VLAN_PROMISC_BITS, vid);
+       if (ret) {
+               netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+                          vsi->vsi_num);
+               vsi->current_netdev_flags |= IFF_ALLMULTI;
+       }
+
        vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
        /* Make sure VLAN delete is successful before updating VLAN
index 262e553e3b585ecb29c3eef7a2075afca13cf14e..3808034f7e7e32351a8ea781dd7630ea3071e125 100644 (file)
@@ -4445,6 +4445,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                goto free_fltr_list;
 
        list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+               /* Avoid enabling or disabling VLAN zero twice when in double
+                * VLAN mode
+                */
+               if (ice_is_dvm_ena(hw) &&
+                   list_itr->fltr_info.l_data.vlan.tpid == 0)
+                       continue;
+
                vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
                if (rm_vlan_promisc)
                        status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4452,7 +4459,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                else
                        status = ice_set_vsi_promisc(hw, vsi_handle,
                                                     promisc_mask, vlan_id);
-               if (status)
+               if (status && status != -EEXIST)
                        break;
        }
 
index 8fd7c3e37f5e3c0c321efa99d3b5765fad27c87f..0abeed092de1d21d86432a02b8b9da2cf3a6ccda 100644 (file)
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 
        if (ice_is_vf_disabled(vf)) {
                vsi = ice_get_vf_vsi(vf);
-               if (WARN_ON(!vsi))
+               if (!vsi) {
+                       dev_dbg(dev, "VF is already removed\n");
                        return -EINVAL;
+               }
                ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
                ice_vsi_stop_all_rx_rings(vsi);
                dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -762,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
 {
        struct ice_vsi_vlan_ops *vlan_ops;
-       int err;
+       int err = 0;
 
        vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
-       err = vlan_ops->ena_tx_filtering(vsi);
-       if (err)
-               return err;
+       /* Allow VF with VLAN 0 only to send all tagged traffic */
+       if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+               err = vlan_ops->ena_tx_filtering(vsi);
+               if (err)
+                       return err;
+       }
 
        return ice_cfg_mac_antispoof(vsi, true);
 }
index 094e3c97a1ea0f02961b9ee9bfe91484fdfb75d2..2b4c791b6cbad3aa9d801357abcf7593cc3d34fd 100644 (file)
@@ -2288,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 
                        /* Enable VLAN filtering on first non-zero VLAN */
                        if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+                               if (vf->spoofchk) {
+                                       status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+                                       if (status) {
+                                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                                               dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+                                                       vid, status);
+                                               goto error_param;
+                                       }
+                               }
                                if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
                                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                                        dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2333,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
                        }
 
                        /* Disable VLAN filtering when only VLAN 0 is left */
-                       if (!ice_vsi_has_non_zero_vlans(vsi))
+                       if (!ice_vsi_has_non_zero_vlans(vsi)) {
+                               vsi->inner_vlan_ops.dis_tx_filtering(vsi);
                                vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+                       }
 
                        if (vlan_promisc)
                                ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2838,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
 
                        if (vlan_promisc)
                                ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+                       /* Disable VLAN filtering when only VLAN 0 is left */
+                       if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+                               err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+                               if (err)
+                                       return err;
+                       }
                }
 
                vc_vlan = &vlan_fltr->inner;
@@ -2853,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
                        /* no support for VLAN promiscuous on inner VLAN unless
                         * we are in Single VLAN Mode (SVM)
                         */
-                       if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
-                               ice_vf_dis_vlan_promisc(vsi, &vlan);
+                       if (!ice_is_dvm_ena(&vsi->back->hw)) {
+                               if (vlan_promisc)
+                                       ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+                               /* Disable VLAN filtering when only VLAN 0 is left */
+                               if (!ice_vsi_has_non_zero_vlans(vsi)) {
+                                       err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+                                       if (err)
+                                               return err;
+                               }
+                       }
                }
        }
 
@@ -2931,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
                                if (err)
                                        return err;
                        }
+
+                       /* Enable VLAN filtering on first non-zero VLAN */
+                       if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+                               err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+                               if (err)
+                                       return err;
+                       }
                }
 
                vc_vlan = &vlan_fltr->inner;
@@ -2946,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
                        /* no support for VLAN promiscuous on inner VLAN unless
                         * we are in Single VLAN Mode (SVM)
                         */
-                       if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
-                               err = ice_vf_ena_vlan_promisc(vsi, &vlan);
-                               if (err)
-                                       return err;
+                       if (!ice_is_dvm_ena(&vsi->back->hw)) {
+                               if (vlan_promisc) {
+                                       err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+                                       if (err)
+                                               return err;
+                               }
+
+                               /* Enable VLAN filtering on first non-zero VLAN */
+                               if (vf->spoofchk && vlan.vid) {
+                                       err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+                                       if (err)
+                                               return err;
+                               }
                        }
                }
        }
index 49ba8bfdbf047a83bad4653cffbdeb389752ba11..e48e29258450f84314cd699edf7f483fe1d65f99 100644 (file)
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
                if (err)
                        goto free_buf;
                ice_set_ring_xdp(xdp_ring);
-               xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+               ice_tx_xsk_pool(vsi, q_idx);
        }
 
        err = ice_vsi_cfg_rxq(rx_ring);
@@ -329,6 +329,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
        bool if_running, pool_present = !!pool;
        int ret = 0, pool_failure = 0;
 
+       if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+               netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+               pool_failure = -EINVAL;
+               goto failure;
+       }
+
        if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
            !is_power_of_2(vsi->tx_rings[qid]->count)) {
                netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
@@ -353,7 +359,7 @@ xsk_pool_if_up:
        if (if_running) {
                ret = ice_qp_ena(vsi, qid);
                if (!ret && pool_present)
-                       napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+                       napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
                else if (ret)
                        netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }
@@ -944,13 +950,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
        if (!ice_is_xdp_ena_vsi(vsi))
                return -EINVAL;
 
-       if (queue_id >= vsi->num_txq)
+       if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
                return -EINVAL;
 
-       if (!vsi->xdp_rings[queue_id]->xsk_pool)
-               return -EINVAL;
+       ring = vsi->rx_rings[queue_id]->xdp_ring;
 
-       ring = vsi->xdp_rings[queue_id];
+       if (!ring->xsk_pool)
+               return -EINVAL;
 
        /* The idea here is that if NAPI is running, mark a miss, so
         * it will run again. If not, trigger an interrupt and
index 2d3daf022651ce839b4029d805b821051b16342f..015b781441149b0d03fed5a340b2323083b4eba8 100644 (file)
@@ -664,6 +664,8 @@ struct igb_adapter {
        struct igb_mac_addr *mac_table;
        struct vf_mac_filter vf_macs;
        struct vf_mac_filter *vf_mac_list;
+       /* lock for VF resources */
+       spinlock_t vfs_lock;
 };
 
 /* flags controlling PTP/1588 function */
index d8b836a85cc305f34571b631d6314b1a0740a7db..2796e81d27260ee9ac3b8bb2a5bb4564488b32d2 100644 (file)
@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       unsigned long flags;
 
        /* reclaim resources allocated to VFs */
        if (adapter->vf_data) {
@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
                        pci_disable_sriov(pdev);
                        msleep(500);
                }
-
+               spin_lock_irqsave(&adapter->vfs_lock, flags);
                kfree(adapter->vf_mac_list);
                adapter->vf_mac_list = NULL;
                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                adapter->vfs_allocated_count = 0;
+               spin_unlock_irqrestore(&adapter->vfs_lock, flags);
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                wrfl();
                msleep(100);
@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
        igb_release_hw_control(adapter);
 
 #ifdef CONFIG_PCI_IOV
+       rtnl_lock();
        igb_disable_sriov(pdev);
+       rtnl_unlock();
 #endif
 
        unregister_netdev(netdev);
@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
        spin_lock_init(&adapter->nfc_lock);
        spin_lock_init(&adapter->stats64_lock);
+
+       /* init spinlock to avoid concurrency of VF resources */
+       spin_lock_init(&adapter->vfs_lock);
 #ifdef CONFIG_PCI_IOV
        switch (hw->mac.type) {
        case e1000_82576:
@@ -7958,8 +7965,10 @@ unlock:
 static void igb_msg_task(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
+       unsigned long flags;
        u32 vf;
 
+       spin_lock_irqsave(&adapter->vfs_lock, flags);
        for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
                /* process any reset requests */
                if (!igb_check_for_rst(hw, vf))
@@ -7973,6 +7982,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
                if (!igb_check_for_ack(hw, vf))
                        igb_rcv_ack_from_vf(adapter, vf);
        }
+       spin_unlock_irqrestore(&adapter->vfs_lock, flags);
 }
 
 /**
index 9f06896a049b40470dd91e9bd3ab28710d6b1837..f8605f57bd067d64f44ee028c80f1606107f019f 100644 (file)
@@ -1214,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
        struct cyclecounter cc;
        unsigned long flags;
        u32 incval = 0;
-       u32 tsauxc = 0;
        u32 fuse0 = 0;
 
        /* For some of the boards below this mask is technically incorrect.
@@ -1249,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
        case ixgbe_mac_x550em_a:
        case ixgbe_mac_X550:
                cc.read = ixgbe_ptp_read_X550;
-
-               /* enable SYSTIME counter */
-               IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
-               IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
-               IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
-               tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
-               IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
-                               tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
-               IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
-               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
-               IXGBE_WRITE_FLUSH(hw);
                break;
        case ixgbe_mac_X540:
                cc.read = ixgbe_ptp_read_82599;
@@ -1292,6 +1279,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
        spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 }
 
+/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 tsauxc;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+       case ixgbe_mac_X550:
+               tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+               /* Reset SYSTIME registers to 0 */
+               IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+               /* Reset interrupt settings */
+               IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+               /* Activate the SYSTIME counter */
+               IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+                               tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+               break;
+       case ixgbe_mac_X540:
+       case ixgbe_mac_82599EB:
+               /* Reset SYSTIME registers to 0 */
+               IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+               break;
+       default:
+               /* Other devices aren't supported */
+               return;
+       };
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
 /**
  * ixgbe_ptp_reset
  * @adapter: the ixgbe private board structure
@@ -1318,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
 
        ixgbe_ptp_start_cyclecounter(adapter);
 
+       ixgbe_ptp_init_systime(adapter);
+
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
        timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
                         ktime_to_ns(ktime_get_real()));
index 5edb68a8aab1ececb61a652ca6163a4302beabd7..57f27cc7724e7b63873b3205e42a08bd3a30a697 100644 (file)
@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
 
        ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
        if (!ch->rx_buff[ch->dma.desc]) {
+               ch->rx_buff[ch->dma.desc] = buf;
                ret = -ENOMEM;
                goto skip;
        }
@@ -239,6 +240,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
        }
 
        skb = build_skb(buf, priv->rx_skb_size);
+       if (!skb) {
+               skb_free_frag(buf);
+               net_dev->stats.rx_dropped++;
+               return -ENOMEM;
+       }
+
        skb_reserve(skb, NET_SKB_PAD);
        skb_put(skb, len);
 
@@ -288,7 +295,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
                        if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
                                continue;
                        if (ret != XRX200_DMA_PACKET_COMPLETE)
-                               return ret;
+                               break;
                        rx++;
                } else {
                        break;
index d9426b01f462844dfac8d0e9b05d663ff25cf4c1..5ace4609de47d22eb88c5eca283068ec3c7e669b 100644 (file)
@@ -1732,7 +1732,7 @@ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
        case XDP_TX: {
                struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 
-               if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+               if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
                        count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
                        act = XDP_DROP;
                        break;
@@ -1891,10 +1891,19 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                skb->dev = netdev;
                bytes += skb->len;
 
-               if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+                       hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+                       if (hash != MTK_RXD5_FOE_ENTRY)
+                               skb_set_hash(skb, jhash_1word(hash, 0),
+                                            PKT_HASH_TYPE_L4);
                        rxdcsum = &trxd.rxd3;
-               else
+               } else {
+                       hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+                       if (hash != MTK_RXD4_FOE_ENTRY)
+                               skb_set_hash(skb, jhash_1word(hash, 0),
+                                            PKT_HASH_TYPE_L4);
                        rxdcsum = &trxd.rxd4;
+               }
 
                if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1902,16 +1911,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                        skb_checksum_none_assert(skb);
                skb->protocol = eth_type_trans(skb, netdev);
 
-               hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
-               if (hash != MTK_RXD4_FOE_ENTRY) {
-                       hash = jhash_1word(hash, 0);
-                       skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
-               }
-
                reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
                if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-                       mtk_ppe_check_skb(eth->ppe, skb,
-                                         trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+                       mtk_ppe_check_skb(eth->ppe, skb, hash);
 
                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
index 7405c97cda660eae1d777efdcd5a22414def0f1e..ecf85e9ed824023be00881fc27bbd25fc1761353 100644 (file)
 #define RX_DMA_L4_VALID_PDMA   BIT(30)         /* when PDMA is used */
 #define RX_DMA_SPECIAL_TAG     BIT(22)
 
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY     GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON        GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT      GENMASK(29, 26)
+
 #define RX_DMA_GET_SPORT(x)    (((x) >> 19) & 0xf)
 #define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
 
index 37522352e4b23011d17fd8225b7438452e77e868..c8e5ca65bb6ec395116352f0a5816b4e03026f1f 100644 (file)
@@ -79,6 +79,10 @@ tc_act_police_offload(struct mlx5e_priv *priv,
        struct mlx5e_flow_meter_handle *meter;
        int err = 0;
 
+       err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+       if (err)
+               return err;
+
        err = fill_meter_params_from_act(act, &params);
        if (err)
                return err;
index 0aef69527226443f6b15d3127c4155d246ff093e..3a1f76eac542d0f55fd5bd757b757ff60874110a 100644 (file)
@@ -246,7 +246,7 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
 static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
                                           struct list_head *list, int size)
 {
-       struct mlx5e_ktls_offload_context_tx *obj;
+       struct mlx5e_ktls_offload_context_tx *obj, *n;
        struct mlx5e_async_ctx *bulk_async;
        int i;
 
@@ -255,7 +255,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
                return;
 
        i = 0;
-       list_for_each_entry(obj, list, list_node) {
+       list_for_each_entry_safe(obj, n, list, list_node) {
                mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
                i++;
        }
index e2a9b9be5c1fba84499005133feb14e7dc133258..e0ce5a233d0b4a46358e07527df4639e896c0bdb 100644 (file)
@@ -1395,10 +1395,11 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
        }
 
        return fs;
-err_free_fs:
-       kvfree(fs);
+
 err_free_vlan:
        mlx5e_fs_vlan_free(fs);
+err_free_fs:
+       kvfree(fs);
 err:
        return NULL;
 }
index d858667736a32c8cf0cbabea5f231f7d5f370a36..02eb2f0fa2ae785c1022432bc64376a67fee249d 100644 (file)
@@ -3682,7 +3682,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
        int err = 0;
 
 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-       if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+       int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
+                                                 MLX5_TC_FLAG(NIC_OFFLOAD);
+       if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
                netdev_err(netdev,
                           "Active offloaded tc filters, can't turn hw_tc_offload off\n");
                return -EINVAL;
@@ -4769,14 +4771,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
        /* RQ */
        mlx5e_build_rq_params(mdev, params);
 
-       /* HW LRO */
-       if (MLX5_CAP_ETH(mdev, lro_cap) &&
-           params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
-               /* No XSK params: checking the availability of striding RQ in general. */
-               if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
-                       params->packet_merge.type = slow_pci_heuristic(mdev) ?
-                               MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
-       }
        params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
        /* CQ moderation params */
index 4c1599de652c1422a50e522c6991f7aedcf82645..759f7d3c2cfd82553d5a8bc16a4d36a47d5eea58 100644 (file)
@@ -662,6 +662,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
 
        params->mqprio.num_tc       = 1;
        params->tunneled_offload_en = false;
+       if (rep->vport != MLX5_VPORT_UPLINK)
+               params->vlan_strip_disable = true;
 
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
@@ -696,6 +698,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
+       priv->fs = mlx5e_fs_init(priv->profile, mdev,
+                                !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+       if (!priv->fs) {
+               netdev_err(priv->netdev, "FS allocation failed\n");
+               return -ENOMEM;
+       }
+
        mlx5e_build_rep_params(netdev);
        mlx5e_timestamp_init(priv);
 
@@ -708,12 +717,21 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
+       priv->fs = mlx5e_fs_init(priv->profile, mdev,
+                                !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+       if (!priv->fs) {
+               netdev_err(priv->netdev, "FS allocation failed\n");
+               return -ENOMEM;
+       }
+
        err = mlx5e_ipsec_init(priv);
        if (err)
                mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
 
        mlx5e_vxlan_set_netdev_info(priv);
-       return mlx5e_init_rep(mdev, netdev);
+       mlx5e_build_rep_params(netdev);
+       mlx5e_timestamp_init(priv);
+       return 0;
 }
 
 static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
@@ -836,13 +854,6 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
-       priv->fs = mlx5e_fs_init(priv->profile, mdev,
-                                !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
-       if (!priv->fs) {
-               netdev_err(priv->netdev, "FS allocation failed\n");
-               return -ENOMEM;
-       }
-
        priv->rx_res = mlx5e_rx_res_alloc();
        if (!priv->rx_res) {
                err = -ENOMEM;
index ed73132129aae8f0c81fedb2d42ff6637a3faca0..a9f4c652f859c770a65aee830139de7814d29631 100644 (file)
@@ -427,7 +427,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
                dest[dest_idx].vport.vhca_id =
                        MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
                dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
-               if (mlx5_lag_mpesw_is_activated(esw->dev))
+               if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
+                   mlx5_lag_mpesw_is_activated(esw->dev))
                        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
        }
        if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
@@ -3115,8 +3116,10 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
 
                err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
                                                  MLX5_VPORT_UC_ADDR_CHANGE);
-               if (err)
+               if (err) {
+                       devl_unlock(devlink);
                        return;
+               }
        }
        esw->esw_funcs.num_vfs = new_num_vfs;
        devl_unlock(devlink);
index 0f34e3c80d1f412a15d30027d286d11363dae889..065102278cb8080a8059a930d34dd77aa0582e4f 100644 (file)
@@ -1067,30 +1067,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
                                 struct net_device *netdev)
 {
        unsigned int fn = mlx5_get_dev_index(dev);
+       unsigned long flags;
 
        if (fn >= ldev->ports)
                return;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev->pf[fn].netdev = netdev;
        ldev->tracker.netdev_state[fn].link_up = 0;
        ldev->tracker.netdev_state[fn].tx_enabled = 0;
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 }
 
 static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
                                    struct net_device *netdev)
 {
+       unsigned long flags;
        int i;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        for (i = 0; i < ldev->ports; i++) {
                if (ldev->pf[i].netdev == netdev) {
                        ldev->pf[i].netdev = NULL;
                        break;
                }
        }
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 }
 
 static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -1234,7 +1236,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
        mlx5_ldev_add_netdev(ldev, dev, netdev);
 
        for (i = 0; i < ldev->ports; i++)
-               if (!ldev->pf[i].dev)
+               if (!ldev->pf[i].netdev)
                        break;
 
        if (i >= ldev->ports)
@@ -1246,12 +1248,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
+       unsigned long flags;
        bool res;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        res  = ldev && __mlx5_lag_is_roce(ldev);
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 
        return res;
 }
@@ -1260,12 +1263,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
+       unsigned long flags;
        bool res;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        res  = ldev && __mlx5_lag_is_active(ldev);
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 
        return res;
 }
@@ -1274,13 +1278,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
 bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
+       unsigned long flags;
        bool res;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        res = ldev && __mlx5_lag_is_active(ldev) &&
                dev == ldev->pf[MLX5_LAG_P1].dev;
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 
        return res;
 }
@@ -1289,12 +1294,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
+       unsigned long flags;
        bool res;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        res  = ldev && __mlx5_lag_is_sriov(ldev);
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 
        return res;
 }
@@ -1303,13 +1309,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
+       unsigned long flags;
        bool res;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        res = ldev && __mlx5_lag_is_sriov(ldev) &&
              test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 
        return res;
 }
@@ -1352,9 +1359,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 {
        struct net_device *ndev = NULL;
        struct mlx5_lag *ldev;
+       unsigned long flags;
        int i;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
 
        if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -1373,7 +1381,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
                dev_hold(ndev);
 
 unlock:
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 
        return ndev;
 }
@@ -1383,10 +1391,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
                           struct net_device *slave)
 {
        struct mlx5_lag *ldev;
+       unsigned long flags;
        u8 port = 0;
        int i;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        if (!(ldev && __mlx5_lag_is_roce(ldev)))
                goto unlock;
@@ -1401,7 +1410,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
        port = ldev->v2p_map[port * ldev->buckets];
 
 unlock:
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
        return port;
 }
 EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1422,8 +1431,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_dev *peer_dev = NULL;
        struct mlx5_lag *ldev;
+       unsigned long flags;
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        if (!ldev)
                goto unlock;
@@ -1433,7 +1443,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
                           ldev->pf[MLX5_LAG_P1].dev;
 
 unlock:
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
        return peer_dev;
 }
 EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1446,6 +1456,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
        int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
        struct mlx5_core_dev **mdev;
        struct mlx5_lag *ldev;
+       unsigned long flags;
        int num_ports;
        int ret, i, j;
        void *out;
@@ -1462,7 +1473,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 
        memset(values, 0, sizeof(*values) * num_counters);
 
-       spin_lock(&lag_lock);
+       spin_lock_irqsave(&lag_lock, flags);
        ldev = mlx5_lag_dev(dev);
        if (ldev && __mlx5_lag_is_active(ldev)) {
                num_ports = ldev->ports;
@@ -1472,7 +1483,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
                num_ports = 1;
                mdev[MLX5_LAG_P1] = dev;
        }
-       spin_unlock(&lag_lock);
+       spin_unlock_irqrestore(&lag_lock, flags);
 
        for (i = 0; i < num_ports; ++i) {
                u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
index bec8d6d0b5f67690513237f03bfc525045dc2319..c085b031abfc157cba1db0974a9cd92564d3f121 100644 (file)
@@ -1530,7 +1530,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
        memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
+       lockdep_register_key(&dev->lock_key);
        mutex_init(&dev->intf_state_mutex);
+       lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
 
        mutex_init(&priv->bfregs.reg_head.lock);
        mutex_init(&priv->bfregs.wc_head.lock);
@@ -1597,6 +1599,7 @@ err_timeout_init:
        mutex_destroy(&priv->bfregs.wc_head.lock);
        mutex_destroy(&priv->bfregs.reg_head.lock);
        mutex_destroy(&dev->intf_state_mutex);
+       lockdep_unregister_key(&dev->lock_key);
        return err;
 }
 
@@ -1618,6 +1621,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
        mutex_destroy(&priv->bfregs.wc_head.lock);
        mutex_destroy(&priv->bfregs.reg_head.lock);
        mutex_destroy(&dev->intf_state_mutex);
+       lockdep_unregister_key(&dev->lock_key);
 }
 
 static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
index ec76a8b1acc1cccaace705fd0ba424123ce467c7..60596357bfc7ae94f7201037a8014f3b81f941c7 100644 (file)
@@ -376,8 +376,8 @@ retry:
                        goto out_dropped;
                }
        }
+       err = mlx5_cmd_check(dev, err, in, out);
        if (err) {
-               err = mlx5_cmd_check(dev, err, in, out);
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_dropped;
@@ -524,10 +524,13 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                dev->priv.reclaim_pages_discard += npages;
        }
        /* if triggered by FW event and failed by FW then ignore */
-       if (event && err == -EREMOTEIO)
+       if (event && err == -EREMOTEIO) {
                err = 0;
+               goto out_free;
+       }
+
+       err = mlx5_cmd_check(dev, err, in, out);
        if (err) {
-               err = mlx5_cmd_check(dev, err, in, out);
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }
index ee2e1b7c1310d51ce3ba91b7cf6991c634747559..c0e6c487c63c1ef105c92c4dcc6c552f5a95202f 100644 (file)
@@ -159,11 +159,11 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
 
        devl_lock(devlink);
        err = mlx5_device_enable_sriov(dev, num_vfs);
+       devl_unlock(devlink);
        if (err) {
                mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
                return err;
        }
-       devl_unlock(devlink);
 
        err = pci_enable_sriov(pdev, num_vfs);
        if (err) {
index 1e240cdd9cbde10efc4a7deb6e0c75e7724f450d..30c7b0e1572181b76e87e3597a28e7652ed9d4aa 100644 (file)
@@ -1897,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
 
        cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
        cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
-       mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+       mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
        mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
        mlxsw_sp->ports[local_port] = NULL;
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
index 2e0b704b8a3194dfc233117f1341d275109936b0..7b01b9c20722a8271ba9bf96f370aab49d661afc 100644 (file)
@@ -46,6 +46,7 @@ struct mlxsw_sp2_ptp_state {
                                          * enabled.
                                          */
        struct hwtstamp_config config;
+       struct mutex lock; /* Protects 'config' and HW configuration. */
 };
 
 struct mlxsw_sp1_ptp_key {
@@ -1374,6 +1375,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
                goto err_ptp_traps_set;
 
        refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+       mutex_init(&ptp_state->lock);
        return &ptp_state->common;
 
 err_ptp_traps_set:
@@ -1388,6 +1390,7 @@ void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
 
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
 
+       mutex_destroy(&ptp_state->lock);
        mlxsw_sp_ptp_traps_unset(mlxsw_sp);
        kfree(ptp_state);
 }
@@ -1461,7 +1464,10 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
 
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
 
+       mutex_lock(&ptp_state->lock);
        *config = ptp_state->config;
+       mutex_unlock(&ptp_state->lock);
+
        return 0;
 }
 
@@ -1523,6 +1529,9 @@ mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
                return -EINVAL;
        }
 
+       if ((ing_types && !egr_types) || (!ing_types && egr_types))
+               return -EINVAL;
+
        *p_ing_types = ing_types;
        *p_egr_types = egr_types;
        return 0;
@@ -1574,8 +1583,6 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
        struct mlxsw_sp2_ptp_state *ptp_state;
        int err;
 
-       ASSERT_RTNL();
-
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
 
        if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
@@ -1597,8 +1604,6 @@ static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
        struct mlxsw_sp2_ptp_state *ptp_state;
        int err;
 
-       ASSERT_RTNL();
-
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
 
        if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
@@ -1618,16 +1623,20 @@ err_ptp_disable:
 int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                               struct hwtstamp_config *config)
 {
+       struct mlxsw_sp2_ptp_state *ptp_state;
        enum hwtstamp_rx_filters rx_filter;
        struct hwtstamp_config new_config;
        u16 new_ing_types, new_egr_types;
        bool ptp_enabled;
        int err;
 
+       ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+       mutex_lock(&ptp_state->lock);
+
        err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
                                              &new_egr_types, &rx_filter);
        if (err)
-               return err;
+               goto err_get_message_types;
 
        new_config.flags = config->flags;
        new_config.tx_type = config->tx_type;
@@ -1640,11 +1649,11 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
                                                   new_egr_types, new_config);
                if (err)
-                       return err;
+                       goto err_configure_port;
        } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
                err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
                if (err)
-                       return err;
+                       goto err_deconfigure_port;
        }
 
        mlxsw_sp_port->ptp.ing_types = new_ing_types;
@@ -1652,8 +1661,15 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
        /* Notify the ioctl caller what we are actually timestamping. */
        config->rx_filter = rx_filter;
+       mutex_unlock(&ptp_state->lock);
 
        return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+       mutex_unlock(&ptp_state->lock);
+       return err;
 }
 
 int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
index 2d1628fdefc12c91738cbd8a49841feb5e4afe76..a8b88230959a23f524f6c437b51bec5374796a94 100644 (file)
@@ -171,10 +171,11 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 {
 }
 
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
-                                struct mlxsw_sp_port *mlxsw_sp_port,
-                                struct sk_buff *skb,
-                                const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                            struct mlxsw_sp_port *mlxsw_sp_port,
+                            struct sk_buff *skb,
+                            const struct mlxsw_tx_info *tx_info)
 {
        return -EOPNOTSUPP;
 }
@@ -231,10 +232,11 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
        return mlxsw_sp_ptp_get_ts_info_noptp(info);
 }
 
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
-                                 struct mlxsw_sp_port *mlxsw_sp_port,
-                                 struct sk_buff *skb,
-                                 const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                             struct mlxsw_sp_port *mlxsw_sp_port,
+                             struct sk_buff *skb,
+                             const struct mlxsw_tx_info *tx_info)
 {
        return -EOPNOTSUPP;
 }
index 1d6e3b641b2e666acbb787da91cc27530eeadf2b..d928b75f37803992b87298753d894e546dd4d65f 100644 (file)
@@ -710,7 +710,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
        disable_irq(lan966x->xtr_irq);
        lan966x->xtr_irq = -ENXIO;
 
-       if (lan966x->ana_irq) {
+       if (lan966x->ana_irq > 0) {
                disable_irq(lan966x->ana_irq);
                lan966x->ana_irq = -ENXIO;
        }
@@ -718,10 +718,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
        if (lan966x->fdma)
                devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
 
-       if (lan966x->ptp_irq)
+       if (lan966x->ptp_irq > 0)
                devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
 
-       if (lan966x->ptp_ext_irq)
+       if (lan966x->ptp_ext_irq > 0)
                devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
 }
 
@@ -1049,7 +1049,7 @@ static int lan966x_probe(struct platform_device *pdev)
        }
 
        lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
-       if (lan966x->ana_irq) {
+       if (lan966x->ana_irq > 0) {
                err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
                                                lan966x_ana_irq_handler, IRQF_ONESHOT,
                                                "ana irq", lan966x);
index a3214a762e4b3e7ae4d5f5bdce16994a318d971f..9e57d23e57bf43acf00f4ca64b642ed43d550cc8 100644 (file)
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
 {
        struct sockaddr *address = addr;
 
-       if (!is_valid_ether_addr(address->sa_data))
-               return -EADDRNOTAVAIL;
-
        eth_hw_addr_set(ndev, address->sa_data);
        moxart_update_mac_address(ndev);
 
@@ -74,11 +71,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
 static void moxart_mac_free_memory(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-       int i;
-
-       for (i = 0; i < RX_DESC_NUM; i++)
-               dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
-                                priv->rx_buf_size, DMA_FROM_DEVICE);
 
        if (priv->tx_desc_base)
                dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +139,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
                       desc + RX_REG_OFFSET_DESC1);
 
                priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
-               priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+               priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
                                                     priv->rx_buf[i],
                                                     priv->rx_buf_size,
                                                     DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+               if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
                        netdev_err(ndev, "DMA mapping error\n");
 
                moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +164,6 @@ static int moxart_mac_open(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
 
-       if (!is_valid_ether_addr(ndev->dev_addr))
-               return -EADDRNOTAVAIL;
-
        napi_enable(&priv->napi);
 
        moxart_mac_reset(ndev);
@@ -193,6 +182,7 @@ static int moxart_mac_open(struct net_device *ndev)
 static int moxart_mac_stop(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       int i;
 
        napi_disable(&priv->napi);
 
@@ -204,6 +194,11 @@ static int moxart_mac_stop(struct net_device *ndev)
        /* disable all functions */
        writel(0, priv->base + REG_MAC_CTRL);
 
+       /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+       for (i = 0; i < RX_DESC_NUM; i++)
+               dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+                                priv->rx_buf_size, DMA_FROM_DEVICE);
+
        return 0;
 }
 
@@ -240,7 +235,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                if (len > RX_BUF_SIZE)
                        len = RX_BUF_SIZE;
 
-               dma_sync_single_for_cpu(&ndev->dev,
+               dma_sync_single_for_cpu(&priv->pdev->dev,
                                        priv->rx_mapping[rx_head],
                                        priv->rx_buf_size, DMA_FROM_DEVICE);
                skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +289,7 @@ static void moxart_tx_finished(struct net_device *ndev)
        unsigned int tx_tail = priv->tx_tail;
 
        while (tx_tail != tx_head) {
-               dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+               dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
                                 priv->tx_len[tx_tail], DMA_TO_DEVICE);
 
                ndev->stats.tx_packets++;
@@ -358,9 +353,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
 
        len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 
-       priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+       priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
                                                   len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+       if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
                netdev_err(ndev, "DMA mapping error\n");
                goto out_unlock;
        }
@@ -379,7 +374,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
                len = ETH_ZLEN;
        }
 
-       dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+       dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
                                   priv->tx_buf_size, DMA_TO_DEVICE);
 
        txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +483,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
        }
        ndev->base_addr = res->start;
 
+       ret = platform_get_ethdev_address(p_dev, ndev);
+       if (ret == -EPROBE_DEFER)
+               goto init_fail;
+       if (ret)
+               eth_hw_addr_random(ndev);
+       moxart_update_mac_address(ndev);
+
        spin_lock_init(&priv->txlock);
 
        priv->tx_buf_size = TX_BUF_SIZE;
        priv->rx_buf_size = RX_BUF_SIZE;
 
-       priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+       priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
                                                GFP_DMA | GFP_KERNEL);
        if (!priv->tx_desc_base) {
@@ -501,7 +503,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
                goto init_fail;
        }
 
-       priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+       priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
                                                RX_DESC_NUM, &priv->rx_base,
                                                GFP_DMA | GFP_KERNEL);
        if (!priv->rx_desc_base) {
index d4649e4ee0e7ff4a58d435bd18fed0887f43fdc7..306026e6aa111b392e7be765b262d83a88078bbe 100644 (file)
@@ -1860,16 +1860,20 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
        if (sset != ETH_SS_STATS)
                return;
 
-       for (i = 0; i < ocelot->num_stats; i++)
+       for (i = 0; i < OCELOT_NUM_STATS; i++) {
+               if (ocelot->stats_layout[i].name[0] == '\0')
+                       continue;
+
                memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
                       ETH_GSTRING_LEN);
+       }
 }
 EXPORT_SYMBOL(ocelot_get_strings);
 
 /* Caller must hold &ocelot->stats_lock */
 static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
 {
-       unsigned int idx = port * ocelot->num_stats;
+       unsigned int idx = port * OCELOT_NUM_STATS;
        struct ocelot_stats_region *region;
        int err, j;
 
@@ -1877,9 +1881,8 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
        ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
 
        list_for_each_entry(region, &ocelot->stats_regions, node) {
-               err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
-                                          region->offset, region->buf,
-                                          region->count);
+               err = ocelot_bulk_read(ocelot, region->base, region->buf,
+                                      region->count);
                if (err)
                        return err;
 
@@ -1906,13 +1909,13 @@ static void ocelot_check_stats_work(struct work_struct *work)
                                             stats_work);
        int i, err;
 
-       mutex_lock(&ocelot->stats_lock);
+       spin_lock(&ocelot->stats_lock);
        for (i = 0; i < ocelot->num_phys_ports; i++) {
                err = ocelot_port_update_stats(ocelot, i);
                if (err)
                        break;
        }
-       mutex_unlock(&ocelot->stats_lock);
+       spin_unlock(&ocelot->stats_lock);
 
        if (err)
                dev_err(ocelot->dev, "Error %d updating ethtool stats\n",  err);
@@ -1925,16 +1928,22 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
 {
        int i, err;
 
-       mutex_lock(&ocelot->stats_lock);
+       spin_lock(&ocelot->stats_lock);
 
        /* check and update now */
        err = ocelot_port_update_stats(ocelot, port);
 
-       /* Copy all counters */
-       for (i = 0; i < ocelot->num_stats; i++)
-               *data++ = ocelot->stats[port * ocelot->num_stats + i];
+       /* Copy all supported counters */
+       for (i = 0; i < OCELOT_NUM_STATS; i++) {
+               int index = port * OCELOT_NUM_STATS + i;
+
+               if (ocelot->stats_layout[i].name[0] == '\0')
+                       continue;
+
+               *data++ = ocelot->stats[index];
+       }
 
-       mutex_unlock(&ocelot->stats_lock);
+       spin_unlock(&ocelot->stats_lock);
 
        if (err)
                dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1943,10 +1952,16 @@ EXPORT_SYMBOL(ocelot_get_ethtool_stats);
 
 int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
 {
+       int i, num_stats = 0;
+
        if (sset != ETH_SS_STATS)
                return -EOPNOTSUPP;
 
-       return ocelot->num_stats;
+       for (i = 0; i < OCELOT_NUM_STATS; i++)
+               if (ocelot->stats_layout[i].name[0] != '\0')
+                       num_stats++;
+
+       return num_stats;
 }
 EXPORT_SYMBOL(ocelot_get_sset_count);
 
@@ -1958,8 +1973,11 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
 
        INIT_LIST_HEAD(&ocelot->stats_regions);
 
-       for (i = 0; i < ocelot->num_stats; i++) {
-               if (region && ocelot->stats_layout[i].offset == last + 1) {
+       for (i = 0; i < OCELOT_NUM_STATS; i++) {
+               if (ocelot->stats_layout[i].name[0] == '\0')
+                       continue;
+
+               if (region && ocelot->stats_layout[i].reg == last + 4) {
                        region->count++;
                } else {
                        region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -1967,12 +1985,12 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
                        if (!region)
                                return -ENOMEM;
 
-                       region->offset = ocelot->stats_layout[i].offset;
+                       region->base = ocelot->stats_layout[i].reg;
                        region->count = 1;
                        list_add_tail(&region->node, &ocelot->stats_regions);
                }
 
-               last = ocelot->stats_layout[i].offset;
+               last = ocelot->stats_layout[i].reg;
        }
 
        list_for_each_entry(region, &ocelot->stats_regions, node) {
@@ -3340,7 +3358,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
 
 int ocelot_init(struct ocelot *ocelot)
 {
-       const struct ocelot_stat_layout *stat;
        char queue_name[32];
        int i, ret;
        u32 port;
@@ -3353,17 +3370,13 @@ int ocelot_init(struct ocelot *ocelot)
                }
        }
 
-       ocelot->num_stats = 0;
-       for_each_stat(ocelot, stat)
-               ocelot->num_stats++;
-
        ocelot->stats = devm_kcalloc(ocelot->dev,
-                                    ocelot->num_phys_ports * ocelot->num_stats,
+                                    ocelot->num_phys_ports * OCELOT_NUM_STATS,
                                     sizeof(u64), GFP_KERNEL);
        if (!ocelot->stats)
                return -ENOMEM;
 
-       mutex_init(&ocelot->stats_lock);
+       spin_lock_init(&ocelot->stats_lock);
        mutex_init(&ocelot->ptp_lock);
        mutex_init(&ocelot->mact_lock);
        mutex_init(&ocelot->fwd_domain_lock);
@@ -3511,7 +3524,6 @@ void ocelot_deinit(struct ocelot *ocelot)
        cancel_delayed_work(&ocelot->stats_work);
        destroy_workqueue(ocelot->stats_queue);
        destroy_workqueue(ocelot->owq);
-       mutex_destroy(&ocelot->stats_lock);
 }
 EXPORT_SYMBOL(ocelot_deinit);
 
index 5e6136e80282b092bbf0bdb9220abce29913ad8f..330d30841cdc47c351531e26c34cb339c1c41c93 100644 (file)
@@ -725,37 +725,42 @@ static void ocelot_get_stats64(struct net_device *dev,
        struct ocelot_port_private *priv = netdev_priv(dev);
        struct ocelot *ocelot = priv->port.ocelot;
        int port = priv->port.index;
+       u64 *s;
 
-       /* Configure the port to read the stats from */
-       ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
-                    SYS_STAT_CFG);
+       spin_lock(&ocelot->stats_lock);
+
+       s = &ocelot->stats[port * OCELOT_NUM_STATS];
 
        /* Get Rx stats */
-       stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
-       stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_64) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
-       stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+       stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+       stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+                           s[OCELOT_STAT_RX_FRAGMENTS] +
+                           s[OCELOT_STAT_RX_JABBERS] +
+                           s[OCELOT_STAT_RX_LONGS] +
+                           s[OCELOT_STAT_RX_64] +
+                           s[OCELOT_STAT_RX_65_127] +
+                           s[OCELOT_STAT_RX_128_255] +
+                           s[OCELOT_STAT_RX_256_511] +
+                           s[OCELOT_STAT_RX_512_1023] +
+                           s[OCELOT_STAT_RX_1024_1526] +
+                           s[OCELOT_STAT_RX_1527_MAX];
+       stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
        stats->rx_dropped = dev->stats.rx_dropped;
 
        /* Get Tx stats */
-       stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
-       stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
-       stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_AGING);
-       stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+       stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+       stats->tx_packets = s[OCELOT_STAT_TX_64] +
+                           s[OCELOT_STAT_TX_65_127] +
+                           s[OCELOT_STAT_TX_128_255] +
+                           s[OCELOT_STAT_TX_256_511] +
+                           s[OCELOT_STAT_TX_512_1023] +
+                           s[OCELOT_STAT_TX_1024_1526] +
+                           s[OCELOT_STAT_TX_1527_MAX];
+       stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+                           s[OCELOT_STAT_TX_AGED];
+       stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+       spin_unlock(&ocelot->stats_lock);
 }
 
 static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
index 961f803aca19222410e28b2806dfa068cbaad189..9c488953f541daa12e5c9d26676efae241ed1002 100644 (file)
@@ -96,101 +96,379 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
        [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
 };
 
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
-       { .name = "rx_octets", .offset = 0x00, },
-       { .name = "rx_unicast", .offset = 0x01, },
-       { .name = "rx_multicast", .offset = 0x02, },
-       { .name = "rx_broadcast", .offset = 0x03, },
-       { .name = "rx_shorts", .offset = 0x04, },
-       { .name = "rx_fragments", .offset = 0x05, },
-       { .name = "rx_jabbers", .offset = 0x06, },
-       { .name = "rx_crc_align_errs", .offset = 0x07, },
-       { .name = "rx_sym_errs", .offset = 0x08, },
-       { .name = "rx_frames_below_65_octets", .offset = 0x09, },
-       { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
-       { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
-       { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
-       { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
-       { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
-       { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
-       { .name = "rx_pause", .offset = 0x10, },
-       { .name = "rx_control", .offset = 0x11, },
-       { .name = "rx_longs", .offset = 0x12, },
-       { .name = "rx_classified_drops", .offset = 0x13, },
-       { .name = "rx_red_prio_0", .offset = 0x14, },
-       { .name = "rx_red_prio_1", .offset = 0x15, },
-       { .name = "rx_red_prio_2", .offset = 0x16, },
-       { .name = "rx_red_prio_3", .offset = 0x17, },
-       { .name = "rx_red_prio_4", .offset = 0x18, },
-       { .name = "rx_red_prio_5", .offset = 0x19, },
-       { .name = "rx_red_prio_6", .offset = 0x1A, },
-       { .name = "rx_red_prio_7", .offset = 0x1B, },
-       { .name = "rx_yellow_prio_0", .offset = 0x1C, },
-       { .name = "rx_yellow_prio_1", .offset = 0x1D, },
-       { .name = "rx_yellow_prio_2", .offset = 0x1E, },
-       { .name = "rx_yellow_prio_3", .offset = 0x1F, },
-       { .name = "rx_yellow_prio_4", .offset = 0x20, },
-       { .name = "rx_yellow_prio_5", .offset = 0x21, },
-       { .name = "rx_yellow_prio_6", .offset = 0x22, },
-       { .name = "rx_yellow_prio_7", .offset = 0x23, },
-       { .name = "rx_green_prio_0", .offset = 0x24, },
-       { .name = "rx_green_prio_1", .offset = 0x25, },
-       { .name = "rx_green_prio_2", .offset = 0x26, },
-       { .name = "rx_green_prio_3", .offset = 0x27, },
-       { .name = "rx_green_prio_4", .offset = 0x28, },
-       { .name = "rx_green_prio_5", .offset = 0x29, },
-       { .name = "rx_green_prio_6", .offset = 0x2A, },
-       { .name = "rx_green_prio_7", .offset = 0x2B, },
-       { .name = "tx_octets", .offset = 0x40, },
-       { .name = "tx_unicast", .offset = 0x41, },
-       { .name = "tx_multicast", .offset = 0x42, },
-       { .name = "tx_broadcast", .offset = 0x43, },
-       { .name = "tx_collision", .offset = 0x44, },
-       { .name = "tx_drops", .offset = 0x45, },
-       { .name = "tx_pause", .offset = 0x46, },
-       { .name = "tx_frames_below_65_octets", .offset = 0x47, },
-       { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
-       { .name = "tx_frames_128_255_octets", .offset = 0x49, },
-       { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
-       { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
-       { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
-       { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
-       { .name = "tx_yellow_prio_0", .offset = 0x4E, },
-       { .name = "tx_yellow_prio_1", .offset = 0x4F, },
-       { .name = "tx_yellow_prio_2", .offset = 0x50, },
-       { .name = "tx_yellow_prio_3", .offset = 0x51, },
-       { .name = "tx_yellow_prio_4", .offset = 0x52, },
-       { .name = "tx_yellow_prio_5", .offset = 0x53, },
-       { .name = "tx_yellow_prio_6", .offset = 0x54, },
-       { .name = "tx_yellow_prio_7", .offset = 0x55, },
-       { .name = "tx_green_prio_0", .offset = 0x56, },
-       { .name = "tx_green_prio_1", .offset = 0x57, },
-       { .name = "tx_green_prio_2", .offset = 0x58, },
-       { .name = "tx_green_prio_3", .offset = 0x59, },
-       { .name = "tx_green_prio_4", .offset = 0x5A, },
-       { .name = "tx_green_prio_5", .offset = 0x5B, },
-       { .name = "tx_green_prio_6", .offset = 0x5C, },
-       { .name = "tx_green_prio_7", .offset = 0x5D, },
-       { .name = "tx_aged", .offset = 0x5E, },
-       { .name = "drop_local", .offset = 0x80, },
-       { .name = "drop_tail", .offset = 0x81, },
-       { .name = "drop_yellow_prio_0", .offset = 0x82, },
-       { .name = "drop_yellow_prio_1", .offset = 0x83, },
-       { .name = "drop_yellow_prio_2", .offset = 0x84, },
-       { .name = "drop_yellow_prio_3", .offset = 0x85, },
-       { .name = "drop_yellow_prio_4", .offset = 0x86, },
-       { .name = "drop_yellow_prio_5", .offset = 0x87, },
-       { .name = "drop_yellow_prio_6", .offset = 0x88, },
-       { .name = "drop_yellow_prio_7", .offset = 0x89, },
-       { .name = "drop_green_prio_0", .offset = 0x8A, },
-       { .name = "drop_green_prio_1", .offset = 0x8B, },
-       { .name = "drop_green_prio_2", .offset = 0x8C, },
-       { .name = "drop_green_prio_3", .offset = 0x8D, },
-       { .name = "drop_green_prio_4", .offset = 0x8E, },
-       { .name = "drop_green_prio_5", .offset = 0x8F, },
-       { .name = "drop_green_prio_6", .offset = 0x90, },
-       { .name = "drop_green_prio_7", .offset = 0x91, },
-       OCELOT_STAT_END
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+       [OCELOT_STAT_RX_OCTETS] = {
+               .name = "rx_octets",
+               .reg = SYS_COUNT_RX_OCTETS,
+       },
+       [OCELOT_STAT_RX_UNICAST] = {
+               .name = "rx_unicast",
+               .reg = SYS_COUNT_RX_UNICAST,
+       },
+       [OCELOT_STAT_RX_MULTICAST] = {
+               .name = "rx_multicast",
+               .reg = SYS_COUNT_RX_MULTICAST,
+       },
+       [OCELOT_STAT_RX_BROADCAST] = {
+               .name = "rx_broadcast",
+               .reg = SYS_COUNT_RX_BROADCAST,
+       },
+       [OCELOT_STAT_RX_SHORTS] = {
+               .name = "rx_shorts",
+               .reg = SYS_COUNT_RX_SHORTS,
+       },
+       [OCELOT_STAT_RX_FRAGMENTS] = {
+               .name = "rx_fragments",
+               .reg = SYS_COUNT_RX_FRAGMENTS,
+       },
+       [OCELOT_STAT_RX_JABBERS] = {
+               .name = "rx_jabbers",
+               .reg = SYS_COUNT_RX_JABBERS,
+       },
+       [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+               .name = "rx_crc_align_errs",
+               .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+       },
+       [OCELOT_STAT_RX_SYM_ERRS] = {
+               .name = "rx_sym_errs",
+               .reg = SYS_COUNT_RX_SYM_ERRS,
+       },
+       [OCELOT_STAT_RX_64] = {
+               .name = "rx_frames_below_65_octets",
+               .reg = SYS_COUNT_RX_64,
+       },
+       [OCELOT_STAT_RX_65_127] = {
+               .name = "rx_frames_65_to_127_octets",
+               .reg = SYS_COUNT_RX_65_127,
+       },
+       [OCELOT_STAT_RX_128_255] = {
+               .name = "rx_frames_128_to_255_octets",
+               .reg = SYS_COUNT_RX_128_255,
+       },
+       [OCELOT_STAT_RX_256_511] = {
+               .name = "rx_frames_256_to_511_octets",
+               .reg = SYS_COUNT_RX_256_511,
+       },
+       [OCELOT_STAT_RX_512_1023] = {
+               .name = "rx_frames_512_to_1023_octets",
+               .reg = SYS_COUNT_RX_512_1023,
+       },
+       [OCELOT_STAT_RX_1024_1526] = {
+               .name = "rx_frames_1024_to_1526_octets",
+               .reg = SYS_COUNT_RX_1024_1526,
+       },
+       [OCELOT_STAT_RX_1527_MAX] = {
+               .name = "rx_frames_over_1526_octets",
+               .reg = SYS_COUNT_RX_1527_MAX,
+       },
+       [OCELOT_STAT_RX_PAUSE] = {
+               .name = "rx_pause",
+               .reg = SYS_COUNT_RX_PAUSE,
+       },
+       [OCELOT_STAT_RX_CONTROL] = {
+               .name = "rx_control",
+               .reg = SYS_COUNT_RX_CONTROL,
+       },
+       [OCELOT_STAT_RX_LONGS] = {
+               .name = "rx_longs",
+               .reg = SYS_COUNT_RX_LONGS,
+       },
+       [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+               .name = "rx_classified_drops",
+               .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_0] = {
+               .name = "rx_red_prio_0",
+               .reg = SYS_COUNT_RX_RED_PRIO_0,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_1] = {
+               .name = "rx_red_prio_1",
+               .reg = SYS_COUNT_RX_RED_PRIO_1,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_2] = {
+               .name = "rx_red_prio_2",
+               .reg = SYS_COUNT_RX_RED_PRIO_2,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_3] = {
+               .name = "rx_red_prio_3",
+               .reg = SYS_COUNT_RX_RED_PRIO_3,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_4] = {
+               .name = "rx_red_prio_4",
+               .reg = SYS_COUNT_RX_RED_PRIO_4,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_5] = {
+               .name = "rx_red_prio_5",
+               .reg = SYS_COUNT_RX_RED_PRIO_5,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_6] = {
+               .name = "rx_red_prio_6",
+               .reg = SYS_COUNT_RX_RED_PRIO_6,
+       },
+       [OCELOT_STAT_RX_RED_PRIO_7] = {
+               .name = "rx_red_prio_7",
+               .reg = SYS_COUNT_RX_RED_PRIO_7,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+               .name = "rx_yellow_prio_0",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+               .name = "rx_yellow_prio_1",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+               .name = "rx_yellow_prio_2",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+               .name = "rx_yellow_prio_3",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+               .name = "rx_yellow_prio_4",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+               .name = "rx_yellow_prio_5",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+               .name = "rx_yellow_prio_6",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+               .name = "rx_yellow_prio_7",
+               .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+               .name = "rx_green_prio_0",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+               .name = "rx_green_prio_1",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+               .name = "rx_green_prio_2",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+               .name = "rx_green_prio_3",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+               .name = "rx_green_prio_4",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+               .name = "rx_green_prio_5",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+               .name = "rx_green_prio_6",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+               .name = "rx_green_prio_7",
+               .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+       },
+       [OCELOT_STAT_TX_OCTETS] = {
+               .name = "tx_octets",
+               .reg = SYS_COUNT_TX_OCTETS,
+       },
+       [OCELOT_STAT_TX_UNICAST] = {
+               .name = "tx_unicast",
+               .reg = SYS_COUNT_TX_UNICAST,
+       },
+       [OCELOT_STAT_TX_MULTICAST] = {
+               .name = "tx_multicast",
+               .reg = SYS_COUNT_TX_MULTICAST,
+       },
+       [OCELOT_STAT_TX_BROADCAST] = {
+               .name = "tx_broadcast",
+               .reg = SYS_COUNT_TX_BROADCAST,
+       },
+       [OCELOT_STAT_TX_COLLISION] = {
+               .name = "tx_collision",
+               .reg = SYS_COUNT_TX_COLLISION,
+       },
+       [OCELOT_STAT_TX_DROPS] = {
+               .name = "tx_drops",
+               .reg = SYS_COUNT_TX_DROPS,
+       },
+       [OCELOT_STAT_TX_PAUSE] = {
+               .name = "tx_pause",
+               .reg = SYS_COUNT_TX_PAUSE,
+       },
+       [OCELOT_STAT_TX_64] = {
+               .name = "tx_frames_below_65_octets",
+               .reg = SYS_COUNT_TX_64,
+       },
+       [OCELOT_STAT_TX_65_127] = {
+               .name = "tx_frames_65_to_127_octets",
+               .reg = SYS_COUNT_TX_65_127,
+       },
+       [OCELOT_STAT_TX_128_255] = {
+               .name = "tx_frames_128_255_octets",
+               .reg = SYS_COUNT_TX_128_255,
+       },
+       [OCELOT_STAT_TX_256_511] = {
+               .name = "tx_frames_256_511_octets",
+               .reg = SYS_COUNT_TX_256_511,
+       },
+       [OCELOT_STAT_TX_512_1023] = {
+               .name = "tx_frames_512_1023_octets",
+               .reg = SYS_COUNT_TX_512_1023,
+       },
+       [OCELOT_STAT_TX_1024_1526] = {
+               .name = "tx_frames_1024_1526_octets",
+               .reg = SYS_COUNT_TX_1024_1526,
+       },
+       [OCELOT_STAT_TX_1527_MAX] = {
+               .name = "tx_frames_over_1526_octets",
+               .reg = SYS_COUNT_TX_1527_MAX,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+               .name = "tx_yellow_prio_0",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+               .name = "tx_yellow_prio_1",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+               .name = "tx_yellow_prio_2",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+               .name = "tx_yellow_prio_3",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+               .name = "tx_yellow_prio_4",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+               .name = "tx_yellow_prio_5",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+               .name = "tx_yellow_prio_6",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+               .name = "tx_yellow_prio_7",
+               .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+               .name = "tx_green_prio_0",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+               .name = "tx_green_prio_1",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+               .name = "tx_green_prio_2",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+               .name = "tx_green_prio_3",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+               .name = "tx_green_prio_4",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+               .name = "tx_green_prio_5",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+               .name = "tx_green_prio_6",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+               .name = "tx_green_prio_7",
+               .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+       },
+       [OCELOT_STAT_TX_AGED] = {
+               .name = "tx_aged",
+               .reg = SYS_COUNT_TX_AGING,
+       },
+       [OCELOT_STAT_DROP_LOCAL] = {
+               .name = "drop_local",
+               .reg = SYS_COUNT_DROP_LOCAL,
+       },
+       [OCELOT_STAT_DROP_TAIL] = {
+               .name = "drop_tail",
+               .reg = SYS_COUNT_DROP_TAIL,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+               .name = "drop_yellow_prio_0",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+               .name = "drop_yellow_prio_1",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+               .name = "drop_yellow_prio_2",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+               .name = "drop_yellow_prio_3",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+               .name = "drop_yellow_prio_4",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+               .name = "drop_yellow_prio_5",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+               .name = "drop_yellow_prio_6",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+       },
+       [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+               .name = "drop_yellow_prio_7",
+               .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+               .name = "drop_green_prio_0",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+               .name = "drop_green_prio_1",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+               .name = "drop_green_prio_2",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+               .name = "drop_green_prio_3",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+               .name = "drop_green_prio_4",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+               .name = "drop_green_prio_5",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+               .name = "drop_green_prio_6",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+       },
+       [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+               .name = "drop_green_prio_7",
+               .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+       },
 };
 
 static void ocelot_pll5_init(struct ocelot *ocelot)
index c2af4eb8ca5d345ad1df5ee5fdacdee124b101e2..9cf82ecf191cd0727300d9d5df94b584b08a7e44 100644 (file)
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
        REG(SYS_COUNT_RX_64,                            0x000024),
        REG(SYS_COUNT_RX_65_127,                        0x000028),
        REG(SYS_COUNT_RX_128_255,                       0x00002c),
-       REG(SYS_COUNT_RX_256_1023,                      0x000030),
-       REG(SYS_COUNT_RX_1024_1526,                     0x000034),
-       REG(SYS_COUNT_RX_1527_MAX,                      0x000038),
-       REG(SYS_COUNT_RX_PAUSE,                         0x00003c),
-       REG(SYS_COUNT_RX_CONTROL,                       0x000040),
-       REG(SYS_COUNT_RX_LONGS,                         0x000044),
-       REG(SYS_COUNT_RX_CLASSIFIED_DROPS,              0x000048),
+       REG(SYS_COUNT_RX_256_511,                       0x000030),
+       REG(SYS_COUNT_RX_512_1023,                      0x000034),
+       REG(SYS_COUNT_RX_1024_1526,                     0x000038),
+       REG(SYS_COUNT_RX_1527_MAX,                      0x00003c),
+       REG(SYS_COUNT_RX_PAUSE,                         0x000040),
+       REG(SYS_COUNT_RX_CONTROL,                       0x000044),
+       REG(SYS_COUNT_RX_LONGS,                         0x000048),
+       REG(SYS_COUNT_RX_CLASSIFIED_DROPS,              0x00004c),
+       REG(SYS_COUNT_RX_RED_PRIO_0,                    0x000050),
+       REG(SYS_COUNT_RX_RED_PRIO_1,                    0x000054),
+       REG(SYS_COUNT_RX_RED_PRIO_2,                    0x000058),
+       REG(SYS_COUNT_RX_RED_PRIO_3,                    0x00005c),
+       REG(SYS_COUNT_RX_RED_PRIO_4,                    0x000060),
+       REG(SYS_COUNT_RX_RED_PRIO_5,                    0x000064),
+       REG(SYS_COUNT_RX_RED_PRIO_6,                    0x000068),
+       REG(SYS_COUNT_RX_RED_PRIO_7,                    0x00006c),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_0,                 0x000070),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_1,                 0x000074),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_2,                 0x000078),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_3,                 0x00007c),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_4,                 0x000080),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_5,                 0x000084),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_6,                 0x000088),
+       REG(SYS_COUNT_RX_YELLOW_PRIO_7,                 0x00008c),
+       REG(SYS_COUNT_RX_GREEN_PRIO_0,                  0x000090),
+       REG(SYS_COUNT_RX_GREEN_PRIO_1,                  0x000094),
+       REG(SYS_COUNT_RX_GREEN_PRIO_2,                  0x000098),
+       REG(SYS_COUNT_RX_GREEN_PRIO_3,                  0x00009c),
+       REG(SYS_COUNT_RX_GREEN_PRIO_4,                  0x0000a0),
+       REG(SYS_COUNT_RX_GREEN_PRIO_5,                  0x0000a4),
+       REG(SYS_COUNT_RX_GREEN_PRIO_6,                  0x0000a8),
+       REG(SYS_COUNT_RX_GREEN_PRIO_7,                  0x0000ac),
        REG(SYS_COUNT_TX_OCTETS,                        0x000100),
        REG(SYS_COUNT_TX_UNICAST,                       0x000104),
        REG(SYS_COUNT_TX_MULTICAST,                     0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
        REG(SYS_COUNT_TX_PAUSE,                         0x000118),
        REG(SYS_COUNT_TX_64,                            0x00011c),
        REG(SYS_COUNT_TX_65_127,                        0x000120),
-       REG(SYS_COUNT_TX_128_511,                       0x000124),
-       REG(SYS_COUNT_TX_512_1023,                      0x000128),
-       REG(SYS_COUNT_TX_1024_1526,                     0x00012c),
-       REG(SYS_COUNT_TX_1527_MAX,                      0x000130),
-       REG(SYS_COUNT_TX_AGING,                         0x000170),
+       REG(SYS_COUNT_TX_128_255,                       0x000124),
+       REG(SYS_COUNT_TX_256_511,                       0x000128),
+       REG(SYS_COUNT_TX_512_1023,                      0x00012c),
+       REG(SYS_COUNT_TX_1024_1526,                     0x000130),
+       REG(SYS_COUNT_TX_1527_MAX,                      0x000134),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_0,                 0x000138),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_1,                 0x00013c),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_2,                 0x000140),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_3,                 0x000144),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_4,                 0x000148),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_5,                 0x00014c),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_6,                 0x000150),
+       REG(SYS_COUNT_TX_YELLOW_PRIO_7,                 0x000154),
+       REG(SYS_COUNT_TX_GREEN_PRIO_0,                  0x000158),
+       REG(SYS_COUNT_TX_GREEN_PRIO_1,                  0x00015c),
+       REG(SYS_COUNT_TX_GREEN_PRIO_2,                  0x000160),
+       REG(SYS_COUNT_TX_GREEN_PRIO_3,                  0x000164),
+       REG(SYS_COUNT_TX_GREEN_PRIO_4,                  0x000168),
+       REG(SYS_COUNT_TX_GREEN_PRIO_5,                  0x00016c),
+       REG(SYS_COUNT_TX_GREEN_PRIO_6,                  0x000170),
+       REG(SYS_COUNT_TX_GREEN_PRIO_7,                  0x000174),
+       REG(SYS_COUNT_TX_AGING,                         0x000178),
+       REG(SYS_COUNT_DROP_LOCAL,                       0x000200),
+       REG(SYS_COUNT_DROP_TAIL,                        0x000204),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_0,               0x000208),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_1,               0x00020c),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_2,               0x000210),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_3,               0x000214),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_4,               0x000218),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_5,               0x00021c),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_6,               0x000220),
+       REG(SYS_COUNT_DROP_YELLOW_PRIO_7,               0x000224),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_0,                0x000228),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_1,                0x00022c),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_2,                0x000230),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_3,                0x000234),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_4,                0x000238),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_5,                0x00023c),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_6,                0x000240),
+       REG(SYS_COUNT_DROP_GREEN_PRIO_7,                0x000244),
        REG(SYS_RESET_CFG,                              0x000508),
        REG(SYS_CMID,                                   0x00050c),
        REG(SYS_VLAN_ETYPE_CFG,                         0x000510),
index 1443f788ee37cf911a0c28ae3c0ef6b8a11a1398..0be79c51678133c7b98668d1670cfeeb54bafb85 100644 (file)
@@ -1564,8 +1564,67 @@ static int ionic_set_features(struct net_device *netdev,
        return err;
 }
 
+static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
+{
+       struct ionic_admin_ctx ctx = {
+               .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+               .cmd.lif_setattr = {
+                       .opcode = IONIC_CMD_LIF_SETATTR,
+                       .index = cpu_to_le16(lif->index),
+                       .attr = IONIC_LIF_ATTR_MAC,
+               },
+       };
+
+       ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
+       return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
+{
+       struct ionic_admin_ctx ctx = {
+               .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+               .cmd.lif_getattr = {
+                       .opcode = IONIC_CMD_LIF_GETATTR,
+                       .index = cpu_to_le16(lif->index),
+                       .attr = IONIC_LIF_ATTR_MAC,
+               },
+       };
+       int err;
+
+       err = ionic_adminq_post_wait(lif, &ctx);
+       if (err)
+               return err;
+
+       ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
+       return 0;
+}
+
+static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
+{
+       u8  get_mac[ETH_ALEN];
+       int err;
+
+       err = ionic_set_attr_mac(lif, mac);
+       if (err)
+               return err;
+
+       err = ionic_get_attr_mac(lif, get_mac);
+       if (err)
+               return err;
+
+       /* To deal with older firmware that silently ignores the set attr mac:
+        * doesn't actually change the mac and doesn't return an error, so we
+        * do the get attr to verify whether or not the set actually happened
+        */
+       if (!ether_addr_equal(get_mac, mac))
+               return 1;
+
+       return 0;
+}
+
 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
 {
+       struct ionic_lif *lif = netdev_priv(netdev);
        struct sockaddr *addr = sa;
        u8 *mac;
        int err;
@@ -1574,6 +1633,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
        if (ether_addr_equal(netdev->dev_addr, mac))
                return 0;
 
+       err = ionic_program_mac(lif, mac);
+       if (err < 0)
+               return err;
+
+       if (err > 0)
+               netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
+                          __func__);
+
        err = eth_prepare_mac_addr_change(netdev, addr);
        if (err)
                return err;
@@ -2963,6 +3030,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
 
        mutex_lock(&lif->queue_lock);
 
+       if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+               dev_info(ionic->dev, "FW Up: clearing broken state\n");
+
        err = ionic_qcqs_alloc(lif);
        if (err)
                goto err_unlock;
@@ -3169,6 +3239,7 @@ static int ionic_station_set(struct ionic_lif *lif)
                        .attr = IONIC_LIF_ATTR_MAC,
                },
        };
+       u8 mac_address[ETH_ALEN];
        struct sockaddr addr;
        int err;
 
@@ -3177,8 +3248,23 @@ static int ionic_station_set(struct ionic_lif *lif)
                return err;
        netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
                   ctx.comp.lif_getattr.mac);
-       if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
-               return 0;
+       ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
+
+       if (is_zero_ether_addr(mac_address)) {
+               eth_hw_addr_random(netdev);
+               netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
+               ether_addr_copy(mac_address, netdev->dev_addr);
+
+               err = ionic_program_mac(lif, mac_address);
+               if (err < 0)
+                       return err;
+
+               if (err > 0) {
+                       netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
+                                  __func__);
+                       return 0;
+               }
+       }
 
        if (!is_zero_ether_addr(netdev->dev_addr)) {
                /* If the netdev mac is non-zero and doesn't match the default
@@ -3186,12 +3272,11 @@ static int ionic_station_set(struct ionic_lif *lif)
                 * likely here again after a fw-upgrade reset.  We need to be
                 * sure the netdev mac is in our filter list.
                 */
-               if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
-                                     netdev->dev_addr))
+               if (!ether_addr_equal(mac_address, netdev->dev_addr))
                        ionic_lif_addr_add(lif, netdev->dev_addr);
        } else {
                /* Update the netdev mac with the device's mac */
-               memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+               ether_addr_copy(addr.sa_data, mac_address);
                addr.sa_family = AF_INET;
                err = eth_prepare_mac_addr_change(netdev, &addr);
                if (err) {
index 4029b4e021f868bab3b45743294cd004640e13f3..56f93b030551909339e6027153326d67e9104199 100644 (file)
@@ -474,8 +474,8 @@ try_again:
                                ionic_opcode_to_str(opcode), opcode,
                                ionic_error_to_str(err), err);
 
-                       msleep(1000);
                        iowrite32(0, &idev->dev_cmd_regs->done);
+                       msleep(1000);
                        iowrite32(1, &idev->dev_cmd_regs->doorbell);
                        goto try_again;
                }
@@ -488,6 +488,8 @@ try_again:
                return ionic_error_to_errno(err);
        }
 
+       ionic_dev_cmd_clean(ionic);
+
        return 0;
 }
 
index 52f9ed8db9c9806148ac751017d865928fcc6b6f..4f2b82a884b9161e4af3880ebaf080956a35a4af 100644 (file)
@@ -1134,6 +1134,7 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
 
        stmmac_dvr_remove(&pdev->dev);
 
+       clk_disable_unprepare(priv->plat->stmmac_clk);
        clk_unregister_fixed_rate(priv->plat->stmmac_clk);
 
        pcim_iounmap_regions(pdev, BIT(0));
index caa4bfc4c1d62effb20b08de3e7d495743d9eb53..9b6138b11776656a91e3fa0c293c5d13177fda7c 100644 (file)
@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
 /* Enable disable MAC RX/TX */
 void stmmac_set_mac(void __iomem *ioaddr, bool enable)
 {
-       u32 value = readl(ioaddr + MAC_CTRL_REG);
+       u32 old_val, value;
+
+       old_val = readl(ioaddr + MAC_CTRL_REG);
+       value = old_val;
 
        if (enable)
                value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
        else
                value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
 
-       writel(value, ioaddr + MAC_CTRL_REG);
+       if (value != old_val)
+               writel(value, ioaddr + MAC_CTRL_REG);
 }
 
 void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
index 070b5ef165eba4026d25a7d4f2476cbb5a42ed31..592d29abcb1c25fcc329a2aa8a5539b0c87d8b1d 100644 (file)
@@ -986,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
                               bool tx_pause, bool rx_pause)
 {
        struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-       u32 ctrl;
+       u32 old_ctrl, ctrl;
 
-       ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
-       ctrl &= ~priv->hw->link.speed_mask;
+       old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+       ctrl = old_ctrl & ~priv->hw->link.speed_mask;
 
        if (interface == PHY_INTERFACE_MODE_USXGMII) {
                switch (speed) {
@@ -1064,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
        if (tx_pause && rx_pause)
                stmmac_mac_flow_ctrl(priv, duplex);
 
-       writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+       if (ctrl != old_ctrl)
+               writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
 
        stmmac_mac_set(priv, priv->ioaddr, true);
        if (phy && priv->dma_cap.eee) {
index 76c4a709d73daf3b1170a83a78b553f049e61d79..e97db826cdd45bc44fb5dea7a8a2b01b35d226a9 100644 (file)
@@ -348,7 +348,7 @@ do {                                                                        \
  *             This macro is invoked by the OS-specific before it left the
  *             function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
  *             if the number of used RxDs is equal or lower than the
- *             the given low water mark.
+ *             given low water mark.
  *
  * para        low_water       low water mark of used RxD's
  *
index 1e9eae208e44f89d61e9fb8a280a81fd63724567..53a1dbeaffa6ddc909fbbf4b7018eba290aef5d2 100644 (file)
@@ -568,7 +568,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
        }
 
        /* Align the address down and the size up to a page boundary */
-       addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+       addr = qcom_smem_virt_to_phys(virt);
        phys = addr & PAGE_MASK;
        size = PAGE_ALIGN(size + addr - phys);
        iova = phys;    /* We just want a direct mapping */
index a5b355384d4ae31ec47d0e2d667e824497fc364d..6f35438cda890ededc18de5b4fdf8d411c5ca1c7 100644 (file)
@@ -48,7 +48,7 @@ struct ipa;
  *
  * The offset of registers related to resource types is computed by a macro
  * that is supplied a parameter "rt".  The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
+ * which is a member of the ipa_resource_type_src enumerated type for
  * source endpoint resources or the ipa_resource_type_dst enumerated type
  * for destination endpoint resources.
  *
index ef02f2cf5ce13d9a3db210f83b801f1a3fde0976..cbabca167a0785d136f59c11935f4c934cf8ad51 100644 (file)
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
        .notifier_call  = ipvtap_device_event,
 };
 
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
 {
        int err;
 
@@ -228,7 +228,7 @@ out1:
 }
 module_init(ipvtap_init);
 
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
 {
        rtnl_link_unregister(&ipvtap_link_ops);
        unregister_netdevice_notifier(&ipvtap_notifier_block);
index ee6087e7b2bfb5db7cd22103285a9a28aa2b8995..c6d271e5687e9e047ee947bbbe10fd9dae29a13e 100644 (file)
@@ -462,11 +462,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
        return (struct macsec_eth_header *)skb_mac_header(skb);
 }
 
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
-       return make_sci(dev->dev_addr, port);
-}
-
 static void __macsec_pn_wrapped(struct macsec_secy *secy,
                                struct macsec_tx_sa *tx_sa)
 {
@@ -3661,7 +3656,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
 
 out:
        eth_hw_addr_set(dev, addr->sa_data);
-       macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
 
        /* If h/w offloading is available, propagate to the device */
        if (macsec_is_offloaded(macsec)) {
@@ -4000,6 +3994,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
        return false;
 }
 
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+       return make_sci(dev->dev_addr, port);
+}
+
 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
index 0c6efd79269071ec5844940aa0c9dbcf1e09b49f..12ff276b80aed7e22a817c940907f59d17ea8ad7 100644 (file)
@@ -316,11 +316,11 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
 
        phydev->suspended_by_mdio_bus = 0;
 
-       /* If we managed to get here with the PHY state machine in a state other
-        * than PHY_HALTED this is an indication that something went wrong and
-        * we should most likely be using MAC managed PM and we are not.
+       /* If we managed to get here with the PHY state machine in a state neither
+        * PHY_HALTED nor PHY_READY this is an indication that something went wrong
+        * and we should most likely be using MAC managed PM and we are not.
         */
-       WARN_ON(phydev->state != PHY_HALTED && !phydev->mac_managed_pm);
+       WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
 
        ret = phy_init_hw(phydev);
        if (ret < 0)
index 0f6efaabaa32b892ccb57355bb9db3941ec216ec..d142ac8fcf6e2dd8fe8f473d9a8fff1cdf5c3adb 100644 (file)
@@ -5906,6 +5906,11 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_data &= ~NOW_IS_OOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
+       /* RX FIFO settings for OOB */
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
        rtl_disable(tp);
        rtl_reset_bmu(tp);
 
@@ -6431,21 +6436,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
        u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
        u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
 
-       switch (tp->version) {
-       case RTL_VER_10:
-       case RTL_VER_11:
-               ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
-               ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
-               break;
-       case RTL_VER_12:
-       case RTL_VER_13:
-       case RTL_VER_15:
-               ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
-               ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
-               break;
-       default:
-               break;
-       }
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
 }
 
 static void rtl8156_change_mtu(struct r8152 *tp)
@@ -6557,6 +6549,11 @@ static void rtl8156_down(struct r8152 *tp)
        ocp_data &= ~NOW_IS_OOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
+       /* RX FIFO settings for OOB */
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
+
        rtl_disable(tp);
        rtl_reset_bmu(tp);
 
index d934774e9733bc1077c05f026127ea143fdc9f8f..9cce7dec7366d41816f0e09ce9342d9869ea9ac9 100644 (file)
@@ -1211,7 +1211,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
        if (!hdr_hash || !skb)
                return;
 
-       switch ((int)hdr_hash->hash_report) {
+       switch (__le16_to_cpu(hdr_hash->hash_report)) {
        case VIRTIO_NET_HASH_REPORT_TCPv4:
        case VIRTIO_NET_HASH_REPORT_UDPv4:
        case VIRTIO_NET_HASH_REPORT_TCPv6:
@@ -1229,7 +1229,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
        default:
                rss_hash_type = PKT_HASH_TYPE_NONE;
        }
-       skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+       skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
 }
 
 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
@@ -3432,29 +3432,6 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
                   (unsigned int)GOOD_PACKET_LEN);
 }
 
-static void virtnet_config_sizes(struct virtnet_info *vi, u32 *sizes)
-{
-       u32 i, rx_size, tx_size;
-
-       if (vi->speed == SPEED_UNKNOWN || vi->speed < SPEED_10000) {
-               rx_size = 1024;
-               tx_size = 1024;
-
-       } else if (vi->speed < SPEED_40000) {
-               rx_size = 1024 * 4;
-               tx_size = 1024 * 4;
-
-       } else {
-               rx_size = 1024 * 8;
-               tx_size = 1024 * 8;
-       }
-
-       for (i = 0; i < vi->max_queue_pairs; i++) {
-               sizes[rxq2vq(i)] = rx_size;
-               sizes[txq2vq(i)] = tx_size;
-       }
-}
-
 static int virtnet_find_vqs(struct virtnet_info *vi)
 {
        vq_callback_t **callbacks;
@@ -3462,7 +3439,6 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
        int ret = -ENOMEM;
        int i, total_vqs;
        const char **names;
-       u32 *sizes;
        bool *ctx;
 
        /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
@@ -3490,15 +3466,10 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
                ctx = NULL;
        }
 
-       sizes = kmalloc_array(total_vqs, sizeof(*sizes), GFP_KERNEL);
-       if (!sizes)
-               goto err_sizes;
-
        /* Parameters for control virtqueue, if any */
        if (vi->has_cvq) {
                callbacks[total_vqs - 1] = NULL;
                names[total_vqs - 1] = "control";
-               sizes[total_vqs - 1] = 64;
        }
 
        /* Allocate/initialize parameters for send/receive virtqueues */
@@ -3513,10 +3484,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
                        ctx[rxq2vq(i)] = true;
        }
 
-       virtnet_config_sizes(vi, sizes);
-
-       ret = virtio_find_vqs_ctx_size(vi->vdev, total_vqs, vqs, callbacks,
-                                      names, sizes, ctx, NULL);
+       ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
+                                 names, ctx, NULL);
        if (ret)
                goto err_find;
 
@@ -3536,8 +3505,6 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 
 
 err_find:
-       kfree(sizes);
-err_sizes:
        kfree(ctx);
 err_ctx:
        kfree(names);
@@ -3897,9 +3864,6 @@ static int virtnet_probe(struct virtio_device *vdev)
                vi->curr_queue_pairs = num_online_cpus();
        vi->max_queue_pairs = max_queue_pairs;
 
-       virtnet_init_settings(dev);
-       virtnet_update_settings(vi);
-
        /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
        err = init_vqs(vi);
        if (err)
@@ -3912,6 +3876,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
        netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
 
+       virtnet_init_settings(dev);
+
        if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
                vi->failover = net_failover_create(vi->dev);
                if (IS_ERR(vi->failover)) {
index 2caf997f9bc94a2456b1d70c4847f8c4320ee435..07596bf5f7d6d6adda6481bb2e58824c056ce82e 100644 (file)
@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
        pn53x_unregister_nfc(pn532->priv);
        serdev_device_close(serdev);
        pn53x_common_clean(pn532->priv);
+       del_timer_sync(&pn532->cmd_timeout);
        kfree_skb(pn532->recv_skb);
        kfree(pn532);
 }
index 9be007c9420f9e87a675a611b62f4f4f62faa661..f223afe47d1049849e88e678e994b6549bf3d03e 100644 (file)
@@ -268,7 +268,7 @@ static int ioc_count;
 *   Each bit can represent a number of pages.
 *   LSbs represent lower addresses (IOVA's).
 *
-*   This was was copied from sba_iommu.c. Don't try to unify
+*   This was copied from sba_iommu.c. Don't try to unify
 *   the two resource managers unless a way to have different
 *   allocation policies is also adjusted. We'd like to avoid
 *   I/O TLB thrashing by having resource allocation policy
@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
        }
 }
 
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
 {
        struct resource *res = ioc->mmio_region;
        char *name = kmalloc(14, GFP_KERNEL);
-
+       if (unlikely(!name))
+               return -ENOMEM;
        snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
 
        ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
        ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+       return 0;
 }
 
 static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev)
                return -ENOMEM;
        }
        ccio_ioc_init(ioc);
-       ccio_init_resources(ioc);
+       if (ccio_init_resources(ioc)) {
+               kfree(ioc);
+               return -ENOMEM;
+       }
        hppa_dma_ops = &ccio_ops;
 
        hba = kzalloc(sizeof(*hba), GFP_KERNEL);
index 1e4a5663d01122bffa88f0ecae49fdf3d5a6f3de..d4be9d2ee74d9254b7d9a57797d2460c646060ad 100644 (file)
@@ -646,7 +646,7 @@ int lcd_print( const char *str )
                cancel_delayed_work_sync(&led_task);
 
        /* copy display string to buffer for procfs */
-       strlcpy(lcd_text, str, sizeof(lcd_text));
+       strscpy(lcd_text, str, sizeof(lcd_text));
 
        /* Set LCD Cursor to 1st character */
        gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
index 342778782359f84f36d559cedc135571a2839d97..2c20b0de8cb094aff055cc8fa915db0c4ff490b0 100644 (file)
@@ -72,7 +72,7 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
        local64_set(&hwc->prev_count, initial_val);
 }
 
-/**
+/*
  * This is just a simple implementation to allow legacy implementations
  * compatible with new RISC-V PMU driver framework.
  * This driver only allows reading two counters i.e CYCLE & INSTRET.
index 8be13d416f485d4a9dd17afcf486626591ee690c..1ae3c56b66b097797f69b5af07c2e5993e597bdd 100644 (file)
@@ -928,7 +928,6 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
                                        struct virtqueue *vqs[],
                                        vq_callback_t *callbacks[],
                                        const char * const names[],
-                                       u32 sizes[],
                                        const bool *ctx,
                                        struct irq_affinity *desc)
 {
index 67feed25c9dbcc7b389d998353c2435bc448228e..5362f1a7b77c5df4c654c62c8b14fc09910feb16 100644 (file)
@@ -328,6 +328,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
        { "INT3515", (unsigned long)&int3515_data },
        /* Non-conforming _HID for Cirrus Logic already released */
        { "CLSA0100", (unsigned long)&cs35l41_hda },
+       { "CLSA0101", (unsigned long)&cs35l41_hda },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
index 7150b1d0159e5da8868badd8faafe17eb1a56d73..d8373cb04f9038af50c1798ea3d16b142a8159a1 100644 (file)
@@ -4784,10 +4784,10 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
                consumers[i].consumer = regulator_get(dev,
                                                      consumers[i].supply);
                if (IS_ERR(consumers[i].consumer)) {
-                       consumers[i].consumer = NULL;
                        ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
                                            "Failed to get supply '%s'",
                                            consumers[i].supply);
+                       consumers[i].consumer = NULL;
                        goto err;
                }
 
index 81c4f57761092f8a53aeff941b40df873c6bed5f..0f7706e23eb91f8a578c2db0bea9f56e93351c64 100644 (file)
@@ -158,7 +158,6 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                                 struct virtqueue *vqs[],
                                 vq_callback_t *callbacks[],
                                 const char * const names[],
-                                u32 sizes[],
                                 const bool * ctx,
                                 struct irq_affinity *desc)
 {
index 8f1d1cf23d4442b2448b7839eafbf2b21032902a..59ac98f2bd2756fa2cbae94ac11f1301babc9ad7 100644 (file)
@@ -2086,6 +2086,9 @@ static inline void ap_scan_adapter(int ap)
  */
 static bool ap_get_configuration(void)
 {
+       if (!ap_qci_info)       /* QCI not supported */
+               return false;
+
        memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
        ap_fetch_qci_info(ap_qci_info);
 
index 0c40af157df2339034ddb796018a12683a71a4b0..0f17933954fb2a9b923c5cd03f3e5e9a52650c6b 100644 (file)
@@ -148,12 +148,16 @@ struct ap_driver {
        /*
         * Called at the start of the ap bus scan function when
         * the crypto config information (qci) has changed.
+        * This callback is not invoked if there is no AP
+        * QCI support available.
         */
        void (*on_config_changed)(struct ap_config_info *new_config_info,
                                  struct ap_config_info *old_config_info);
        /*
         * Called at the end of the ap bus scan function when
         * the crypto config information (qci) has changed.
+        * This callback is not invoked if there is no AP
+        * QCI support available.
         */
        void (*on_scan_complete)(struct ap_config_info *new_config_info,
                                 struct ap_config_info *old_config_info);
index 896896e326645ef615cdba25bf399eeb3d75dced..a10dbe632ef9bdd697586d2307c902855a86dbb1 100644 (file)
@@ -637,7 +637,6 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
                               const char * const names[],
-                              u32 sizes[],
                               const bool *ctx,
                               struct irq_affinity *desc)
 {
index a3e117a4b8e746981daa7315b716e6e0dc2c8a4e..f6c37a97544ea838ef8274f903f2feba83162f50 100644 (file)
@@ -7153,22 +7153,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
        switch (instance->adapter_type) {
        case MFI_SERIES:
                if (megasas_alloc_mfi_ctrl_mem(instance))
-                       goto fail;
+                       return -ENOMEM;
                break;
        case AERO_SERIES:
        case VENTURA_SERIES:
        case THUNDERBOLT_SERIES:
        case INVADER_SERIES:
                if (megasas_alloc_fusion_context(instance))
-                       goto fail;
+                       return -ENOMEM;
                break;
        }
 
        return 0;
- fail:
-       kfree(instance->reply_map);
-       instance->reply_map = NULL;
-       return -ENOMEM;
 }
 
 /*
index e48d4261d0bcacf3ed63f46b4815d4b8e050c138..09c5fe37754c5947cbbbaae660444d536d8ce8d6 100644 (file)
@@ -5310,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
                if (!fusion->log_to_span) {
                        dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                                __func__, __LINE__);
-                       kfree(instance->ctrl_context);
                        return -ENOMEM;
                }
        }
index 2b2f682883752956f49281d0c91ae35c5fc97884..62666df1a59eb770ea864c80819ab4223ce8e417 100644 (file)
@@ -6935,14 +6935,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
 
        if (ha->flags.msix_enabled) {
                if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
-                       if (IS_QLA2071(ha)) {
-                               /* 4 ports Baker: Enable Interrupt Handshake */
-                               icb->msix_atio = 0;
-                               icb->firmware_options_2 |= cpu_to_le32(BIT_26);
-                       } else {
-                               icb->msix_atio = cpu_to_le16(msix->entry);
-                               icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
-                       }
+                       icb->msix_atio = cpu_to_le16(msix->entry);
+                       icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
                        ql_dbg(ql_dbg_init, vha, 0xf072,
                            "Registering ICB vector 0x%x for atio que.\n",
                            msix->entry);
index 4dbd29ab1dcc37792688849a468e5b8e5dc72ede..ef08029a00793e5ba12c67551296781ec8e42c03 100644 (file)
@@ -111,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
        }
 }
 
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
 {
        struct request *rq = scsi_cmd_to_rq(cmd);
 
@@ -121,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
        } else {
                WARN_ON_ONCE(true);
        }
-       blk_mq_requeue_request(rq, true);
+
+       if (msecs) {
+               blk_mq_requeue_request(rq, false);
+               blk_mq_delay_kick_requeue_list(rq->q, msecs);
+       } else
+               blk_mq_requeue_request(rq, true);
 }
 
 /**
@@ -651,14 +656,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
        return bytes;
 }
 
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
-                                     struct request_queue *q)
-{
-       /* A new command will be prepared and issued. */
-       scsi_mq_requeue_cmd(cmd);
-}
-
 static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
 {
        struct request *req = scsi_cmd_to_rq(cmd);
@@ -676,14 +673,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
        return false;
 }
 
+/*
+ * When ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep
+ * 1 sec to avoid aggressive retries of the target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY   1000
+
 /* Helper for scsi_io_completion() when special action required. */
 static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 {
-       struct request_queue *q = cmd->device->request_queue;
        struct request *req = scsi_cmd_to_rq(cmd);
        int level = 0;
-       enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
-             ACTION_DELAYED_RETRY} action;
+       enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+             ACTION_RETRY, ACTION_DELAYED_RETRY} action;
        struct scsi_sense_hdr sshdr;
        bool sense_valid;
        bool sense_current = true;      /* false implies "deferred sense" */
@@ -772,8 +776,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                case 0x0a: /* ALUA state transition */
-                                       blk_stat = BLK_STS_TRANSPORT;
-                                       fallthrough;
+                                       action = ACTION_DELAYED_REPREP;
+                                       break;
                                default:
                                        action = ACTION_FAIL;
                                        break;
@@ -832,7 +836,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
                        return;
                fallthrough;
        case ACTION_REPREP:
-               scsi_io_completion_reprep(cmd, q);
+               scsi_mq_requeue_cmd(cmd, 0);
+               break;
+       case ACTION_DELAYED_REPREP:
+               scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
@@ -926,7 +933,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
  * command block will be released and the queue function will be goosed. If we
  * are not done then we have to figure out what to do next:
  *
- *   a) We can call scsi_io_completion_reprep().  The request will be
+ *   a) We can call scsi_mq_requeue_cmd().  The request will be
  *     unprepared and put back on the queue.  Then a new command will
  *     be created for it.  This should be used if we made forward
  *     progress, or if we want to switch from READ(10) to READ(6) for
@@ -942,7 +949,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
        int result = cmd->result;
-       struct request_queue *q = cmd->device->request_queue;
        struct request *req = scsi_cmd_to_rq(cmd);
        blk_status_t blk_stat = BLK_STS_OK;
 
@@ -979,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
         * request just queue the command up again.
         */
        if (likely(result == 0))
-               scsi_io_completion_reprep(cmd, q);
+               scsi_mq_requeue_cmd(cmd, 0);
        else
                scsi_io_completion_action(cmd, result);
 }
@@ -1542,7 +1548,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
        scsi_init_command(sdev, cmd);
 
        cmd->eh_eflags = 0;
-       cmd->allowed = 0;
        cmd->prot_type = 0;
        cmd->prot_flags = 0;
        cmd->submitter = 0;
@@ -1593,6 +1598,8 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
                        return ret;
        }
 
+       /* Usually overridden by the ULP */
+       cmd->allowed = 0;
        memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
        return scsi_cmd_to_driver(cmd)->init_command(cmd);
 }
index 8f79fa6318fec25bd7a50e5e9c3b19addc3361cd..eb76ba0550216d7d55d998e4609ed7ea5d4bdb9a 100644 (file)
@@ -103,7 +103,6 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
 static void sd_config_write_same(struct scsi_disk *);
 static int  sd_revalidate_disk(struct gendisk *);
 static void sd_unlock_native_capacity(struct gendisk *disk);
-static void sd_start_done_work(struct work_struct *work);
 static int  sd_probe(struct device *);
 static int  sd_remove(struct device *);
 static void sd_shutdown(struct device *);
@@ -3471,7 +3470,6 @@ static int sd_probe(struct device *dev)
        sdkp->max_retries = SD_MAX_RETRIES;
        atomic_set(&sdkp->openers, 0);
        atomic_set(&sdkp->device->ioerr_cnt, 0);
-       INIT_WORK(&sdkp->start_done_work, sd_start_done_work);
 
        if (!sdp->request_queue->rq_timeout) {
                if (sdp->type != TYPE_MOD)
@@ -3594,69 +3592,12 @@ static void scsi_disk_release(struct device *dev)
        kfree(sdkp);
 }
 
-/* Process sense data after a START command finished. */
-static void sd_start_done_work(struct work_struct *work)
-{
-       struct scsi_disk *sdkp = container_of(work, typeof(*sdkp),
-                                             start_done_work);
-       struct scsi_sense_hdr sshdr;
-       int res = sdkp->start_result;
-
-       if (res == 0)
-               return;
-
-       sd_print_result(sdkp, "Start/Stop Unit failed", res);
-
-       if (res < 0)
-               return;
-
-       if (scsi_normalize_sense(sdkp->start_sense_buffer,
-                                sdkp->start_sense_len, &sshdr))
-               sd_print_sense_hdr(sdkp, &sshdr);
-}
-
-/* A START command finished. May be called from interrupt context. */
-static void sd_start_done(struct request *req, blk_status_t status)
-{
-       const struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
-       struct scsi_disk *sdkp = scsi_disk(req->q->disk);
-
-       sdkp->start_result = scmd->result;
-       WARN_ON_ONCE(scmd->sense_len > SCSI_SENSE_BUFFERSIZE);
-       sdkp->start_sense_len = scmd->sense_len;
-       memcpy(sdkp->start_sense_buffer, scmd->sense_buffer,
-              ARRAY_SIZE(sdkp->start_sense_buffer));
-       WARN_ON_ONCE(!schedule_work(&sdkp->start_done_work));
-}
-
-/* Submit a START command asynchronously. */
-static int sd_submit_start(struct scsi_disk *sdkp, u8 cmd[], u8 cmd_len)
-{
-       struct scsi_device *sdev = sdkp->device;
-       struct request_queue *q = sdev->request_queue;
-       struct request *req;
-       struct scsi_cmnd *scmd;
-
-       req = scsi_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
-
-       scmd = blk_mq_rq_to_pdu(req);
-       scmd->cmd_len = cmd_len;
-       memcpy(scmd->cmnd, cmd, cmd_len);
-       scmd->allowed = sdkp->max_retries;
-       req->timeout = SD_TIMEOUT;
-       req->rq_flags |= RQF_PM | RQF_QUIET;
-       req->end_io = sd_start_done;
-       blk_execute_rq_nowait(req, /*at_head=*/true);
-
-       return 0;
-}
-
 static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
 {
        unsigned char cmd[6] = { START_STOP };  /* START_VALID */
+       struct scsi_sense_hdr sshdr;
        struct scsi_device *sdp = sdkp->device;
+       int res;
 
        if (start)
                cmd[4] |= 1;    /* START */
@@ -3667,10 +3608,23 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
        if (!scsi_device_online(sdp))
                return -ENODEV;
 
-       /* Wait until processing of sense data has finished. */
-       flush_work(&sdkp->start_done_work);
+       res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+                       SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
+       if (res) {
+               sd_print_result(sdkp, "Start/Stop Unit failed", res);
+               if (res > 0 && scsi_sense_valid(&sshdr)) {
+                       sd_print_sense_hdr(sdkp, &sshdr);
+                       /* 0x3a is medium not present */
+                       if (sshdr.asc == 0x3a)
+                               res = 0;
+               }
+       }
 
-       return sd_submit_start(sdkp, cmd, sizeof(cmd));
+       /* SCSI error codes must not go to the generic layer */
+       if (res)
+               return -EIO;
+
+       return 0;
 }
 
 /*
@@ -3697,8 +3651,6 @@ static void sd_shutdown(struct device *dev)
                sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
                sd_start_stop_device(sdkp, 0);
        }
-
-       flush_work(&sdkp->start_done_work);
 }
 
 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
index b89187761d61f33af307bc0a91b656b1df4373bc..5eea762f84d188e15b516b05f0f98ba3892209a5 100644 (file)
@@ -150,11 +150,6 @@ struct scsi_disk {
        unsigned        urswrz : 1;
        unsigned        security : 1;
        unsigned        ignore_medium_access_errors : 1;
-
-       int             start_result;
-       u32             start_sense_len;
-       u8              start_sense_buffer[SCSI_SENSE_BUFFERSIZE];
-       struct work_struct start_done_work;
 };
 #define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
 
index fe000da113327021c93f6c255acda683e86673af..8ced292c4b96236a96187bdd7e7fb20130f2925b 100644 (file)
@@ -2012,7 +2012,7 @@ static int storvsc_probe(struct hv_device *device,
         */
        host_dev->handle_error_wq =
                        alloc_ordered_workqueue("storvsc_error_wq_%d",
-                                               WQ_MEM_RECLAIM,
+                                               0,
                                                host->host_no);
        if (!host_dev->handle_error_wq) {
                ret = -ENOMEM;
index d6b30d521307db7762cd060e5a3378c38c30ac6f..775da69b8efa80a27753b41854c5d14875de6141 100644 (file)
@@ -684,13 +684,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
        const struct of_device_id *of_id = NULL;
        struct device_node *dn;
        void __iomem *base;
-       int ret, i;
+       int ret, i, s;
 
        /* AON ctrl registers */
        base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
        if (IS_ERR(base)) {
                pr_err("error mapping AON_CTRL\n");
-               return PTR_ERR(base);
+               ret = PTR_ERR(base);
+               goto aon_err;
        }
        ctrl.aon_ctrl_base = base;
 
@@ -700,8 +701,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
                /* Assume standard offset */
                ctrl.aon_sram = ctrl.aon_ctrl_base +
                                     AON_CTRL_SYSTEM_DATA_RAM_OFS;
+               s = 0;
        } else {
                ctrl.aon_sram = base;
+               s = 1;
        }
 
        writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -711,7 +714,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
                                     (const void **)&ddr_phy_data);
        if (IS_ERR(base)) {
                pr_err("error mapping DDR PHY\n");
-               return PTR_ERR(base);
+               ret = PTR_ERR(base);
+               goto ddr_phy_err;
        }
        ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
        ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -731,17 +735,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
        for_each_matching_node(dn, ddr_shimphy_dt_ids) {
                i = ctrl.num_memc;
                if (i >= MAX_NUM_MEMC) {
+                       of_node_put(dn);
                        pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
                        break;
                }
 
                base = of_io_request_and_map(dn, 0, dn->full_name);
                if (IS_ERR(base)) {
+                       of_node_put(dn);
                        if (!ctrl.support_warm_boot)
                                break;
 
                        pr_err("error mapping DDR SHIMPHY %d\n", i);
-                       return PTR_ERR(base);
+                       ret = PTR_ERR(base);
+                       goto ddr_shimphy_err;
                }
                ctrl.memcs[i].ddr_shimphy_base = base;
                ctrl.num_memc++;
@@ -752,14 +759,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
        for_each_matching_node(dn, brcmstb_memc_of_match) {
                base = of_iomap(dn, 0);
                if (!base) {
+                       of_node_put(dn);
                        pr_err("error mapping DDR Sequencer %d\n", i);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto brcmstb_memc_err;
                }
 
                of_id = of_match_node(brcmstb_memc_of_match, dn);
                if (!of_id) {
                        iounmap(base);
-                       return -EINVAL;
+                       of_node_put(dn);
+                       ret = -EINVAL;
+                       goto brcmstb_memc_err;
                }
 
                ddr_seq_data = of_id->data;
@@ -779,21 +790,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
        dn = of_find_matching_node(NULL, sram_dt_ids);
        if (!dn) {
                pr_err("SRAM not found\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto brcmstb_memc_err;
        }
 
        ret = brcmstb_init_sram(dn);
        of_node_put(dn);
        if (ret) {
                pr_err("error setting up SRAM for PM\n");
-               return ret;
+               goto brcmstb_memc_err;
        }
 
        ctrl.pdev = pdev;
 
        ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
-       if (!ctrl.s3_params)
-               return -ENOMEM;
+       if (!ctrl.s3_params) {
+               ret = -ENOMEM;
+               goto s3_params_err;
+       }
        ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
                                           sizeof(*ctrl.s3_params),
                                           DMA_TO_DEVICE);
@@ -813,7 +827,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
 
 out:
        kfree(ctrl.s3_params);
-
+s3_params_err:
+       iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+       for (i--; i >= 0; i--)
+               iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+       for (i = 0; i < ctrl.num_memc; i++)
+               iounmap(ctrl.memcs[i].ddr_shimphy_base);
+
+       iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+       iounmap(ctrl.aon_ctrl_base);
+       if (s)
+               iounmap(ctrl.aon_sram);
+aon_err:
        pr_warn("PM: initialization failed with code %d\n", ret);
 
        return ret;
index 07d52cafbb3133476bf1e338de898dee9a95b123..fcec6ed83d5e275342274a59db5f7ad48e0bbfb9 100644 (file)
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
         tristate "QorIQ DPAA2 DPIO driver"
         depends on FSL_MC_BUS
         select SOC_BUS
+        select FSL_GUTS
         select DIMLIB
         help
          Driver for the DPAA2 DPIO object.  A DPIO provides queue and
index 6383a4edc3607fe44018665f3baa08c5d7cbcaae..88aee59730e397eb86105a3bcfbd8072586bd9c3 100644 (file)
@@ -335,6 +335,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
                }
        }
 
+       reset_control_assert(domain->reset);
+
        /* Enable reset clocks for all devices in the domain */
        ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
        if (ret) {
@@ -342,7 +344,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
                goto out_regulator_disable;
        }
 
-       reset_control_assert(domain->reset);
+       /* delays for reset to propagate */
+       udelay(5);
 
        if (domain->bits.pxx) {
                /* request the domain to power up */
index dff7529268e4dcec93b2d12a61e125799fc96e1f..972f289d300a05fd07a7a4e4bfdd68cf3ec70dc1 100644 (file)
@@ -243,7 +243,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
                        ret = PTR_ERR(domain->power_dev);
                        goto cleanup_pds;
                }
-               dev_set_name(domain->power_dev, "%s", data->name);
 
                domain->genpd.name = data->name;
                domain->genpd.power_on = imx8m_blk_ctrl_power_on;
index 0bc7daa7afc83d0d20424800845bb16f1c2c473f..e4cb52e1fe2619d7699413d05d5dacc1cbf32a0e 100644 (file)
@@ -156,6 +156,7 @@ struct meson_spicc_device {
        void __iomem                    *base;
        struct clk                      *core;
        struct clk                      *pclk;
+       struct clk_divider              pow2_div;
        struct clk                      *clk;
        struct spi_message              *message;
        struct spi_transfer             *xfer;
@@ -168,6 +169,8 @@ struct meson_spicc_device {
        unsigned long                   xfer_remain;
 };
 
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
 static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
 {
        u32 conf;
@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
 {
        struct meson_spicc_device *spicc = spi_master_get_devdata(master);
        struct spi_device *spi = message->spi;
-       u32 conf = 0;
+       u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
 
        /* Store current message */
        spicc->message = message;
@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
        /* Select CS */
        conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
 
-       /* Default Clock rate core/4 */
-
        /* Default 8bit word */
        conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
 
@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
 static int meson_spicc_unprepare_transfer(struct spi_master *master)
 {
        struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+       u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
 
        /* Disable all IRQs */
        writel(0, spicc->base + SPICC_INTREG);
 
        device_reset_optional(&spicc->pdev->dev);
 
+       /* Set default configuration, keeping datarate field */
+       writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
        return 0;
 }
 
@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
  * Clk path for G12A series:
  *    pclk -> pow2 fixed div -> pow2 div -> mux -> out
  *    pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
  */
 
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+       if (!spicc->master->cur_msg || !spicc->master->busy)
+               return 0;
+
+       return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+                                          struct clk_rate_request *req)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+       if (!spicc->master->cur_msg || !spicc->master->busy)
+               return -EINVAL;
+
+       return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+       if (!spicc->master->cur_msg || !spicc->master->busy)
+               return -EINVAL;
+
+       return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops meson_spicc_pow2_clk_ops = {
+       .recalc_rate = meson_spicc_pow2_recalc_rate,
+       .determine_rate = meson_spicc_pow2_determine_rate,
+       .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
 {
        struct device *dev = &spicc->pdev->dev;
-       struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
-       struct clk_divider *pow2_div, *enh_div;
-       struct clk_mux *mux;
+       struct clk_fixed_factor *pow2_fixed_div;
        struct clk_init_data init;
        struct clk *clk;
        struct clk_parent_data parent_data[2];
@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
        if (WARN_ON(IS_ERR(clk)))
                return PTR_ERR(clk);
 
-       pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
-       if (!pow2_div)
-               return -ENOMEM;
-
        snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
        init.name = name;
-       init.ops = &clk_divider_ops;
-       init.flags = CLK_SET_RATE_PARENT;
+       init.ops = &meson_spicc_pow2_clk_ops;
+       /*
+        * Set NOCACHE here to make sure we read the actual HW value
+        * since we reset the HW after each transfer.
+        */
+       init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
        parent_data[0].hw = &pow2_fixed_div->hw;
        init.num_parents = 1;
 
-       pow2_div->shift = 16,
-       pow2_div->width = 3,
-       pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
-       pow2_div->reg = spicc->base + SPICC_CONREG;
-       pow2_div->hw.init = &init;
+       spicc->pow2_div.shift = 16,
+       spicc->pow2_div.width = 3,
+       spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+       spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+       spicc->pow2_div.hw.init = &init;
 
-       clk = devm_clk_register(dev, &pow2_div->hw);
-       if (WARN_ON(IS_ERR(clk)))
-               return PTR_ERR(clk);
+       spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+       if (WARN_ON(IS_ERR(spicc->clk)))
+               return PTR_ERR(spicc->clk);
 
-       if (!spicc->data->has_enhance_clk_div) {
-               spicc->clk = clk;
-               return 0;
-       }
+       return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+       struct device *dev = &spicc->pdev->dev;
+       struct clk_fixed_factor *enh_fixed_div;
+       struct clk_divider *enh_div;
+       struct clk_mux *mux;
+       struct clk_init_data init;
+       struct clk *clk;
+       struct clk_parent_data parent_data[2];
+       char name[64];
+
+       memset(&init, 0, sizeof(init));
+       memset(&parent_data, 0, sizeof(parent_data));
+
+       init.parent_data = parent_data;
 
        /* algorithm for enh div: rate = freq / 2 / (N + 1) */
 
@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
        snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
        init.name = name;
        init.ops = &clk_mux_ops;
-       parent_data[0].hw = &pow2_div->hw;
+       parent_data[0].hw = &spicc->pow2_div.hw;
        parent_data[1].hw = &enh_div->hw;
        init.num_parents = 2;
        init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
 
        meson_spicc_oen_enable(spicc);
 
-       ret = meson_spicc_clk_init(spicc);
+       ret = meson_spicc_pow2_clk_init(spicc);
        if (ret) {
-               dev_err(&pdev->dev, "clock registration failed\n");
+               dev_err(&pdev->dev, "pow2 clock registration failed\n");
                goto out_clk;
        }
 
+       if (spicc->data->has_enhance_clk_div) {
+               ret = meson_spicc_enh_clk_init(spicc);
+               if (ret) {
+                       dev_err(&pdev->dev, "clock registration failed\n");
+                       goto out_clk;
+               }
+       }
+
        ret = devm_spi_register_master(&pdev->dev, master);
        if (ret) {
                dev_err(&pdev->dev, "spi master registration failed\n");
index 8f97a3eacdeab01d738036593faee2ac039988e2..83da8862b8f22ee414bc7f92af8550285e43e4d1 100644 (file)
@@ -95,7 +95,7 @@ static ssize_t driver_override_show(struct device *dev,
 }
 static DEVICE_ATTR_RW(driver_override);
 
-static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev)
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
 {
        struct spi_statistics __percpu *pcpu_stats;
 
@@ -162,7 +162,7 @@ static struct device_attribute dev_attr_spi_device_##field = {              \
 }
 
 #define SPI_STATISTICS_SHOW_NAME(name, file, field)                    \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
                                            char *buf)                  \
 {                                                                      \
        ssize_t len;                                                    \
@@ -309,7 +309,7 @@ static const struct attribute_group *spi_master_groups[] = {
        NULL,
 };
 
-static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats,
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
                                              struct spi_transfer *xfer,
                                              struct spi_controller *ctlr)
 {
@@ -1275,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
                             struct spi_message *msg,
                             struct spi_transfer *xfer)
 {
-       struct spi_statistics *statm = ctlr->pcpu_statistics;
-       struct spi_statistics *stats = msg->spi->pcpu_statistics;
+       struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+       struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
        u32 speed_hz = xfer->speed_hz;
        unsigned long long ms;
 
@@ -1432,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
-       struct spi_statistics *statm = ctlr->pcpu_statistics;
-       struct spi_statistics *stats = msg->spi->pcpu_statistics;
+       struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+       struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
 
        spi_set_cs(msg->spi, true, false);
 
index f2b1bcefcadd7c513424d71880834805b7cf7cfd..27295bda3e0bd25ec08fcbdbedf165df220ac139 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
+#include <linux/uaccess.h>
 #include <linux/uio.h>
 #include "tee_private.h"
 
@@ -326,6 +327,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
        void *ret;
        int id;
 
+       if (!access_ok((void __user *)addr, length))
+               return ERR_PTR(-EFAULT);
+
        mutex_lock(&teedev->mutex);
        id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);
index 80d4e0676083ac56ade7bea96834203f1d95eed0..365489bf4b8c1b0870c4d36ec14158c10b7e5531 100644 (file)
@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
        priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
                                   obj->package.elements[0].buffer.length,
                                   GFP_KERNEL);
-       if (!priv->data_vault)
+       if (ZERO_OR_NULL_PTR(priv->data_vault))
                goto out_free;
 
        bin_attr_data_vault.private = priv->data_vault;
@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
                        goto free_imok;
        }
 
-       if (priv->data_vault) {
+       if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
                result = sysfs_create_group(&pdev->dev.kobj,
                                            &data_attribute_group);
                if (result)
@@ -615,7 +615,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 free_sysfs:
        cleanup_odvp(priv);
        if (priv->data_vault) {
-               sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+               if (!ZERO_OR_NULL_PTR(priv->data_vault))
+                       sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
                kfree(priv->data_vault);
        }
 free_uuid:
@@ -647,7 +648,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
        if (!priv->rel_misc_dev_res)
                acpi_thermal_rel_misc_device_remove(priv->adev->handle);
 
-       if (priv->data_vault)
+       if (!ZERO_OR_NULL_PTR(priv->data_vault))
                sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
        sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
        sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
index 6a5d0ae5d7a4c0c12ed980dd434e676878284025..50d50cec777403ff241082c50459785add7c0993 100644 (file)
@@ -1329,6 +1329,7 @@ free_tz:
        kfree(tz);
        return ERR_PTR(result);
 }
+EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
 
 struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
                                                         void *devdata, struct thermal_zone_device_ops *ops,
index 6bc679d22927998eacbd779d69d4cfa9a9f9b6c5..a202d7d5240d813bd92f31bc7524779d40d4c6b1 100644 (file)
@@ -8741,6 +8741,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
        struct scsi_device *sdp;
        unsigned long flags;
        int ret, retries;
+       unsigned long deadline;
+       int32_t remaining;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        sdp = hba->ufs_device_wlun;
@@ -8773,9 +8775,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
         * callbacks hence set the RQF_PM flag so that it doesn't resume the
         * already suspended childs.
         */
+       deadline = jiffies + 10 * HZ;
        for (retries = 3; retries > 0; --retries) {
+               ret = -ETIMEDOUT;
+               remaining = deadline - jiffies;
+               if (remaining <= 0)
+                       break;
                ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
-                               START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+                                  remaining / HZ, 0, 0, RQF_PM, NULL);
                if (!scsi_status_is_check_condition(ret) ||
                                !scsi_sense_valid(&sshdr) ||
                                sshdr.sense_key != UNIT_ATTENTION)
index eced9753808203f1258b89e4246f4431f6469698..c3628a8645a565a2e373a46d16bf2aa13c2cc012 100644 (file)
@@ -1711,7 +1711,7 @@ static struct exynos_ufs_uic_attr fsd_uic_attr = {
        .pa_dbg_option_suite            = 0x2E820183,
 };
 
-struct exynos_ufs_drv_data fsd_ufs_drvs = {
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
        .uic_attr               = &fsd_uic_attr,
        .quirks                 = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
                                  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
index bd4dc97d4d340245bc60acb4b33f0d85f3355418..db568f67e4dc2b191ad58e1b4296deb0971c4f60 100644 (file)
@@ -290,7 +290,7 @@ static char default_sti_path[21] __read_mostly;
 static int __init sti_setup(char *str)
 {
        if (str)
-               strlcpy (default_sti_path, str, sizeof (default_sti_path));
+               strscpy(default_sti_path, str, sizeof(default_sti_path));
        
        return 1;
 }
index a3e6faed7745a4b2c5ab877b152ecce97a777091..14eb718bd67c74d91da05c8052cb1060dc6e7bb6 100644 (file)
@@ -3891,7 +3891,7 @@ static int __init atyfb_setup(char *options)
                         && (!strncmp(this_opt, "Mach64:", 7))) {
                        static unsigned char m64_num;
                        static char mach64_str[80];
-                       strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
+                       strscpy(mach64_str, this_opt + 7, sizeof(mach64_str));
                        if (!store_video_par(mach64_str, m64_num)) {
                                m64_num++;
                                mach64_count = m64_num;
index 6851f47613e17e52e950ad0fd551d8d8b540a5b8..a14a8d73035c03ccb48db8bd3abe1136796fa3f1 100644 (file)
@@ -1980,7 +1980,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
        info->screen_base = rinfo->fb_base;
        info->screen_size = rinfo->mapped_vram;
        /* Fill fix common fields */
-       strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
+       strscpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
         info->fix.smem_start = rinfo->fb_base_phys;
         info->fix.smem_len = rinfo->video_ram;
         info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -2094,34 +2094,34 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
        u32 tmp;
 
        /* framebuffer size */
-        if ((rinfo->family == CHIP_FAMILY_RS100) ||
+       if ((rinfo->family == CHIP_FAMILY_RS100) ||
             (rinfo->family == CHIP_FAMILY_RS200) ||
             (rinfo->family == CHIP_FAMILY_RS300) ||
             (rinfo->family == CHIP_FAMILY_RC410) ||
             (rinfo->family == CHIP_FAMILY_RS400) ||
            (rinfo->family == CHIP_FAMILY_RS480) ) {
-          u32 tom = INREG(NB_TOM);
-          tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
-
-               radeon_fifo_wait(6);
-          OUTREG(MC_FB_LOCATION, tom);
-          OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
-          OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
-          OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
-
-          /* This is supposed to fix the crtc2 noise problem. */
-          OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
-
-          if ((rinfo->family == CHIP_FAMILY_RS100) ||
-              (rinfo->family == CHIP_FAMILY_RS200)) {
-             /* This is to workaround the asic bug for RMX, some versions
-                of BIOS doesn't have this register initialized correctly.
-             */
-             OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
-                     ~CRTC_H_CUTOFF_ACTIVE_EN);
-          }
-        } else {
-          tmp = INREG(CNFG_MEMSIZE);
+               u32 tom = INREG(NB_TOM);
+
+               tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+               radeon_fifo_wait(6);
+               OUTREG(MC_FB_LOCATION, tom);
+               OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+               OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+               OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
+
+               /* This is supposed to fix the crtc2 noise problem. */
+               OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
+
+               if ((rinfo->family == CHIP_FAMILY_RS100) ||
+                   (rinfo->family == CHIP_FAMILY_RS200)) {
+                       /* This is to workaround the asic bug for RMX, some versions
+                        * of BIOS doesn't have this register initialized correctly.
+                        */
+                       OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
+                               ~CRTC_H_CUTOFF_ACTIVE_EN);
+               }
+       } else {
+               tmp = INREG(CNFG_MEMSIZE);
         }
 
        /* mem size is bits [28:0], mask off the rest */
index e7702fe1fe7d76eca07d9e62738c385c03ac9382..6403ae07970d6cbd7110f7b781627d628e69fd83 100644 (file)
@@ -182,7 +182,7 @@ static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 
 static void bw2_init_fix(struct fb_info *info, int linebytes)
 {
-       strlcpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
+       strscpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
 
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_MONO01;
index 393894af26f849981e71f22b22923e49d6de8d57..2b00a9d554fc06a9c898b58a6882d3b1b5a86e48 100644 (file)
@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
  err_release_fb:
        framebuffer_release(p);
  err_disable:
+       pci_disable_device(dp);
  err_out:
        return rc;
 }
index a41a75841e10ef06f5d506da669c440a5b3257af..2a9fa06881b5eb8b359aa7a5b27feb173bf3ba81 100644 (file)
@@ -1999,7 +1999,7 @@ static int cirrusfb_set_fbinfo(struct fb_info *info)
        }
 
        /* Fill fix common fields */
-       strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+       strscpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
                sizeof(info->fix.id));
 
        /* monochrome: only 1 memory plane */
index 771ce1f769515554ee65604cd63e5fc66410916d..a1061c2f16406e2e610bd3991731fa4967962b18 100644 (file)
@@ -326,7 +326,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
        info->var.vmode = FB_VMODE_NONINTERLACED;
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.accel = FB_ACCEL_NONE;
-       strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
+       strscpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
        fb_videomode_to_var(&info->var, &cfb->mode);
 
        ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0);
index cf9ac4da0a82ceb70a2c171328b42f3f00f934ba..098b62f7b701e5815c67994424550416418e98be 100644 (file)
@@ -412,7 +412,7 @@ static int __init fb_console_setup(char *this_opt)
 
        while ((options = strsep(&this_opt, ",")) != NULL) {
                if (!strncmp(options, "font:", 5)) {
-                       strlcpy(fontname, options + 5, sizeof(fontname));
+                       strscpy(fontname, options + 5, sizeof(fontname));
                        continue;
                }
                
@@ -2401,15 +2401,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
        struct fb_info *info = fbcon_info_from_console(vc->vc_num);
        struct fbcon_ops *ops = info->fbcon_par;
        struct fbcon_display *p = &fb_display[vc->vc_num];
-       int resize;
+       int resize, ret, old_userfont, old_width, old_height, old_charcount;
        char *old_data = NULL;
 
        resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
        if (p->userfont)
                old_data = vc->vc_font.data;
        vc->vc_font.data = (void *)(p->fontdata = data);
+       old_userfont = p->userfont;
        if ((p->userfont = userfont))
                REFCOUNT(data)++;
+
+       old_width = vc->vc_font.width;
+       old_height = vc->vc_font.height;
+       old_charcount = vc->vc_font.charcount;
+
        vc->vc_font.width = w;
        vc->vc_font.height = h;
        vc->vc_font.charcount = charcount;
@@ -2425,7 +2431,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
                rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
                cols /= w;
                rows /= h;
-               vc_resize(vc, cols, rows);
+               ret = vc_resize(vc, cols, rows);
+               if (ret)
+                       goto err_out;
        } else if (con_is_visible(vc)
                   && vc->vc_mode == KD_TEXT) {
                fbcon_clear_margins(vc, 0);
@@ -2435,6 +2443,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
        if (old_data && (--REFCOUNT(old_data) == 0))
                kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
        return 0;
+
+err_out:
+       p->fontdata = old_data;
+       vc->vc_font.data = (void *)old_data;
+
+       if (userfont) {
+               p->userfont = old_userfont;
+               REFCOUNT(data)--;
+       }
+
+       vc->vc_font.width = old_width;
+       vc->vc_font.height = old_height;
+       vc->vc_font.charcount = old_charcount;
+
+       return ret;
 }
 
 /*
index c2a60b187467e5f3a1bda2892ed61903f31984ce..4d7f63892dcc4381a78433577cb62a06f305c06c 100644 (file)
@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
        if (WARN_ON(refcount_read(&info->count)))
                return;
 
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+       mutex_destroy(&info->bl_curve_mutex);
+#endif
+
        kfree(info->apertures);
        kfree(info);
 }
index d45355b9a58ca3aae87833054fef2386d7a7da85..8f041f9b14c7103d72b70b5ceca3560a91ccc995 100644 (file)
@@ -1134,7 +1134,7 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx)
                info->fb_size         = int_cfb_info->fb.fix.smem_len;
                info->info            = int_cfb_info;
 
-               strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
+               strscpy(info->dev_name, int_cfb_info->fb.fix.id,
                        sizeof(info->dev_name));
        }
 
@@ -1229,7 +1229,7 @@ static int cyber2000fb_ddc_getsda(void *data)
 
 static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
 {
-       strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+       strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
                sizeof(cfb->ddc_adapter.name));
        cfb->ddc_adapter.owner          = THIS_MODULE;
        cfb->ddc_adapter.class          = I2C_CLASS_DDC;
@@ -1304,7 +1304,7 @@ static int cyber2000fb_i2c_getscl(void *data)
 
 static int cyber2000fb_i2c_register(struct cfb_info *cfb)
 {
-       strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+       strscpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
                sizeof(cfb->i2c_adapter.name));
        cfb->i2c_adapter.owner = THIS_MODULE;
        cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
@@ -1500,7 +1500,7 @@ static int cyber2000fb_setup(char *options)
                if (strncmp(opt, "font:", 5) == 0) {
                        static char default_font_storage[40];
 
-                       strlcpy(default_font_storage, opt + 5,
+                       strscpy(default_font_storage, opt + 5,
                                sizeof(default_font_storage));
                        default_font = default_font_storage;
                        continue;
index b3d580e57221ebedd14e65bf9f68d1b84cc67a0a..7cba3969a9702ca9f82eef36fd7e12e176cc3164 100644 (file)
@@ -883,7 +883,7 @@ static void ffb_init_fix(struct fb_info *info)
        } else
                ffb_type_name = "Elite 3D";
 
-       strlcpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
+       strscpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
 
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_TRUECOLOR;
index 5d34d89fb665346e0688c380aff0854b0bd4db10..e41204ecb0e35e70a80daed7ce523a07045fb6ed 100644 (file)
@@ -410,13 +410,13 @@ static void __init gx1fb_setup(char *options)
                        continue;
 
                if (!strncmp(this_opt, "mode:", 5))
-                       strlcpy(mode_option, this_opt + 5, sizeof(mode_option));
+                       strscpy(mode_option, this_opt + 5, sizeof(mode_option));
                else if (!strncmp(this_opt, "crt:", 4))
                        crt_option = !!simple_strtoul(this_opt + 4, NULL, 0);
                else if (!strncmp(this_opt, "panel:", 6))
-                       strlcpy(panel_option, this_opt + 6, sizeof(panel_option));
+                       strscpy(panel_option, this_opt + 6, sizeof(panel_option));
                else
-                       strlcpy(mode_option, this_opt, sizeof(mode_option));
+                       strscpy(mode_option, this_opt, sizeof(mode_option));
        }
 }
 #endif
index e5475ae1e1587ef5cf7dbd2d504c4cd21e02a6f4..94588b809ebf84e12b2ad945f0df248d76428f40 100644 (file)
@@ -650,7 +650,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        cardtype = ent->driver_data;
        par->refclk_ps = cardinfo[cardtype].refclk_ps;
        info->fix = gxt4500_fix;
-       strlcpy(info->fix.id, cardinfo[cardtype].cardname,
+       strscpy(info->fix.id, cardinfo[cardtype].cardname,
                sizeof(info->fix.id));
        info->pseudo_palette = par->pseudo_palette;
 
index 7f09a0daaaa24dd7c6b37dcf219c0129eca3c407..bd30d8314b6874e424bfb1972c94d4a3b3517377 100644 (file)
@@ -159,7 +159,7 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
 {
        struct i740fb_par *par = info->par;
 
-       strlcpy(par->ddc_adapter.name, info->fix.id,
+       strscpy(par->ddc_adapter.name, info->fix.id,
                sizeof(par->ddc_adapter.name));
        par->ddc_adapter.owner          = THIS_MODULE;
        par->ddc_adapter.class          = I2C_CLASS_DDC;
index d97d7456d15a0b6eafd92be10787d5d5da436873..94f3bc637fc88558b90490a1c32cac1cd78c1120 100644 (file)
@@ -681,7 +681,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
 
        fbi->devtype = pdev->id_entry->driver_data;
 
-       strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
+       strscpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
 
        info->fix.type                  = FB_TYPE_PACKED_PIXELS;
        info->fix.type_aux              = 0;
index 236521b19daf7799f76a219585f827aeba385eaf..68bba2688f4c19aa68454416d71f7e38ec0a4713 100644 (file)
@@ -2383,9 +2383,9 @@ static int __init matroxfb_setup(char *options) {
                else if (!strncmp(this_opt, "mem:", 4))
                        mem = simple_strtoul(this_opt+4, NULL, 0);
                else if (!strncmp(this_opt, "mode:", 5))
-                       strlcpy(videomode, this_opt+5, sizeof(videomode));
+                       strscpy(videomode, this_opt + 5, sizeof(videomode));
                else if (!strncmp(this_opt, "outputs:", 8))
-                       strlcpy(outputs, this_opt+8, sizeof(outputs));
+                       strscpy(outputs, this_opt + 8, sizeof(outputs));
                else if (!strncmp(this_opt, "dfp:", 4)) {
                        dfp_type = simple_strtoul(this_opt+4, NULL, 0);
                        dfp = 1;
@@ -2455,7 +2455,7 @@ static int __init matroxfb_setup(char *options) {
                        else if (!strcmp(this_opt, "dfp"))
                                dfp = value;
                        else {
-                               strlcpy(videomode, this_opt, sizeof(videomode));
+                               strscpy(videomode, this_opt, sizeof(videomode));
                        }
                }
        }
index dfb4ddc45701eb0136086e9ec986db1ba20ac6e3..17cda576568382b041f379f91df5cb12504a0881 100644 (file)
@@ -1642,15 +1642,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
                goto cleanup;
        }
        fbdev->int_irq = platform_get_irq(pdev, 0);
-       if (!fbdev->int_irq) {
-               dev_err(&pdev->dev, "unable to get irq\n");
+       if (fbdev->int_irq < 0) {
                r = ENXIO;
                goto cleanup;
        }
 
        fbdev->ext_irq = platform_get_irq(pdev, 1);
-       if (!fbdev->ext_irq) {
-               dev_err(&pdev->dev, "unable to get irq\n");
+       if (fbdev->ext_irq < 0) {
                r = ENXIO;
                goto cleanup;
        }
index afa688e754b9592278728aa2c3bf7921bca64d5c..5ccddcfce7228c5c87b93d8903822abb1c0974e2 100644 (file)
@@ -1331,7 +1331,7 @@ static void clear_fb_info(struct fb_info *fbi)
 {
        memset(&fbi->var, 0, sizeof(fbi->var));
        memset(&fbi->fix, 0, sizeof(fbi->fix));
-       strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+       strscpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
 }
 
 static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
index d3be2c64f1c08dce2b1c10eb0be7ee397e030bff..8fd79deb1e2ae0fe65be8a74017cc3245efbffc3 100644 (file)
@@ -617,6 +617,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
                return -EINVAL;
        }
 
+       if (!var->pixclock) {
+               DPRINTK("pixclock is zero\n");
+               return -EINVAL;
+       }
+
        if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
                DPRINTK("pixclock too high (%ldKHz)\n",
                        PICOS2KHZ(var->pixclock));
index e943300d23e8ebd94eb09e39cde5d6df85a4c157..d5d0bbd39213bcd4c628d1bd6bec337b137a4841 100644 (file)
@@ -640,7 +640,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
                      FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
        info->node = -1;
-       strlcpy(info->fix.id, mi->id, 16);
+       strscpy(info->fix.id, mi->id, 16);
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 0;
index 66cfc3e9d3cfd73120b407e8dad6e13d3554834f..696ac54311809b2a2bbf50a06bba08db8b46e702 100644 (file)
@@ -2042,7 +2042,7 @@ static int __init pxafb_setup_options(void)
                return -ENODEV;
 
        if (options)
-               strlcpy(g_options, options, sizeof(g_options));
+               strscpy(g_options, options, sizeof(g_options));
 
        return 0;
 }
index 5069f6f67923f81532f5326532b473f4e9450958..67b63a753cb276e41215f74c2d59f256e9732525 100644 (file)
@@ -248,7 +248,7 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
 {
        struct s3fb_info *par = info->par;
 
-       strlcpy(par->ddc_adapter.name, info->fix.id,
+       strscpy(par->ddc_adapter.name, info->fix.id,
                sizeof(par->ddc_adapter.name));
        par->ddc_adapter.owner          = THIS_MODULE;
        par->ddc_adapter.class          = I2C_CLASS_DDC;
index cf2a90ecd64e0a8b4cd718e3068fe4b88deccbfa..e770b4a356b57cb0dd4cff8981ebca0f4d1ee332 100644 (file)
@@ -355,7 +355,7 @@ static int simplefb_regulators_get(struct simplefb_par *par,
                if (!p || p == prop->name)
                        continue;
 
-               strlcpy(name, prop->name,
+               strscpy(name, prop->name,
                        strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1);
                regulator = devm_regulator_get_optional(&pdev->dev, name);
                if (IS_ERR(regulator)) {
index f28fd69d5eb75919920465ae533ce532633dd7cf..c9e77429dfa304883cb6fbdbc3d77e45ef74ca44 100644 (file)
@@ -649,37 +649,37 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
        u16 xres=0, yres, myres;
 
 #ifdef CONFIG_FB_SIS_300
-       if(ivideo->sisvga_engine == SIS_300_VGA) {
-               if(!(sisbios_mode[myindex].chipset & MD_SIS300))
+       if (ivideo->sisvga_engine == SIS_300_VGA) {
+               if (!(sisbios_mode[myindex].chipset & MD_SIS300))
                        return -1 ;
        }
 #endif
 #ifdef CONFIG_FB_SIS_315
-       if(ivideo->sisvga_engine == SIS_315_VGA) {
-               if(!(sisbios_mode[myindex].chipset & MD_SIS315))
+       if (ivideo->sisvga_engine == SIS_315_VGA) {
+               if (!(sisbios_mode[myindex].chipset & MD_SIS315))
                        return -1;
        }
 #endif
 
        myres = sisbios_mode[myindex].yres;
 
-       switch(vbflags & VB_DISPTYPE_DISP2) {
+       switch (vbflags & VB_DISPTYPE_DISP2) {
 
        case CRT2_LCD:
                xres = ivideo->lcdxres; yres = ivideo->lcdyres;
 
-               if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
-                  (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
-                       if(sisbios_mode[myindex].xres > xres)
+               if ((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
+                   (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
+                       if (sisbios_mode[myindex].xres > xres)
                                return -1;
-                       if(myres > yres)
+                       if (myres > yres)
                                return -1;
                }
 
-               if(ivideo->sisfb_fstn) {
-                       if(sisbios_mode[myindex].xres == 320) {
-                               if(myres == 240) {
-                                       switch(sisbios_mode[myindex].mode_no[1]) {
+               if (ivideo->sisfb_fstn) {
+                       if (sisbios_mode[myindex].xres == 320) {
+                               if (myres == 240) {
+                                       switch (sisbios_mode[myindex].mode_no[1]) {
                                                case 0x50: myindex = MODE_FSTN_8;  break;
                                                case 0x56: myindex = MODE_FSTN_16; break;
                                                case 0x53: return -1;
@@ -688,7 +688,7 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
                        }
                }
 
-               if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+               if (SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
                                sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn,
                                ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) {
                        return -1;
@@ -696,14 +696,14 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
                break;
 
        case CRT2_TV:
-               if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+               if (SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
                                sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
                        return -1;
                }
                break;
 
        case CRT2_VGA:
-               if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+               if (SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
                                sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
                        return -1;
                }
@@ -1872,7 +1872,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
 
        memset(fix, 0, sizeof(struct fb_fix_screeninfo));
 
-       strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
+       strscpy(fix->id, ivideo->myid, sizeof(fix->id));
 
        mutex_lock(&info->mm_lock);
        fix->smem_start  = ivideo->video_base + ivideo->video_offset;
@@ -2204,82 +2204,88 @@ static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
 
 static void sisfb_sense_crt1(struct sis_video_info *ivideo)
 {
-    bool mustwait = false;
-    u8  sr1F, cr17;
+       bool mustwait = false;
+       u8  sr1F, cr17;
 #ifdef CONFIG_FB_SIS_315
-    u8  cr63=0;
+       u8  cr63 = 0;
 #endif
-    u16 temp = 0xffff;
-    int i;
+       u16 temp = 0xffff;
+       int i;
+
+       sr1F = SiS_GetReg(SISSR, 0x1F);
+       SiS_SetRegOR(SISSR, 0x1F, 0x04);
+       SiS_SetRegAND(SISSR, 0x1F, 0x3F);
 
-    sr1F = SiS_GetReg(SISSR, 0x1F);
-    SiS_SetRegOR(SISSR, 0x1F, 0x04);
-    SiS_SetRegAND(SISSR, 0x1F, 0x3F);
-    if(sr1F & 0xc0) mustwait = true;
+       if (sr1F & 0xc0)
+               mustwait = true;
 
 #ifdef CONFIG_FB_SIS_315
-    if(ivideo->sisvga_engine == SIS_315_VGA) {
-       cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
-       cr63 &= 0x40;
-       SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
-    }
+       if (ivideo->sisvga_engine == SIS_315_VGA) {
+               cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
+               cr63 &= 0x40;
+               SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
+       }
 #endif
 
-    cr17 = SiS_GetReg(SISCR, 0x17);
-    cr17 &= 0x80;
-    if(!cr17) {
-       SiS_SetRegOR(SISCR, 0x17, 0x80);
-       mustwait = true;
-       SiS_SetReg(SISSR, 0x00, 0x01);
-       SiS_SetReg(SISSR, 0x00, 0x03);
-    }
+       cr17 = SiS_GetReg(SISCR, 0x17);
+       cr17 &= 0x80;
 
-    if(mustwait) {
-       for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo);
-    }
+       if (!cr17) {
+               SiS_SetRegOR(SISCR, 0x17, 0x80);
+               mustwait = true;
+               SiS_SetReg(SISSR, 0x00, 0x01);
+               SiS_SetReg(SISSR, 0x00, 0x03);
+       }
 
+       if (mustwait) {
+               for (i = 0; i < 10; i++)
+                       sisfbwaitretracecrt1(ivideo);
+       }
 #ifdef CONFIG_FB_SIS_315
-    if(ivideo->chip >= SIS_330) {
-       SiS_SetRegAND(SISCR, 0x32, ~0x20);
-       if(ivideo->chip >= SIS_340) {
-          SiS_SetReg(SISCR, 0x57, 0x4a);
-       } else {
-          SiS_SetReg(SISCR, 0x57, 0x5f);
-       }
-       SiS_SetRegOR(SISCR, 0x53, 0x02);
-       while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)    break;
-       while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01)) break;
-       if ((SiS_GetRegByte(SISMISCW)) & 0x10) temp = 1;
-       SiS_SetRegAND(SISCR, 0x53, 0xfd);
-       SiS_SetRegAND(SISCR, 0x57, 0x00);
-    }
+       if (ivideo->chip >= SIS_330) {
+               SiS_SetRegAND(SISCR, 0x32, ~0x20);
+               if (ivideo->chip >= SIS_340)
+                       SiS_SetReg(SISCR, 0x57, 0x4a);
+               else
+                       SiS_SetReg(SISCR, 0x57, 0x5f);
+
+               SiS_SetRegOR(SISCR, 0x53, 0x02);
+               while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)
+                       break;
+               while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01))
+                       break;
+               if ((SiS_GetRegByte(SISMISCW)) & 0x10)
+                       temp = 1;
+
+               SiS_SetRegAND(SISCR, 0x53, 0xfd);
+               SiS_SetRegAND(SISCR, 0x57, 0x00);
+       }
 #endif
 
-    if(temp == 0xffff) {
-       i = 3;
-       do {
-         temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
-               ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
-       } while(((temp == 0) || (temp == 0xffff)) && i--);
+       if (temp == 0xffff) {
+               i = 3;
 
-       if((temp == 0) || (temp == 0xffff)) {
-          if(sisfb_test_DDC1(ivideo)) temp = 1;
-       }
-    }
+               do {
+                       temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
+                       ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
+               } while (((temp == 0) || (temp == 0xffff)) && i--);
 
-    if((temp) && (temp != 0xffff)) {
-       SiS_SetRegOR(SISCR, 0x32, 0x20);
-    }
+               if ((temp == 0) || (temp == 0xffff)) {
+                       if (sisfb_test_DDC1(ivideo))
+                               temp = 1;
+               }
+       }
+
+       if ((temp) && (temp != 0xffff))
+               SiS_SetRegOR(SISCR, 0x32, 0x20);
 
 #ifdef CONFIG_FB_SIS_315
-    if(ivideo->sisvga_engine == SIS_315_VGA) {
-       SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
-    }
+       if (ivideo->sisvga_engine == SIS_315_VGA)
+               SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
 #endif
 
-    SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
-
-    SiS_SetReg(SISSR, 0x1F, sr1F);
+       SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
+       SiS_SetReg(SISSR, 0x1F, sr1F);
 }
 
 /* Determine and detect attached devices on SiS30x */
@@ -2293,25 +2299,25 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
        ivideo->SiS_Pr.PanelSelfDetected = false;
 
        /* LCD detection only for TMDS bridges */
-       if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
+       if (!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
                return;
-       if(ivideo->vbflags2 & VB2_30xBDH)
+       if (ivideo->vbflags2 & VB2_30xBDH)
                return;
 
        /* If LCD already set up by BIOS, skip it */
        reg = SiS_GetReg(SISCR, 0x32);
-       if(reg & 0x08)
+       if (reg & 0x08)
                return;
 
        realcrtno = 1;
-       if(ivideo->SiS_Pr.DDCPortMixup)
+       if (ivideo->SiS_Pr.DDCPortMixup)
                realcrtno = 0;
 
        /* Check DDC capabilities */
        temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
                                realcrtno, 0, &buffer[0], ivideo->vbflags2);
 
-       if((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
+       if ((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
                return;
 
        /* Read DDC data */
@@ -2320,17 +2326,17 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
                temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
                                ivideo->sisvga_engine, realcrtno, 1,
                                &buffer[0], ivideo->vbflags2);
-       } while((temp) && i--);
+       } while ((temp) && i--);
 
-       if(temp)
+       if (temp)
                return;
 
        /* No digital device */
-       if(!(buffer[0x14] & 0x80))
+       if (!(buffer[0x14] & 0x80))
                return;
 
        /* First detailed timing preferred timing? */
-       if(!(buffer[0x18] & 0x02))
+       if (!(buffer[0x18] & 0x02))
                return;
 
        xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);
@@ -2338,26 +2344,26 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
 
        switch(xres) {
                case 1024:
-                       if(yres == 768)
+                       if (yres == 768)
                                paneltype = 0x02;
                        break;
                case 1280:
-                       if(yres == 1024)
+                       if (yres == 1024)
                                paneltype = 0x03;
                        break;
                case 1600:
-                       if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
+                       if ((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
                                paneltype = 0x0b;
                        break;
        }
 
-       if(!paneltype)
+       if (!paneltype)
                return;
 
-       if(buffer[0x23])
+       if (buffer[0x23])
                cr37 |= 0x10;
 
-       if((buffer[0x47] & 0x18) == 0x18)
+       if ((buffer[0x47] & 0x18) == 0x18)
                cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20);
        else
                cr37 |= 0xc0;
@@ -2372,31 +2378,34 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
 
 static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
 {
-    int temp, mytest, result, i, j;
-
-    for(j = 0; j < 10; j++) {
-       result = 0;
-       for(i = 0; i < 3; i++) {
-          mytest = test;
-          SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
-          temp = (type >> 8) | (mytest & 0x00ff);
-         SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
-          SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
-          mytest >>= 8;
-          mytest &= 0x7f;
-          temp = SiS_GetReg(SISPART4, 0x03);
-          temp ^= 0x0e;
-          temp &= mytest;
-          if(temp == mytest) result++;
+       int temp, mytest, result, i, j;
+
+       for (j = 0; j < 10; j++) {
+               result = 0;
+               for (i = 0; i < 3; i++) {
+                       mytest = test;
+                       SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
+                       temp = (type >> 8) | (mytest & 0x00ff);
+                       SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
+                       SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
+                       mytest >>= 8;
+                       mytest &= 0x7f;
+                       temp = SiS_GetReg(SISPART4, 0x03);
+                       temp ^= 0x0e;
+                       temp &= mytest;
+                       if (temp == mytest)
+                               result++;
 #if 1
-         SiS_SetReg(SISPART4, 0x11, 0x00);
-         SiS_SetRegAND(SISPART4, 0x10, 0xe0);
-         SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
+                       SiS_SetReg(SISPART4, 0x11, 0x00);
+                       SiS_SetRegAND(SISPART4, 0x10, 0xe0);
+                       SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
 #endif
-       }
-       if((result == 0) || (result >= 2)) break;
-    }
-    return result;
+               }
+
+               if ((result == 0) || (result >= 2))
+                       break;
+       }
+       return result;
 }
 
 static void SiS_Sense30x(struct sis_video_info *ivideo)
@@ -4262,18 +4271,17 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
        unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
        unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
 
-        for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
-
+       for (k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
                RankCapacity = buswidth * SiS_DRAMType[k][3];
 
-               if(RankCapacity != PseudoRankCapacity)
+               if (RankCapacity != PseudoRankCapacity)
                        continue;
 
-               if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
+               if ((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
                        continue;
 
                BankNumHigh = RankCapacity * 16 * iteration - 1;
-               if(iteration == 3) {             /* Rank No */
+               if (iteration == 3) {             /* Rank No */
                        BankNumMid  = RankCapacity * 16 - 1;
                } else {
                        BankNumMid  = RankCapacity * 16 * iteration / 2 - 1;
@@ -4287,18 +4295,22 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
                SiS_SetRegAND(SISSR, 0x15, 0xFB); /* Test */
                SiS_SetRegOR(SISSR, 0x15, 0x04);  /* Test */
                sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;
-               if(buswidth == 4)      sr14 |= 0x80;
-               else if(buswidth == 2) sr14 |= 0x40;
+
+               if (buswidth == 4)
+                       sr14 |= 0x80;
+               else if (buswidth == 2)
+                       sr14 |= 0x40;
+
                SiS_SetReg(SISSR, 0x13, SiS_DRAMType[k][4]);
                SiS_SetReg(SISSR, 0x14, sr14);
 
                BankNumHigh <<= 16;
                BankNumMid <<= 16;
 
-               if((BankNumHigh + PhysicalAdrHigh      >= mapsize) ||
-                  (BankNumMid  + PhysicalAdrHigh      >= mapsize) ||
-                  (BankNumHigh + PhysicalAdrHalfPage  >= mapsize) ||
-                  (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
+               if ((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
+                   (BankNumMid  + PhysicalAdrHigh >= mapsize) ||
+                   (BankNumHigh + PhysicalAdrHalfPage  >= mapsize) ||
+                   (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
                        continue;
 
                /* Write data */
@@ -4312,7 +4324,7 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
                                (FBAddr + BankNumHigh + PhysicalAdrOtherPage));
 
                /* Read data */
-               if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
+               if (readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
                        return 1;
        }
 
@@ -5867,7 +5879,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        ivideo->cardnumber++;
        }
 
-       strlcpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
+       strscpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
 
        ivideo->warncount = 0;
        ivideo->chip_id = pdev->device;
@@ -6150,24 +6162,20 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
 #ifdef CONFIG_FB_SIS_315
-               if(ivideo->sisvga_engine == SIS_315_VGA) {
+               if (ivideo->sisvga_engine == SIS_315_VGA) {
                        int result = 1;
-               /*      if((ivideo->chip == SIS_315H)   ||
-                          (ivideo->chip == SIS_315)    ||
-                          (ivideo->chip == SIS_315PRO) ||
-                          (ivideo->chip == SIS_330)) {
-                               sisfb_post_sis315330(pdev);
-                       } else */ if(ivideo->chip == XGI_20) {
+
+                       if (ivideo->chip == XGI_20) {
                                result = sisfb_post_xgi(pdev);
                                ivideo->sisfb_can_post = 1;
-                       } else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
+                       } else if ((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
                                result = sisfb_post_xgi(pdev);
                                ivideo->sisfb_can_post = 1;
                        } else {
                                printk(KERN_INFO "sisfb: Card is not "
                                        "POSTed and sisfb can't do this either.\n");
                        }
-                       if(!result) {
+                       if (!result) {
                                printk(KERN_ERR "sisfb: Failed to POST card\n");
                                ret = -ENODEV;
                                goto error_3;
index 6a52eba645596a4b3abab5f16340d3be5e0c8ef1..fce6cfbadfd60e30e7c28c4afe13267e866c116f 100644 (file)
@@ -1719,7 +1719,7 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
                enable = 0;
        }
 
-       strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id));
+       strscpy(fb->fix.id, fbname, sizeof(fb->fix.id));
 
        memcpy(&par->ops,
               (head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
index 5c765655d000a79fdf502a114798834d12e27dde..52e4ed9da78cd3a18b4a26d5f975ad0f94f4abcf 100644 (file)
@@ -450,7 +450,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
        if (ret < 0)
                return ret;
 
-       /* Set Set Area Color Mode ON/OFF & Low Power Display Mode */
+       /* Set Area Color Mode ON/OFF & Low Power Display Mode */
        if (par->area_color_enable || par->low_power) {
                u32 mode;
 
index 27d4b0ace2d61bcf2b3ec84dd081535449eee92f..cd4d640f94779183849baf6eea3faa99496d87ef 100644 (file)
@@ -1382,7 +1382,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto fail;
        }
        sst_get_memsize(info, &fix->smem_len);
-       strlcpy(fix->id, spec->name, sizeof(fix->id));
+       strscpy(fix->id, spec->name, sizeof(fix->id));
 
        printk(KERN_INFO "%s (revision %d) with %s dac\n",
                fix->id, par->revision, par->dac_sw.name);
index 15b079505a0006409bffd8d950b0a0ddf47f41f2..490bd9a147638e6514cdc5fe23a8afacce2a5d3d 100644 (file)
@@ -80,7 +80,7 @@ static int gfb_set_fbinfo(struct gfb_info *gp)
        info->pseudo_palette = gp->pseudo_palette;
 
        /* Fill fix common fields */
-       strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
+       strscpy(info->fix.id, "gfb", sizeof(info->fix.id));
         info->fix.smem_start = gp->fb_base_phys;
         info->fix.smem_len = gp->fb_size;
         info->fix.type = FB_TYPE_PACKED_PIXELS;
index 1d3bacd9d5acd80f916829b5a83970b1fd8993aa..1279b02234f8708e2b40aded81a02b5d24d41626 100644 (file)
@@ -84,7 +84,7 @@ static int s3d_set_fbinfo(struct s3d_info *sp)
        info->pseudo_palette = sp->pseudo_palette;
 
        /* Fill fix common fields */
-       strlcpy(info->fix.id, "s3d", sizeof(info->fix.id));
+       strscpy(info->fix.id, "s3d", sizeof(info->fix.id));
         info->fix.smem_start = sp->fb_base_phys;
         info->fix.smem_len = sp->fb_size;
         info->fix.type = FB_TYPE_PACKED_PIXELS;
index 9daf17b111065761d456f1b0dca70ff4618eeff4..f7b463633ba0574dda8988c62f5fd72856711af9 100644 (file)
@@ -207,7 +207,7 @@ static int e3d_set_fbinfo(struct e3d_info *ep)
        info->pseudo_palette = ep->pseudo_palette;
 
        /* Fill fix common fields */
-       strlcpy(info->fix.id, "e3d", sizeof(info->fix.id));
+       strscpy(info->fix.id, "e3d", sizeof(info->fix.id));
         info->fix.smem_start = ep->fb_base_phys;
         info->fix.smem_len = ep->fb_size;
         info->fix.type = FB_TYPE_PACKED_PIXELS;
index 1638a40fed2254c74b2807812317942c182dcf2f..01d87f53324d985452afb511f343d16ab67ec0dd 100644 (file)
@@ -333,7 +333,7 @@ tcx_init_fix(struct fb_info *info, int linebytes)
        else
                tcx_name = "TCX24";
 
-       strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id));
+       strscpy(info->fix.id, tcx_name, sizeof(info->fix.id));
 
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
index 67e37a62b07c3fec02ac9b4518453d87c2ef746f..8a8122f8bfeb39c6aef7c7e05a109ca4ac7890cf 100644 (file)
@@ -1264,7 +1264,7 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
 {
        int rc;
 
-       strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+       strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
        chan->adapter.owner             = THIS_MODULE;
        chan->adapter.class             = I2C_CLASS_DDC;
        chan->adapter.algo_data         = &chan->algo;
@@ -1293,7 +1293,7 @@ static int tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan, const char *name,
 {
        int rc;
 
-       strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+       strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
        chan->adapter.owner             = THIS_MODULE;
        chan->adapter.algo_data         = &chan->algo;
        chan->adapter.dev.parent        = dev;
index ae0cf55406369c44c22865ab8afdf41d13c4ae4f..1fff5fd7ab51201669f7987e0b5aa80daf5dc872 100644 (file)
@@ -1344,7 +1344,7 @@ tgafb_init_fix(struct fb_info *info)
                memory_size = 16777216;
        }
 
-       strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
+       strscpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
 
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.type_aux = 0;
index 319131bd72cffa11a965f69588102012942d40fd..cda095420ee8546c8fc97133ee59bab33516eb6f 100644 (file)
@@ -270,7 +270,7 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
 {
        struct tridentfb_par *par = info->par;
 
-       strlcpy(par->ddc_adapter.name, info->fix.id,
+       strscpy(par->ddc_adapter.name, info->fix.id,
                sizeof(par->ddc_adapter.name));
        par->ddc_adapter.owner          = THIS_MODULE;
        par->ddc_adapter.class          = I2C_CLASS_DDC;
index c492a57531c613f6923a3751880e40ef34481d1c..3ff746e3f24aa0306b4ce715acbd3980d6e59908 100644 (file)
@@ -360,7 +360,7 @@ static void vm_synchronize_cbs(struct virtio_device *vdev)
 
 static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
                                  void (*callback)(struct virtqueue *vq),
-                                 const char *name, u32 size, bool ctx)
+                                 const char *name, bool ctx)
 {
        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
        struct virtio_mmio_vq_info *info;
@@ -395,11 +395,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
                goto error_new_virtqueue;
        }
 
-       if (!size || size > num)
-               size = num;
-
        /* Create the vring */
-       vq = vring_create_virtqueue(index, size, VIRTIO_MMIO_VRING_ALIGN, vdev,
+       vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
                                 true, true, ctx, vm_notify, callback, name);
        if (!vq) {
                err = -ENOMEM;
@@ -477,7 +474,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
                       const char * const names[],
-                      u32 sizes[],
                       const bool *ctx,
                       struct irq_affinity *desc)
 {
@@ -503,7 +499,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                }
 
                vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
-                                    sizes ? sizes[i] : 0,
                                     ctx ? ctx[i] : false);
                if (IS_ERR(vqs[i])) {
                        vm_del_vqs(vdev);
index 00ad476a815d7254109cbc4e08785ff64db135d0..ad258a9d3b9f453862e454741e099b6ae750a8a3 100644 (file)
@@ -174,7 +174,6 @@ error:
 static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
-                                    u32 size,
                                     bool ctx,
                                     u16 msix_vec)
 {
@@ -187,7 +186,7 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
        if (!info)
                return ERR_PTR(-ENOMEM);
 
-       vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, size, ctx,
+       vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
                              msix_vec);
        if (IS_ERR(vq))
                goto out_info;
@@ -284,7 +283,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
-               const char * const names[], u32 sizes[], bool per_vq_vectors,
+               const char * const names[], bool per_vq_vectors,
                const bool *ctx,
                struct irq_affinity *desc)
 {
@@ -327,8 +326,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
                else
                        msix_vec = VP_MSIX_VQ_VECTOR;
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
-                                    sizes ? sizes[i] : 0,
-                                    ctx ? ctx[i] : false, msix_vec);
+                                    ctx ? ctx[i] : false,
+                                    msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error_find;
@@ -358,7 +357,7 @@ error_find:
 
 static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
-               const char * const names[], u32 sizes[], const bool *ctx)
+               const char * const names[], const bool *ctx)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err, queue_idx = 0;
@@ -380,7 +379,6 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
                        continue;
                }
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
-                                    sizes ? sizes[i] : 0,
                                     ctx ? ctx[i] : false,
                                     VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
@@ -398,21 +396,21 @@ out_del_vqs:
 /* the config->find_vqs() implementation */
 int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
-               const char * const names[], u32 sizes[], const bool *ctx,
+               const char * const names[], const bool *ctx,
                struct irq_affinity *desc)
 {
        int err;
 
        /* Try MSI-X with one vector per queue. */
-       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, true, ctx, desc);
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
        if (!err)
                return 0;
        /* Fallback: MSI-X with one vector for config, one shared for queues. */
-       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, false, ctx, desc);
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
        if (!err)
                return 0;
        /* Finally fall back to regular interrupts. */
-       return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, sizes, ctx);
+       return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
 }
 
 const char *vp_bus_name(struct virtio_device *vdev)
index c0448378b698623bfebeece90decfd76140eaafe..23112d84218fbd4f283c69319a1a0039fadcc9a8 100644 (file)
@@ -80,7 +80,6 @@ struct virtio_pci_device {
                                      unsigned int idx,
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name,
-                                     u32 size,
                                      bool ctx,
                                      u16 msix_vec);
        void (*del_vq)(struct virtio_pci_vq_info *info);
@@ -111,7 +110,7 @@ void vp_del_vqs(struct virtio_device *vdev);
 /* the config->find_vqs() implementation */
 int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
-               const char * const names[], u32 sizes[], const bool *ctx,
+               const char * const names[], const bool *ctx,
                struct irq_affinity *desc);
 const char *vp_bus_name(struct virtio_device *vdev);
 
index d75e5c4e637fc4979d506e3f372202223be11dba..2257f1b3d8ae1b5561b154955ce860d17c176944 100644 (file)
@@ -112,7 +112,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  unsigned int index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
-                                 u32 size,
                                  bool ctx,
                                  u16 msix_vec)
 {
@@ -126,13 +125,10 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        if (!num || vp_legacy_get_queue_enable(&vp_dev->ldev, index))
                return ERR_PTR(-ENOENT);
 
-       if (!size || size > num)
-               size = num;
-
        info->msix_vector = msix_vec;
 
        /* create the vring */
-       vq = vring_create_virtqueue(index, size,
+       vq = vring_create_virtqueue(index, num,
                                    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
                                    true, false, ctx,
                                    vp_notify, callback, name);
index f7965c5dd36b00218ffc9b4ce16b94e9f8399c71..c3b9f27618497a54a13b367445542ae163961918 100644 (file)
@@ -293,7 +293,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  unsigned int index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
-                                 u32 size,
                                  bool ctx,
                                  u16 msix_vec)
 {
@@ -311,18 +310,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        if (!num || vp_modern_get_queue_enable(mdev, index))
                return ERR_PTR(-ENOENT);
 
-       if (!size || size > num)
-               size = num;
-
-       if (size & (size - 1)) {
-               dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", size);
+       if (num & (num - 1)) {
+               dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
                return ERR_PTR(-EINVAL);
        }
 
        info->msix_vector = msix_vec;
 
        /* create the vring */
-       vq = vring_create_virtqueue(index, size,
+       vq = vring_create_virtqueue(index, num,
                                    SMP_CACHE_BYTES, &vp_dev->vdev,
                                    true, true, ctx,
                                    vp_notify, callback, name);
@@ -351,15 +347,12 @@ err:
 static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
-                             const char * const names[],
-                             u32 sizes[],
-                             const bool *ctx,
+                             const char * const names[], const bool *ctx,
                              struct irq_affinity *desc)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq;
-       int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, sizes, ctx,
-                            desc);
+       int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
 
        if (rc)
                return rc;
index d66c8e6d0ef313f93663bfc420603034c03d8b58..4620e9d79dde8cebff063b473cd59d2641f9f15b 100644 (file)
@@ -2426,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
        return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
 }
 
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
index 9bc4d110b80003ce912ba5888ffebad177b3ddff..9670cc79371d870c724d560ce2494584ad017883 100644 (file)
@@ -131,7 +131,7 @@ static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
 static struct virtqueue *
 virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
                     void (*callback)(struct virtqueue *vq),
-                    const char *name, u32 size, bool ctx)
+                    const char *name, bool ctx)
 {
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
@@ -168,17 +168,14 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
                goto error_new_virtqueue;
        }
 
-       if (!size || size > max_num)
-               size = max_num;
-
        if (ops->get_vq_num_min)
                min_num = ops->get_vq_num_min(vdpa);
 
-       may_reduce_num = (size == min_num) ? false : true;
+       may_reduce_num = (max_num == min_num) ? false : true;
 
        /* Create the vring */
        align = ops->get_vq_align(vdpa);
-       vq = vring_create_virtqueue(index, size, align, vdev,
+       vq = vring_create_virtqueue(index, max_num, align, vdev,
                                    true, may_reduce_num, ctx,
                                    virtio_vdpa_notify, callback, name);
        if (!vq) {
@@ -272,7 +269,6 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                                struct virtqueue *vqs[],
                                vq_callback_t *callbacks[],
                                const char * const names[],
-                               u32 sizes[],
                                const bool *ctx,
                                struct irq_affinity *desc)
 {
@@ -288,9 +284,9 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                        continue;
                }
 
-               vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, callbacks[i],
-                                                 names[i], sizes ? sizes[i] : 0,
-                                                 ctx ? ctx[i] : false);
+               vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
+                                             callbacks[i], names[i], ctx ?
+                                             ctx[i] : false);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto err_setup_vq;
index 3369734108af23724e728531ce4024ea752b7fbc..e88e8f6f0a334ade815c95b423f380b7ff3d057a 100644 (file)
@@ -581,27 +581,30 @@ static int lock_pages(
        struct privcmd_dm_op_buf kbufs[], unsigned int num,
        struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
 {
-       unsigned int i;
+       unsigned int i, off = 0;
 
-       for (i = 0; i < num; i++) {
+       for (i = 0; i < num; ) {
                unsigned int requested;
                int page_count;
 
                requested = DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
-                       PAGE_SIZE);
+                       PAGE_SIZE) - off;
                if (requested > nr_pages)
                        return -ENOSPC;
 
                page_count = pin_user_pages_fast(
-                       (unsigned long) kbufs[i].uptr,
+                       (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
                        requested, FOLL_WRITE, pages);
-               if (page_count < 0)
-                       return page_count;
+               if (page_count <= 0)
+                       return page_count ? : -EFAULT;
 
                *pinned += page_count;
                nr_pages -= page_count;
                pages += page_count;
+
+               off = (requested == page_count) ? 0 : off + page_count;
+               i += !off;
        }
 
        return 0;
@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
        }
 
        rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
-       if (rc < 0) {
-               nr_pages = pinned;
+       if (rc < 0)
                goto out;
-       }
 
        for (i = 0; i < kdata.num; i++) {
                set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
        xen_preemptible_hcall_end();
 
 out:
-       unlock_pages(pages, nr_pages);
+       unlock_pages(pages, pinned);
        kfree(xbufs);
        kfree(pages);
        kfree(kbufs);
index 7a0c93acc2c5760335c072d929c7d59d4102b357..d3dcda3449892aadfc57b42c48daf48cb9b6d6ad 100644 (file)
@@ -1121,7 +1121,7 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
                                "%s: writing %s", __func__, state);
                return;
        }
-       strlcpy(phy, val, VSCSI_NAMELEN);
+       strscpy(phy, val, VSCSI_NAMELEN);
        kfree(val);
 
        /* virtual SCSI device */
index 07b010a68fcf9cd5e6bca1e4ec47d8487013e227..f44d5a64351e4ab0d081234b94c7a288b2f4bd58 100644 (file)
@@ -40,7 +40,7 @@ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
                return -EINVAL;
        }
 
-       strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+       strscpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
        if (!strchr(bus_id, '/')) {
                pr_warn("bus_id %s no slash\n", bus_id);
                return -EINVAL;
index c3aecfb0a71d2b199800f8f97ebb4a7bf646306a..e0375ba9d0fedf654023b41f9405502c8079ae5d 100644 (file)
@@ -440,39 +440,26 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
        btrfs_put_caching_control(caching_ctl);
 }
 
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
+static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
+                                      struct btrfs_caching_control *caching_ctl)
+{
+       wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
+       return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
+}
+
+static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
 {
        struct btrfs_caching_control *caching_ctl;
-       int ret = 0;
+       int ret;
 
        caching_ctl = btrfs_get_caching_control(cache);
        if (!caching_ctl)
                return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
-
-       wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
-       if (cache->cached == BTRFS_CACHE_ERROR)
-               ret = -EIO;
+       ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
        btrfs_put_caching_control(caching_ctl);
        return ret;
 }
 
-static bool space_cache_v1_done(struct btrfs_block_group *cache)
-{
-       bool ret;
-
-       spin_lock(&cache->lock);
-       ret = cache->cached != BTRFS_CACHE_FAST;
-       spin_unlock(&cache->lock);
-
-       return ret;
-}
-
-void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
-                               struct btrfs_caching_control *caching_ctl)
-{
-       wait_event(caching_ctl->wait, space_cache_v1_done(cache));
-}
-
 #ifdef CONFIG_BTRFS_DEBUG
 static void fragment_free_space(struct btrfs_block_group *block_group)
 {
@@ -750,9 +737,8 @@ done:
        btrfs_put_block_group(block_group);
 }
 
-int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
+int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
 {
-       DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl = NULL;
        int ret = 0;
@@ -785,10 +771,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
-       if (btrfs_test_opt(fs_info, SPACE_CACHE))
-               cache->cached = BTRFS_CACHE_FAST;
-       else
-               cache->cached = BTRFS_CACHE_STARTED;
+       cache->cached = BTRFS_CACHE_STARTED;
        cache->has_caching_ctl = 1;
        spin_unlock(&cache->lock);
 
@@ -801,8 +784,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
 
        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
 out:
-       if (load_cache_only && caching_ctl)
-               btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
+       if (wait && caching_ctl)
+               ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
        if (caching_ctl)
                btrfs_put_caching_control(caching_ctl);
 
@@ -1640,9 +1623,11 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
                                div64_u64(zone_unusable * 100, bg->length));
                trace_btrfs_reclaim_block_group(bg);
                ret = btrfs_relocate_chunk(fs_info, bg->start);
-               if (ret)
+               if (ret) {
+                       btrfs_dec_block_group_ro(bg);
                        btrfs_err(fs_info, "error relocating chunk %llu",
                                  bg->start);
+               }
 
 next:
                btrfs_put_block_group(bg);
@@ -3310,7 +3295,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
                 * space back to the block group, otherwise we will leak space.
                 */
                if (!alloc && !btrfs_block_group_done(cache))
-                       btrfs_cache_block_group(cache, 1);
+                       btrfs_cache_block_group(cache, true);
 
                byte_in_group = bytenr - cache->start;
                WARN_ON(byte_in_group > cache->length);
index 35e0e860cc0bf161b630043444cea767decc2bcf..6b3cdc4cbc41e64892165ba788d9c23f80af2658 100644 (file)
@@ -263,9 +263,7 @@ void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
                                           u64 num_bytes);
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
-int btrfs_cache_block_group(struct btrfs_block_group *cache,
-                           int load_cache_only);
+int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
 void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
 struct btrfs_caching_control *btrfs_get_caching_control(
                struct btrfs_block_group *cache);
index 6e556031a8f3a11047441118f57a1023ebdc6f6d..ebfa35fe1c38b0fe6194ee43abc0e79c5cd89978 100644 (file)
@@ -2075,6 +2075,9 @@ cow_done:
 
                if (!p->skip_locking) {
                        level = btrfs_header_level(b);
+
+                       btrfs_maybe_reset_lockdep_class(root, b);
+
                        if (level <= write_lock_level) {
                                btrfs_tree_lock(b);
                                p->locks[level] = BTRFS_WRITE_LOCK;
index 4db85b9dc7edd674198465184a72b6b4f99efe55..9ef162dbd4bc11fd84c649d6b7c8b453313a8571 100644 (file)
@@ -505,7 +505,6 @@ struct btrfs_free_cluster {
 enum btrfs_caching_type {
        BTRFS_CACHE_NO,
        BTRFS_CACHE_STARTED,
-       BTRFS_CACHE_FAST,
        BTRFS_CACHE_FINISHED,
        BTRFS_CACHE_ERROR,
 };
@@ -1173,6 +1172,8 @@ enum {
        BTRFS_ROOT_ORPHAN_CLEANUP,
        /* This root has a drop operation that was started previously. */
        BTRFS_ROOT_UNFINISHED_DROP,
+       /* This reloc root needs to have its buffers lockdep class reset. */
+       BTRFS_ROOT_RESET_LOCKDEP_CLASS,
 };
 
 static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
index f43196a893ca3c44797d364a74538b5895cb3b91..41cddd3ff0593c068d1e6fbf036fad3d63ca223e 100644 (file)
@@ -165,7 +165,7 @@ no_valid_dev_replace_entry_found:
                 */
                if (btrfs_find_device(fs_info->fs_devices, &args)) {
                        btrfs_err(fs_info,
-                       "replace devid present without an active replace item");
+"replace without active item, run 'device scan --forget' on the target device");
                        ret = -EUCLEAN;
                } else {
                        dev_replace->srcdev = NULL;
@@ -1129,8 +1129,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
                up_write(&dev_replace->rwsem);
 
                /* Scrub for replace must not be running in suspended state */
-               ret = btrfs_scrub_cancel(fs_info);
-               ASSERT(ret != -ENOTCONN);
+               btrfs_scrub_cancel(fs_info);
 
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans)) {
index 4c3166f3c72567c67114a5a0b66624851d543565..820b1f1e6b6723dbd6ffcb4da6255331c319a26e 100644 (file)
@@ -86,88 +86,6 @@ struct async_submit_bio {
        blk_status_t status;
 };
 
-/*
- * Lockdep class keys for extent_buffer->lock's in this root.  For a given
- * eb, the lockdep key is determined by the btrfs_root it belongs to and
- * the level the eb occupies in the tree.
- *
- * Different roots are used for different purposes and may nest inside each
- * other and they require separate keysets.  As lockdep keys should be
- * static, assign keysets according to the purpose of the root as indicated
- * by btrfs_root->root_key.objectid.  This ensures that all special purpose
- * roots have separate keysets.
- *
- * Lock-nesting across peer nodes is always done with the immediate parent
- * node locked thus preventing deadlock.  As lockdep doesn't know this, use
- * subclass to avoid triggering lockdep warning in such cases.
- *
- * The key is set by the readpage_end_io_hook after the buffer has passed
- * csum validation but before the pages are unlocked.  It is also set by
- * btrfs_init_new_buffer on freshly allocated blocks.
- *
- * We also add a check to make sure the highest level of the tree is the
- * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
- * needs update as well.
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# if BTRFS_MAX_LEVEL != 8
-#  error
-# endif
-
-#define DEFINE_LEVEL(stem, level)                                      \
-       .names[level] = "btrfs-" stem "-0" #level,
-
-#define DEFINE_NAME(stem)                                              \
-       DEFINE_LEVEL(stem, 0)                                           \
-       DEFINE_LEVEL(stem, 1)                                           \
-       DEFINE_LEVEL(stem, 2)                                           \
-       DEFINE_LEVEL(stem, 3)                                           \
-       DEFINE_LEVEL(stem, 4)                                           \
-       DEFINE_LEVEL(stem, 5)                                           \
-       DEFINE_LEVEL(stem, 6)                                           \
-       DEFINE_LEVEL(stem, 7)
-
-static struct btrfs_lockdep_keyset {
-       u64                     id;             /* root objectid */
-       /* Longest entry: btrfs-free-space-00 */
-       char                    names[BTRFS_MAX_LEVEL][20];
-       struct lock_class_key   keys[BTRFS_MAX_LEVEL];
-} btrfs_lockdep_keysets[] = {
-       { .id = BTRFS_ROOT_TREE_OBJECTID,       DEFINE_NAME("root")     },
-       { .id = BTRFS_EXTENT_TREE_OBJECTID,     DEFINE_NAME("extent")   },
-       { .id = BTRFS_CHUNK_TREE_OBJECTID,      DEFINE_NAME("chunk")    },
-       { .id = BTRFS_DEV_TREE_OBJECTID,        DEFINE_NAME("dev")      },
-       { .id = BTRFS_CSUM_TREE_OBJECTID,       DEFINE_NAME("csum")     },
-       { .id = BTRFS_QUOTA_TREE_OBJECTID,      DEFINE_NAME("quota")    },
-       { .id = BTRFS_TREE_LOG_OBJECTID,        DEFINE_NAME("log")      },
-       { .id = BTRFS_TREE_RELOC_OBJECTID,      DEFINE_NAME("treloc")   },
-       { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc")   },
-       { .id = BTRFS_UUID_TREE_OBJECTID,       DEFINE_NAME("uuid")     },
-       { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
-       { .id = 0,                              DEFINE_NAME("tree")     },
-};
-
-#undef DEFINE_LEVEL
-#undef DEFINE_NAME
-
-void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
-                                   int level)
-{
-       struct btrfs_lockdep_keyset *ks;
-
-       BUG_ON(level >= ARRAY_SIZE(ks->keys));
-
-       /* find the matching keyset, id 0 is the default entry */
-       for (ks = btrfs_lockdep_keysets; ks->id; ks++)
-               if (ks->id == objectid)
-                       break;
-
-       lockdep_set_class_and_name(&eb->lock,
-                                  &ks->keys[level], ks->names[level]);
-}
-
-#endif
-
 /*
  * Compute the csum of a btree block and store the result to provided buffer.
  */
index 8993b428e09ceb72205368a78e124b2c11640d69..47ad8e0a2d33f6accdb0fa9ddff54218a8ac92de 100644 (file)
@@ -137,14 +137,4 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
 int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid);
 int btrfs_init_root_free_objectid(struct btrfs_root *root);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_set_buffer_lockdep_class(u64 objectid,
-                                   struct extent_buffer *eb, int level);
-#else
-static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
-                                       struct extent_buffer *eb, int level)
-{
-}
-#endif
-
 #endif
index ea3ec1e761e846fc4be8c6b46061e831f6ea4588..6914cd8024ba040b21552be249ddfdf1da8a1d8e 100644 (file)
@@ -2551,17 +2551,10 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
                return -EINVAL;
 
        /*
-        * pull in the free space cache (if any) so that our pin
-        * removes the free space from the cache.  We have load_only set
-        * to one because the slow code to read in the free extents does check
-        * the pinned extents.
+        * Fully cache the free space first so that our pin removes the free space
+        * from the cache.
         */
-       btrfs_cache_block_group(cache, 1);
-       /*
-        * Make sure we wait until the cache is completely built in case it is
-        * missing or is invalid and therefore needs to be rebuilt.
-        */
-       ret = btrfs_wait_block_group_cache_done(cache);
+       ret = btrfs_cache_block_group(cache, true);
        if (ret)
                goto out;
 
@@ -2584,12 +2577,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
        if (!block_group)
                return -EINVAL;
 
-       btrfs_cache_block_group(block_group, 1);
-       /*
-        * Make sure we wait until the cache is completely built in case it is
-        * missing or is invalid and therefore needs to be rebuilt.
-        */
-       ret = btrfs_wait_block_group_cache_done(block_group);
+       ret = btrfs_cache_block_group(block_group, true);
        if (ret)
                goto out;
 
@@ -4399,7 +4387,7 @@ have_block_group:
                ffe_ctl->cached = btrfs_block_group_done(block_group);
                if (unlikely(!ffe_ctl->cached)) {
                        ffe_ctl->have_caching_bg = true;
-                       ret = btrfs_cache_block_group(block_group, 0);
+                       ret = btrfs_cache_block_group(block_group, false);
 
                        /*
                         * If we get ENOMEM here or something else we want to
@@ -4867,6 +4855,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *buf;
+       u64 lockdep_owner = owner;
 
        buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
        if (IS_ERR(buf))
@@ -4885,12 +4874,27 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                return ERR_PTR(-EUCLEAN);
        }
 
+       /*
+        * The reloc trees are just snapshots, so we need them to appear to be
+        * just like any other fs tree WRT lockdep.
+        *
+        * The exception however is in replace_path() in relocation, where we
+        * hold the lock on the original fs root and then search for the reloc
+        * root.  At that point we need to make sure any reloc root buffers are
+        * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
+        * lockdep happy.
+        */
+       if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
+           !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
+               lockdep_owner = BTRFS_FS_TREE_OBJECTID;
+
        /*
         * This needs to stay, because we could allocate a freed block from an
         * old tree into a new tree, so we need to make sure this new block is
         * set to the appropriate level and owner.
         */
-       btrfs_set_buffer_lockdep_class(owner, buf, level);
+       btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
+
        __btrfs_tree_lock(buf, nest);
        btrfs_clean_tree_block(buf);
        clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
@@ -6153,13 +6157,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 
                if (end - start >= range->minlen) {
                        if (!btrfs_block_group_done(cache)) {
-                               ret = btrfs_cache_block_group(cache, 0);
-                               if (ret) {
-                                       bg_failed++;
-                                       bg_ret = ret;
-                                       continue;
-                               }
-                               ret = btrfs_wait_block_group_cache_done(cache);
+                               ret = btrfs_cache_block_group(cache, true);
                                if (ret) {
                                        bg_failed++;
                                        bg_ret = ret;
index bfae67c593c591e81c788bd4e5c387f9000dc5da..cf4f19e80e2f7f174ecdd245f4dd4b5e9d7dbd87 100644 (file)
@@ -3233,7 +3233,7 @@ static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
        u32 bio_size = bio->bi_iter.bi_size;
        u32 real_size;
        const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
-       bool contig;
+       bool contig = false;
        int ret;
 
        ASSERT(bio);
@@ -3242,10 +3242,35 @@ static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
        if (bio_ctrl->compress_type != compress_type)
                return 0;
 
-       if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
+
+       if (bio->bi_iter.bi_size == 0) {
+               /* We can always add a page into an empty bio. */
+               contig = true;
+       } else if (bio_ctrl->compress_type == BTRFS_COMPRESS_NONE) {
+               struct bio_vec *bvec = bio_last_bvec_all(bio);
+
+               /*
+                * The contig check requires the following conditions to be met:
+                * 1) The pages are belonging to the same inode
+                *    This is implied by the call chain.
+                *
+                * 2) The range has adjacent logical bytenr
+                *
+                * 3) The range has adjacent file offset
+                *    This is required for the usage of btrfs_bio->file_offset.
+                */
+               if (bio_end_sector(bio) == sector &&
+                   page_offset(bvec->bv_page) + bvec->bv_offset +
+                   bvec->bv_len == page_offset(page) + pg_offset)
+                       contig = true;
+       } else {
+               /*
+                * For compression, all IO should have its logical bytenr
+                * set to the starting bytenr of the compressed extent.
+                */
                contig = bio->bi_iter.bi_sector == sector;
-       else
-               contig = bio_end_sector(bio) == sector;
+       }
+
        if (!contig)
                return 0;
 
@@ -6140,6 +6165,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        struct extent_buffer *exists = NULL;
        struct page *p;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
+       u64 lockdep_owner = owner_root;
        int uptodate = 1;
        int ret;
 
@@ -6164,7 +6190,15 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
                return ERR_PTR(-ENOMEM);
-       btrfs_set_buffer_lockdep_class(owner_root, eb, level);
+
+       /*
+        * The reloc trees are just snapshots, so we need them to appear to be
+        * just like any other fs tree WRT lockdep.
+        */
+       if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
+               lockdep_owner = BTRFS_FS_TREE_OBJECTID;
+
+       btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
 
        num_pages = num_extent_pages(eb);
        for (i = 0; i < num_pages; i++, index++) {
index 66c822182ecce7899c0786a42969c0fc893de4e2..5a3f6e0d9688f2cf0dd09ab7cab77df877c95488 100644 (file)
@@ -2482,6 +2482,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
                btrfs_set_file_extent_offset(leaf, fi, 0);
+               btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
                goto out;
        }
@@ -2498,6 +2499,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
                btrfs_set_file_extent_offset(leaf, fi, 0);
+               btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
                goto out;
        }
index f0c97d25b4a0e762849ac63c3fa7f40d8bf38f2f..ad250892028d6e0387ff8ad101e0ee2a9b9452f2 100644 (file)
@@ -7693,6 +7693,20 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
        const u64 data_alloc_len = length;
        bool unlock_extents = false;
 
+       /*
+        * We could potentially fault if we have a buffer > PAGE_SIZE, and if
+        * we're NOWAIT we may submit a bio for a partial range and return
+        * EIOCBQUEUED, which would result in an errant short read.
+        *
+        * The best way to handle this would be to allow for partial completions
+        * of iocb's, so we could submit the partial bio, return and fault in
+        * the rest of the pages, and then submit the io for the rest of the
+        * range.  However we don't have that currently, so simply return
+        * -EAGAIN at this point so that the normal path is used.
+        */
+       if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
+               return -EAGAIN;
+
        /*
         * Cap the size of reads to that usually seen in buffered I/O as we need
         * to allocate a contiguous array for the checksums.
index 33461b4f9c8b5c3023756f770fe7176f0a5f5103..9063072b399bd833423b7faeb84f523a54ef0f88 100644 (file)
 #include "extent_io.h"
 #include "locking.h"
 
+/*
+ * Lockdep class keys for extent_buffer->lock's in this root.  For a given
+ * eb, the lockdep key is determined by the btrfs_root it belongs to and
+ * the level the eb occupies in the tree.
+ *
+ * Different roots are used for different purposes and may nest inside each
+ * other and they require separate keysets.  As lockdep keys should be
+ * static, assign keysets according to the purpose of the root as indicated
+ * by btrfs_root->root_key.objectid.  This ensures that all special purpose
+ * roots have separate keysets.
+ *
+ * Lock-nesting across peer nodes is always done with the immediate parent
+ * node locked thus preventing deadlock.  As lockdep doesn't know this, use
+ * subclass to avoid triggering lockdep warning in such cases.
+ *
+ * The key is set by the readpage_end_io_hook after the buffer has passed
+ * csum validation but before the pages are unlocked.  It is also set by
+ * btrfs_init_new_buffer on freshly allocated blocks.
+ *
+ * We also add a check to make sure the highest level of the tree is the
+ * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
+ * needs update as well.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if BTRFS_MAX_LEVEL != 8
+#error
+#endif
+
+#define DEFINE_LEVEL(stem, level)                                      \
+       .names[level] = "btrfs-" stem "-0" #level,
+
+#define DEFINE_NAME(stem)                                              \
+       DEFINE_LEVEL(stem, 0)                                           \
+       DEFINE_LEVEL(stem, 1)                                           \
+       DEFINE_LEVEL(stem, 2)                                           \
+       DEFINE_LEVEL(stem, 3)                                           \
+       DEFINE_LEVEL(stem, 4)                                           \
+       DEFINE_LEVEL(stem, 5)                                           \
+       DEFINE_LEVEL(stem, 6)                                           \
+       DEFINE_LEVEL(stem, 7)
+
+static struct btrfs_lockdep_keyset {
+       u64                     id;             /* root objectid */
+       /* Longest entry: btrfs-free-space-00 */
+       char                    names[BTRFS_MAX_LEVEL][20];
+       struct lock_class_key   keys[BTRFS_MAX_LEVEL];
+} btrfs_lockdep_keysets[] = {
+       { .id = BTRFS_ROOT_TREE_OBJECTID,       DEFINE_NAME("root")     },
+       { .id = BTRFS_EXTENT_TREE_OBJECTID,     DEFINE_NAME("extent")   },
+       { .id = BTRFS_CHUNK_TREE_OBJECTID,      DEFINE_NAME("chunk")    },
+       { .id = BTRFS_DEV_TREE_OBJECTID,        DEFINE_NAME("dev")      },
+       { .id = BTRFS_CSUM_TREE_OBJECTID,       DEFINE_NAME("csum")     },
+       { .id = BTRFS_QUOTA_TREE_OBJECTID,      DEFINE_NAME("quota")    },
+       { .id = BTRFS_TREE_LOG_OBJECTID,        DEFINE_NAME("log")      },
+       { .id = BTRFS_TREE_RELOC_OBJECTID,      DEFINE_NAME("treloc")   },
+       { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc")   },
+       { .id = BTRFS_UUID_TREE_OBJECTID,       DEFINE_NAME("uuid")     },
+       { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
+       { .id = 0,                              DEFINE_NAME("tree")     },
+};
+
+#undef DEFINE_LEVEL
+#undef DEFINE_NAME
+
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
+{
+       struct btrfs_lockdep_keyset *ks;
+
+       BUG_ON(level >= ARRAY_SIZE(ks->keys));
+
+       /* Find the matching keyset, id 0 is the default entry */
+       for (ks = btrfs_lockdep_keysets; ks->id; ks++)
+               if (ks->id == objectid)
+                       break;
+
+       lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
+}
+
+void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
+{
+       if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
+               btrfs_set_buffer_lockdep_class(root->root_key.objectid,
+                                              eb, btrfs_header_level(eb));
+}
+
+#endif
+
 /*
  * Extent buffer locking
  * =====================
@@ -164,6 +251,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
 
        while (1) {
                eb = btrfs_root_node(root);
+
+               btrfs_maybe_reset_lockdep_class(root, eb);
                btrfs_tree_lock(eb);
                if (eb == root->node)
                        break;
@@ -185,6 +274,8 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
 
        while (1) {
                eb = btrfs_root_node(root);
+
+               btrfs_maybe_reset_lockdep_class(root, eb);
                btrfs_tree_read_lock(eb);
                if (eb == root->node)
                        break;
index bbc45534ae9a6081af80204ad21b2033c5ae46af..ab268be09bb542fe5df6aa53fd59c9f6977f68a6 100644 (file)
@@ -131,4 +131,18 @@ void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
 void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
 void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
+void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
+#else
+static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
+                                       struct extent_buffer *eb, int level)
+{
+}
+static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
+                                                  struct extent_buffer *eb)
+{
+}
+#endif
+
 #endif
index a6dc827e75af06ba82fb56e768c77fd4dc681217..45c02aba2492b3f014dc70712fa6fda829d7ee06 100644 (file)
@@ -1326,7 +1326,9 @@ again:
                btrfs_release_path(path);
 
                path->lowest_level = level;
+               set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
                ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
+               clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
                path->lowest_level = 0;
                if (ret) {
                        if (ret > 0)
@@ -3573,7 +3575,12 @@ int prepare_to_relocate(struct reloc_control *rc)
                 */
                return PTR_ERR(trans);
        }
-       return btrfs_commit_transaction(trans);
+
+       ret = btrfs_commit_transaction(trans);
+       if (ret)
+               unset_reloc_control(rc);
+
+       return ret;
 }
 
 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
index a64b26b169040100b94f45acfefb050a5a5defba..d647cb2938c0184be095a175942814952a86aa17 100644 (file)
@@ -349,9 +349,10 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
        key.offset = ref_id;
 again:
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
-       if (ret < 0)
+       if (ret < 0) {
+               err = ret;
                goto out;
-       if (ret == 0) {
+       } else if (ret == 0) {
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_root_ref);
index 9e0e0ae2288cd4c89d298d45b04f417a4da77a50..43f905ab0a18d97cdec6d1255536b043891949f0 100644 (file)
@@ -1233,7 +1233,8 @@ static void extent_err(const struct extent_buffer *eb, int slot,
 }
 
 static int check_extent_item(struct extent_buffer *leaf,
-                            struct btrfs_key *key, int slot)
+                            struct btrfs_key *key, int slot,
+                            struct btrfs_key *prev_key)
 {
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_extent_item *ei;
@@ -1453,6 +1454,26 @@ static int check_extent_item(struct extent_buffer *leaf,
                           total_refs, inline_refs);
                return -EUCLEAN;
        }
+
+       if ((prev_key->type == BTRFS_EXTENT_ITEM_KEY) ||
+           (prev_key->type == BTRFS_METADATA_ITEM_KEY)) {
+               u64 prev_end = prev_key->objectid;
+
+               if (prev_key->type == BTRFS_METADATA_ITEM_KEY)
+                       prev_end += fs_info->nodesize;
+               else
+                       prev_end += prev_key->offset;
+
+               if (unlikely(prev_end > key->objectid)) {
+                       extent_err(leaf, slot,
+       "previous extent [%llu %u %llu] overlaps current extent [%llu %u %llu]",
+                                  prev_key->objectid, prev_key->type,
+                                  prev_key->offset, key->objectid, key->type,
+                                  key->offset);
+                       return -EUCLEAN;
+               }
+       }
+
        return 0;
 }
 
@@ -1621,7 +1642,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
                break;
        case BTRFS_EXTENT_ITEM_KEY:
        case BTRFS_METADATA_ITEM_KEY:
-               ret = check_extent_item(leaf, key, slot);
+               ret = check_extent_item(leaf, key, slot, prev_key);
                break;
        case BTRFS_TREE_BLOCK_REF_KEY:
        case BTRFS_SHARED_DATA_REF_KEY:
index dcf75a8daa200be8f013f77690da91986e2a4367..9205c4a5ca81dd4ea4854ba2b1a29d149121a0e3 100644 (file)
@@ -1146,7 +1146,9 @@ again:
        extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
                                           inode_objectid, parent_objectid, 0,
                                           0);
-       if (!IS_ERR_OR_NULL(extref)) {
+       if (IS_ERR(extref)) {
+               return PTR_ERR(extref);
+       } else if (extref) {
                u32 item_size;
                u32 cur_offset = 0;
                unsigned long base;
@@ -1457,7 +1459,7 @@ static int add_link(struct btrfs_trans_handle *trans,
         * on the inode will not free it. We will fixup the link count later.
         */
        if (other_inode->i_nlink == 0)
-               inc_nlink(other_inode);
+               set_nlink(other_inode, 1);
 add_link:
        ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
                             name, namelen, 0, ref_index);
@@ -1600,7 +1602,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                                 * free it. We will fixup the link count later.
                                 */
                                if (!ret && inode->i_nlink == 0)
-                                       inc_nlink(inode);
+                                       set_nlink(inode, 1);
                        }
                        if (ret < 0)
                                goto out;
index 272901514b0c14dc7074e31907df1f46caf3705e..064ab2a79c805f5f07921a4b1f3641d6bb6736a8 100644 (file)
@@ -2345,8 +2345,11 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
 
        ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
                                    &bdev, &disk_super);
-       if (ret)
+       if (ret) {
+               btrfs_put_dev_args_from_path(args);
                return ret;
+       }
+
        args->devid = btrfs_stack_device_id(&disk_super->dev_item);
        memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
        if (btrfs_fs_incompat(fs_info, METADATA_UUID))
index 7421abcf325a54ececa7f9c1ad7105921ce2f59d..5bb8d8c86311902ea8065862e66b35ec652f6c85 100644 (file)
@@ -371,6 +371,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
                                   const char *name, const void *buffer,
                                   size_t size, int flags)
 {
+       if (btrfs_root_readonly(BTRFS_I(inode)->root))
+               return -EROFS;
+
        name = xattr_full_name(handler, name);
        return btrfs_setxattr_trans(inode, name, buffer, size, flags);
 }
index 11fd85de721793ec9955fe51114a52a8edf16bc2..c05477e28cffa698868655e24142aaf0e52f83a6 100644 (file)
@@ -42,7 +42,7 @@ void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
                 smb->Command, smb->Status.CifsError,
                 smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
        cifs_dbg(VFS, "smb buf %p len %u\n", smb,
-                server->ops->calc_smb_size(smb, server));
+                server->ops->calc_smb_size(smb));
 #endif /* CONFIG_CIFS_DEBUG2 */
 }
 
index 8f7835ccbca16a880c0b1083cf5fb7bd14497765..46f5718754f948d8d5046b3112891aec15e8cce7 100644 (file)
@@ -32,10 +32,9 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
        int rc;
        struct kvec *iov = rqst->rq_iov;
        int n_vec = rqst->rq_nvec;
-       int is_smb2 = server->vals->header_preamble_size == 0;
 
        /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
-       if (is_smb2) {
+       if (!is_smb1(server)) {
                if (iov[0].iov_len <= 4)
                        return -EIO;
                i = 0;
index bc0ee2d4b47b27857c246578b6eab8a0622416d6..ae7f571a7dba2c4c497b22f4a2b7fefc55388de4 100644 (file)
@@ -417,7 +417,7 @@ struct smb_version_operations {
        int (*close_dir)(const unsigned int, struct cifs_tcon *,
                         struct cifs_fid *);
        /* calculate a size of SMB message */
-       unsigned int (*calc_smb_size)(void *buf, struct TCP_Server_Info *ptcpi);
+       unsigned int (*calc_smb_size)(void *buf);
        /* check for STATUS_PENDING and process the response if yes */
        bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server);
        /* check for STATUS_NETWORK_SESSION_EXPIRED */
@@ -557,6 +557,8 @@ struct smb_version_values {
 
 #define HEADER_SIZE(server) (server->vals->header_size)
 #define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
+#define HEADER_PREAMBLE_SIZE(server) (server->vals->header_preamble_size)
+#define MID_HEADER_SIZE(server) (HEADER_SIZE(server) - 1 - HEADER_PREAMBLE_SIZE(server))
 
 /**
  * CIFS superblock mount flags (mnt_cifs_flags) to consider when
@@ -750,6 +752,11 @@ struct TCP_Server_Info {
 #endif
 };
 
+static inline bool is_smb1(struct TCP_Server_Info *server)
+{
+       return HEADER_PREAMBLE_SIZE(server) != 0;
+}
+
 static inline void cifs_server_lock(struct TCP_Server_Info *server)
 {
        unsigned int nofs_flag = memalloc_nofs_save();
index 87a77a684339fe6ceccbd4d7192dacee577d8421..3bc94bcc7177eb7471b35606e4961bfc554aa001 100644 (file)
@@ -151,7 +151,7 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
 extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
 extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
                                  struct cifsFileInfo **ret_file);
-extern unsigned int smbCalcSize(void *buf, struct TCP_Server_Info *server);
+extern unsigned int smbCalcSize(void *buf);
 extern int decode_negTokenInit(unsigned char *security_blob, int length,
                        struct TCP_Server_Info *server);
 extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
index 9e91a5a40aaec21fd31ebfa6b0a6df70ad9ae30c..56ec1b233f52e22b499d813c15f5153ebb550e0f 100644 (file)
@@ -59,7 +59,7 @@ static int __init cifs_root_setup(char *line)
                        pr_err("Root-CIFS: UNC path too long\n");
                        return 1;
                }
-               strlcpy(root_dev, line, len);
+               strscpy(root_dev, line, len);
                srvaddr = parse_srvaddr(&line[2], s);
                if (*s) {
                        int n = snprintf(root_opts,
index 9111c025bcb8eaeb4da9e3d39c8b061e3e6d95b6..a0a06b6f252be3a6d205d325c735d7a17ee02c0a 100644 (file)
@@ -871,7 +871,7 @@ smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
        /*
         * SMB1 does not use credits.
         */
-       if (server->vals->header_preamble_size)
+       if (is_smb1(server))
                return 0;
 
        return le16_to_cpu(shdr->CreditRequest);
@@ -1050,7 +1050,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
        /* make sure this will fit in a large buffer */
        if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
-               server->vals->header_preamble_size) {
+           HEADER_PREAMBLE_SIZE(server)) {
                cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
                cifs_reconnect(server, true);
                return -ECONNABORTED;
@@ -1065,8 +1065,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
        /* now read the rest */
        length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
-                                      pdu_length - HEADER_SIZE(server) + 1
-                                      + server->vals->header_preamble_size);
+                                      pdu_length - MID_HEADER_SIZE(server));
 
        if (length < 0)
                return length;
@@ -1122,7 +1121,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
        /*
         * SMB1 does not use credits.
         */
-       if (server->vals->header_preamble_size)
+       if (is_smb1(server))
                return;
 
        if (shdr->CreditRequest) {
@@ -1180,10 +1179,10 @@ cifs_demultiplex_thread(void *p)
                if (length < 0)
                        continue;
 
-               if (server->vals->header_preamble_size == 0)
-                       server->total_read = 0;
-               else
+               if (is_smb1(server))
                        server->total_read = length;
+               else
+                       server->total_read = 0;
 
                /*
                 * The right amount was read from socket - 4 bytes,
@@ -1198,8 +1197,7 @@ next_pdu:
                server->pdu_size = pdu_length;
 
                /* make sure we have enough to get to the MID */
-               if (server->pdu_size < HEADER_SIZE(server) - 1 -
-                   server->vals->header_preamble_size) {
+               if (server->pdu_size < MID_HEADER_SIZE(server)) {
                        cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
                                 server->pdu_size);
                        cifs_reconnect(server, true);
@@ -1208,9 +1206,8 @@ next_pdu:
 
                /* read down to the MID */
                length = cifs_read_from_socket(server,
-                            buf + server->vals->header_preamble_size,
-                            HEADER_SIZE(server) - 1
-                            - server->vals->header_preamble_size);
+                            buf + HEADER_PREAMBLE_SIZE(server),
+                            MID_HEADER_SIZE(server));
                if (length < 0)
                        continue;
                server->total_read += length;
@@ -3994,7 +3991,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
                }
                bcc_ptr += length + 1;
                bytes_left -= (length + 1);
-               strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
+               strscpy(tcon->treeName, tree, sizeof(tcon->treeName));
 
                /* mostly informational -- no need to fail on error here */
                kfree(tcon->nativeFileSystem);
index 34d990f06fd6a4dddc03d3270955986671d3534c..87f60f7367315635949a924de6081ab336715e4c 100644 (file)
@@ -354,7 +354,7 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
        /* otherwise, there is enough to get to the BCC */
        if (check_smb_hdr(smb))
                return -EIO;
-       clc_len = smbCalcSize(smb, server);
+       clc_len = smbCalcSize(smb);
 
        if (4 + rfclen != total_read) {
                cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
@@ -737,6 +737,8 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
        list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
                if (delayed_work_pending(&cfile->deferred)) {
                        if (cancel_delayed_work(&cfile->deferred)) {
+                               cifs_del_deferred_close(cfile);
+
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
                                        break;
@@ -766,6 +768,8 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
        list_for_each_entry(cfile, &tcon->openFileList, tlist) {
                if (delayed_work_pending(&cfile->deferred)) {
                        if (cancel_delayed_work(&cfile->deferred)) {
+                               cifs_del_deferred_close(cfile);
+
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
                                        break;
@@ -799,6 +803,8 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
                if (strstr(full_path, path)) {
                        if (delayed_work_pending(&cfile->deferred)) {
                                if (cancel_delayed_work(&cfile->deferred)) {
+                                       cifs_del_deferred_close(cfile);
+
                                        tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                        if (tmp_list == NULL)
                                                break;
index 28caae7aed1bb0ed7e234d52fa324139b5bfcd6a..1b52e6ac431cb045e2495bc7437760217d107865 100644 (file)
@@ -909,7 +909,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
  * portion, the number of word parameters and the data portion of the message
  */
 unsigned int
-smbCalcSize(void *buf, struct TCP_Server_Info *server)
+smbCalcSize(void *buf)
 {
        struct smb_hdr *ptr = buf;
        return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
index 2eece8a07c1124ea298964559824e0228d508211..8e060c00c969011bd2c6a8f5415c672d0dca8cfb 100644 (file)
@@ -806,8 +806,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 
                end_of_smb = cfile->srch_inf.ntwrk_buf_start +
                        server->ops->calc_smb_size(
-                                       cfile->srch_inf.ntwrk_buf_start,
-                                       server);
+                                       cfile->srch_inf.ntwrk_buf_start);
 
                cur_ent = cfile->srch_inf.srch_entries_start;
                first_entry_in_buffer = cfile->srch_inf.index_of_last_entry
@@ -1161,8 +1160,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
        cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
                 num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
        max_len = tcon->ses->server->ops->calc_smb_size(
-                       cifsFile->srch_inf.ntwrk_buf_start,
-                       tcon->ses->server);
+                       cifsFile->srch_inf.ntwrk_buf_start);
        end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
 
        tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
index f5dcc4940b6da6da1b192ac3cdb4dc7ddcacd5a7..9dfd2dd612c25cca19dd3459fad9164859970c22 100644 (file)
@@ -61,7 +61,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                nr_ioctl_req.Reserved = 0;
                rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
                        fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
-                       true /* is_fsctl */,
                        (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
                        CIFSMaxBufSize, NULL, NULL /* no return info */);
                if (rc == -EOPNOTSUPP) {
index 6a6ec6efb45a99c3dbb4da654a23514f86c941ff..d73e5672aac493b8e1694250c9b8b650d9e6e314 100644 (file)
@@ -222,7 +222,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
                }
        }
 
-       calc_len = smb2_calc_size(buf, server);
+       calc_len = smb2_calc_size(buf);
 
        /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might
         * be 0, and not a real miscalculation */
@@ -410,7 +410,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
  * portion, the number of word parameters and the data portion of the message.
  */
 unsigned int
-smb2_calc_size(void *buf, struct TCP_Server_Info *srvr)
+smb2_calc_size(void *buf)
 {
        struct smb2_pdu *pdu = buf;
        struct smb2_hdr *shdr = &pdu->hdr;
index f406af59688775904d4135803f291b8701a30a8b..4810bd62266a573ff02414dfec45510dc4a0af39 100644 (file)
@@ -387,7 +387,7 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
                 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
                 shdr->Id.SyncId.ProcessId);
        cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
-                server->ops->calc_smb_size(buf, server));
+                server->ops->calc_smb_size(buf));
 #endif
 }
 
@@ -681,7 +681,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
        struct cifs_ses *ses = tcon->ses;
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
-                       FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
+                       FSCTL_QUERY_NETWORK_INTERFACE_INFO,
                        NULL /* no data input */, 0 /* no data input */,
                        CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
        if (rc == -EOPNOTSUPP) {
@@ -1323,9 +1323,8 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
        struct resume_key_req *res_key;
 
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
-                       FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
-                       NULL, 0 /* no input */, CIFSMaxBufSize,
-                       (char **)&res_key, &ret_data_len);
+                       FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
+                       CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
 
        if (rc == -EOPNOTSUPP) {
                pr_warn_once("Server share %s does not support copy range\n", tcon->treeName);
@@ -1467,7 +1466,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
 
                rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
-                                    qi.info_type, true, buffer, qi.output_buffer_length,
+                                    qi.info_type, buffer, qi.output_buffer_length,
                                     CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
                                     MAX_SMB2_CLOSE_RESPONSE_SIZE);
                free_req1_func = SMB2_ioctl_free;
@@ -1643,9 +1642,8 @@ smb2_copychunk_range(const unsigned int xid,
                retbuf = NULL;
                rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
-                       true /* is_fsctl */, (char *)pcchunk,
-                       sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
-                       (char **)&retbuf, &ret_data_len);
+                       (char *)pcchunk, sizeof(struct copychunk_ioctl),
+                       CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
                if (rc == 0) {
                        if (ret_data_len !=
                                        sizeof(struct copychunk_ioctl_rsp)) {
@@ -1805,7 +1803,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
-                       true /* is_fctl */,
                        &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
        if (rc) {
                tcon->broken_sparse_sup = true;
@@ -1888,7 +1885,6 @@ smb2_duplicate_extents(const unsigned int xid,
        rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid,
                        FSCTL_DUPLICATE_EXTENTS_TO_FILE,
-                       true /* is_fsctl */,
                        (char *)&dup_ext_buf,
                        sizeof(struct duplicate_extents_to_file),
                        CIFSMaxBufSize, NULL,
@@ -1923,7 +1919,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
        return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
                        FSCTL_SET_INTEGRITY_INFORMATION,
-                       true /* is_fsctl */,
                        (char *)&integr_info,
                        sizeof(struct fsctl_set_integrity_information_req),
                        CIFSMaxBufSize, NULL,
@@ -1976,7 +1971,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
                        FSCTL_SRV_ENUMERATE_SNAPSHOTS,
-                       true /* is_fsctl */,
                        NULL, 0 /* no input data */, max_response_size,
                        (char **)&retbuf,
                        &ret_data_len);
@@ -2699,7 +2693,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
        do {
                rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                                FSCTL_DFS_GET_REFERRALS,
-                               true /* is_fsctl */,
                                (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
                                (char **)&dfs_rsp, &dfs_rsp_size);
                if (!is_retryable_error(rc))
@@ -2906,8 +2899,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl_init(tcon, server,
                             &rqst[1], fid.persistent_fid,
-                            fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
-                            true /* is_fctl */, NULL, 0,
+                            fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0,
                             CIFSMaxBufSize -
                             MAX_SMB2_CREATE_RESPONSE_SIZE -
                             MAX_SMB2_CLOSE_RESPONSE_SIZE);
@@ -3087,8 +3079,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl_init(tcon, server,
                             &rqst[1], COMPOUND_FID,
-                            COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
-                            true /* is_fctl */, NULL, 0,
+                            COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
                             CIFSMaxBufSize -
                             MAX_SMB2_CREATE_RESPONSE_SIZE -
                             MAX_SMB2_CLOSE_RESPONSE_SIZE);
@@ -3316,26 +3307,43 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb,
        return pntsd;
 }
 
+static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
+                            loff_t offset, loff_t len, unsigned int xid)
+{
+       struct cifsFileInfo *cfile = file->private_data;
+       struct file_zero_data_information fsctl_buf;
+
+       cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
+
+       fsctl_buf.FileOffset = cpu_to_le64(offset);
+       fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+
+       return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                         cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+                         (char *)&fsctl_buf,
+                         sizeof(struct file_zero_data_information),
+                         0, NULL, NULL);
+}
+
 static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
                            loff_t offset, loff_t len, bool keep_size)
 {
        struct cifs_ses *ses = tcon->ses;
-       struct inode *inode;
-       struct cifsInodeInfo *cifsi;
+       struct inode *inode = file_inode(file);
+       struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifsFileInfo *cfile = file->private_data;
-       struct file_zero_data_information fsctl_buf;
        long rc;
        unsigned int xid;
        __le64 eof;
 
        xid = get_xid();
 
-       inode = d_inode(cfile->dentry);
-       cifsi = CIFS_I(inode);
-
        trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
                              ses->Suid, offset, len);
 
+       inode_lock(inode);
+       filemap_invalidate_lock(inode->i_mapping);
+
        /*
         * We zero the range through ioctl, so we need remove the page caches
         * first, otherwise the data may be inconsistent with the server.
@@ -3343,26 +3351,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        truncate_pagecache_range(inode, offset, offset + len - 1);
 
        /* if file not oplocked can't be sure whether asking to extend size */
-       if (!CIFS_CACHE_READ(cifsi))
-               if (keep_size == false) {
-                       rc = -EOPNOTSUPP;
-                       trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
-                               tcon->tid, ses->Suid, offset, len, rc);
-                       free_xid(xid);
-                       return rc;
-               }
-
-       cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
-
-       fsctl_buf.FileOffset = cpu_to_le64(offset);
-       fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+       rc = -EOPNOTSUPP;
+       if (keep_size == false && !CIFS_CACHE_READ(cifsi))
+               goto zero_range_exit;
 
-       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
-                       cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
-                       (char *)&fsctl_buf,
-                       sizeof(struct file_zero_data_information),
-                       0, NULL, NULL);
-       if (rc)
+       rc = smb3_zero_data(file, tcon, offset, len, xid);
+       if (rc < 0)
                goto zero_range_exit;
 
        /*
@@ -3375,6 +3369,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        }
 
  zero_range_exit:
+       filemap_invalidate_unlock(inode->i_mapping);
+       inode_unlock(inode);
        free_xid(xid);
        if (rc)
                trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
@@ -3388,7 +3384,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
 static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                            loff_t offset, loff_t len)
 {
-       struct inode *inode;
+       struct inode *inode = file_inode(file);
        struct cifsFileInfo *cfile = file->private_data;
        struct file_zero_data_information fsctl_buf;
        long rc;
@@ -3397,14 +3393,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
 
        xid = get_xid();
 
-       inode = d_inode(cfile->dentry);
-
+       inode_lock(inode);
        /* Need to make file sparse, if not already, before freeing range. */
        /* Consider adding equivalent for compressed since it could also work */
        if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
                rc = -EOPNOTSUPP;
-               free_xid(xid);
-               return rc;
+               goto out;
        }
 
        filemap_invalidate_lock(inode->i_mapping);
@@ -3421,11 +3415,13 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
-                       true /* is_fctl */, (char *)&fsctl_buf,
+                       (char *)&fsctl_buf,
                        sizeof(struct file_zero_data_information),
                        CIFSMaxBufSize, NULL, NULL);
-       free_xid(xid);
        filemap_invalidate_unlock(inode->i_mapping);
+out:
+       inode_unlock(inode);
+       free_xid(xid);
        return rc;
 }
 
@@ -3481,7 +3477,7 @@ static int smb3_simple_fallocate_range(unsigned int xid,
        in_data.length = cpu_to_le64(len);
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
-                       FSCTL_QUERY_ALLOCATED_RANGES, true,
+                       FSCTL_QUERY_ALLOCATED_RANGES,
                        (char *)&in_data, sizeof(in_data),
                        1024 * sizeof(struct file_allocated_range_buffer),
                        (char **)&out_data, &out_data_len);
@@ -3802,7 +3798,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
 
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
-                       FSCTL_QUERY_ALLOCATED_RANGES, true,
+                       FSCTL_QUERY_ALLOCATED_RANGES,
                        (char *)&in_data, sizeof(in_data),
                        sizeof(struct file_allocated_range_buffer),
                        (char **)&out_data, &out_data_len);
@@ -3862,7 +3858,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
-                       FSCTL_QUERY_ALLOCATED_RANGES, true,
+                       FSCTL_QUERY_ALLOCATED_RANGES,
                        (char *)&in_data, sizeof(in_data),
                        1024 * sizeof(struct file_allocated_range_buffer),
                        (char **)&out_data, &out_data_len);
index 9b31ea946d454f0581dee451e196b4e0cfdaef30..128e44e5752802b8d0b0d17b795c77eda899f741 100644 (file)
@@ -1173,7 +1173,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
        }
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
-               FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
+               FSCTL_VALIDATE_NEGOTIATE_INFO,
                (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
                (char **)&pneg_rsp, &rsplen);
        if (rc == -EOPNOTSUPP) {
@@ -1928,7 +1928,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
        tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
        tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
-       strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
+       strscpy(tcon->treeName, tree, sizeof(tcon->treeName));
 
        if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
            ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
@@ -2572,19 +2572,15 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
 
        path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
 
-       /*
-        * make room for one path separator between the treename and
-        * path
-        */
-       *out_len = treename_len + 1 + path_len;
+       /* make room for one path separator only if @path isn't empty */
+       *out_len = treename_len + (path[0] ? 1 : 0) + path_len;
 
        /*
-        * final path needs to be null-terminated UTF16 with a
-        * size aligned to 8
+        * final path needs to be 8-byte aligned as specified in
+        * MS-SMB2 2.2.13 SMB2 CREATE Request.
         */
-
-       *out_size = roundup((*out_len+1)*2, 8);
-       *out_path = kzalloc(*out_size, GFP_KERNEL);
+       *out_size = roundup(*out_len * sizeof(__le16), 8);
+       *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
        if (!*out_path)
                return -ENOMEM;
 
@@ -3056,7 +3052,7 @@ int
 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
                struct smb_rqst *rqst,
                u64 persistent_fid, u64 volatile_fid, u32 opcode,
-               bool is_fsctl, char *in_data, u32 indatalen,
+               char *in_data, u32 indatalen,
                __u32 max_response_size)
 {
        struct smb2_ioctl_req *req;
@@ -3131,10 +3127,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
        req->hdr.CreditCharge =
                cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
                                         SMB2_MAX_BUFFER_SIZE));
-       if (is_fsctl)
-               req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
-       else
-               req->Flags = 0;
+       /* always an FSCTL (for now) */
+       req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
 
        /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
        if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
@@ -3161,9 +3155,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
  */
 int
 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
-          u64 volatile_fid, u32 opcode, bool is_fsctl,
-          char *in_data, u32 indatalen, u32 max_out_data_len,
-          char **out_data, u32 *plen /* returned data len */)
+          u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
+          u32 max_out_data_len, char **out_data,
+          u32 *plen /* returned data len */)
 {
        struct smb_rqst rqst;
        struct smb2_ioctl_rsp *rsp = NULL;
@@ -3205,7 +3199,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        rc = SMB2_ioctl_init(tcon, server,
                             &rqst, persistent_fid, volatile_fid, opcode,
-                            is_fsctl, in_data, indatalen, max_out_data_len);
+                            in_data, indatalen, max_out_data_len);
        if (rc)
                goto ioctl_exit;
 
@@ -3297,7 +3291,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
                        cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
 
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
-                       FSCTL_SET_COMPRESSION, true /* is_fsctl */,
+                       FSCTL_SET_COMPRESSION,
                        (char *)&fsctl_input /* data input */,
                        2 /* in data len */, CIFSMaxBufSize /* max out data */,
                        &ret_data /* out data */, NULL);
index 51c5bf4a338aec684f0f0abea22a6d73c12f8875..3f740f24b96a7750be18f3b7a2bb02da46b031d0 100644 (file)
@@ -23,7 +23,7 @@ struct smb_rqst;
 extern int map_smb2_to_linux_error(char *buf, bool log_err);
 extern int smb2_check_message(char *buf, unsigned int length,
                              struct TCP_Server_Info *server);
-extern unsigned int smb2_calc_size(void *buf, struct TCP_Server_Info *server);
+extern unsigned int smb2_calc_size(void *buf);
 extern char *smb2_get_data_area_len(int *off, int *len,
                                    struct smb2_hdr *shdr);
 extern __le16 *cifs_convert_path_to_utf16(const char *from,
@@ -137,13 +137,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon,
 extern void SMB2_open_free(struct smb_rqst *rqst);
 extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                    bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
+                    char *in_data, u32 indatalen, u32 maxoutlen,
                     char **out_data, u32 *plen /* returned data len */);
 extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
                           struct TCP_Server_Info *server,
                           struct smb_rqst *rqst,
                           u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                          bool is_fsctl, char *in_data, u32 indatalen,
+                          char *in_data, u32 indatalen,
                           __u32 max_response_size);
 extern void SMB2_ioctl_free(struct smb_rqst *rqst);
 extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
index de7aeced7e16bbe952dd3f9aeb16385bcf40de8d..c2fe035e573ba01eaf366668e83c9802e5752a4d 100644 (file)
@@ -261,8 +261,8 @@ smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
        int nvec;
        unsigned long buflen = 0;
 
-       if (server->vals->header_preamble_size == 0 &&
-           rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
+       if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
+           rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
@@ -346,7 +346,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        sigprocmask(SIG_BLOCK, &mask, &oldmask);
 
        /* Generate a rfc1002 marker for SMB2+ */
-       if (server->vals->header_preamble_size == 0) {
+       if (!is_smb1(server)) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
@@ -1238,7 +1238,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
-                       server->vals->header_preamble_size;
+                       HEADER_PREAMBLE_SIZE(server);
 
                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
@@ -1643,7 +1643,7 @@ int
 cifs_discard_remaining_data(struct TCP_Server_Info *server)
 {
        unsigned int rfclen = server->pdu_size;
-       int remaining = rfclen + server->vals->header_preamble_size -
+       int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
                server->total_read;
 
        while (remaining > 0) {
@@ -1689,8 +1689,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        unsigned int data_offset, data_len;
        struct cifs_readdata *rdata = mid->callback_data;
        char *buf = server->smallbuf;
-       unsigned int buflen = server->pdu_size +
-               server->vals->header_preamble_size;
+       unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
        bool use_rdma_mr = false;
 
        cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
@@ -1724,10 +1723,10 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
        /* set up first two iov for signature check and to get credits */
        rdata->iov[0].iov_base = buf;
-       rdata->iov[0].iov_len = server->vals->header_preamble_size;
-       rdata->iov[1].iov_base = buf + server->vals->header_preamble_size;
+       rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
+       rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
        rdata->iov[1].iov_len =
-               server->total_read - server->vals->header_preamble_size;
+               server->total_read - HEADER_PREAMBLE_SIZE(server);
        cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
                 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
        cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
@@ -1752,7 +1751,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        }
 
        data_offset = server->ops->read_data_offset(buf) +
-               server->vals->header_preamble_size;
+               HEADER_PREAMBLE_SIZE(server);
        if (data_offset < server->total_read) {
                /*
                 * win2k8 sometimes sends an offset of 0 when the read
index c5dc32a59c7699107c233034254d890e8b806dde..bb0c4d0038dbdc5a715edeab6e825c4bfe8b6b72 100644 (file)
@@ -2270,6 +2270,48 @@ bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
 }
 EXPORT_SYMBOL_GPL(d_same_name);
 
+/*
+ * This is __d_lookup_rcu() when the parent dentry has
+ * DCACHE_OP_COMPARE, which makes things much nastier.
+ */
+static noinline struct dentry *__d_lookup_rcu_op_compare(
+       const struct dentry *parent,
+       const struct qstr *name,
+       unsigned *seqp)
+{
+       u64 hashlen = name->hash_len;
+       struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
+       struct hlist_bl_node *node;
+       struct dentry *dentry;
+
+       hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
+               int tlen;
+               const char *tname;
+               unsigned seq;
+
+seqretry:
+               seq = raw_seqcount_begin(&dentry->d_seq);
+               if (dentry->d_parent != parent)
+                       continue;
+               if (d_unhashed(dentry))
+                       continue;
+               if (dentry->d_name.hash != hashlen_hash(hashlen))
+                       continue;
+               tlen = dentry->d_name.len;
+               tname = dentry->d_name.name;
+               /* we want a consistent (name,len) pair */
+               if (read_seqcount_retry(&dentry->d_seq, seq)) {
+                       cpu_relax();
+                       goto seqretry;
+               }
+               if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
+                       continue;
+               *seqp = seq;
+               return dentry;
+       }
+       return NULL;
+}
+
 /**
  * __d_lookup_rcu - search for a dentry (racy, store-free)
  * @parent: parent dentry
@@ -2316,6 +2358,9 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
         * Keep the two functions in sync.
         */
 
+       if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
+               return __d_lookup_rcu_op_compare(parent, name, seqp);
+
        /*
         * The hash list is protected using RCU.
         *
@@ -2332,7 +2377,6 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
        hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
                unsigned seq;
 
-seqretry:
                /*
                 * The dentry sequence count protects us from concurrent
                 * renames, and thus protects parent and name fields.
@@ -2355,28 +2399,10 @@ seqretry:
                        continue;
                if (d_unhashed(dentry))
                        continue;
-
-               if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
-                       int tlen;
-                       const char *tname;
-                       if (dentry->d_name.hash != hashlen_hash(hashlen))
-                               continue;
-                       tlen = dentry->d_name.len;
-                       tname = dentry->d_name.name;
-                       /* we want a consistent (name,len) pair */
-                       if (read_seqcount_retry(&dentry->d_seq, seq)) {
-                               cpu_relax();
-                               goto seqretry;
-                       }
-                       if (parent->d_op->d_compare(dentry,
-                                                   tlen, tname, name) != 0)
-                               continue;
-               } else {
-                       if (dentry->d_name.hash_len != hashlen)
-                               continue;
-                       if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
-                               continue;
-               }
+               if (dentry->d_name.hash_len != hashlen)
+                       continue;
+               if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
+                       continue;
                *seqp = seq;
                return dentry;
        }
index f793221f4eb63e3bb781fa766190a389e6038c95..9a5ca7b82bfc5ee62e1533c35fbf7e587b077572 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -584,11 +584,11 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
 
                                if (kmapped_page) {
                                        flush_dcache_page(kmapped_page);
-                                       kunmap(kmapped_page);
+                                       kunmap_local(kaddr);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
-                               kaddr = kmap(kmapped_page);
+                               kaddr = kmap_local_page(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
@@ -602,7 +602,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
 out:
        if (kmapped_page) {
                flush_dcache_page(kmapped_page);
-               kunmap(kmapped_page);
+               kunmap_local(kaddr);
                put_arg_page(kmapped_page);
        }
        return ret;
@@ -880,11 +880,11 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
 
        for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
                unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
-               char *src = kmap(bprm->page[index]) + offset;
+               char *src = kmap_local_page(bprm->page[index]) + offset;
                sp -= PAGE_SIZE - offset;
                if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
                        ret = -EFAULT;
-               kunmap(bprm->page[index]);
+               kunmap_local(src);
                if (ret)
                        goto out;
        }
@@ -1686,13 +1686,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
                        ret = -EFAULT;
                        goto out;
                }
-               kaddr = kmap_atomic(page);
+               kaddr = kmap_local_page(page);
 
                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;
 
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                put_arg_page(page);
        } while (offset == PAGE_SIZE);
 
index 05221366a16dc913961f3e3f7b4a703477bc7bb9..08a1993ab7fd3943f586e2c08ade7c1d5431de63 100644 (file)
@@ -134,10 +134,10 @@ static bool inode_io_list_move_locked(struct inode *inode,
 
 static void wb_wakeup(struct bdi_writeback *wb)
 {
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                mod_delayed_work(bdi_wq, &wb->dwork, 0);
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
 }
 
 static void finish_writeback_work(struct bdi_writeback *wb,
@@ -164,7 +164,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
        if (work->done)
                atomic_inc(&work->done->cnt);
 
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
 
        if (test_bit(WB_registered, &wb->state)) {
                list_add_tail(&work->list, &wb->work_list);
@@ -172,7 +172,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
        } else
                finish_writeback_work(wb, work);
 
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
 }
 
 /**
@@ -2082,13 +2082,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 {
        struct wb_writeback_work *work = NULL;
 
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (!list_empty(&wb->work_list)) {
                work = list_entry(wb->work_list.next,
                                  struct wb_writeback_work, list);
                list_del_init(&work->list);
        }
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
        return work;
 }
 
index 6462276dfdf04d9ce648b7a2f4a06faa727d9de0..ba1de23c13c1ed35f87d896fd7d58f2446fa2c20 100644 (file)
@@ -2018,23 +2018,25 @@ static int __file_remove_privs(struct file *file, unsigned int flags)
 {
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = file_inode(file);
-       int error;
+       int error = 0;
        int kill;
 
        if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
                return 0;
 
        kill = dentry_needs_remove_privs(dentry);
-       if (kill <= 0)
+       if (kill < 0)
                return kill;
 
-       if (flags & IOCB_NOWAIT)
-               return -EAGAIN;
+       if (kill) {
+               if (flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+
+               error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
+       }
 
-       error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
        if (!error)
                inode_has_no_xattr(inode);
-
        return error;
 }
 
index 52aa0adeb95198f56142e46ebebd98ddce401f67..e0cbcfa98c7ebb64e506404d9c476afcdede782f 100644 (file)
@@ -349,6 +349,7 @@ enum KSMBD_TREE_CONN_STATUS {
 #define KSMBD_SHARE_FLAG_STREAMS               BIT(11)
 #define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS       BIT(12)
 #define KSMBD_SHARE_FLAG_ACL_XATTR             BIT(13)
+#define KSMBD_SHARE_FLAG_UPDATE                BIT(14)
 
 /*
  * Tree connect request flags.
@@ -364,6 +365,7 @@ enum KSMBD_TREE_CONN_STATUS {
 #define KSMBD_TREE_CONN_FLAG_READ_ONLY         BIT(1)
 #define KSMBD_TREE_CONN_FLAG_WRITABLE          BIT(2)
 #define KSMBD_TREE_CONN_FLAG_ADMIN_ACCOUNT     BIT(3)
+#define KSMBD_TREE_CONN_FLAG_UPDATE            BIT(4)
 
 /*
  * RPC over IPC.
index 70655af93b440b7711f3757c6b9d464c54f28f97..c9bca1c2c83490ebcd1b51de7b96f1126c46b23a 100644 (file)
@@ -51,12 +51,16 @@ static void kill_share(struct ksmbd_share_config *share)
        kfree(share);
 }
 
-void __ksmbd_share_config_put(struct ksmbd_share_config *share)
+void ksmbd_share_config_del(struct ksmbd_share_config *share)
 {
        down_write(&shares_table_lock);
        hash_del(&share->hlist);
        up_write(&shares_table_lock);
+}
 
+void __ksmbd_share_config_put(struct ksmbd_share_config *share)
+{
+       ksmbd_share_config_del(share);
        kill_share(share);
 }
 
index 28bf3511763f4bac6ebecf21ad746a65431fa616..902f2cb1963a93b9f442341afd7a7d453fd81efc 100644 (file)
@@ -64,6 +64,7 @@ static inline int test_share_config_flag(struct ksmbd_share_config *share,
        return share->flags & flag;
 }
 
+void ksmbd_share_config_del(struct ksmbd_share_config *share);
 void __ksmbd_share_config_put(struct ksmbd_share_config *share);
 
 static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
index b35ea6a6abc53ef6798dabcf9acec924d401c75d..97ab7987df6ebfa8b004b924513f560bedaaae6a 100644 (file)
@@ -19,7 +19,7 @@ struct ksmbd_tree_conn_status
 ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
                        char *share_name)
 {
-       struct ksmbd_tree_conn_status status = {-EINVAL, NULL};
+       struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
        struct ksmbd_tree_connect_response *resp = NULL;
        struct ksmbd_share_config *sc;
        struct ksmbd_tree_connect *tree_conn = NULL;
@@ -57,6 +57,20 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
                goto out_error;
 
        tree_conn->flags = resp->connection_flags;
+       if (test_tree_conn_flag(tree_conn, KSMBD_TREE_CONN_FLAG_UPDATE)) {
+               struct ksmbd_share_config *new_sc;
+
+               ksmbd_share_config_del(sc);
+               new_sc = ksmbd_share_config_get(share_name);
+               if (!new_sc) {
+                       pr_err("Failed to update stale share config\n");
+                       status.ret = -ESTALE;
+                       goto out_error;
+               }
+               ksmbd_share_config_put(sc);
+               sc = new_sc;
+       }
+
        tree_conn->user = sess->user;
        tree_conn->share_conf = sc;
        status.tree_conn = tree_conn;
index 9751cc92c111b6121becb976f8ffedbd662d157e..19412ac701a65cbfcf25a9a0f1a1b8a234c54cb7 100644 (file)
@@ -1944,8 +1944,10 @@ out_err1:
                rsp->hdr.Status = STATUS_SUCCESS;
                rc = 0;
                break;
+       case -ESTALE:
+       case -ENOENT:
        case KSMBD_TREE_CONN_STATUS_NO_SHARE:
-               rsp->hdr.Status = STATUS_BAD_NETWORK_PATH;
+               rsp->hdr.Status = STATUS_BAD_NETWORK_NAME;
                break;
        case -ENOMEM:
        case KSMBD_TREE_CONN_STATUS_NOMEM:
@@ -2328,15 +2330,15 @@ static int smb2_remove_smb_xattrs(struct path *path)
                        name += strlen(name) + 1) {
                ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
 
-               if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
-                   strncmp(&name[XATTR_USER_PREFIX_LEN], DOS_ATTRIBUTE_PREFIX,
-                           DOS_ATTRIBUTE_PREFIX_LEN) &&
-                   strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX, STREAM_PREFIX_LEN))
-                       continue;
-
-               err = ksmbd_vfs_remove_xattr(user_ns, path->dentry, name);
-               if (err)
-                       ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+               if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+                   !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+                            STREAM_PREFIX_LEN)) {
+                       err = ksmbd_vfs_remove_xattr(user_ns, path->dentry,
+                                                    name);
+                       if (err)
+                               ksmbd_debug(SMB, "remove xattr failed : %s\n",
+                                           name);
+               }
        }
 out:
        kvfree(xattr_list);
@@ -3042,12 +3044,6 @@ int smb2_open(struct ksmbd_work *work)
        list_add(&fp->node, &fp->f_ci->m_fp_list);
        write_unlock(&fp->f_ci->m_lock);
 
-       rc = ksmbd_vfs_getattr(&path, &stat);
-       if (rc) {
-               generic_fillattr(user_ns, d_inode(path.dentry), &stat);
-               rc = 0;
-       }
-
        /* Check delete pending among previous fp before oplock break */
        if (ksmbd_inode_pending_delete(fp)) {
                rc = -EBUSY;
@@ -3134,6 +3130,10 @@ int smb2_open(struct ksmbd_work *work)
                }
        }
 
+       rc = ksmbd_vfs_getattr(&path, &stat);
+       if (rc)
+               goto err_out;
+
        if (stat.result_mask & STATX_BTIME)
                fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
        else
@@ -3149,9 +3149,6 @@ int smb2_open(struct ksmbd_work *work)
 
        memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
 
-       generic_fillattr(user_ns, file_inode(fp->filp),
-                        &stat);
-
        rsp->StructureSize = cpu_to_le16(89);
        rcu_read_lock();
        opinfo = rcu_dereference(fp->f_opinfo);
index c266cfdc3291f92902481f8d9ed289ae08fe839c..607f94a0e789fc71407780e12580ca89891a1dad 100644 (file)
@@ -2129,6 +2129,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
        else
                error = locks_lock_file_wait(f.file, &fl);
 
+       locks_release_private(&fl);
  out_putf:
        fdput(f);
 
index 68789f896f0819e0abeb509e90aea20dbc0fc7c7..df137ba19d3756bd08d544f0d7e95eb52cb4869c 100644 (file)
@@ -4238,6 +4238,13 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
                err = -EPERM;
                goto out_fput;
        }
+
+       /* We're not controlling the target namespace. */
+       if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
+               err = -EPERM;
+               goto out_fput;
+       }
+
        kattr->mnt_userns = get_user_ns(mnt_userns);
 
 out_fput:
index dbab3caa15ed505fa88c10b22af5eefbd7884771..5d6c2ddc7ea6f1282a34d871b238b4a2dd5cd62f 100644 (file)
@@ -2382,7 +2382,8 @@ static void nfs_dentry_remove_handle_error(struct inode *dir,
 {
        switch (error) {
        case -ENOENT:
-               d_delete(dentry);
+               if (d_really_is_positive(dentry))
+                       d_delete(dentry);
                nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
                break;
        case 0:
@@ -2484,8 +2485,10 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
         */
        error = -ETXTBSY;
        if (WARN_ON(dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
-           WARN_ON(dentry->d_fsdata == NFS_FSDATA_BLOCKED))
+           WARN_ON(dentry->d_fsdata == NFS_FSDATA_BLOCKED)) {
+               spin_unlock(&dentry->d_lock);
                goto out;
+       }
        if (dentry->d_fsdata)
                /* old devname */
                kfree(dentry->d_fsdata);
index d2bcd4834c0e6857713aae58c3910d800594dd64..e032fe201a367421cd2831671174173f2cb151b5 100644 (file)
@@ -221,8 +221,10 @@ nfs_file_fsync_commit(struct file *file, int datasync)
 int
 nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode *inode = file_inode(file);
+       struct nfs_inode *nfsi = NFS_I(inode);
+       long save_nredirtied = atomic_long_read(&nfsi->redirtied_pages);
+       long nredirtied;
        int ret;
 
        trace_nfs_fsync_enter(inode);
@@ -237,15 +239,10 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
                ret = pnfs_sync_inode(inode, !!datasync);
                if (ret != 0)
                        break;
-               if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
+               nredirtied = atomic_long_read(&nfsi->redirtied_pages);
+               if (nredirtied == save_nredirtied)
                        break;
-               /*
-                * If nfs_file_fsync_commit detected a server reboot, then
-                * resend all dirty pages that might have been covered by
-                * the NFS_CONTEXT_RESEND_WRITES flag
-                */
-               start = 0;
-               end = LLONG_MAX;
+               save_nredirtied = nredirtied;
        }
 
        trace_nfs_fsync_exit(inode, ret);
index b4e46b0ffa2dc04d268827312286af190fef1ab3..bea7c005119c3e64921fbdd85b63e3865822b483 100644 (file)
@@ -426,6 +426,7 @@ nfs_ilookup(struct super_block *sb, struct nfs_fattr *fattr, struct nfs_fh *fh)
 static void nfs_inode_init_regular(struct nfs_inode *nfsi)
 {
        atomic_long_set(&nfsi->nrequests, 0);
+       atomic_long_set(&nfsi->redirtied_pages, 0);
        INIT_LIST_HEAD(&nfsi->commit_info.list);
        atomic_long_set(&nfsi->commit_info.ncommit, 0);
        atomic_set(&nfsi->commit_info.rpcs_out, 0);
index e88f6b18445ece3b51ea0781861575908c393893..9eb18128787951218b3f7a898dd9747a112777de 100644 (file)
@@ -340,6 +340,11 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
                goto out;
        }
 
+       if (!S_ISREG(fattr->mode)) {
+               res = ERR_PTR(-EBADF);
+               goto out;
+       }
+
        res = ERR_PTR(-ENOMEM);
        len = strlen(SSC_READ_NAME_BODY) + 16;
        read_name = kzalloc(len, GFP_KERNEL);
@@ -357,6 +362,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
                                     r_ino->i_fop);
        if (IS_ERR(filep)) {
                res = ERR_CAST(filep);
+               iput(r_ino);
                goto out_free_name;
        }
 
index 41a9b6b58fb9fa15c9f162fa8dfa267a1c16e049..2613b7e36eb956374d70315171460a7d35d0ce51 100644 (file)
@@ -2817,7 +2817,6 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
        /* Resend all requests through the MDS */
        nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
                              hdr->completion_ops);
-       set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
        return nfs_pageio_resend(&pgio, hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
index 51a7e202d6e542d53382fe2a38b8a7fe2a83fdcd..1843fa235d9b64c045cc89f3767b2ed9a1cbf6e5 100644 (file)
@@ -1420,10 +1420,12 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
  */
 static void nfs_redirty_request(struct nfs_page *req)
 {
+       struct nfs_inode *nfsi = NFS_I(page_file_mapping(req->wb_page)->host);
+
        /* Bump the transmission count */
        req->wb_nio++;
        nfs_mark_request_dirty(req);
-       set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
+       atomic_long_inc(&nfsi->redirtied_pages);
        nfs_end_page_writeback(req);
        nfs_release_request(req);
 }
@@ -1904,7 +1906,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
                /* We have a mismatch. Write the page again */
                dprintk_cont(" mismatch\n");
                nfs_mark_request_dirty(req);
-               set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
+               atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
        next:
                nfs_unlock_and_release_request(req);
                /* Latency breaker */
index e8c00dda42adbbff372908fdcb3d7d6814567cdf..71f870d497aed7a1f84b805066deaf55cabf3e72 100644 (file)
@@ -84,8 +84,8 @@ static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
 /*
  * attr_load_runs - Load all runs stored in @attr.
  */
-int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
-                  struct runs_tree *run, const CLST *vcn)
+static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
+                         struct runs_tree *run, const CLST *vcn)
 {
        int err;
        CLST svcn = le64_to_cpu(attr->nres.svcn);
@@ -140,7 +140,10 @@ failed:
                }
 
                if (lcn != SPARSE_LCN) {
-                       mark_as_free_ex(sbi, lcn, clen, trim);
+                       if (sbi) {
+                               /* mark bitmap range [lcn + clen) as free and trim clusters. */
+                               mark_as_free_ex(sbi, lcn, clen, trim);
+                       }
                        dn += clen;
                }
 
@@ -173,7 +176,6 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 {
        int err;
        CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
-       struct wnd_bitmap *wnd = &sbi->used.bitmap;
        size_t cnt = run->count;
 
        for (;;) {
@@ -196,9 +198,7 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
                /* Add new fragment into run storage. */
                if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
                        /* Undo last 'ntfs_look_for_free_space' */
-                       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
-                       wnd_set_free(wnd, lcn, flen);
-                       up_write(&wnd->rw_lock);
+                       mark_as_free_ex(sbi, lcn, len, false);
                        err = -ENOMEM;
                        goto out;
                }
@@ -320,7 +320,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 
        err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
                                    attr_s->name_len, run, 0, alen,
-                                   attr_s->flags, &attr, NULL);
+                                   attr_s->flags, &attr, NULL, NULL);
        if (err)
                goto out3;
 
@@ -419,40 +419,44 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
        struct mft_inode *mi, *mi_b;
        CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
        CLST next_svcn, pre_alloc = -1, done = 0;
-       bool is_ext;
+       bool is_ext, is_bad = false;
        u32 align;
        struct MFT_REC *rec;
 
 again:
+       alen = 0;
        le_b = NULL;
        attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
                              &mi_b);
        if (!attr_b) {
                err = -ENOENT;
-               goto out;
+               goto bad_inode;
        }
 
        if (!attr_b->non_res) {
                err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
                                        &attr_b);
-               if (err || !attr_b->non_res)
-                       goto out;
+               if (err)
+                       return err;
+
+               /* Return if file is still resident. */
+               if (!attr_b->non_res)
+                       goto ok1;
 
                /* Layout of records may be changed, so do a full search. */
                goto again;
        }
 
        is_ext = is_attr_ext(attr_b);
-
-again_1:
        align = sbi->cluster_size;
-
        if (is_ext)
                align <<= attr_b->nres.c_unit;
 
        old_valid = le64_to_cpu(attr_b->nres.valid_size);
        old_size = le64_to_cpu(attr_b->nres.data_size);
        old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
+
+again_1:
        old_alen = old_alloc >> cluster_bits;
 
        new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
@@ -475,24 +479,27 @@ again_1:
                mi = mi_b;
        } else if (!le_b) {
                err = -EINVAL;
-               goto out;
+               goto bad_inode;
        } else {
                le = le_b;
                attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
                                    &mi);
                if (!attr) {
                        err = -EINVAL;
-                       goto out;
+                       goto bad_inode;
                }
 
 next_le_1:
                svcn = le64_to_cpu(attr->nres.svcn);
                evcn = le64_to_cpu(attr->nres.evcn);
        }
-
+       /*
+        * Here we have:
+        * attr,mi,le - last attribute segment (containing 'vcn').
+        * attr_b,mi_b,le_b - base (primary) attribute segment.
+        */
 next_le:
        rec = mi->mrec;
-
        err = attr_load_runs(attr, ni, run, NULL);
        if (err)
                goto out;
@@ -507,6 +514,13 @@ next_le:
                        goto ok;
                }
 
+               /*
+                * Add clusters. In simple case we have to:
+                *  - allocate space (vcn, lcn, len)
+                *  - update packed run in 'mi'
+                *  - update attr->nres.evcn
+                *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
+                */
                to_allocate = new_alen - old_alen;
 add_alloc_in_same_attr_seg:
                lcn = 0;
@@ -520,9 +534,11 @@ add_alloc_in_same_attr_seg:
                        pre_alloc = 0;
                        if (type == ATTR_DATA && !name_len &&
                            sbi->options->prealloc) {
-                               CLST new_alen2 = bytes_to_cluster(
-                                       sbi, get_pre_allocated(new_size));
-                               pre_alloc = new_alen2 - new_alen;
+                               pre_alloc =
+                                       bytes_to_cluster(
+                                               sbi,
+                                               get_pre_allocated(new_size)) -
+                                       new_alen;
                        }
 
                        /* Get the last LCN to allocate from. */
@@ -580,7 +596,7 @@ add_alloc_in_same_attr_seg:
 pack_runs:
                err = mi_pack_runs(mi, attr, run, vcn - svcn);
                if (err)
-                       goto out;
+                       goto undo_1;
 
                next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
                new_alloc_tmp = (u64)next_svcn << cluster_bits;
@@ -614,7 +630,7 @@ pack_runs:
                if (type == ATTR_LIST) {
                        err = ni_expand_list(ni);
                        if (err)
-                               goto out;
+                               goto undo_2;
                        if (next_svcn < vcn)
                                goto pack_runs;
 
@@ -624,8 +640,9 @@ pack_runs:
 
                if (!ni->attr_list.size) {
                        err = ni_create_attr_list(ni);
+                       /* In case of error layout of records is not changed. */
                        if (err)
-                               goto out;
+                               goto undo_2;
                        /* Layout of records is changed. */
                }
 
@@ -637,48 +654,57 @@ pack_runs:
                /* Insert new attribute segment. */
                err = ni_insert_nonresident(ni, type, name, name_len, run,
                                            next_svcn, vcn - next_svcn,
-                                           attr_b->flags, &attr, &mi);
-               if (err)
-                       goto out;
-
-               if (!is_mft)
-                       run_truncate_head(run, evcn + 1);
-
-               svcn = le64_to_cpu(attr->nres.svcn);
-               evcn = le64_to_cpu(attr->nres.evcn);
+                                           attr_b->flags, &attr, &mi, NULL);
 
-               le_b = NULL;
                /*
                 * Layout of records may be changed.
                 * Find base attribute to update.
                 */
+               le_b = NULL;
                attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
                                      NULL, &mi_b);
                if (!attr_b) {
-                       err = -ENOENT;
-                       goto out;
+                       err = -EINVAL;
+                       goto bad_inode;
                }
 
-               attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
-               attr_b->nres.data_size = attr_b->nres.alloc_size;
-               attr_b->nres.valid_size = attr_b->nres.alloc_size;
+               if (err) {
+                       /* ni_insert_nonresident failed. */
+                       attr = NULL;
+                       goto undo_2;
+               }
+
+               if (!is_mft)
+                       run_truncate_head(run, evcn + 1);
+
+               svcn = le64_to_cpu(attr->nres.svcn);
+               evcn = le64_to_cpu(attr->nres.evcn);
+
+               /*
+                * Attribute is in consistency state.
+                * Save this point to restore to if next steps fail.
+                */
+               old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
+               attr_b->nres.valid_size = attr_b->nres.data_size =
+                       attr_b->nres.alloc_size = cpu_to_le64(old_size);
                mi_b->dirty = true;
                goto again_1;
        }
 
        if (new_size != old_size ||
            (new_alloc != old_alloc && !keep_prealloc)) {
+               /*
+                * Truncate clusters. In simple case we have to:
+                *  - update packed run in 'mi'
+                *  - update attr->nres.evcn
+                *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
+                *  - mark and trim clusters as free (vcn, lcn, len)
+                */
+               CLST dlen = 0;
+
                vcn = max(svcn, new_alen);
                new_alloc_tmp = (u64)vcn << cluster_bits;
 
-               alen = 0;
-               err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
-                                       true);
-               if (err)
-                       goto out;
-
-               run_truncate(run, vcn);
-
                if (vcn > svcn) {
                        err = mi_pack_runs(mi, attr, run, vcn - svcn);
                        if (err)
@@ -697,7 +723,7 @@ pack_runs:
 
                        if (!al_remove_le(ni, le)) {
                                err = -EINVAL;
-                               goto out;
+                               goto bad_inode;
                        }
 
                        le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
@@ -723,12 +749,20 @@ pack_runs:
                                attr_b->nres.valid_size =
                                        attr_b->nres.alloc_size;
                }
+               mi_b->dirty = true;
 
-               if (is_ext)
+               err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
+                                       true);
+               if (err)
+                       goto out;
+
+               if (is_ext) {
+                       /* dlen - really deallocated clusters. */
                        le64_sub_cpu(&attr_b->nres.total_size,
-                                    ((u64)alen << cluster_bits));
+                                    ((u64)dlen << cluster_bits));
+               }
 
-               mi_b->dirty = true;
+               run_truncate(run, vcn);
 
                if (new_alloc_tmp <= new_alloc)
                        goto ok;
@@ -747,7 +781,7 @@ pack_runs:
                if (le->type != type || le->name_len != name_len ||
                    memcmp(le_name(le), name, name_len * sizeof(short))) {
                        err = -EINVAL;
-                       goto out;
+                       goto bad_inode;
                }
 
                err = ni_load_mi(ni, le, &mi);
@@ -757,7 +791,7 @@ pack_runs:
                attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
                if (!attr) {
                        err = -EINVAL;
-                       goto out;
+                       goto bad_inode;
                }
                goto next_le_1;
        }
@@ -772,13 +806,13 @@ ok:
                }
        }
 
-out:
-       if (!err && attr_b && ret)
+ok1:
+       if (ret)
                *ret = attr_b;
 
        /* Update inode_set_bytes. */
-       if (!err && ((type == ATTR_DATA && !name_len) ||
-                    (type == ATTR_ALLOC && name == I30_NAME))) {
+       if (((type == ATTR_DATA && !name_len) ||
+            (type == ATTR_ALLOC && name == I30_NAME))) {
                bool dirty = false;
 
                if (ni->vfs_inode.i_size != new_size) {
@@ -786,7 +820,7 @@ out:
                        dirty = true;
                }
 
-               if (attr_b && attr_b->non_res) {
+               if (attr_b->non_res) {
                        new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
                        if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
                                inode_set_bytes(&ni->vfs_inode, new_alloc);
@@ -800,6 +834,47 @@ out:
                }
        }
 
+       return 0;
+
+undo_2:
+       vcn -= alen;
+       attr_b->nres.data_size = cpu_to_le64(old_size);
+       attr_b->nres.valid_size = cpu_to_le64(old_valid);
+       attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
+
+       /* Restore 'attr' and 'mi'. */
+       if (attr)
+               goto restore_run;
+
+       if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
+           svcn <= le64_to_cpu(attr_b->nres.evcn)) {
+               attr = attr_b;
+               le = le_b;
+               mi = mi_b;
+       } else if (!le_b) {
+               err = -EINVAL;
+               goto bad_inode;
+       } else {
+               le = le_b;
+               attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
+                                   &svcn, &mi);
+               if (!attr)
+                       goto bad_inode;
+       }
+
+restore_run:
+       if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
+               is_bad = true;
+
+undo_1:
+       run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
+
+       run_truncate(run, vcn);
+out:
+       if (is_bad) {
+bad_inode:
+               _ntfs_bad_inode(&ni->vfs_inode);
+       }
        return err;
 }
 
@@ -855,7 +930,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
                goto out;
        }
 
-       asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
+       asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
        if (vcn >= asize) {
                err = -EINVAL;
                goto out;
@@ -1047,7 +1122,7 @@ ins_ext:
        if (evcn1 > next_svcn) {
                err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
                                            next_svcn, evcn1 - next_svcn,
-                                           attr_b->flags, &attr, &mi);
+                                           attr_b->flags, &attr, &mi, NULL);
                if (err)
                        goto out;
        }
@@ -1173,7 +1248,7 @@ int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
 {
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        u8 cluster_bits = sbi->cluster_bits;
-       CLST vcn = from >> cluster_bits;
+       CLST vcn;
        CLST vcn_last = (to - 1) >> cluster_bits;
        CLST lcn, clen;
        int err;
@@ -1647,7 +1722,7 @@ ins_ext:
        if (evcn1 > next_svcn) {
                err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
                                            next_svcn, evcn1 - next_svcn,
-                                           attr_b->flags, &attr, &mi);
+                                           attr_b->flags, &attr, &mi, NULL);
                if (err)
                        goto out;
        }
@@ -1812,18 +1887,12 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
                                err = ni_insert_nonresident(
                                        ni, ATTR_DATA, NULL, 0, run, next_svcn,
                                        evcn1 - eat - next_svcn, a_flags, &attr,
-                                       &mi);
+                                       &mi, &le);
                                if (err)
                                        goto out;
 
                                /* Layout of records may be changed. */
                                attr_b = NULL;
-                               le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
-                                               &next_svcn);
-                               if (!le) {
-                                       err = -EINVAL;
-                                       goto out;
-                               }
                        }
 
                        /* Free all allocated memory. */
@@ -1918,7 +1987,7 @@ next_attr:
 out:
        up_write(&ni->file.run_lock);
        if (err)
-               make_bad_inode(&ni->vfs_inode);
+               _ntfs_bad_inode(&ni->vfs_inode);
 
        return err;
 }
@@ -1936,9 +2005,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
        struct ATTRIB *attr = NULL, *attr_b;
        struct ATTR_LIST_ENTRY *le, *le_b;
        struct mft_inode *mi, *mi_b;
-       CLST svcn, evcn1, vcn, len, end, alen, dealloc;
+       CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
        u64 total_size, alloc_size;
        u32 mask;
+       __le16 a_flags;
+       struct runs_tree run2;
 
        if (!bytes)
                return 0;
@@ -1990,6 +2061,9 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
        }
 
        down_write(&ni->file.run_lock);
+       run_init(&run2);
+       run_truncate(run, 0);
+
        /*
         * Enumerate all attribute segments and punch hole where necessary.
         */
@@ -1997,10 +2071,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
        vcn = vbo >> sbi->cluster_bits;
        len = bytes >> sbi->cluster_bits;
        end = vcn + len;
-       dealloc = 0;
+       hole = 0;
 
        svcn = le64_to_cpu(attr_b->nres.svcn);
        evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+       a_flags = attr_b->flags;
 
        if (svcn <= vcn && vcn < evcn1) {
                attr = attr_b;
@@ -2008,14 +2083,14 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
                mi = mi_b;
        } else if (!le_b) {
                err = -EINVAL;
-               goto out;
+               goto bad_inode;
        } else {
                le = le_b;
                attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
                                    &mi);
                if (!attr) {
                        err = -EINVAL;
-                       goto out;
+                       goto bad_inode;
                }
 
                svcn = le64_to_cpu(attr->nres.svcn);
@@ -2023,49 +2098,91 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
        }
 
        while (svcn < end) {
-               CLST vcn1, zero, dealloc2;
+               CLST vcn1, zero, hole2 = hole;
 
                err = attr_load_runs(attr, ni, run, &svcn);
                if (err)
-                       goto out;
+                       goto done;
                vcn1 = max(vcn, svcn);
                zero = min(end, evcn1) - vcn1;
 
-               dealloc2 = dealloc;
-               err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
+               /*
+                * Check range [vcn1, vcn1 + zero).
+                * Calculate how many clusters there are.
+                * Don't do any destructive actions.
+                */
+               err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
                if (err)
-                       goto out;
+                       goto done;
 
-               if (dealloc2 == dealloc) {
-                       /* Looks like the required range is already sparsed. */
-               } else {
-                       if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
-                                          false)) {
-                               err = -ENOMEM;
-                               goto out;
-                       }
+               /* Check if required range is already hole. */
+               if (hole2 == hole)
+                       goto next_attr;
+
+               /* Make a clone of run to undo. */
+               err = run_clone(run, &run2);
+               if (err)
+                       goto done;
+
+               /* Make a hole range (sparse) [vcn1, vcn1 + zero). */
+               if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
+                       err = -ENOMEM;
+                       goto done;
+               }
 
-                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
+               /* Update run in attribute segment. */
+               err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
+               if (err)
+                       goto done;
+               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
+               if (next_svcn < evcn1) {
+                       /* Insert new attribute segment. */
+                       err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
+                                                   next_svcn,
+                                                   evcn1 - next_svcn, a_flags,
+                                                   &attr, &mi, &le);
                        if (err)
-                               goto out;
+                               goto undo_punch;
+
+                       /* Layout of records may be changed. */
+                       attr_b = NULL;
                }
+
+               /* Real deallocate. Should not fail. */
+               run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
+
+next_attr:
                /* Free all allocated memory. */
                run_truncate(run, 0);
 
                if (evcn1 >= alen)
                        break;
 
+               /* Get next attribute segment. */
                attr = ni_enum_attr_ex(ni, attr, &le, &mi);
                if (!attr) {
                        err = -EINVAL;
-                       goto out;
+                       goto bad_inode;
                }
 
                svcn = le64_to_cpu(attr->nres.svcn);
                evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
        }
 
-       total_size -= (u64)dealloc << sbi->cluster_bits;
+done:
+       if (!hole)
+               goto out;
+
+       if (!attr_b) {
+               attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
+                                     &mi_b);
+               if (!attr_b) {
+                       err = -EINVAL;
+                       goto bad_inode;
+               }
+       }
+
+       total_size -= (u64)hole << sbi->cluster_bits;
        attr_b->nres.total_size = cpu_to_le64(total_size);
        mi_b->dirty = true;
 
@@ -2075,9 +2192,263 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
        mark_inode_dirty(&ni->vfs_inode);
 
 out:
+       run_close(&run2);
        up_write(&ni->file.run_lock);
+       return err;
+
+bad_inode:
+       _ntfs_bad_inode(&ni->vfs_inode);
+       goto out;
+
+undo_punch:
+       /*
+        * Restore packed runs.
+        * 'mi_pack_runs' should not fail, because we restore the original.
+        */
+       if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
+               goto bad_inode;
+
+       goto done;
+}
+
+/*
+ * attr_insert_range - Insert range (hole) in file.
+ * Not for normal files.
+ */
+int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+{
+       int err = 0;
+       struct runs_tree *run = &ni->file.run;
+       struct ntfs_sb_info *sbi = ni->mi.sbi;
+       struct ATTRIB *attr = NULL, *attr_b;
+       struct ATTR_LIST_ENTRY *le, *le_b;
+       struct mft_inode *mi, *mi_b;
+       CLST vcn, svcn, evcn1, len, next_svcn;
+       u64 data_size, alloc_size;
+       u32 mask;
+       __le16 a_flags;
+
+       if (!bytes)
+               return 0;
+
+       le_b = NULL;
+       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
+       if (!attr_b)
+               return -ENOENT;
+
+       if (!is_attr_ext(attr_b)) {
+               /* It was checked above. See fallocate. */
+               return -EOPNOTSUPP;
+       }
+
+       if (!attr_b->non_res) {
+               data_size = le32_to_cpu(attr_b->res.data_size);
+               alloc_size = data_size;
+               mask = sbi->cluster_mask; /* cluster_size - 1 */
+       } else {
+               data_size = le64_to_cpu(attr_b->nres.data_size);
+               alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
+               mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
+       }
+
+       if (vbo > data_size) {
+               /* Insert range after the file size is not allowed. */
+               return -EINVAL;
+       }
+
+       if ((vbo & mask) || (bytes & mask)) {
+               /* Allow to insert only frame aligned ranges. */
+               return -EINVAL;
+       }
+
+       /*
+        * valid_size <= data_size <= alloc_size
+        * Check alloc_size for maximum possible.
+        */
+       if (bytes > sbi->maxbytes_sparse - alloc_size)
+               return -EFBIG;
+
+       vcn = vbo >> sbi->cluster_bits;
+       len = bytes >> sbi->cluster_bits;
+
+       down_write(&ni->file.run_lock);
+
+       if (!attr_b->non_res) {
+               err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
+                                   data_size + bytes, NULL, false, NULL);
+
+               le_b = NULL;
+               attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
+                                     &mi_b);
+               if (!attr_b) {
+                       err = -EINVAL;
+                       goto bad_inode;
+               }
+
+               if (err)
+                       goto out;
+
+               if (!attr_b->non_res) {
+                       /* Still resident. */
+                       char *data = Add2Ptr(attr_b, attr_b->res.data_off);
+
+                       memmove(data + bytes, data, bytes);
+                       memset(data, 0, bytes);
+                       goto done;
+               }
+
+               /* Resident file becomes nonresident. */
+               data_size = le64_to_cpu(attr_b->nres.data_size);
+               alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
+       }
+
+       /*
+        * Enumerate all attribute segments and shift start vcn.
+        */
+       a_flags = attr_b->flags;
+       svcn = le64_to_cpu(attr_b->nres.svcn);
+       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+
+       if (svcn <= vcn && vcn < evcn1) {
+               attr = attr_b;
+               le = le_b;
+               mi = mi_b;
+       } else if (!le_b) {
+               err = -EINVAL;
+               goto bad_inode;
+       } else {
+               le = le_b;
+               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
+                                   &mi);
+               if (!attr) {
+                       err = -EINVAL;
+                       goto bad_inode;
+               }
+
+               svcn = le64_to_cpu(attr->nres.svcn);
+               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+       }
+
+       run_truncate(run, 0); /* clear cached values. */
+       err = attr_load_runs(attr, ni, run, NULL);
+       if (err)
+               goto out;
+
+       if (!run_insert_range(run, vcn, len)) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       /* Try to pack in current record as much as possible. */
+       err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
        if (err)
-               make_bad_inode(&ni->vfs_inode);
+               goto out;
+
+       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
+
+       while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
+              attr->type == ATTR_DATA && !attr->name_len) {
+               le64_add_cpu(&attr->nres.svcn, len);
+               le64_add_cpu(&attr->nres.evcn, len);
+               if (le) {
+                       le->vcn = attr->nres.svcn;
+                       ni->attr_list.dirty = true;
+               }
+               mi->dirty = true;
+       }
+
+       if (next_svcn < evcn1 + len) {
+               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
+                                           next_svcn, evcn1 + len - next_svcn,
+                                           a_flags, NULL, NULL, NULL);
+
+               le_b = NULL;
+               attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
+                                     &mi_b);
+               if (!attr_b) {
+                       err = -EINVAL;
+                       goto bad_inode;
+               }
+
+               if (err) {
+                       /* ni_insert_nonresident failed. Try to undo. */
+                       goto undo_insert_range;
+               }
+       }
+
+       /*
+        * Update primary attribute segment.
+        */
+       if (vbo <= ni->i_valid)
+               ni->i_valid += bytes;
+
+       attr_b->nres.data_size = le64_to_cpu(data_size + bytes);
+       attr_b->nres.alloc_size = le64_to_cpu(alloc_size + bytes);
+
+       /* ni->valid may not be equal to valid_size (temporary). */
+       if (ni->i_valid > data_size + bytes)
+               attr_b->nres.valid_size = attr_b->nres.data_size;
+       else
+               attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
+       mi_b->dirty = true;
+
+done:
+       ni->vfs_inode.i_size += bytes;
+       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+       mark_inode_dirty(&ni->vfs_inode);
+
+out:
+       run_truncate(run, 0); /* clear cached values. */
+
+       up_write(&ni->file.run_lock);
 
        return err;
+
+bad_inode:
+       _ntfs_bad_inode(&ni->vfs_inode);
+       goto out;
+
+undo_insert_range:
+       svcn = le64_to_cpu(attr_b->nres.svcn);
+       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+
+       if (svcn <= vcn && vcn < evcn1) {
+               attr = attr_b;
+               le = le_b;
+               mi = mi_b;
+       } else if (!le_b) {
+               goto bad_inode;
+       } else {
+               le = le_b;
+               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
+                                   &mi);
+               if (!attr) {
+                       goto bad_inode;
+               }
+
+               svcn = le64_to_cpu(attr->nres.svcn);
+               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+       }
+
+       if (attr_load_runs(attr, ni, run, NULL))
+               goto bad_inode;
+
+       if (!run_collapse_range(run, vcn, len))
+               goto bad_inode;
+
+       if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
+               goto bad_inode;
+
+       while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
+              attr->type == ATTR_DATA && !attr->name_len) {
+               le64_sub_cpu(&attr->nres.svcn, len);
+               le64_sub_cpu(&attr->nres.evcn, len);
+               if (le) {
+                       le->vcn = attr->nres.svcn;
+                       ni->attr_list.dirty = true;
+               }
+               mi->dirty = true;
+       }
+
+       goto out;
 }
index aa184407520f0263844839e03d2f1edde3c6628f..5d44ceac855b7477afc0aa5e8e418bea881fd070 100644 (file)
@@ -51,11 +51,6 @@ void ntfs3_exit_bitmap(void)
        kmem_cache_destroy(ntfs_enode_cachep);
 }
 
-static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
-{
-       return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
-}
-
 /*
  * wnd_scan
  *
@@ -1333,9 +1328,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
                if (!new_free)
                        return -ENOMEM;
 
-               if (new_free != wnd->free_bits)
-                       memcpy(new_free, wnd->free_bits,
-                              wnd->nwnd * sizeof(short));
+               memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
                memset(new_free + wnd->nwnd, 0,
                       (new_wnd - wnd->nwnd) * sizeof(short));
                kfree(wnd->free_bits);
@@ -1395,9 +1388,8 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
 
 void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
 {
-       size_t zlen;
+       size_t zlen = wnd->zone_end - wnd->zone_bit;
 
-       zlen = wnd->zone_end - wnd->zone_bit;
        if (zlen)
                wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
 
index 4a21745711fec991e7e6076f25eb6fb93b7ae485..4f2ffc7ef296f1f7a8bb22a330b4321a0e6fbcc9 100644 (file)
@@ -530,21 +530,35 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
 static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 {
        struct inode *inode = file->f_mapping->host;
+       struct address_space *mapping = inode->i_mapping;
        struct super_block *sb = inode->i_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct ntfs_inode *ni = ntfs_i(inode);
        loff_t end = vbo + len;
        loff_t vbo_down = round_down(vbo, PAGE_SIZE);
-       loff_t i_size;
+       bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
+       loff_t i_size, new_size;
+       bool map_locked;
        int err;
 
        /* No support for dir. */
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;
 
-       /* Return error if mode is not supported. */
-       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
-                    FALLOC_FL_COLLAPSE_RANGE)) {
+       /*
+        * vfs_fallocate checks all possible combinations of mode.
+        * Do additional checks here before ntfs_set_state(dirty).
+        */
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               if (!is_supported_holes)
+                       return -EOPNOTSUPP;
+       } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+       } else if (mode & FALLOC_FL_INSERT_RANGE) {
+               if (!is_supported_holes)
+                       return -EOPNOTSUPP;
+       } else if (mode &
+                  ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+                    FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
                ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
                                mode);
                return -EOPNOTSUPP;
@@ -554,6 +568,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 
        inode_lock(inode);
        i_size = inode->i_size;
+       new_size = max(end, i_size);
+       map_locked = false;
 
        if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                /* Should never be here, see ntfs_file_open. */
@@ -561,38 +577,27 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                goto out;
        }
 
+       if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+                   FALLOC_FL_INSERT_RANGE)) {
+               inode_dio_wait(inode);
+               filemap_invalidate_lock(mapping);
+               map_locked = true;
+       }
+
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                u32 frame_size;
                loff_t mask, vbo_a, end_a, tmp;
 
-               if (!(mode & FALLOC_FL_KEEP_SIZE)) {
-                       err = -EINVAL;
-                       goto out;
-               }
-
-               err = filemap_write_and_wait_range(inode->i_mapping, vbo,
-                                                  end - 1);
+               err = filemap_write_and_wait_range(mapping, vbo, end - 1);
                if (err)
                        goto out;
 
-               err = filemap_write_and_wait_range(inode->i_mapping, end,
-                                                  LLONG_MAX);
+               err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
                if (err)
                        goto out;
 
-               inode_dio_wait(inode);
-
                truncate_pagecache(inode, vbo_down);
 
-               if (!is_sparsed(ni) && !is_compressed(ni)) {
-                       /*
-                        * Normal file, can't make hole.
-                        * TODO: Try to find way to save info about hole.
-                        */
-                       err = -EOPNOTSUPP;
-                       goto out;
-               }
-
                ni_lock(ni);
                err = attr_punch_hole(ni, vbo, len, &frame_size);
                ni_unlock(ni);
@@ -624,17 +629,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                        ni_unlock(ni);
                }
        } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
-               if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
-                       err = -EINVAL;
-                       goto out;
-               }
-
                /*
                 * Write tail of the last page before removed range since
                 * it will get removed from the page cache below.
                 */
-               err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
-                                                  vbo);
+               err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
                if (err)
                        goto out;
 
@@ -642,34 +641,58 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                 * Write data that will be shifted to preserve them
                 * when discarding page cache below.
                 */
-               err = filemap_write_and_wait_range(inode->i_mapping, end,
-                                                  LLONG_MAX);
+               err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
                if (err)
                        goto out;
 
-               /* Wait for existing dio to complete. */
-               inode_dio_wait(inode);
-
                truncate_pagecache(inode, vbo_down);
 
                ni_lock(ni);
                err = attr_collapse_range(ni, vbo, len);
                ni_unlock(ni);
+       } else if (mode & FALLOC_FL_INSERT_RANGE) {
+               /* Check new size. */
+               err = inode_newsize_ok(inode, new_size);
+               if (err)
+                       goto out;
+
+               /* Write out all dirty pages. */
+               err = filemap_write_and_wait_range(mapping, vbo_down,
+                                                  LLONG_MAX);
+               if (err)
+                       goto out;
+               truncate_pagecache(inode, vbo_down);
+
+               ni_lock(ni);
+               err = attr_insert_range(ni, vbo, len);
+               ni_unlock(ni);
        } else {
-               /*
-                * Normal file: Allocate clusters, do not change 'valid' size.
-                */
-               loff_t new_size = max(end, i_size);
+               /* Check new size. */
+
+               /* generic/213: expected -ENOSPC instead of -EFBIG. */
+               if (!is_supported_holes) {
+                       loff_t to_alloc = new_size - inode_get_bytes(inode);
+
+                       if (to_alloc > 0 &&
+                           (to_alloc >> sbi->cluster_bits) >
+                                   wnd_zeroes(&sbi->used.bitmap)) {
+                               err = -ENOSPC;
+                               goto out;
+                       }
+               }
 
                err = inode_newsize_ok(inode, new_size);
                if (err)
                        goto out;
 
+               /*
+                * Allocate clusters, do not change 'valid' size.
+                */
                err = ntfs_set_size(inode, new_size);
                if (err)
                        goto out;
 
-               if (is_sparsed(ni) || is_compressed(ni)) {
+               if (is_supported_holes) {
                        CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
                        CLST vcn = vbo >> sbi->cluster_bits;
                        CLST cend = bytes_to_cluster(sbi, end);
@@ -717,8 +740,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
        }
 
 out:
-       if (err == -EFBIG)
-               err = -ENOSPC;
+       if (map_locked)
+               filemap_invalidate_unlock(mapping);
 
        if (!err) {
                inode->i_ctime = inode->i_mtime = current_time(inode);
@@ -989,7 +1012,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
                if (bytes > count)
                        bytes = count;
 
-               frame = pos >> frame_bits;
                frame_vbo = pos & ~(frame_size - 1);
                index = frame_vbo >> PAGE_SHIFT;
 
index 18842998c8fa3bdfc370fd01da437774e8fd6414..381a38a06ec2209fd1f98fb896000d56b174623f 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/fiemap.h>
 #include <linux/fs.h>
+#include <linux/minmax.h>
 #include <linux/vmalloc.h>
 
 #include "debug.h"
@@ -468,7 +469,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
                                &ref, &le);
                if (err) {
                        /* No memory or no space. */
-                       return NULL;
+                       return ERR_PTR(err);
                }
                le_added = true;
 
@@ -649,6 +650,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
        struct mft_inode *mi;
        u32 asize, free;
        struct MFT_REF ref;
+       struct MFT_REC *mrec;
        __le16 id;
 
        if (!ni->attr_list.dirty)
@@ -692,11 +694,17 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
                free -= asize;
        }
 
+       /* Make a copy of primary record to restore if error. */
+       mrec = kmemdup(ni->mi.mrec, sbi->record_size, GFP_NOFS);
+       if (!mrec)
+               return 0; /* Not critical. */
+
        /* It seems that attribute list can be removed from primary record. */
        mi_remove_attr(NULL, &ni->mi, attr_list);
 
        /*
-        * Repeat the cycle above and move all attributes to primary record.
+        * Repeat the cycle above and copy all attributes to primary record.
+        * Do not remove original attributes from subrecords!
         * It should be success!
         */
        le = NULL;
@@ -707,14 +715,14 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
                mi = ni_find_mi(ni, ino_get(&le->ref));
                if (!mi) {
                        /* Should never happened, 'cause already checked. */
-                       goto bad;
+                       goto out;
                }
 
                attr = mi_find_attr(mi, NULL, le->type, le_name(le),
                                    le->name_len, &le->id);
                if (!attr) {
                        /* Should never happened, 'cause already checked. */
-                       goto bad;
+                       goto out;
                }
                asize = le32_to_cpu(attr->size);
 
@@ -724,18 +732,33 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
                                          le16_to_cpu(attr->name_off));
                if (!attr_ins) {
                        /*
-                        * Internal error.
-                        * Either no space in primary record (already checked).
-                        * Either tried to insert another
-                        * non indexed attribute (logic error).
+                        * No space in primary record (already checked).
                         */
-                       goto bad;
+                       goto out;
                }
 
                /* Copy all except id. */
                id = attr_ins->id;
                memcpy(attr_ins, attr, asize);
                attr_ins->id = id;
+       }
+
+       /*
+        * Repeat the cycle above and remove all attributes from subrecords.
+        */
+       le = NULL;
+       while ((le = al_enumerate(ni, le))) {
+               if (!memcmp(&le->ref, &ref, sizeof(ref)))
+                       continue;
+
+               mi = ni_find_mi(ni, ino_get(&le->ref));
+               if (!mi)
+                       continue;
+
+               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+                                   le->name_len, &le->id);
+               if (!attr)
+                       continue;
 
                /* Remove from original record. */
                mi_remove_attr(NULL, mi, attr);
@@ -748,11 +771,13 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
        ni->attr_list.le = NULL;
        ni->attr_list.dirty = false;
 
+       kfree(mrec);
+       return 0;
+out:
+       /* Restore primary record. */
+       swap(mrec, ni->mi.mrec);
+       kfree(mrec);
        return 0;
-bad:
-       ntfs_inode_err(&ni->vfs_inode, "Internal error");
-       make_bad_inode(&ni->vfs_inode);
-       return -EINVAL;
 }
 
 /*
@@ -986,6 +1011,8 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
                                       name_off, svcn, ins_le);
                if (!attr)
                        continue;
+               if (IS_ERR(attr))
+                       return PTR_ERR(attr);
 
                if (ins_attr)
                        *ins_attr = attr;
@@ -1007,8 +1034,15 @@ insert_ext:
 
        attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
                               name_off, svcn, ins_le);
-       if (!attr)
+       if (!attr) {
+               err = -EINVAL;
                goto out2;
+       }
+
+       if (IS_ERR(attr)) {
+               err = PTR_ERR(attr);
+               goto out2;
+       }
 
        if (ins_attr)
                *ins_attr = attr;
@@ -1020,10 +1054,9 @@ insert_ext:
 out2:
        ni_remove_mi(ni, mi);
        mi_put(mi);
-       err = -EINVAL;
 
 out1:
-       ntfs_mark_rec_free(sbi, rno);
+       ntfs_mark_rec_free(sbi, rno, is_mft);
 
 out:
        return err;
@@ -1076,6 +1109,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
        if (asize <= free) {
                attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
                                       asize, name_off, svcn, ins_le);
+               if (IS_ERR(attr)) {
+                       err = PTR_ERR(attr);
+                       goto out;
+               }
+
                if (attr) {
                        if (ins_attr)
                                *ins_attr = attr;
@@ -1173,6 +1211,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
                goto out;
        }
 
+       if (IS_ERR(attr)) {
+               err = PTR_ERR(attr);
+               goto out;
+       }
+
        if (ins_attr)
                *ins_attr = attr;
        if (ins_mi)
@@ -1218,7 +1261,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
                mft_min = mft_new;
                mi_min = mi_new;
        } else {
-               ntfs_mark_rec_free(sbi, mft_new);
+               ntfs_mark_rec_free(sbi, mft_new, true);
                mft_new = 0;
                ni_remove_mi(ni, mi_new);
        }
@@ -1262,7 +1305,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
        done = asize - run_size - SIZEOF_NONRESIDENT;
        le32_sub_cpu(&ni->mi.mrec->used, done);
 
-       /* Estimate the size of second part: run_buf=NULL. */
+       /* Estimate packed size (run_buf=NULL). */
        err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
                       &plen);
        if (err < 0)
@@ -1288,10 +1331,16 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
                goto out;
        }
 
+       if (IS_ERR(attr)) {
+               err = PTR_ERR(attr);
+               goto out;
+       }
+
        attr->non_res = 1;
        attr->name_off = SIZEOF_NONRESIDENT_LE;
        attr->flags = 0;
 
+       /* This function can't fail - cause already checked above. */
        run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
                 run_size, &plen);
 
@@ -1301,7 +1350,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
 
 out:
        if (mft_new) {
-               ntfs_mark_rec_free(sbi, mft_new);
+               ntfs_mark_rec_free(sbi, mft_new, true);
                ni_remove_mi(ni, mi_new);
        }
 
@@ -1367,8 +1416,6 @@ int ni_expand_list(struct ntfs_inode *ni)
 
        /* Split MFT data as much as possible. */
        err = ni_expand_mft_list(ni);
-       if (err)
-               goto out;
 
 out:
        return !err && !done ? -EOPNOTSUPP : err;
@@ -1381,7 +1428,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
                          const __le16 *name, u8 name_len,
                          const struct runs_tree *run, CLST svcn, CLST len,
                          __le16 flags, struct ATTRIB **new_attr,
-                         struct mft_inode **mi)
+                         struct mft_inode **mi, struct ATTR_LIST_ENTRY **le)
 {
        int err;
        CLST plen;
@@ -1394,6 +1441,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
        u32 run_size, asize;
        struct ntfs_sb_info *sbi = ni->mi.sbi;
 
+       /* Estimate packed size (run_buf=NULL). */
        err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
                       &plen);
        if (err < 0)
@@ -1414,7 +1462,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
        }
 
        err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
-                            &attr, mi, NULL);
+                            &attr, mi, le);
 
        if (err)
                goto out;
@@ -1423,12 +1471,12 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
        attr->name_off = cpu_to_le16(name_off);
        attr->flags = flags;
 
+       /* This function can't fail - cause already checked above. */
        run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
 
        attr->nres.svcn = cpu_to_le64(svcn);
        attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
 
-       err = 0;
        if (new_attr)
                *new_attr = attr;
 
@@ -1560,7 +1608,7 @@ int ni_delete_all(struct ntfs_inode *ni)
                mi->dirty = true;
                mi_write(mi, 0);
 
-               ntfs_mark_rec_free(sbi, mi->rno);
+               ntfs_mark_rec_free(sbi, mi->rno, false);
                ni_remove_mi(ni, mi);
                mi_put(mi);
                node = next;
@@ -1571,7 +1619,7 @@ int ni_delete_all(struct ntfs_inode *ni)
        ni->mi.dirty = true;
        err = mi_write(&ni->mi, 0);
 
-       ntfs_mark_rec_free(sbi, ni->mi.rno);
+       ntfs_mark_rec_free(sbi, ni->mi.rno, false);
 
        return err;
 }
@@ -1589,7 +1637,8 @@ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
        struct ATTRIB *attr = NULL;
        struct ATTR_FILE_NAME *fname;
 
-       *le = NULL;
+       if (le)
+               *le = NULL;
 
        /* Enumerate all names. */
 next:
@@ -1605,7 +1654,7 @@ next:
                goto next;
 
        if (!uni)
-               goto next;
+               return fname;
 
        if (uni->len != fname->name_len)
                goto next;
@@ -2302,10 +2351,8 @@ remove_wof:
 
 out:
        kfree(pages);
-       if (err) {
-               make_bad_inode(inode);
-               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
-       }
+       if (err)
+               _ntfs_bad_inode(inode);
 
        return err;
 }
@@ -2944,7 +2991,7 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
 }
 
 /*
- * ni_add_name - Add new name in MFT and in directory.
+ * ni_add_name - Add new name into MFT and into directory.
  */
 int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
                struct NTFS_DE *de)
@@ -2953,13 +3000,20 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
        struct ATTRIB *attr;
        struct ATTR_LIST_ENTRY *le;
        struct mft_inode *mi;
+       struct ATTR_FILE_NAME *fname;
        struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1);
        u16 de_key_size = le16_to_cpu(de->key_size);
 
        mi_get_ref(&ni->mi, &de->ref);
        mi_get_ref(&dir_ni->mi, &de_name->home);
 
-       /* Insert new name in MFT. */
+       /* Fill duplicate from any ATTR_NAME. */
+       fname = ni_fname_name(ni, NULL, NULL, NULL, NULL);
+       if (fname)
+               memcpy(&de_name->dup, &fname->dup, sizeof(fname->dup));
+       de_name->dup.fa = ni->std_fa;
+
+       /* Insert new name into MFT. */
        err = ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr,
                                 &mi, &le);
        if (err)
@@ -2967,7 +3021,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
 
        memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de_name, de_key_size);
 
-       /* Insert new name in directory. */
+       /* Insert new name into directory. */
        err = indx_insert_entry(&dir_ni->dir, dir_ni, de, ni->mi.sbi, NULL, 0);
        if (err)
                ni_remove_attr_le(ni, attr, mi, le);
@@ -2991,7 +3045,7 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
         * 1) Add new name and remove old name.
         * 2) Remove old name and add new name.
         *
-        * In most cases (not all!) adding new name in MFT and in directory can
+        * In most cases (not all!) adding new name into MFT and into directory can
         * allocate additional cluster(s).
         * Second way may result to bad inode if we can't add new name
         * and then can't restore (add) old name.
@@ -3261,7 +3315,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
                        err = err2;
 
                if (is_empty) {
-                       ntfs_mark_rec_free(sbi, mi->rno);
+                       ntfs_mark_rec_free(sbi, mi->rno, false);
                        rb_erase(node, &ni->mi_tree);
                        mi_put(mi);
                }
index 49b7df6167785d44d811151d84283b16ba744188..e7c494005122c00baf6d48361e1cc85b47d49720 100644 (file)
@@ -3843,6 +3843,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
 
        memset(&rst_info2, 0, sizeof(struct restart_info));
        err = log_read_rst(log, l_size, false, &rst_info2);
+       if (err)
+               goto out;
 
        /* Determine which restart area to use. */
        if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
@@ -5057,7 +5059,7 @@ undo_action_next:
                goto add_allocated_vcns;
 
        vcn = le64_to_cpu(lrh->target_vcn);
-       vcn &= ~(log->clst_per_page - 1);
+       vcn &= ~(u64)(log->clst_per_page - 1);
 
 add_allocated_vcns:
        for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
index 1835e35199c269bfdabd057c5852471dda277c7f..4ed15f64b17f68bab6d794147a09592562b9595f 100644 (file)
@@ -703,12 +703,14 @@ out:
 
 /*
  * ntfs_mark_rec_free - Mark record as free.
+ * is_mft - true if we are changing MFT
  */
-void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
+void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
 {
        struct wnd_bitmap *wnd = &sbi->mft.bitmap;
 
-       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
+       if (!is_mft)
+               down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
        if (rno >= wnd->nbits)
                goto out;
 
@@ -727,7 +729,8 @@ void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
                sbi->mft.next_free = rno;
 
 out:
-       up_write(&wnd->rw_lock);
+       if (!is_mft)
+               up_write(&wnd->rw_lock);
 }
 
 /*
@@ -780,7 +783,7 @@ out:
  */
 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
 {
-       CLST zone_limit, zone_max, lcn, vcn, len;
+       CLST lcn, vcn, len;
        size_t lcn_s, zlen;
        struct wnd_bitmap *wnd = &sbi->used.bitmap;
        struct ntfs_inode *ni = sbi->mft.ni;
@@ -789,16 +792,6 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
        if (wnd_zone_len(wnd))
                return 0;
 
-       /*
-        * Compute the MFT zone at two steps.
-        * It would be nice if we are able to allocate 1/8 of
-        * total clusters for MFT but not more then 512 MB.
-        */
-       zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
-       zone_max = wnd->nbits >> 3;
-       if (zone_max > zone_limit)
-               zone_max = zone_limit;
-
        vcn = bytes_to_cluster(sbi,
                               (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
 
@@ -812,13 +805,7 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
        lcn_s = lcn + 1;
 
        /* Try to allocate clusters after last MFT run. */
-       zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
-       if (!zlen) {
-               ntfs_notice(sbi->sb, "MftZone: unavailable");
-               return 0;
-       }
-
-       /* Truncate too large zone. */
+       zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
        wnd_zone_set(wnd, lcn_s, zlen);
 
        return 0;
@@ -827,16 +814,21 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
 /*
  * ntfs_update_mftmirr - Update $MFTMirr data.
  */
-int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
+void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
 {
        int err;
        struct super_block *sb = sbi->sb;
-       u32 blocksize = sb->s_blocksize;
+       u32 blocksize;
        sector_t block1, block2;
        u32 bytes;
 
+       if (!sb)
+               return;
+
+       blocksize = sb->s_blocksize;
+
        if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
-               return 0;
+               return;
 
        err = 0;
        bytes = sbi->mft.recs_mirr << sbi->record_bits;
@@ -847,16 +839,13 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
                struct buffer_head *bh1, *bh2;
 
                bh1 = sb_bread(sb, block1++);
-               if (!bh1) {
-                       err = -EIO;
-                       goto out;
-               }
+               if (!bh1)
+                       return;
 
                bh2 = sb_getblk(sb, block2++);
                if (!bh2) {
                        put_bh(bh1);
-                       err = -EIO;
-                       goto out;
+                       return;
                }
 
                if (buffer_locked(bh2))
@@ -876,13 +865,24 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
 
                put_bh(bh2);
                if (err)
-                       goto out;
+                       return;
        }
 
        sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
+}
 
-out:
-       return err;
+/*
+ * ntfs_bad_inode
+ *
+ * Marks inode as bad and marks fs as 'dirty'
+ */
+void ntfs_bad_inode(struct inode *inode, const char *hint)
+{
+       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+
+       ntfs_inode_err(inode, "%s", hint);
+       make_bad_inode(inode);
+       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
 }
 
 /*
@@ -1395,7 +1395,7 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
                if (buffer_locked(bh))
                        __wait_on_buffer(bh);
 
-               lock_buffer(nb->bh[idx]);
+               lock_buffer(bh);
 
                bh_data = bh->b_data + off;
                end_data = Add2Ptr(bh_data, op);
@@ -2424,7 +2424,7 @@ static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
 
 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
 {
-       CLST end, i;
+       CLST end, i, zone_len, zlen;
        struct wnd_bitmap *wnd = &sbi->used.bitmap;
 
        down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
@@ -2459,6 +2459,28 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
                ntfs_unmap_and_discard(sbi, lcn, len);
        wnd_set_free(wnd, lcn, len);
 
+       /* append to MFT zone, if possible. */
+       zone_len = wnd_zone_len(wnd);
+       zlen = min(zone_len + len, sbi->zone_max);
+
+       if (zlen == zone_len) {
+               /* MFT zone already has maximum size. */
+       } else if (!zone_len) {
+               /* Create MFT zone only if 'zlen' is large enough. */
+               if (zlen == sbi->zone_max)
+                       wnd_zone_set(wnd, lcn, zlen);
+       } else {
+               CLST zone_lcn = wnd_zone_bit(wnd);
+
+               if (lcn + len == zone_lcn) {
+                       /* Append into head MFT zone. */
+                       wnd_zone_set(wnd, lcn, zlen);
+               } else if (zone_lcn + zone_len == lcn) {
+                       /* Append into tail MFT zone. */
+                       wnd_zone_set(wnd, zone_lcn, zlen);
+               }
+       }
+
 out:
        up_write(&wnd->rw_lock);
 }
index 6f81e3a49abfb30f7aa570e26cf595b3e8599032..440328147e7e39f011a0ee7b11bbfaf7f93b4d45 100644 (file)
@@ -1042,19 +1042,16 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
 {
        int err;
        struct NTFS_DE *e;
-       const struct INDEX_HDR *hdr;
        struct indx_node *node;
 
        if (!root)
                root = indx_get_root(&ni->dir, ni, NULL, NULL);
 
        if (!root) {
-               err = -EINVAL;
-               goto out;
+               /* Should not happen. */
+               return -EINVAL;
        }
 
-       hdr = &root->ihdr;
-
        /* Check cache. */
        e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
        if (e && !de_is_last(e) &&
@@ -1068,39 +1065,35 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
        fnd_clear(fnd);
 
        /* Lookup entry that is <= to the search value. */
-       e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
+       e = hdr_find_e(indx, &root->ihdr, key, key_len, ctx, diff);
        if (!e)
                return -EINVAL;
 
        fnd->root_de = e;
-       err = 0;
 
        for (;;) {
                node = NULL;
-               if (*diff >= 0 || !de_has_vcn_ex(e)) {
-                       *entry = e;
-                       goto out;
-               }
+               if (*diff >= 0 || !de_has_vcn_ex(e))
+                       break;
 
                /* Read next level. */
                err = indx_read(indx, ni, de_get_vbn(e), &node);
                if (err)
-                       goto out;
+                       return err;
 
                /* Lookup entry that is <= to the search value. */
                e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
                               diff);
                if (!e) {
-                       err = -EINVAL;
                        put_indx_node(node);
-                       goto out;
+                       return -EINVAL;
                }
 
                fnd_push(fnd, node, e);
        }
 
-out:
-       return err;
+       *entry = e;
+       return 0;
 }
 
 int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
@@ -1354,7 +1347,7 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
                goto out;
 
        err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
-                                   &run, 0, len, 0, &alloc, NULL);
+                                   &run, 0, len, 0, &alloc, NULL, NULL);
        if (err)
                goto out1;
 
@@ -1685,8 +1678,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
 {
        int err;
        const struct NTFS_DE *sp;
-       struct NTFS_DE *e, *de_t, *up_e = NULL;
-       struct indx_node *n2 = NULL;
+       struct NTFS_DE *e, *de_t, *up_e;
+       struct indx_node *n2;
        struct indx_node *n1 = fnd->nodes[level];
        struct INDEX_HDR *hdr1 = &n1->index->ihdr;
        struct INDEX_HDR *hdr2;
@@ -1994,7 +1987,7 @@ static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
                              const struct NTFS_DE *e, bool trim)
 {
        int err;
-       struct indx_node *n;
+       struct indx_node *n = NULL;
        struct INDEX_HDR *hdr;
        CLST vbn = de_get_vbn(e);
        size_t i;
index 80104afeb2cd91bdf19f47b403cf2daf5e9996d3..51363d4e8636b82cab2ed467563dbda3ca6ff9fe 100644 (file)
@@ -430,6 +430,7 @@ end_enum:
        } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
                   fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
                /* Records in $Extend are not a files or general directories. */
+               inode->i_op = &ntfs_file_inode_operations;
        } else {
                err = -EINVAL;
                goto out;
@@ -500,7 +501,7 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
                inode = ntfs_read_mft(inode, name, ref);
        else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
                /* Inode overlaps? */
-               make_bad_inode(inode);
+               _ntfs_bad_inode(inode);
        }
 
        return inode;
@@ -1632,7 +1633,7 @@ out4:
        ni->mi.dirty = false;
        discard_new_inode(inode);
 out3:
-       ntfs_mark_rec_free(sbi, ino);
+       ntfs_mark_rec_free(sbi, ino, false);
 
 out2:
        __putname(new_de);
@@ -1655,7 +1656,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
        struct ntfs_inode *ni = ntfs_i(inode);
        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
        struct NTFS_DE *de;
-       struct ATTR_FILE_NAME *de_name;
 
        /* Allocate PATH_MAX bytes. */
        de = __getname();
@@ -1670,15 +1670,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
        if (err)
                goto out;
 
-       de_name = (struct ATTR_FILE_NAME *)(de + 1);
-       /* Fill duplicate info. */
-       de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time =
-               de_name->dup.a_time = kernel2nt(&inode->i_ctime);
-       de_name->dup.alloc_size = de_name->dup.data_size =
-               cpu_to_le64(inode->i_size);
-       de_name->dup.fa = ni->std_fa;
-       de_name->dup.ea_size = de_name->dup.reparse = 0;
-
        err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
 out:
        __putname(de);
@@ -1731,9 +1722,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
                if (inode->i_nlink)
                        mark_inode_dirty(inode);
        } else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
-               make_bad_inode(inode);
-               ntfs_inode_err(inode, "failed to undo unlink");
-               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+               _ntfs_bad_inode(inode);
        } else {
                if (ni_is_dirty(dir))
                        mark_inode_dirty(dir);
index bc741213ad84833b7fd434c32d4b784c6e7cb2f7..bc22cc321a74bba0110a0030386af6396e9e63e9 100644 (file)
@@ -208,7 +208,7 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
 }
 
 /*
- * ntfs_rmdir - inode_operations::rm_dir
+ * ntfs_rmdir - inode_operations::rmdir
  */
 static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -308,9 +308,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *dir,
        err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
        if (is_bad) {
                /* Restore after failed rename failed too. */
-               make_bad_inode(inode);
-               ntfs_inode_err(inode, "failed to undo rename");
-               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+               _ntfs_bad_inode(inode);
        } else if (!err) {
                inode->i_ctime = dir->i_ctime = dir->i_mtime =
                        current_time(dir);
index 8dbdca03e1afc2c817a1b1a320764d35824d8528..2c791222c4e273a867c05476f177ccedbe2403cb 100644 (file)
@@ -220,6 +220,7 @@ struct ntfs_sb_info {
 
        u32 flags; // See NTFS_FLAGS_XXX.
 
+       CLST zone_max; // Maximum MFT zone length in clusters
        CLST bad_clusters; // The count of marked bad clusters.
 
        u16 max_bytes_per_attr; // Maximum attribute size in record.
@@ -408,8 +409,6 @@ enum REPARSE_SIGN {
 };
 
 /* Functions from attrib.c */
-int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
-                  struct runs_tree *run, const CLST *vcn);
 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
                           CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
                           enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
@@ -440,6 +439,7 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
                        u64 new_valid);
 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
+int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
 
 /* Functions from attrlist.c */
@@ -528,7 +528,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
                          const __le16 *name, u8 name_len,
                          const struct runs_tree *run, CLST svcn, CLST len,
                          __le16 flags, struct ATTRIB **new_attr,
-                         struct mft_inode **mi);
+                         struct mft_inode **mi, struct ATTR_LIST_ENTRY **le);
 int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
                       enum ATTR_TYPE type, const __le16 *name, u8 name_len,
                       struct ATTRIB **new_attr, struct mft_inode **mi,
@@ -589,10 +589,12 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
                             enum ALLOCATE_OPT opt);
 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
                       struct ntfs_inode *ni, struct mft_inode **mi);
-void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
+void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft);
 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
 int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
-int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
+void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
+void ntfs_bad_inode(struct inode *inode, const char *hint);
+#define _ntfs_bad_inode(i) ntfs_bad_inode(i, __func__)
 enum NTFS_DIRTY_FLAGS {
        NTFS_DIRTY_CLEAR = 0,
        NTFS_DIRTY_DIRTY = 1,
@@ -738,7 +740,6 @@ static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
 int mi_write(struct mft_inode *mi, int wait);
 int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
                  __le16 flags, bool is_mft);
-void mi_mark_free(struct mft_inode *mi);
 struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
                              const __le16 *name, u8 name_len, u32 asize,
                              u16 name_off);
@@ -780,10 +781,10 @@ bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
 void run_truncate(struct runs_tree *run, CLST vcn);
 void run_truncate_head(struct runs_tree *run, CLST vcn);
 void run_truncate_around(struct runs_tree *run, CLST vcn);
-bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
                   bool is_mft);
 bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
+bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
                   CLST *lcn, CLST *len);
 bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
@@ -802,6 +803,7 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
 #define run_unpack_ex run_unpack
 #endif
 int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
+int run_clone(const struct runs_tree *run, struct runs_tree *new_run);
 
 /* Globals from super.c */
 void *ntfs_set_shared(void *ptr, u32 bytes);
index 861e35791506e801dc446414d935d5463e64a1fb..7d2fac5ee2156b5bc11cd8022818637b96731933 100644 (file)
@@ -394,28 +394,6 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
        return err;
 }
 
-/*
- * mi_mark_free - Mark record as unused and marks it as free in bitmap.
- */
-void mi_mark_free(struct mft_inode *mi)
-{
-       CLST rno = mi->rno;
-       struct ntfs_sb_info *sbi = mi->sbi;
-
-       if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
-               ntfs_clear_mft_tail(sbi, rno, rno + 1);
-               mi->dirty = false;
-               return;
-       }
-
-       if (mi->mrec) {
-               clear_rec_inuse(mi->mrec);
-               mi->dirty = true;
-               mi_write(mi, 0);
-       }
-       ntfs_mark_rec_free(sbi, rno);
-}
-
 /*
  * mi_insert_attr - Reserve space for new attribute.
  *
@@ -445,12 +423,11 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
        attr = NULL;
        while ((attr = mi_enum_attr(mi, attr))) {
                diff = compare_attr(attr, type, name, name_len, upcase);
-               if (diff > 0)
-                       break;
+
                if (diff < 0)
                        continue;
 
-               if (!is_attr_indexed(attr))
+               if (!diff && !is_attr_indexed(attr))
                        return NULL;
                break;
        }
index a8fec651f9732878ad871cc476d8a6fcf28f9312..aaaa0d3d35a24fbe9afb889aafedb3254c43c43a 100644 (file)
@@ -31,7 +31,7 @@ struct ntfs_run {
  * In case the entry is missing from the list, 'index' will be
  * set to point to the insertion position for the entry in question.
  */
-bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
+static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
 {
        size_t min_idx, max_idx, mid_idx;
        struct ntfs_run *r;
@@ -547,6 +547,48 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
        return true;
 }
 
+/* run_insert_range
+ *
+ * Helper for attr_insert_range(),
+ * which is a helper for fallocate(insert_range).
+ */
+bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
+{
+       size_t index;
+       struct ntfs_run *r, *e;
+
+       if (WARN_ON(!run_lookup(run, vcn, &index)))
+               return false; /* Should never be here. */
+
+       e = run->runs + run->count;
+       r = run->runs + index;
+
+       if (vcn > r->vcn)
+               r += 1;
+
+       for (; r < e; r++)
+               r->vcn += len;
+
+       r = run->runs + index;
+
+       if (vcn > r->vcn) {
+               /* split fragment. */
+               CLST len1 = vcn - r->vcn;
+               CLST len2 = r->len - len1;
+               CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);
+
+               r->len = len1;
+
+               if (!run_add_entry(run, vcn + len, lcn2, len2, false))
+                       return false;
+       }
+
+       if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
+               return false;
+
+       return true;
+}
+
 /*
  * run_get_entry - Return index-th mapped region.
  */
@@ -778,26 +820,36 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
        CLST next_vcn, vcn, lcn;
        CLST prev_lcn = 0;
        CLST evcn1 = svcn + len;
+       const struct ntfs_run *r, *r_end;
        int packed_size = 0;
        size_t i;
-       bool ok;
        s64 dlcn;
        int offset_size, size_size, tmp;
 
-       next_vcn = vcn = svcn;
-
        *packed_vcns = 0;
 
        if (!len)
                goto out;
 
-       ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
+       /* Check that all required entries [svcn, evcn1) are available. */
+       if (!run_lookup(run, svcn, &i))
+               return -ENOENT;
+
+       r_end = run->runs + run->count;
+       r = run->runs + i;
 
-       if (!ok)
-               goto error;
+       for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
+            next_vcn = r->vcn + r->len) {
+               if (++r >= r_end || r->vcn != next_vcn)
+                       return -ENOENT;
+       }
 
-       if (next_vcn != vcn)
-               goto error;
+       /* Repeat cycle above and pack runs. Assume no errors. */
+       r = run->runs + i;
+       len = svcn - r->vcn;
+       vcn = svcn;
+       lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
+       len = r->len - len;
 
        for (;;) {
                next_vcn = vcn + len;
@@ -846,12 +898,10 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
                if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
                        goto out;
 
-               ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
-               if (!ok)
-                       goto error;
-
-               if (next_vcn != vcn)
-                       goto error;
+               r += 1;
+               vcn = r->vcn;
+               lcn = r->lcn;
+               len = r->len;
        }
 
 out:
@@ -860,9 +910,6 @@ out:
                run_buf[0] = 0;
 
        return packed_size + 1;
-
-error:
-       return -EOPNOTSUPP;
 }
 
 /*
@@ -1109,3 +1156,28 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
        *highest_vcn = vcn64 - 1;
        return 0;
 }
+
+/*
+ * run_clone
+ *
+ * Make a copy of run
+ */
+int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
+{
+       size_t bytes = run->count * sizeof(struct ntfs_run);
+
+       if (bytes > new_run->allocated) {
+               struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);
+
+               if (!new_ptr)
+                       return -ENOMEM;
+
+               kvfree(new_run->runs);
+               new_run->runs = new_ptr;
+               new_run->allocated = bytes;
+       }
+
+       memcpy(new_run->runs, run->runs, bytes);
+       new_run->count = run->count;
+       return 0;
+}
index 0c6de62877377a1c779f5adcbe2fa9769d2e99c7..47012c9bf505e8a42631a51a7d9b25d4b1228534 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/fs_context.h>
 #include <linux/fs_parser.h>
 #include <linux/log2.h>
+#include <linux/minmax.h>
 #include <linux/module.h>
 #include <linux/nls.h>
 #include <linux/seq_file.h>
@@ -390,7 +391,7 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
                return -EINVAL;
        }
 
-       memcpy(sbi->options, new_opts, sizeof(*new_opts));
+       swap(sbi->options, fc->fs_private);
 
        return 0;
 }
@@ -870,6 +871,13 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
 #endif
 
+       /*
+        * Compute the MFT zone in two steps.
+        * It would be nice if we were able to allocate 1/8 of
+        * total clusters for MFT but not more than 512 MB.
+        */
+       sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3);
+
        err = 0;
 
 out:
@@ -900,6 +908,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
        ref.high = 0;
 
        sbi->sb = sb;
+       sbi->options = fc->fs_private;
+       fc->fs_private = NULL;
        sb->s_flags |= SB_NODIRATIME;
        sb->s_magic = 0x7366746e; // "ntfs"
        sb->s_op = &ntfs_sops;
@@ -1262,8 +1272,6 @@ load_root:
                goto put_inode_out;
        }
 
-       fc->fs_private = NULL;
-
        return 0;
 
 put_inode_out:
@@ -1378,7 +1386,7 @@ static const struct fs_context_operations ntfs_context_ops = {
 /*
  * ntfs_init_fs_context - Initialize spi and opts
  *
- * This will called when mount/remount. We will first initiliaze
+ * This will called when mount/remount. We will first initialize
  * options so that if remount we can use just that.
  */
 static int ntfs_init_fs_context(struct fs_context *fc)
@@ -1416,7 +1424,6 @@ static int ntfs_init_fs_context(struct fs_context *fc)
        mutex_init(&sbi->compress.mtx_lzx);
 #endif
 
-       sbi->options = opts;
        fc->s_fs_info = sbi;
 ok:
        fc->fs_private = opts;
index 5e0e0280e70debaac0c9e9478a65853b7deb8c1b..6ae1f56b7358ff97a36be66e07e85c4c47b6a4dd 100644 (file)
@@ -118,7 +118,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
 
                run_init(&run);
 
-               err = attr_load_runs(attr_ea, ni, &run, NULL);
+               err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &run, 0, size);
                if (!err)
                        err = ntfs_read_run_nb(sbi, &run, 0, ea_p, size, NULL);
                run_close(&run);
@@ -444,6 +444,11 @@ update_ea:
                /* Delete xattr, ATTR_EA */
                ni_remove_attr_le(ni, attr, mi, le);
        } else if (attr->non_res) {
+               err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &ea_run, 0,
+                                          size);
+               if (err)
+                       goto out;
+
                err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size, 0);
                if (err)
                        goto out;
@@ -478,8 +483,7 @@ out:
 }
 
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
-static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
-                                        struct inode *inode, int type,
+static struct posix_acl *ntfs_get_acl_ex(struct inode *inode, int type,
                                         int locked)
 {
        struct ntfs_inode *ni = ntfs_i(inode);
@@ -514,7 +518,7 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
 
        /* Translate extended attribute to acl. */
        if (err >= 0) {
-               acl = posix_acl_from_xattr(mnt_userns, buf, err);
+               acl = posix_acl_from_xattr(&init_user_ns, buf, err);
        } else if (err == -ENODATA) {
                acl = NULL;
        } else {
@@ -537,8 +541,7 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
        if (rcu)
                return ERR_PTR(-ECHILD);
 
-       /* TODO: init_user_ns? */
-       return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
+       return ntfs_get_acl_ex(inode, type, 0);
 }
 
 static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
@@ -547,28 +550,23 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
 {
        const char *name;
        size_t size, name_len;
-       void *value = NULL;
-       int err = 0;
+       void *value;
+       int err;
        int flags;
+       umode_t mode;
 
        if (S_ISLNK(inode->i_mode))
                return -EOPNOTSUPP;
 
+       mode = inode->i_mode;
        switch (type) {
        case ACL_TYPE_ACCESS:
                /* Do not change i_mode if we are in init_acl */
                if (acl && !init_acl) {
-                       umode_t mode;
-
                        err = posix_acl_update_mode(mnt_userns, inode, &mode,
                                                    &acl);
                        if (err)
-                               goto out;
-
-                       if (inode->i_mode != mode) {
-                               inode->i_mode = mode;
-                               mark_inode_dirty(inode);
-                       }
+                               return err;
                }
                name = XATTR_NAME_POSIX_ACL_ACCESS;
                name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
@@ -595,7 +593,7 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
                value = kmalloc(size, GFP_NOFS);
                if (!value)
                        return -ENOMEM;
-               err = posix_acl_to_xattr(mnt_userns, acl, value, size);
+               err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
                if (err < 0)
                        goto out;
                flags = 0;
@@ -604,8 +602,13 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
        err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
        if (err == -ENODATA && !size)
                err = 0; /* Removing non existed xattr. */
-       if (!err)
+       if (!err) {
                set_cached_acl(inode, type, acl);
+               if (inode->i_mode != mode) {
+                       inode->i_mode = mode;
+                       mark_inode_dirty(inode);
+               }
+       }
 
 out:
        kfree(value);
@@ -641,7 +644,7 @@ static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
        if (!acl)
                return -ENODATA;
 
-       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
+       err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return err;
@@ -665,12 +668,12 @@ static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
        if (!value) {
                acl = NULL;
        } else {
-               acl = posix_acl_from_xattr(mnt_userns, value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
 
                if (acl) {
-                       err = posix_acl_valid(mnt_userns, acl);
+                       err = posix_acl_valid(&init_user_ns, acl);
                        if (err)
                                goto release_and_out;
                }
@@ -706,13 +709,13 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
                inode->i_default_acl = NULL;
        }
 
-       if (!acl)
-               inode->i_acl = NULL;
-       else {
+       if (acl) {
                if (!err)
                        err = ntfs_set_acl_ex(mnt_userns, inode, acl,
                                              ACL_TYPE_ACCESS, true);
                posix_acl_release(acl);
+       } else {
+               inode->i_acl = NULL;
        }
 
        return err;
index 801e60bab95557659c73614e228086df90837303..c28bc983a7b1c05b26b83708179b59974e20f7ac 100644 (file)
@@ -3403,10 +3403,12 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
        ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
        ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
 
-       ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
-       osb->cconn = NULL;
+       if (osb->cconn) {
+               ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+               osb->cconn = NULL;
 
-       ocfs2_dlm_shutdown_debug(osb);
+               ocfs2_dlm_shutdown_debug(osb);
+       }
 }
 
 static int ocfs2_drop_lock(struct ocfs2_super *osb,
index 013a727bd7c82df03b309161f09198108da48373..e2cc9eec287c9bfcf8c33fc6e8b5893aa45818b7 100644 (file)
@@ -1914,8 +1914,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
            !ocfs2_is_hard_readonly(osb))
                hangup_needed = 1;
 
-       if (osb->cconn)
-               ocfs2_dlm_shutdown(osb, hangup_needed);
+       ocfs2_dlm_shutdown(osb, hangup_needed);
 
        ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
        debugfs_remove_recursive(osb->osb_debug_root);
index b45fea69fff3fdc26a56cbc46773fdaf5243230f..0fbcb590af8423522880860766e57ca81d3a66ac 100644 (file)
@@ -460,9 +460,12 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
  * of the POSIX ACLs retrieved from the lower layer to this function to not
  * alter the POSIX ACLs for the underlying filesystem.
  */
-static void ovl_idmap_posix_acl(struct user_namespace *mnt_userns,
+static void ovl_idmap_posix_acl(struct inode *realinode,
+                               struct user_namespace *mnt_userns,
                                struct posix_acl *acl)
 {
+       struct user_namespace *fs_userns = i_user_ns(realinode);
+
        for (unsigned int i = 0; i < acl->a_count; i++) {
                vfsuid_t vfsuid;
                vfsgid_t vfsgid;
@@ -470,11 +473,11 @@ static void ovl_idmap_posix_acl(struct user_namespace *mnt_userns,
                struct posix_acl_entry *e = &acl->a_entries[i];
                switch (e->e_tag) {
                case ACL_USER:
-                       vfsuid = make_vfsuid(mnt_userns, &init_user_ns, e->e_uid);
+                       vfsuid = make_vfsuid(mnt_userns, fs_userns, e->e_uid);
                        e->e_uid = vfsuid_into_kuid(vfsuid);
                        break;
                case ACL_GROUP:
-                       vfsgid = make_vfsgid(mnt_userns, &init_user_ns, e->e_gid);
+                       vfsgid = make_vfsgid(mnt_userns, fs_userns, e->e_gid);
                        e->e_gid = vfsgid_into_kgid(vfsgid);
                        break;
                }
@@ -536,7 +539,7 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
        if (!clone)
                clone = ERR_PTR(-ENOMEM);
        else
-               ovl_idmap_posix_acl(mnt_user_ns(realpath.mnt), clone);
+               ovl_idmap_posix_acl(realinode, mnt_user_ns(realpath.mnt), clone);
        /*
         * Since we're not in RCU path walk we always need to release the
         * original ACLs.
index 1d17d7b13dcd0c2f0d8a26c38f9024eb084b6d39..5af33800743e49dbbd75c0ed5f4f6c6f21ffa9b3 100644 (file)
@@ -361,6 +361,7 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
                     const struct posix_acl *acl, int want)
 {
        const struct posix_acl_entry *pa, *pe, *mask_obj;
+       struct user_namespace *fs_userns = i_user_ns(inode);
        int found = 0;
        vfsuid_t vfsuid;
        vfsgid_t vfsgid;
@@ -376,7 +377,7 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
                                         goto check_perm;
                                 break;
                         case ACL_USER:
-                               vfsuid = make_vfsuid(mnt_userns, &init_user_ns,
+                               vfsuid = make_vfsuid(mnt_userns, fs_userns,
                                                     pa->e_uid);
                                if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
                                         goto mask;
@@ -390,7 +391,7 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
                                 }
                                break;
                         case ACL_GROUP:
-                               vfsgid = make_vfsgid(mnt_userns, &init_user_ns,
+                               vfsgid = make_vfsgid(mnt_userns, fs_userns,
                                                     pa->e_gid);
                                if (vfsgid_in_group_p(vfsgid)) {
                                        found = 1;
@@ -736,6 +737,7 @@ void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
 {
        struct posix_acl_xattr_header *header = value;
        struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
+       struct user_namespace *fs_userns = i_user_ns(inode);
        int count;
        vfsuid_t vfsuid;
        vfsgid_t vfsgid;
@@ -753,13 +755,13 @@ void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
                switch (le16_to_cpu(entry->e_tag)) {
                case ACL_USER:
                        uid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
-                       vfsuid = make_vfsuid(mnt_userns, &init_user_ns, uid);
+                       vfsuid = make_vfsuid(mnt_userns, fs_userns, uid);
                        entry->e_id = cpu_to_le32(from_kuid(&init_user_ns,
                                                vfsuid_into_kuid(vfsuid)));
                        break;
                case ACL_GROUP:
                        gid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
-                       vfsgid = make_vfsgid(mnt_userns, &init_user_ns, gid);
+                       vfsgid = make_vfsgid(mnt_userns, fs_userns, gid);
                        entry->e_id = cpu_to_le32(from_kgid(&init_user_ns,
                                                vfsgid_into_kgid(vfsgid)));
                        break;
@@ -775,6 +777,7 @@ void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
 {
        struct posix_acl_xattr_header *header = value;
        struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
+       struct user_namespace *fs_userns = i_user_ns(inode);
        int count;
        vfsuid_t vfsuid;
        vfsgid_t vfsgid;
@@ -793,13 +796,13 @@ void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
                case ACL_USER:
                        uid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
                        vfsuid = VFSUIDT_INIT(uid);
-                       uid = from_vfsuid(mnt_userns, &init_user_ns, vfsuid);
+                       uid = from_vfsuid(mnt_userns, fs_userns, vfsuid);
                        entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, uid));
                        break;
                case ACL_GROUP:
                        gid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
                        vfsgid = VFSGIDT_INIT(gid);
-                       gid = from_vfsgid(mnt_userns, &init_user_ns, vfsgid);
+                       gid = from_vfsgid(mnt_userns, fs_userns, vfsgid);
                        entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, gid));
                        break;
                default:
index a3398d0f1927f5461b0888fafefa9b02479e7546..4e0023643f8be9f90ba9612e4b57a54af39ecd25 100644 (file)
@@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
        struct vm_area_struct *vma = walk->vma;
        bool locked = !!(vma->vm_flags & VM_LOCKED);
        struct page *page = NULL;
-       bool migration = false;
+       bool migration = false, young = false, dirty = false;
 
        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
+               young = pte_young(*pte);
+               dirty = pte_dirty(*pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);
 
@@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
        if (!page)
                return;
 
-       smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
-                     locked, migration);
+       smaps_account(mss, page, false, young, dirty, locked, migration);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 98e64fec75b77e09048757fc28ea04f2f81bf53c..e56510964b229e42170304f88ee73515f54f1e6a 100644 (file)
@@ -593,7 +593,7 @@ static void squashfs_readahead(struct readahead_control *ractl)
 
                res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
 
-               kfree(actor);
+               squashfs_page_actor_free(actor);
 
                if (res == expected) {
                        int bytes;
index be4b12d31e0c36fbd04e2d56f96d13e793371f92..f1ccad519e28ccf890831a36f462f0e3ce5ba3a1 100644 (file)
@@ -74,7 +74,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
        /* Decompress directly into the page cache buffers */
        res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
 
-       kfree(actor);
+       squashfs_page_actor_free(actor);
 
        if (res < 0)
                goto mark_errored;
index b23b780d8f42ece1639bd4bc21ced7743ffc2643..54b93bf4a25c15b9acb2bf85cbcab6723df89e40 100644 (file)
@@ -52,6 +52,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
        actor->buffer = buffer;
        actor->pages = pages;
        actor->next_page = 0;
+       actor->tmp_buffer = NULL;
        actor->squashfs_first_page = cache_first_page;
        actor->squashfs_next_page = cache_next_page;
        actor->squashfs_finish_page = cache_finish_page;
@@ -68,20 +69,9 @@ static void *handle_next_page(struct squashfs_page_actor *actor)
 
        if ((actor->next_page == actor->pages) ||
                        (actor->next_index != actor->page[actor->next_page]->index)) {
-               if (actor->alloc_buffer) {
-                       void *tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
-                       if (tmp_buffer) {
-                               actor->tmp_buffer = tmp_buffer;
-                               actor->next_index++;
-                               actor->returned_pages++;
-                               return tmp_buffer;
-                       }
-               }
-
                actor->next_index++;
                actor->returned_pages++;
-               return ERR_PTR(-ENOMEM);
+               return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
        }
 
        actor->next_index++;
@@ -96,11 +86,10 @@ static void *direct_first_page(struct squashfs_page_actor *actor)
 
 static void *direct_next_page(struct squashfs_page_actor *actor)
 {
-       if (actor->pageaddr)
+       if (actor->pageaddr) {
                kunmap_local(actor->pageaddr);
-
-       kfree(actor->tmp_buffer);
-       actor->pageaddr = actor->tmp_buffer = NULL;
+               actor->pageaddr = NULL;
+       }
 
        return handle_next_page(actor);
 }
@@ -109,8 +98,6 @@ static void direct_finish_page(struct squashfs_page_actor *actor)
 {
        if (actor->pageaddr)
                kunmap_local(actor->pageaddr);
-
-       kfree(actor->tmp_buffer);
 }
 
 struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
@@ -121,6 +108,16 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
        if (actor == NULL)
                return NULL;
 
+       if (msblk->decompressor->alloc_buffer) {
+               actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+               if (actor->tmp_buffer == NULL) {
+                       kfree(actor);
+                       return NULL;
+               }
+       } else
+               actor->tmp_buffer = NULL;
+
        actor->length = length ? : pages * PAGE_SIZE;
        actor->page = page;
        actor->pages = pages;
@@ -128,7 +125,6 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
        actor->returned_pages = 0;
        actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
        actor->pageaddr = NULL;
-       actor->tmp_buffer = NULL;
        actor->alloc_buffer = msblk->decompressor->alloc_buffer;
        actor->squashfs_first_page = direct_first_page;
        actor->squashfs_next_page = direct_next_page;
index 24841d28bc0fb85b630535f4c07c6c54b4aa2767..95ffbb543d913b601604ae3cc12f766bb5380295 100644 (file)
@@ -29,6 +29,11 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
 extern struct squashfs_page_actor *squashfs_page_actor_init_special(
                                struct squashfs_sb_info *msblk,
                                struct page **page, int pages, int length);
+static inline void squashfs_page_actor_free(struct squashfs_page_actor *actor)
+{
+       kfree(actor->tmp_buffer);
+       kfree(actor);
+}
 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
 {
        return actor->squashfs_first_page(actor);
index 1c44bf75f9160cd509f684f0727c8a36be085527..175de70e3adfdd68a88ef6c11e16e68c8f296a17 100644 (file)
@@ -1601,6 +1601,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                        wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
                }
 
+               /* Reset ptes for the whole vma range if wr-protected */
+               if (userfaultfd_wp(vma))
+                       uffd_wp_range(mm, vma, start, vma_end - start, false);
+
                new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
                prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
index 3096f086b5a3274fb75969ac1853589ac61f0c94..71ab4ba9c25d189b2f018c4e5c6e46ee82ebd7a8 100644 (file)
@@ -39,9 +39,6 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       if (READ_ONCE(*p) & mask)
-               return 1;
-
        old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
@@ -53,9 +50,6 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       if (!(READ_ONCE(*p) & mask))
-               return 0;
-
        old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
index 3d5ebd24652b9ff2412c45fd829a5fe8b9308e0e..564a8c675d85898a601e889ab471880ef70c8068 100644 (file)
@@ -4,6 +4,7 @@
 #define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
 
 #include <linux/bits.h>
+#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
@@ -127,6 +128,18 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
        return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * const_*() definitions provide good compile-time optimizations when
  * the passed arguments can be resolved at compile time.
@@ -137,6 +150,7 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
 #define const___test_and_set_bit       generic___test_and_set_bit
 #define const___test_and_clear_bit     generic___test_and_clear_bit
 #define const___test_and_change_bit    generic___test_and_change_bit
+#define const_test_bit_acquire         generic_test_bit_acquire
 
 /**
  * const_test_bit - Determine whether a bit is set
index 988a3bbfba34ec9a1b4904520a5cadc469842dbd..2b238b161a6206e6702a34523ee4043d7dd65d96 100644 (file)
@@ -142,4 +142,16 @@ _test_bit(unsigned long nr, const volatile unsigned long *addr)
        return arch_test_bit(nr, addr);
 }
 
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+       instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+       return arch_test_bit_acquire(nr, addr);
+}
+
 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
index 5c37ced343aedc0669347f64d042ffc2c161e5a9..71f8d54a5195e919bb51e3311df3150b087fbacd 100644 (file)
@@ -13,6 +13,7 @@
 #define arch___test_and_change_bit generic___test_and_change_bit
 
 #define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 #include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
index bdb9b1ffaee90a9444db25575b93a5a0d50e05f9..0ddc78dfc358bece4d4f6a40036e0e11e73684b5 100644 (file)
@@ -12,5 +12,6 @@
 #define ___test_and_change_bit arch___test_and_change_bit
 
 #define _test_bit              arch_test_bit
+#define _test_bit_acquire      arch_test_bit_acquire
 
 #endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
index d0f7bdd2fdf235765d53c91bbd78674a19e1168c..db13bb620f527e2a0d8204cfdafe877975c35394 100644 (file)
@@ -97,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
 /**
  * memory_intersects - checks if the region occupied by an object intersects
  *                     with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
  * @end: virtual address of the end of the memory region
  * @virt: virtual address of the memory object
  * @size: size of the memory object
@@ -110,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
 {
        void *vend = virt + size;
 
-       return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+       if (virt < end && vend > begin)
+               return true;
+
+       return false;
 }
 
 /**
index cf9bf65039f22b14b8f810cadc0301397c2303e2..3b89c64bcfd8f029593f9fa2da7e6366467868f6 100644 (file)
@@ -59,6 +59,7 @@ extern unsigned long __sw_hweight64(__u64 w);
 #define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
 #define __test_and_change_bit(nr, addr)        bitop(___test_and_change_bit, nr, addr)
 #define test_bit(nr, addr)             bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr)     bitop(_test_bit_acquire, nr, addr)
 
 /*
  * Include this here because some architectures need generic_ffs/fls in
index effee1dc715a26ab97c8557e0f01d0c1c70d21a7..92294a5fb083612e578532362160c468d0e435c8 100644 (file)
@@ -857,7 +857,6 @@ void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
 bool blk_mq_complete_request_remote(struct request *rq);
-bool blk_mq_queue_stopped(struct request_queue *q);
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
index def8b8d30ccc12563418509ad3368099fa7c404d..089c9ade43259e76420e60dd9dff8c46792ffa82 100644 (file)
@@ -156,7 +156,7 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
         * make it consistent with folio_test_uptodate
         * pairs with smp_mb__before_atomic in set_buffer_uptodate
         */
-       return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+       return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)          ((unsigned long)(bh)->b_data & ~PAGE_MASK)
index ed53bfe7c46c4738df18744cc45ff6a204721e11..ac5d0515680eae0ae0ef10c74be2ab1ee56169f4 100644 (file)
@@ -734,11 +734,6 @@ static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
        return NULL;
 }
 
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
-       return NULL;
-}
-
 static inline bool cgroup_psi_enabled(void)
 {
        return false;
index 0d435d0edbcb487316b63a0e5c79803107ee9c97..bd047864c7ac7bc58cff53e0ae60f22541af1171 100644 (file)
@@ -202,12 +202,13 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
        return 0;
 }
 
-static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
-                                            const struct cpumask *src2p) {
+static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+                                                     const struct cpumask *src2p)
+{
        return cpumask_first_and(src1p, src2p);
 }
 
-static inline int cpumask_any_distribute(const struct cpumask *srcp)
+static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
 {
        return cpumask_first(srcp);
 }
@@ -261,7 +262,26 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
                (cpu) = cpumask_next_zero((cpu), (mask)),       \
                (cpu) < nr_cpu_ids;)
 
+#if NR_CPUS == 1
+static inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+       cpumask_check(start);
+       if (n != -1)
+               cpumask_check(n);
+
+       /*
+        * Return the first available CPU when wrapping, or when starting before cpu0,
+        * since there is only one valid option.
+        */
+       if (wrap && n >= 0)
+               return nr_cpumask_bits;
+
+       return cpumask_first(mask);
+}
+#else
 unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#endif
 
 /**
  * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
index 1c480b1821e18dd7eaf4589f77e8f1a9a59626c0..f4519d3689e102dadb22a212ce994a2f8852acea 100644 (file)
@@ -656,12 +656,12 @@ struct kvm_irq_routing_table {
 };
 #endif
 
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
 #endif
 
 #define KVM_MEM_SLOTS_NUM SHRT_MAX
-#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
 
 #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
@@ -765,10 +765,10 @@ struct kvm {
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
-       unsigned long mmu_notifier_seq;
-       long mmu_notifier_count;
-       unsigned long mmu_notifier_range_start;
-       unsigned long mmu_notifier_range_end;
+       unsigned long mmu_invalidate_seq;
+       long mmu_invalidate_in_progress;
+       unsigned long mmu_invalidate_range_start;
+       unsigned long mmu_invalidate_range_end;
 #endif
        struct list_head devices;
        u64 manual_dirty_log_protect;
@@ -1357,10 +1357,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 #endif
 
-void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
-                                  unsigned long end);
-void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
-                                  unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+                             unsigned long end);
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+                           unsigned long end);
 
 long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
@@ -1907,42 +1907,44 @@ extern const struct kvm_stats_header kvm_vcpu_stats_header;
 extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
-       if (unlikely(kvm->mmu_notifier_count))
+       if (unlikely(kvm->mmu_invalidate_in_progress))
                return 1;
        /*
-        * Ensure the read of mmu_notifier_count happens before the read
-        * of mmu_notifier_seq.  This interacts with the smp_wmb() in
-        * mmu_notifier_invalidate_range_end to make sure that the caller
-        * either sees the old (non-zero) value of mmu_notifier_count or
-        * the new (incremented) value of mmu_notifier_seq.
-        * PowerPC Book3s HV KVM calls this under a per-page lock
-        * rather than under kvm->mmu_lock, for scalability, so
-        * can't rely on kvm->mmu_lock to keep things ordered.
+        * Ensure the read of mmu_invalidate_in_progress happens before
+        * the read of mmu_invalidate_seq.  This interacts with the
+        * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+        * that the caller either sees the old (non-zero) value of
+        * mmu_invalidate_in_progress or the new (incremented) value of
+        * mmu_invalidate_seq.
+        *
+        * PowerPC Book3s HV KVM calls this under a per-page lock rather
+        * than under kvm->mmu_lock, for scalability, so can't rely on
+        * kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
-       if (kvm->mmu_notifier_seq != mmu_seq)
+       if (kvm->mmu_invalidate_seq != mmu_seq)
                return 1;
        return 0;
 }
 
-static inline int mmu_notifier_retry_hva(struct kvm *kvm,
-                                        unsigned long mmu_seq,
-                                        unsigned long hva)
+static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+                                          unsigned long mmu_seq,
+                                          unsigned long hva)
 {
        lockdep_assert_held(&kvm->mmu_lock);
        /*
-        * If mmu_notifier_count is non-zero, then the range maintained by
-        * kvm_mmu_notifier_invalidate_range_start contains all addresses that
-        * might be being invalidated. Note that it may include some false
+        * If mmu_invalidate_in_progress is non-zero, then the range maintained
+        * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+        * that might be being invalidated. Note that it may include some false
         * positives, due to shortcuts when handing concurrent invalidations.
         */
-       if (unlikely(kvm->mmu_notifier_count) &&
-           hva >= kvm->mmu_notifier_range_start &&
-           hva < kvm->mmu_notifier_range_end)
+       if (unlikely(kvm->mmu_invalidate_in_progress) &&
+           hva >= kvm->mmu_invalidate_range_start &&
+           hva < kvm->mmu_invalidate_range_end)
                return 1;
-       if (kvm->mmu_notifier_seq != mmu_seq)
+       if (kvm->mmu_invalidate_seq != mmu_seq)
                return 1;
        return 0;
 }
index 0269ff114f5a7abbccf366f17973c5407f780ada..698032e5ef2d3fe0c870c0fda087da4a7eeee2ca 100644 (file)
@@ -1382,7 +1382,8 @@ extern const struct attribute_group *ata_common_sdev_groups[];
        .proc_name              = drv_name,                     \
        .slave_destroy          = ata_scsi_slave_destroy,       \
        .bios_param             = ata_std_bios_param,           \
-       .unlock_native_capacity = ata_scsi_unlock_native_capacity
+       .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+       .max_sectors            = ATA_MAX_SECTORS_LBA48
 
 #define ATA_SUBBASE_SHT(drv_name)                              \
        __ATA_BASE_SHT(drv_name),                               \
index 4d31ce55b1c0d67f3b5c903f87be449d9953af35..6257867fbf95375c98ef7ddd0a1b4966c29b955b 100644 (file)
@@ -987,19 +987,30 @@ static inline void mod_memcg_page_state(struct page *page,
 
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
-       return READ_ONCE(memcg->vmstats.state[idx]);
+       long x = READ_ONCE(memcg->vmstats.state[idx]);
+#ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
 }
 
 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
 {
        struct mem_cgroup_per_node *pn;
+       long x;
 
        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);
 
        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       return READ_ONCE(pn->lruvec_stats.state[idx]);
+       x = READ_ONCE(pn->lruvec_stats.state[idx]);
+#ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
 }
 
 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
index 96b16fbe1aa45932b52158da10007956fe081fcc..7b7ce602c8080956fed90daa083a7fa55b67d156 100644 (file)
@@ -779,6 +779,7 @@ struct mlx5_core_dev {
        enum mlx5_device_state  state;
        /* sync interface state */
        struct mutex            intf_state_mutex;
+       struct lock_class_key   lock_key;
        unsigned long           intf_state;
        struct mlx5_priv        priv;
        struct mlx5_profile     profile;
index 3bedc449c14d8f80530dd7ce6ece6c7c57d6eaa7..21f8b27bd9fd308b198f89275ed8d1295edd1ea6 100644 (file)
@@ -1544,9 +1544,16 @@ static inline bool is_longterm_pinnable_page(struct page *page)
        if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
                return false;
 #endif
-       return !(is_device_coherent_page(page) ||
-                is_zone_movable_page(page) ||
-                is_zero_pfn(page_to_pfn(page)));
+       /* The zero page may always be pinned */
+       if (is_zero_pfn(page_to_pfn(page)))
+               return true;
+
+       /* Coherent device memory must always allow eviction. */
+       if (is_device_coherent_page(page))
+               return false;
+
+       /* Otherwise, non-movable zone pages can be pinned. */
+       return !is_zone_movable_page(page);
 }
 #else
 static inline bool is_longterm_pinnable_page(struct page *page)
@@ -2885,7 +2892,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_MIGRATION 0x400   /* wait for page to replace migration entry */
 #define FOLL_TRIED     0x800   /* a retry, previous pass started an IO */
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
-#define FOLL_COW       0x4000  /* internal GUP flag */
 #define FOLL_ANON      0x8000  /* don't do file mappings */
 #define FOLL_LONGTERM  0x10000 /* mapping lifetime is indefinite: see below */
 #define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
index 1a3cb93c3dcce129b601240fd502823df048e313..05d6f3facd5a5e6935f2ae94114a40f5365df669 100644 (file)
@@ -640,9 +640,23 @@ extern int sysctl_devconf_inherit_init_net;
  */
 static inline bool net_has_fallback_tunnels(const struct net *net)
 {
-       return !IS_ENABLED(CONFIG_SYSCTL) ||
-              !sysctl_fb_tunnels_only_for_init_net ||
-              (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
+#if IS_ENABLED(CONFIG_SYSCTL)
+       int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+       return !fb_tunnels_only_for_init_net ||
+               (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+       return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+       return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+       return 0;
+#endif
 }
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
index a13296d6c7ceb2386e3870f6c1671686d990970a..fd533552a062ce0e1219c9225fadf93bc7aacabf 100644 (file)
@@ -94,10 +94,6 @@ struct ebt_table {
        struct ebt_replace_kernel *table;
        unsigned int valid_hooks;
        rwlock_t lock;
-       /* e.g. could be the table explicitly only allows certain
-        * matches, targets, ... 0 == let it in */
-       int (*check)(const struct ebt_table_info *info,
-          unsigned int valid_hooks);
        /* the data used by the kernel */
        struct ebt_table_info *private;
        struct nf_hook_ops *ops;
index b32ed68e7dc4971a72d03f1021d5595301cf429f..7931fa47256129e26422531be56b65ec4fb39c50 100644 (file)
@@ -83,7 +83,6 @@ struct nfs_open_context {
        fmode_t mode;
 
        unsigned long flags;
-#define NFS_CONTEXT_RESEND_WRITES      (1)
 #define NFS_CONTEXT_BAD                        (2)
 #define NFS_CONTEXT_UNLOCK     (3)
 #define NFS_CONTEXT_FILE_OPEN          (4)
@@ -182,6 +181,7 @@ struct nfs_inode {
                /* Regular file */
                struct {
                        atomic_long_t   nrequests;
+                       atomic_long_t   redirtied_pages;
                        struct nfs_mds_commit_info commit_info;
                        struct mutex    commit_mutex;
                };
index 89784763d19e2647bdfa0082652abcccec7cf038..dd74411ac21d799d1a9c298168875060f9011355 100644 (file)
@@ -27,7 +27,7 @@ void psi_memstall_leave(unsigned long *flags);
 
 int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
 struct psi_trigger *psi_trigger_create(struct psi_group *group,
-                       char *buf, size_t nbytes, enum psi_res res);
+                       char *buf, enum psi_res res);
 void psi_trigger_destroy(struct psi_trigger *t);
 
 __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
index a193884ecf2b19d64ae29eecb114dc6e6008d1a9..4f765bc788ffb7bd1a6d09ab3705a2ae27c19e9a 100644 (file)
@@ -84,7 +84,7 @@ struct scmi_protocol_handle;
 struct scmi_clk_proto_ops {
        int (*count_get)(const struct scmi_protocol_handle *ph);
 
-       const struct scmi_clock_info *(*info_get)
+       const struct scmi_clock_info __must_check *(*info_get)
                (const struct scmi_protocol_handle *ph, u32 clk_id);
        int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
                        u64 *rate);
@@ -466,7 +466,7 @@ enum scmi_sensor_class {
  */
 struct scmi_sensor_proto_ops {
        int (*count_get)(const struct scmi_protocol_handle *ph);
-       const struct scmi_sensor_info *(*info_get)
+       const struct scmi_sensor_info __must_check *(*info_get)
                (const struct scmi_protocol_handle *ph, u32 sensor_id);
        int (*trip_point_config)(const struct scmi_protocol_handle *ph,
                                 u32 sensor_id, u8 trip_id, u64 trip_value);
index 1b6c4013f691b0ee64eade2718e33b9c6bd01c1b..ff0b990de83d457c162d1789745e2040e0df9efb 100644 (file)
@@ -29,15 +29,10 @@ struct shmem_inode_info {
        struct inode            vfs_inode;
 };
 
-#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
-#define SHMEM_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE
-#define SHMEM_FL_INHERITED FS_FL_USER_MODIFIABLE
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define SHMEM_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define SHMEM_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_USER_VISIBLE          FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+       (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED             (FS_NODUMP_FL | FS_NOATIME_FL)
 
 struct shmem_sb_info {
        unsigned long max_blocks;   /* How many blocks are allowed */
index 732b522bacb7e5c15a8d6e00f9f4d4e8c6daa4ea..e1b8a915e9e9fb1346fc2ffdc354afcdbf6627d3 100644 (file)
@@ -73,6 +73,8 @@ extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
 extern int mwriteprotect_range(struct mm_struct *dst_mm,
                               unsigned long start, unsigned long len,
                               bool enable_wp, atomic_t *mmap_changing);
+extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+                         unsigned long start, unsigned long len, bool enable_wp);
 
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
index a3f73bb6733e82b78b50e3ce2f736d67f1ec3f64..dcab9c7e878433685b90b799b97521f13fe835d7 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/gfp.h>
 
 /**
- * virtqueue - a queue to register buffers for sending or receiving.
+ * struct virtqueue - a queue to register buffers for sending or receiving.
  * @list: the chain of virtqueues for this device
  * @callback: the function to call when buffers are consumed (can be NULL).
  * @name: the name of this virtqueue (mainly for debugging)
@@ -97,7 +97,7 @@ int virtqueue_resize(struct virtqueue *vq, u32 num,
                     void (*recycle)(struct virtqueue *vq, void *buf));
 
 /**
- * virtio_device - representation of a device using virtio
+ * struct virtio_device - representation of a device using virtio
  * @index: unique position on the virtio bus
  * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
  * @config_enabled: configuration change reporting enabled
@@ -156,7 +156,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
        list_for_each_entry(vq, &vdev->vqs, list)
 
 /**
- * virtio_driver - operations for a virtio I/O driver
+ * struct virtio_driver - operations for a virtio I/O driver
  * @driver: underlying device driver (populate name and owner).
  * @id_table: the ids serviced by this driver.
  * @feature_table: an array of feature numbers supported by this driver.
index 6adff09f7170abdfd9c993d450221839d0872856..4b517649cfe84cb265dcc16f909e586968c3c625 100644 (file)
@@ -55,7 +55,6 @@ struct virtio_shm_region {
  *             include a NULL entry for vqs that do not need a callback
  *     names: array of virtqueue names (mainly for debugging)
  *             include a NULL entry for vqs unused by driver
- *     sizes: array of virtqueue sizes
  *     Returns 0 on success or error status
  * @del_vqs: free virtqueues found by find_vqs().
  * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
@@ -104,9 +103,7 @@ struct virtio_config_ops {
        void (*reset)(struct virtio_device *vdev);
        int (*find_vqs)(struct virtio_device *, unsigned nvqs,
                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
-                       const char * const names[],
-                       u32 sizes[],
-                       const bool *ctx,
+                       const char * const names[], const bool *ctx,
                        struct irq_affinity *desc);
        void (*del_vqs)(struct virtio_device *);
        void (*synchronize_cbs)(struct virtio_device *);
@@ -215,7 +212,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
        const char *names[] = { n };
        struct virtqueue *vq;
        int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
-                                        NULL, NULL);
+                                        NULL);
        if (err < 0)
                return ERR_PTR(err);
        return vq;
@@ -227,8 +224,7 @@ int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                        const char * const names[],
                        struct irq_affinity *desc)
 {
-       return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
-                                     NULL, desc);
+       return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
 }
 
 static inline
@@ -237,25 +233,13 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
                        const char * const names[], const bool *ctx,
                        struct irq_affinity *desc)
 {
-       return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
-                                     ctx, desc);
-}
-
-static inline
-int virtio_find_vqs_ctx_size(struct virtio_device *vdev, u32 nvqs,
-                            struct virtqueue *vqs[],
-                            vq_callback_t *callbacks[],
-                            const char * const names[],
-                            u32 sizes[],
-                            const bool *ctx, struct irq_affinity *desc)
-{
-       return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, sizes,
-                                     ctx, desc);
+       return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
+                                     desc);
 }
 
 /**
  * virtio_synchronize_cbs - synchronize with virtqueue callbacks
- * @vdev: the device
+ * @dev: the virtio device
  */
 static inline
 void virtio_synchronize_cbs(struct virtio_device *dev)
@@ -274,7 +258,7 @@ void virtio_synchronize_cbs(struct virtio_device *dev)
 
 /**
  * virtio_device_ready - enable vq use in probe function
- * @vdev: the device
+ * @dev: the virtio device
  *
  * Driver must call this to use vqs in the probe function.
  *
@@ -322,7 +306,7 @@ const char *virtio_bus_name(struct virtio_device *vdev)
 /**
  * virtqueue_set_affinity - setting affinity for a virtqueue
  * @vq: the virtqueue
- * @cpu: the cpu no.
+ * @cpu_mask: the cpu mask
  *
  * Pay attention the function are best-effort: the affinity hint may not be set
  * due to config support, irq type and sharing.
index 404024486fa5397d1f87b986267e4f21cc8d0560..f3fc36cd2276a210bd8caa2df298c1e796f13fdb 100644 (file)
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
+#ifdef CONFIG_ZONE_DEVICE
+#define DEVICE_ZONE(xx) xx##_DEVICE,
+#else
+#define DEVICE_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
+       HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
-               FOR_ALL_ZONES(PGALLOC),
-               FOR_ALL_ZONES(ALLOCSTALL),
-               FOR_ALL_ZONES(PGSCAN_SKIP),
+               FOR_ALL_ZONES(PGALLOC)
+               FOR_ALL_ZONES(ALLOCSTALL)
+               FOR_ALL_ZONES(PGSCAN_SKIP)
                PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
                PGFAULT, PGMAJFAULT,
                PGLAZYFREED,
index 7dec36aecbd9fe239ba1b94f81c729e6665d6bd4..7725b7579b7819d3b1f9267fc42e26bf44ff13c0 100644 (file)
@@ -71,7 +71,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait,
@@ -96,7 +96,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
                    unsigned long timeout)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit_timeout(word, bit,
                                               bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
                   unsigned mode)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
 }
index 184105d682942c2b2a50b023bf6be3441fbf7a21..be2992e6de5d50c5cde8ba4c47a4dd1802c606f2 100644 (file)
@@ -290,7 +290,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
 }
 
 /* ========== AD Exported functions to the main bonding code ========== */
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
+void bond_3ad_initialize(struct bonding *bond);
 void bond_3ad_bind_slave(struct slave *slave);
 void bond_3ad_unbind_slave(struct slave *slave);
 void bond_3ad_state_machine_handler(struct work_struct *);
index c4898fcbf923bf01f14c6bcc694eb036d75d7195..f90f0021f5f2dd4932c564ce8b8a7333093456c5 100644 (file)
@@ -33,7 +33,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
 
 static inline bool net_busy_loop_on(void)
 {
-       return sysctl_net_busy_poll;
+       return READ_ONCE(sysctl_net_busy_poll);
 }
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
index 867656b0739c0985cae50d1aaf2e1d70bb1c80ed..24003dea8fa4de93edcc10ed6a02a80864732680 100644 (file)
@@ -439,7 +439,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
 {
        list_add_tail(&skb->list, &napi->rx_list);
        napi->rx_count += segs;
-       if (napi->rx_count >= gro_normal_batch)
+       if (napi->rx_count >= READ_ONCE(gro_normal_batch))
                gro_normal_list(napi);
 }
 
index 9f0bab0589d9c3e6a87d7de6365dbdb4bbd7af0b..3827a6b395fdb67f391853d528bb6f7ff9169449 100644 (file)
@@ -83,6 +83,7 @@ struct neigh_parms {
        struct rcu_head rcu_head;
 
        int     reachable_time;
+       int     qlen;
        int     data[NEIGH_VAR_DATA_MAX];
        DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 };
index d5326c44b4535bb917ce13c3faddbeceab43aaa7..cd982f4a0f50cac10887e13ba50d180b4f3dc95e 100644 (file)
@@ -270,6 +270,7 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
 
 struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
                                                     struct flow_offload_tuple *tuple);
+void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
 void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
                              struct net_device *dev);
 void nf_flow_table_cleanup(struct net_device *dev);
@@ -306,6 +307,8 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                           struct flow_offload *flow);
 
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
+
 int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
                                struct net_device *dev,
                                enum flow_block_command cmd);
index 99aae36c04b97532bc6884497db8ad6482fdfdcc..cdb7db9b0e25204fbcafef518a9c36d24da1edff 100644 (file)
@@ -1652,6 +1652,7 @@ struct nftables_pernet {
        struct list_head        module_list;
        struct list_head        notify_list;
        struct mutex            commit_mutex;
+       u64                     table_handle;
        unsigned int            base_seq;
        u8                      validate_state;
 };
index 0677cd3de03444c4cbdc24cfda7c40d94057e0c4..c396a3862e80866cefbb62277b7088025e768490 100644 (file)
@@ -95,7 +95,7 @@ struct nf_ip_net {
 
 struct netns_ct {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
-       bool ctnetlink_has_listener;
+       u8 ctnetlink_has_listener;
        bool ecache_dwork_pending;
 #endif
        u8                      sysctl_log_invalid; /* Log invalid packets */
index 05a1bbdf58054d149f1ff8be8a4ffae827bb9be9..d08cfe190a78ba309ff31e549a7ba30d570e7105 100644 (file)
@@ -577,6 +577,31 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
 
 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
 
+/**
+ * __locked_read_sk_user_data_with_flags - return the pointer
+ * only if argument flags all has been set in sk_user_data. Otherwise
+ * return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ *
+ * The caller must be holding sk->sk_callback_lock.
+ */
+static inline void *
+__locked_read_sk_user_data_with_flags(const struct sock *sk,
+                                     uintptr_t flags)
+{
+       uintptr_t sk_user_data =
+               (uintptr_t)rcu_dereference_check(__sk_user_data(sk),
+                                                lockdep_is_held(&sk->sk_callback_lock));
+
+       WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+       if ((sk_user_data & flags) == flags)
+               return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+       return NULL;
+}
+
 /**
  * __rcu_dereference_sk_user_data_with_flags - return the pointer
  * only if argument flags all has been set in sk_user_data. Otherwise
index ac151ecc7f19f0e657ec2877e8734f00af7d6b27..2edea901bbd5a89368c7146bbc62e7e723c93b04 100644 (file)
 #define REG_RESERVED_ADDR              0xffffffff
 #define REG_RESERVED(reg)              REG(reg, REG_RESERVED_ADDR)
 
-#define for_each_stat(ocelot, stat)                            \
-       for ((stat) = (ocelot)->stats_layout;                   \
-            ((stat)->name[0] != '\0');                         \
-            (stat)++)
-
 enum ocelot_target {
        ANA = 1,
        QS,
@@ -335,13 +330,38 @@ enum ocelot_reg {
        SYS_COUNT_RX_64,
        SYS_COUNT_RX_65_127,
        SYS_COUNT_RX_128_255,
-       SYS_COUNT_RX_256_1023,
+       SYS_COUNT_RX_256_511,
+       SYS_COUNT_RX_512_1023,
        SYS_COUNT_RX_1024_1526,
        SYS_COUNT_RX_1527_MAX,
        SYS_COUNT_RX_PAUSE,
        SYS_COUNT_RX_CONTROL,
        SYS_COUNT_RX_LONGS,
        SYS_COUNT_RX_CLASSIFIED_DROPS,
+       SYS_COUNT_RX_RED_PRIO_0,
+       SYS_COUNT_RX_RED_PRIO_1,
+       SYS_COUNT_RX_RED_PRIO_2,
+       SYS_COUNT_RX_RED_PRIO_3,
+       SYS_COUNT_RX_RED_PRIO_4,
+       SYS_COUNT_RX_RED_PRIO_5,
+       SYS_COUNT_RX_RED_PRIO_6,
+       SYS_COUNT_RX_RED_PRIO_7,
+       SYS_COUNT_RX_YELLOW_PRIO_0,
+       SYS_COUNT_RX_YELLOW_PRIO_1,
+       SYS_COUNT_RX_YELLOW_PRIO_2,
+       SYS_COUNT_RX_YELLOW_PRIO_3,
+       SYS_COUNT_RX_YELLOW_PRIO_4,
+       SYS_COUNT_RX_YELLOW_PRIO_5,
+       SYS_COUNT_RX_YELLOW_PRIO_6,
+       SYS_COUNT_RX_YELLOW_PRIO_7,
+       SYS_COUNT_RX_GREEN_PRIO_0,
+       SYS_COUNT_RX_GREEN_PRIO_1,
+       SYS_COUNT_RX_GREEN_PRIO_2,
+       SYS_COUNT_RX_GREEN_PRIO_3,
+       SYS_COUNT_RX_GREEN_PRIO_4,
+       SYS_COUNT_RX_GREEN_PRIO_5,
+       SYS_COUNT_RX_GREEN_PRIO_6,
+       SYS_COUNT_RX_GREEN_PRIO_7,
        SYS_COUNT_TX_OCTETS,
        SYS_COUNT_TX_UNICAST,
        SYS_COUNT_TX_MULTICAST,
@@ -351,11 +371,46 @@ enum ocelot_reg {
        SYS_COUNT_TX_PAUSE,
        SYS_COUNT_TX_64,
        SYS_COUNT_TX_65_127,
-       SYS_COUNT_TX_128_511,
+       SYS_COUNT_TX_128_255,
+       SYS_COUNT_TX_256_511,
        SYS_COUNT_TX_512_1023,
        SYS_COUNT_TX_1024_1526,
        SYS_COUNT_TX_1527_MAX,
+       SYS_COUNT_TX_YELLOW_PRIO_0,
+       SYS_COUNT_TX_YELLOW_PRIO_1,
+       SYS_COUNT_TX_YELLOW_PRIO_2,
+       SYS_COUNT_TX_YELLOW_PRIO_3,
+       SYS_COUNT_TX_YELLOW_PRIO_4,
+       SYS_COUNT_TX_YELLOW_PRIO_5,
+       SYS_COUNT_TX_YELLOW_PRIO_6,
+       SYS_COUNT_TX_YELLOW_PRIO_7,
+       SYS_COUNT_TX_GREEN_PRIO_0,
+       SYS_COUNT_TX_GREEN_PRIO_1,
+       SYS_COUNT_TX_GREEN_PRIO_2,
+       SYS_COUNT_TX_GREEN_PRIO_3,
+       SYS_COUNT_TX_GREEN_PRIO_4,
+       SYS_COUNT_TX_GREEN_PRIO_5,
+       SYS_COUNT_TX_GREEN_PRIO_6,
+       SYS_COUNT_TX_GREEN_PRIO_7,
        SYS_COUNT_TX_AGING,
+       SYS_COUNT_DROP_LOCAL,
+       SYS_COUNT_DROP_TAIL,
+       SYS_COUNT_DROP_YELLOW_PRIO_0,
+       SYS_COUNT_DROP_YELLOW_PRIO_1,
+       SYS_COUNT_DROP_YELLOW_PRIO_2,
+       SYS_COUNT_DROP_YELLOW_PRIO_3,
+       SYS_COUNT_DROP_YELLOW_PRIO_4,
+       SYS_COUNT_DROP_YELLOW_PRIO_5,
+       SYS_COUNT_DROP_YELLOW_PRIO_6,
+       SYS_COUNT_DROP_YELLOW_PRIO_7,
+       SYS_COUNT_DROP_GREEN_PRIO_0,
+       SYS_COUNT_DROP_GREEN_PRIO_1,
+       SYS_COUNT_DROP_GREEN_PRIO_2,
+       SYS_COUNT_DROP_GREEN_PRIO_3,
+       SYS_COUNT_DROP_GREEN_PRIO_4,
+       SYS_COUNT_DROP_GREEN_PRIO_5,
+       SYS_COUNT_DROP_GREEN_PRIO_6,
+       SYS_COUNT_DROP_GREEN_PRIO_7,
        SYS_RESET_CFG,
        SYS_SR_ETYPE_CFG,
        SYS_VLAN_ETYPE_CFG,
@@ -538,16 +593,111 @@ enum ocelot_ptp_pins {
        TOD_ACC_PIN
 };
 
+enum ocelot_stat {
+       OCELOT_STAT_RX_OCTETS,
+       OCELOT_STAT_RX_UNICAST,
+       OCELOT_STAT_RX_MULTICAST,
+       OCELOT_STAT_RX_BROADCAST,
+       OCELOT_STAT_RX_SHORTS,
+       OCELOT_STAT_RX_FRAGMENTS,
+       OCELOT_STAT_RX_JABBERS,
+       OCELOT_STAT_RX_CRC_ALIGN_ERRS,
+       OCELOT_STAT_RX_SYM_ERRS,
+       OCELOT_STAT_RX_64,
+       OCELOT_STAT_RX_65_127,
+       OCELOT_STAT_RX_128_255,
+       OCELOT_STAT_RX_256_511,
+       OCELOT_STAT_RX_512_1023,
+       OCELOT_STAT_RX_1024_1526,
+       OCELOT_STAT_RX_1527_MAX,
+       OCELOT_STAT_RX_PAUSE,
+       OCELOT_STAT_RX_CONTROL,
+       OCELOT_STAT_RX_LONGS,
+       OCELOT_STAT_RX_CLASSIFIED_DROPS,
+       OCELOT_STAT_RX_RED_PRIO_0,
+       OCELOT_STAT_RX_RED_PRIO_1,
+       OCELOT_STAT_RX_RED_PRIO_2,
+       OCELOT_STAT_RX_RED_PRIO_3,
+       OCELOT_STAT_RX_RED_PRIO_4,
+       OCELOT_STAT_RX_RED_PRIO_5,
+       OCELOT_STAT_RX_RED_PRIO_6,
+       OCELOT_STAT_RX_RED_PRIO_7,
+       OCELOT_STAT_RX_YELLOW_PRIO_0,
+       OCELOT_STAT_RX_YELLOW_PRIO_1,
+       OCELOT_STAT_RX_YELLOW_PRIO_2,
+       OCELOT_STAT_RX_YELLOW_PRIO_3,
+       OCELOT_STAT_RX_YELLOW_PRIO_4,
+       OCELOT_STAT_RX_YELLOW_PRIO_5,
+       OCELOT_STAT_RX_YELLOW_PRIO_6,
+       OCELOT_STAT_RX_YELLOW_PRIO_7,
+       OCELOT_STAT_RX_GREEN_PRIO_0,
+       OCELOT_STAT_RX_GREEN_PRIO_1,
+       OCELOT_STAT_RX_GREEN_PRIO_2,
+       OCELOT_STAT_RX_GREEN_PRIO_3,
+       OCELOT_STAT_RX_GREEN_PRIO_4,
+       OCELOT_STAT_RX_GREEN_PRIO_5,
+       OCELOT_STAT_RX_GREEN_PRIO_6,
+       OCELOT_STAT_RX_GREEN_PRIO_7,
+       OCELOT_STAT_TX_OCTETS,
+       OCELOT_STAT_TX_UNICAST,
+       OCELOT_STAT_TX_MULTICAST,
+       OCELOT_STAT_TX_BROADCAST,
+       OCELOT_STAT_TX_COLLISION,
+       OCELOT_STAT_TX_DROPS,
+       OCELOT_STAT_TX_PAUSE,
+       OCELOT_STAT_TX_64,
+       OCELOT_STAT_TX_65_127,
+       OCELOT_STAT_TX_128_255,
+       OCELOT_STAT_TX_256_511,
+       OCELOT_STAT_TX_512_1023,
+       OCELOT_STAT_TX_1024_1526,
+       OCELOT_STAT_TX_1527_MAX,
+       OCELOT_STAT_TX_YELLOW_PRIO_0,
+       OCELOT_STAT_TX_YELLOW_PRIO_1,
+       OCELOT_STAT_TX_YELLOW_PRIO_2,
+       OCELOT_STAT_TX_YELLOW_PRIO_3,
+       OCELOT_STAT_TX_YELLOW_PRIO_4,
+       OCELOT_STAT_TX_YELLOW_PRIO_5,
+       OCELOT_STAT_TX_YELLOW_PRIO_6,
+       OCELOT_STAT_TX_YELLOW_PRIO_7,
+       OCELOT_STAT_TX_GREEN_PRIO_0,
+       OCELOT_STAT_TX_GREEN_PRIO_1,
+       OCELOT_STAT_TX_GREEN_PRIO_2,
+       OCELOT_STAT_TX_GREEN_PRIO_3,
+       OCELOT_STAT_TX_GREEN_PRIO_4,
+       OCELOT_STAT_TX_GREEN_PRIO_5,
+       OCELOT_STAT_TX_GREEN_PRIO_6,
+       OCELOT_STAT_TX_GREEN_PRIO_7,
+       OCELOT_STAT_TX_AGED,
+       OCELOT_STAT_DROP_LOCAL,
+       OCELOT_STAT_DROP_TAIL,
+       OCELOT_STAT_DROP_YELLOW_PRIO_0,
+       OCELOT_STAT_DROP_YELLOW_PRIO_1,
+       OCELOT_STAT_DROP_YELLOW_PRIO_2,
+       OCELOT_STAT_DROP_YELLOW_PRIO_3,
+       OCELOT_STAT_DROP_YELLOW_PRIO_4,
+       OCELOT_STAT_DROP_YELLOW_PRIO_5,
+       OCELOT_STAT_DROP_YELLOW_PRIO_6,
+       OCELOT_STAT_DROP_YELLOW_PRIO_7,
+       OCELOT_STAT_DROP_GREEN_PRIO_0,
+       OCELOT_STAT_DROP_GREEN_PRIO_1,
+       OCELOT_STAT_DROP_GREEN_PRIO_2,
+       OCELOT_STAT_DROP_GREEN_PRIO_3,
+       OCELOT_STAT_DROP_GREEN_PRIO_4,
+       OCELOT_STAT_DROP_GREEN_PRIO_5,
+       OCELOT_STAT_DROP_GREEN_PRIO_6,
+       OCELOT_STAT_DROP_GREEN_PRIO_7,
+       OCELOT_NUM_STATS,
+};
+
 struct ocelot_stat_layout {
-       u32 offset;
+       u32 reg;
        char name[ETH_GSTRING_LEN];
 };
 
-#define OCELOT_STAT_END { .name = "" }
-
 struct ocelot_stats_region {
        struct list_head node;
-       u32 offset;
+       u32 base;
        int count;
        u32 *buf;
 };
@@ -707,7 +857,6 @@ struct ocelot {
        const u32 *const                *map;
        const struct ocelot_stat_layout *stats_layout;
        struct list_head                stats_regions;
-       unsigned int                    num_stats;
 
        u32                             pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
        int                             packet_buffer_size;
@@ -750,7 +899,7 @@ struct ocelot {
        struct ocelot_psfp_list         psfp;
 
        /* Workqueue to check statistics for overflow with its lock */
-       struct mutex                    stats_lock;
+       spinlock_t                      stats_lock;
        u64                             *stats;
        struct delayed_work             stats_work;
        struct workqueue_struct         *stats_queue;
@@ -786,8 +935,8 @@ struct ocelot_policer {
        u32 burst; /* bytes */
 };
 
-#define ocelot_bulk_read_rix(ocelot, reg, ri, buf, count) \
-       __ocelot_bulk_read_ix(ocelot, reg, reg##_RSZ * (ri), buf, count)
+#define ocelot_bulk_read(ocelot, reg, buf, count) \
+       __ocelot_bulk_read_ix(ocelot, reg, 0, buf, count)
 
 #define ocelot_read_ix(ocelot, reg, gi, ri) \
        __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
index 65016a767b7ae9e8d041ec406a22ebe8e49ade05..f160d68f961d2685204eaaa95f55061d3471c0c5 100644 (file)
@@ -27,9 +27,9 @@ TRACE_EVENT(scmi_fc_call,
                __entry->val2 = val2;
        ),
 
-       TP_printk("[0x%02X]:[0x%02X]:[%08X]:%u:%u",
-                 __entry->protocol_id, __entry->msg_id,
-                 __entry->res_id, __entry->val1, __entry->val2)
+       TP_printk("pt=%02X msg_id=%02X res_id:%u vals=%u:%u",
+               __entry->protocol_id, __entry->msg_id,
+               __entry->res_id, __entry->val1, __entry->val2)
 );
 
 TRACE_EVENT(scmi_xfer_begin,
@@ -53,9 +53,9 @@ TRACE_EVENT(scmi_xfer_begin,
                __entry->poll = poll;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->poll)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->poll)
 );
 
 TRACE_EVENT(scmi_xfer_response_wait,
@@ -81,9 +81,9 @@ TRACE_EVENT(scmi_xfer_response_wait,
                __entry->poll = poll;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->timeout, __entry->poll)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X tmo_ms=%u poll=%u",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->timeout, __entry->poll)
 );
 
 TRACE_EVENT(scmi_xfer_end,
@@ -107,9 +107,9 @@ TRACE_EVENT(scmi_xfer_end,
                __entry->status = status;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->status)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->status)
 );
 
 TRACE_EVENT(scmi_rx_done,
@@ -133,9 +133,9 @@ TRACE_EVENT(scmi_rx_done,
                __entry->msg_type = msg_type;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->msg_type)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X msg_type=%u",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->msg_type)
 );
 
 TRACE_EVENT(scmi_msg_dump,
index 1463cfecb56b03ba42a29f2eb8473f06303abf00..9e0b5c8d92cea86ab2043cf3de814393a507cabe 100644 (file)
 #include <linux/types.h>
 #include <linux/time_types.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * IO submission data structure (Submission Queue Entry)
  */
@@ -661,4 +665,8 @@ struct io_uring_recvmsg_out {
        __u32 flags;
 };
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif
index 476d3e5c0fe7024201b7de17a074388fbb8237f6..f8c20d3de8da14fc092b8d08283f1b4a7de9451f 100644 (file)
 #define VRING_USED_ALIGN_SIZE 4
 #define VRING_DESC_ALIGN_SIZE 16
 
-/* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
+/**
+ * struct vring_desc - Virtio ring descriptors,
+ * 16 bytes long. These can chain together via @next.
+ *
+ * @addr: buffer address (guest-physical)
+ * @len: buffer length
+ * @flags: descriptor flags
+ * @next: index of the next descriptor in the chain,
+ *        if the VRING_DESC_F_NEXT flag is set. We chain unused
+ *        descriptors via this, too.
+ */
 struct vring_desc {
-       /* Address (guest-physical). */
        __virtio64 addr;
-       /* Length. */
        __virtio32 len;
-       /* The flags as indicated above. */
        __virtio16 flags;
-       /* We chain unused descriptors via this, too */
        __virtio16 next;
 };
 
index b1f3e6a8f11a16dee683c295350c3aa89be61984..4f84ea7ee14c902ee473a843af97b396009e0a04 100644 (file)
@@ -296,7 +296,7 @@ enum xfrm_attr_type_t {
        XFRMA_ETIMER_THRESH,
        XFRMA_SRCADDR,          /* xfrm_address_t */
        XFRMA_COADDR,           /* xfrm_address_t */
-       XFRMA_LASTUSED,         /* unsigned long  */
+       XFRMA_LASTUSED,         /* __u64 */
        XFRMA_POLICY_TYPE,      /* struct xfrm_userpolicy_type */
        XFRMA_MIGRATE,
        XFRMA_ALG_AEAD,         /* struct xfrm_algo_aead */
index f81aa95ffbc40b490eb3fe3fd64f8b8deb0960c8..f525566a0864de12a3f2e4fdd62b606081a10565 100644 (file)
@@ -135,11 +135,7 @@ static inline u32 ufshci_version(u32 major, u32 minor)
 
 #define UFSHCD_UIC_MASK                (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
 
-#define UFSHCD_ERROR_MASK      (UIC_ERROR |\
-                               DEVICE_FATAL_ERROR |\
-                               CONTROLLER_FATAL_ERROR |\
-                               SYSTEM_BUS_FATAL_ERROR |\
-                               CRYPTO_ENGINE_FATAL_ERROR)
+#define UFSHCD_ERROR_MASK      (UIC_ERROR | INT_FATAL_ERRORS)
 
 #define INT_FATAL_ERRORS       (DEVICE_FATAL_ERROR |\
                                CONTROLLER_FATAL_ERROR |\
index 80fe60fa77fbaa9be5970c7b9fa87b2ffc77080e..532362fcfe31fd3c58b418ad54f9d42080b1a00e 100644 (file)
@@ -70,11 +70,7 @@ config CC_CAN_LINK_STATIC
        default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag) -static) if 64BIT
        default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag) -static)
 
-config CC_HAS_ASM_GOTO
-       def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
-
 config CC_HAS_ASM_GOTO_OUTPUT
-       depends on CC_HAS_ASM_GOTO
        def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
 
 config CC_HAS_ASM_GOTO_TIED_OUTPUT
index 91642a4e69be62b139fd77b6f7d7791d249b54fe..1fe7942f5d4a8775e0934e9c8b6b6c73ec726ad2 100644 (file)
@@ -1446,13 +1446,25 @@ static noinline void __init kernel_init_freeable(void);
 
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
 bool rodata_enabled __ro_after_init = true;
+
+#ifndef arch_parse_debug_rodata
+static inline bool arch_parse_debug_rodata(char *str) { return false; }
+#endif
+
 static int __init set_debug_rodata(char *str)
 {
-       if (strtobool(str, &rodata_enabled))
+       if (arch_parse_debug_rodata(str))
+               return 0;
+
+       if (str && !strcmp(str, "on"))
+               rodata_enabled = true;
+       else if (str && !strcmp(str, "off"))
+               rodata_enabled = false;
+       else
                pr_warn("Invalid option string for rodata: '%s'\n", str);
-       return 1;
+       return 0;
 }
-__setup("rodata=", set_debug_rodata);
+early_param("rodata", set_debug_rodata);
 #endif
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
index e4e1dc0325f0c8db54b858f8af678bdc43cf6c47..5fc5d3e80fcb3cd1fb53b10c02d22b70762627be 100644 (file)
@@ -218,7 +218,7 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
            (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
                unsigned long file_ptr;
 
-               if (unlikely(fd > ctx->nr_user_files))
+               if (unlikely(fd >= ctx->nr_user_files))
                        return -EBADF;
                fd = array_index_nospec(fd, ctx->nr_user_files);
                file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
index ebfdb2212ec2520829c8d1c209970b5869e1b026..77616279000b0c04ee1d98e348f6e547bcba7cc1 100644 (file)
@@ -1450,9 +1450,10 @@ int io_req_prep_async(struct io_kiocb *req)
                return 0;
        if (WARN_ON_ONCE(req_has_async_data(req)))
                return -EFAULT;
-       if (io_alloc_async_data(req))
-               return -EAGAIN;
-
+       if (!io_op_defs[req->opcode].manual_alloc) {
+               if (io_alloc_async_data(req))
+                       return -EAGAIN;
+       }
        return def->prep_async(req);
 }
 
index 6d71748e2c5af0150f112726a40dc0594d0ab63c..0af8a02df580f4933fdd42e8957eb8f63e59f369 100644 (file)
@@ -116,7 +116,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_async_msghdr *hdr = req->async_data;
 
-       if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
+       if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
                return;
 
        /* Let normal cleanup path reap it if we fail adding to the cache */
@@ -152,9 +152,9 @@ static int io_setup_async_msg(struct io_kiocb *req,
                              struct io_async_msghdr *kmsg,
                              unsigned int issue_flags)
 {
-       struct io_async_msghdr *async_msg = req->async_data;
+       struct io_async_msghdr *async_msg;
 
-       if (async_msg)
+       if (req_has_async_data(req))
                return -EAGAIN;
        async_msg = io_recvmsg_alloc_async(req, issue_flags);
        if (!async_msg) {
@@ -182,6 +182,37 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
                                        &iomsg->free_iov);
 }
 
+int io_sendzc_prep_async(struct io_kiocb *req)
+{
+       struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+       struct io_async_msghdr *io;
+       int ret;
+
+       if (!zc->addr || req_has_async_data(req))
+               return 0;
+       if (io_alloc_async_data(req))
+               return -ENOMEM;
+
+       io = req->async_data;
+       ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
+       return ret;
+}
+
+static int io_setup_async_addr(struct io_kiocb *req,
+                             struct sockaddr_storage *addr,
+                             unsigned int issue_flags)
+{
+       struct io_async_msghdr *io;
+
+       if (!addr || req_has_async_data(req))
+               return -EAGAIN;
+       if (io_alloc_async_data(req))
+               return -ENOMEM;
+       io = req->async_data;
+       memcpy(&io->addr, addr, sizeof(io->addr));
+       return -EAGAIN;
+}
+
 int io_sendmsg_prep_async(struct io_kiocb *req)
 {
        int ret;
@@ -944,7 +975,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 
 int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct sockaddr_storage address;
+       struct sockaddr_storage __address, *addr = NULL;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
        struct io_notif_slot *notif_slot;
@@ -977,11 +1008,26 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
 
+       if (zc->addr) {
+               if (req_has_async_data(req)) {
+                       struct io_async_msghdr *io = req->async_data;
+
+                       msg.msg_name = addr = &io->addr;
+               } else {
+                       ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
+                       if (unlikely(ret < 0))
+                               return ret;
+                       msg.msg_name = (struct sockaddr *)&__address;
+                       addr = &__address;
+               }
+               msg.msg_namelen = zc->addr_len;
+       }
+
        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
                                        (u64)(uintptr_t)zc->buf, zc->len);
                if (unlikely(ret))
-                               return ret;
+                       return ret;
        } else {
                ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
                                          &msg.msg_iter);
@@ -992,14 +1038,6 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
                        return ret;
        }
 
-       if (zc->addr) {
-               ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
-               if (unlikely(ret < 0))
-                       return ret;
-               msg.msg_name = (struct sockaddr *)&address;
-               msg.msg_namelen = zc->addr_len;
-       }
-
        msg_flags = zc->msg_flags | MSG_ZEROCOPY;
        if (issue_flags & IO_URING_F_NONBLOCK)
                msg_flags |= MSG_DONTWAIT;
@@ -1013,16 +1051,18 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 
        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                       return -EAGAIN;
+                       return io_setup_async_addr(req, addr, issue_flags);
+
                if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
                        zc->len -= ret;
                        zc->buf += ret;
                        zc->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
-                       return -EAGAIN;
+                       return io_setup_async_addr(req, addr, issue_flags);
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
+               req_set_fail(req);
        } else if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH) {
                io_notif_slot_flush_submit(notif_slot, 0);
        }
index 7c438d39c0899a17070bbb01efbbe4f20815aaab..f91f56c6eeacf7603ebcd3a86f150349fbf62d4c 100644 (file)
@@ -31,6 +31,7 @@ struct io_async_connect {
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
 
+int io_sendzc_prep_async(struct io_kiocb *req);
 int io_sendmsg_prep_async(struct io_kiocb *req);
 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
index 977736e82c1aab5c5870619b7bc18c70bfa94448..96f076b175e077a4399a5408d84c9d5123101765 100644 (file)
@@ -73,7 +73,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
 }
 
 void io_notif_slot_flush(struct io_notif_slot *slot)
-       __must_hold(&ctx->uring_lock)
+       __must_hold(&slot->notif->ctx->uring_lock)
 {
        struct io_kiocb *notif = slot->notif;
        struct io_notif_data *nd = io_notif_to_data(notif);
@@ -81,8 +81,10 @@ void io_notif_slot_flush(struct io_notif_slot *slot)
        slot->notif = NULL;
 
        /* drop slot's master ref */
-       if (refcount_dec_and_test(&nd->uarg.refcnt))
-               io_notif_complete(notif);
+       if (refcount_dec_and_test(&nd->uarg.refcnt)) {
+               notif->io_task_work.func = __io_notif_complete_tw;
+               io_req_task_work_add(notif);
+       }
 }
 
 __cold int io_notif_unregister(struct io_ring_ctx *ctx)
index 65f0b42f255504f2b9a4f8de70116f9f48c6690a..80f6445e0c2ba573d0db9287db49ccf87159cefc 100644 (file)
@@ -8,7 +8,7 @@
 #include "rsrc.h"
 
 #define IO_NOTIF_SPLICE_BATCH  32
-#define IORING_MAX_NOTIF_SLOTS (1U << 10)
+#define IORING_MAX_NOTIF_SLOTS (1U << 15)
 
 struct io_notif_data {
        struct file             *file;
index 72dd2b2d8a9df110963bce445bf0e89e6fef2c2e..41410126c1c685cc74a4ca83f90fdbb82aae1819 100644 (file)
@@ -478,13 +478,15 @@ const struct io_op_def io_op_defs[] = {
                .pollout                = 1,
                .audit_skip             = 1,
                .ioprio                 = 1,
+               .manual_alloc           = 1,
 #if defined(CONFIG_NET)
+               .async_size             = sizeof(struct io_async_msghdr),
                .prep                   = io_sendzc_prep,
                .issue                  = io_sendzc,
+               .prep_async             = io_sendzc_prep_async,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif
-
        },
 };
 
index ece8ed4f96c43422d72cb1610a9b8c6d468eb2c8..763c6e54e2ee55553ac05d0232ffdd7f336ea47e 100644 (file)
@@ -25,6 +25,8 @@ struct io_op_def {
        unsigned                ioprio : 1;
        /* supports iopoll */
        unsigned                iopoll : 1;
+       /* opcode specific path will handle ->async_data allocation if needed */
+       unsigned                manual_alloc : 1;
        /* size of async data needed, if any */
        unsigned short          async_size;
 
index 8e0cc2d9205eaeeec162510d805932f176ff1bbc..b9989ae7b957bbb54c9aa33a1d8a289c68629100 100644 (file)
@@ -112,7 +112,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
                if (ret < 0)
                        req_set_fail(req);
                io_req_set_res(req, ret, 0);
-               return IOU_OK;
+               return ret;
        }
 
        return IOU_ISSUE_SKIP_COMPLETE;
index 6432a37ac1c94d1757265f6ccdefe0870cdb6c43..c565fbf66ac8767285b3de2fbd44928aaab59b2b 100644 (file)
@@ -102,6 +102,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
 
        ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, 0);
        if (ret < 0) {
+               audit_mark->path = NULL;
                fsnotify_put_mark(&audit_mark->mark);
                audit_mark = ERR_PTR(ret);
        }
index dd8d9ab747c3eee398c21b17a0513db39075cd87..79a5da1bc5bb694f8a72d01d785ee387378c7194 100644 (file)
@@ -1940,6 +1940,7 @@ void __audit_uring_exit(int success, long code)
                goto out;
        }
 
+       audit_return_fixup(ctx, success, code);
        if (ctx->context == AUDIT_CTX_SYSCALL) {
                /*
                 * NOTE: See the note in __audit_uring_entry() about the case
@@ -1981,7 +1982,6 @@ void __audit_uring_exit(int success, long code)
        audit_filter_inodes(current, ctx);
        if (ctx->current_state != AUDIT_STATE_RECORD)
                goto out;
-       audit_return_fixup(ctx, success, code);
        audit_log_exit();
 
 out:
@@ -2065,13 +2065,13 @@ void __audit_syscall_exit(int success, long return_code)
        if (!list_empty(&context->killed_trees))
                audit_kill_trees(context);
 
+       audit_return_fixup(context, success, return_code);
        /* run through both filters to ensure we set the filterkey properly */
        audit_filter_syscall(current, context);
        audit_filter_inodes(current, context);
        if (context->current_state < AUDIT_STATE_RECORD)
                goto out;
 
-       audit_return_fixup(context, success, return_code);
        audit_log_exit();
 
 out:
index 85fa9dbfa8bf88be026494b1a663912b968b1c26..82c61612f382a688333904e1563b4166b4b49478 100644 (file)
@@ -24,7 +24,7 @@ void bpf_sk_reuseport_detach(struct sock *sk)
        struct sock __rcu **socks;
 
        write_lock_bh(&sk->sk_callback_lock);
-       socks = __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_BPF);
+       socks = __locked_read_sk_user_data_with_flags(sk, SK_USER_DATA_BPF);
        if (socks) {
                WRITE_ONCE(sk->sk_user_data, NULL);
                /*
index 2ade21b54dc44150f26479d03dac1ec705ad626c..ff6a8099eb2a2f591a3ebab42c667a80e51bf82c 100644 (file)
@@ -59,6 +59,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
        int retval = 0;
 
        mutex_lock(&cgroup_mutex);
+       cpus_read_lock();
        percpu_down_write(&cgroup_threadgroup_rwsem);
        for_each_root(root) {
                struct cgroup *from_cgrp;
@@ -72,6 +73,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
                        break;
        }
        percpu_up_write(&cgroup_threadgroup_rwsem);
+       cpus_read_unlock();
        mutex_unlock(&cgroup_mutex);
 
        return retval;
index ffaccd6373f1e786e9c90f8230b16f11ef4b8535..e4bb5d57f4d1d46cd0f1309765134c17410cf1cd 100644 (file)
@@ -1820,6 +1820,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 
                if (ss->css_rstat_flush) {
                        list_del_rcu(&css->rstat_css_node);
+                       synchronize_rcu();
                        list_add_rcu(&css->rstat_css_node,
                                     &dcgrp->rstat_css_list);
                }
@@ -2369,6 +2370,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ *   http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+       cpus_read_lock();
+       if (lock_threadgroup)
+               percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+       if (lock_threadgroup)
+               percpu_up_write(&cgroup_threadgroup_rwsem);
+       cpus_read_unlock();
+}
+
 /**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
@@ -2841,8 +2883,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 }
 
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-                                            bool *locked)
-       __acquires(&cgroup_threadgroup_rwsem)
+                                            bool *threadgroup_locked)
 {
        struct task_struct *tsk;
        pid_t pid;
@@ -2859,12 +2900,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
         * Therefore, we can skip the global lock.
         */
        lockdep_assert_held(&cgroup_mutex);
-       if (pid || threadgroup) {
-               percpu_down_write(&cgroup_threadgroup_rwsem);
-               *locked = true;
-       } else {
-               *locked = false;
-       }
+       *threadgroup_locked = pid || threadgroup;
+       cgroup_attach_lock(*threadgroup_locked);
 
        rcu_read_lock();
        if (pid) {
@@ -2895,17 +2932,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
        goto out_unlock_rcu;
 
 out_unlock_threadgroup:
-       if (*locked) {
-               percpu_up_write(&cgroup_threadgroup_rwsem);
-               *locked = false;
-       }
+       cgroup_attach_unlock(*threadgroup_locked);
+       *threadgroup_locked = false;
 out_unlock_rcu:
        rcu_read_unlock();
        return tsk;
 }
 
-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
-       __releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
 {
        struct cgroup_subsys *ss;
        int ssid;
@@ -2913,8 +2947,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
        /* release reference from cgroup_procs_write_start() */
        put_task_struct(task);
 
-       if (locked)
-               percpu_up_write(&cgroup_threadgroup_rwsem);
+       cgroup_attach_unlock(threadgroup_locked);
+
        for_each_subsys(ss, ssid)
                if (ss->post_attach)
                        ss->post_attach();
@@ -3000,8 +3034,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
         * write-locking can be skipped safely.
         */
        has_tasks = !list_empty(&mgctx.preloaded_src_csets);
-       if (has_tasks)
-               percpu_down_write(&cgroup_threadgroup_rwsem);
+       cgroup_attach_lock(has_tasks);
 
        /* NULL dst indicates self on default hierarchy */
        ret = cgroup_migrate_prepare_dst(&mgctx);
@@ -3022,8 +3055,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
        ret = cgroup_migrate_execute(&mgctx);
 out_finish:
        cgroup_migrate_finish(&mgctx);
-       if (has_tasks)
-               percpu_up_write(&cgroup_threadgroup_rwsem);
+       cgroup_attach_unlock(has_tasks);
        return ret;
 }
 
@@ -3698,7 +3730,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
        }
 
        psi = cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
-       new = psi_trigger_create(psi, buf, nbytes, res);
+       new = psi_trigger_create(psi, buf, res);
        if (IS_ERR(new)) {
                cgroup_put(cgrp);
                return PTR_ERR(new);
@@ -4971,13 +5003,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
        struct task_struct *task;
        const struct cred *saved_cred;
        ssize_t ret;
-       bool locked;
+       bool threadgroup_locked;
 
        dst_cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!dst_cgrp)
                return -ENODEV;
 
-       task = cgroup_procs_write_start(buf, threadgroup, &locked);
+       task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
        ret = PTR_ERR_OR_ZERO(task);
        if (ret)
                goto out_unlock;
@@ -5003,7 +5035,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
        ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
 
 out_finish:
-       cgroup_procs_write_finish(task, locked);
+       cgroup_procs_write_finish(task, threadgroup_locked);
 out_unlock:
        cgroup_kn_unlock(of->kn);
 
index 58aadfda9b8b3204e251bc86da1353df29df2dee..1f3a55297f39dcdd22162e68aee1a85695ef7497 100644 (file)
@@ -2289,7 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
        cgroup_taskset_first(tset, &css);
        cs = css_cs(css);
 
-       cpus_read_lock();
+       lockdep_assert_cpus_held();     /* see cgroup_attach_lock() */
        percpu_down_write(&cpuset_rwsem);
 
        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2343,7 +2343,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
                wake_up(&cpuset_attach_wq);
 
        percpu_up_write(&cpuset_rwsem);
-       cpus_read_unlock();
 }
 
 /* The various types of files and directories in a cpuset file system */
index 07b26df453a977c39f6771743a6eba9f694ad215..a0eb4d5cf557717b500d69270b4dfb1201cefea8 100644 (file)
@@ -494,6 +494,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 
 #ifdef CONFIG_KALLSYMS
        VMCOREINFO_SYMBOL(kallsyms_names);
+       VMCOREINFO_SYMBOL(kallsyms_num_syms);
        VMCOREINFO_SYMBOL(kallsyms_token_table);
        VMCOREINFO_SYMBOL(kallsyms_token_index);
 #ifdef CONFIG_KALLSYMS_BASE_RELATIVE
index 80697e5e03e49121211c80c3b8b6fc95bab9b904..08350e35aba240b8042c74aa8993012a8277c344 100644 (file)
@@ -1707,11 +1707,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
                /* Try to disarm and disable this/parent probe */
                if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
                        /*
-                        * If 'kprobes_all_disarmed' is set, 'orig_p'
-                        * should have already been disarmed, so
-                        * skip unneed disarming process.
+                        * Don't be lazy here.  Even if 'kprobes_all_disarmed'
+                        * is false, 'orig_p' might not have been armed yet.
+                        * Note arm_all_kprobes() __tries__ to arm all kprobes
+                        * on the best effort basis.
                         */
-                       if (!kprobes_all_disarmed) {
+                       if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
                                ret = disarm_kprobe(orig_p, true);
                                if (ret) {
                                        p->flags &= ~KPROBE_FLAG_DISABLED;
index 6a477c622544d4351781870b4350bc32167e10ae..a4e4d84b6f4e03a59cab3995c58b5cb12effb873 100644 (file)
@@ -2099,7 +2099,7 @@ static int find_module_sections(struct module *mod, struct load_info *info)
                                              sizeof(*mod->static_call_sites),
                                              &mod->num_static_call_sites);
 #endif
-#ifdef CONFIG_KUNIT
+#if IS_ENABLED(CONFIG_KUNIT)
        mod->kunit_suites = section_objs(info, ".kunit_test_suites",
                                              sizeof(*mod->kunit_suites),
                                              &mod->num_kunit_suites);
index ec66b40bdd403ba2b4e9d5bb8c4459fc2a0f5ef2..ecb4b4ff4ce0aba2e4f85c3eaba0c1e1a3f37d43 100644 (file)
@@ -190,12 +190,8 @@ static void group_init(struct psi_group *group)
        /* Init trigger-related members */
        mutex_init(&group->trigger_lock);
        INIT_LIST_HEAD(&group->triggers);
-       memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
-       group->poll_states = 0;
        group->poll_min_period = U32_MAX;
-       memset(group->polling_total, 0, sizeof(group->polling_total));
        group->polling_next_update = ULLONG_MAX;
-       group->polling_until = 0;
        init_waitqueue_head(&group->poll_wait);
        timer_setup(&group->poll_timer, poll_timer_fn, 0);
        rcu_assign_pointer(group->poll_task, NULL);
@@ -957,7 +953,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
        if (static_branch_likely(&psi_disabled))
                return 0;
 
-       cgroup->psi = kmalloc(sizeof(struct psi_group), GFP_KERNEL);
+       cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
        if (!cgroup->psi)
                return -ENOMEM;
 
@@ -1091,7 +1087,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 }
 
 struct psi_trigger *psi_trigger_create(struct psi_group *group,
-                       char *buf, size_t nbytes, enum psi_res res)
+                       char *buf, enum psi_res res)
 {
        struct psi_trigger *t;
        enum psi_states state;
@@ -1320,7 +1316,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
                return -EBUSY;
        }
 
-       new = psi_trigger_create(&psi_system, buf, nbytes, res);
+       new = psi_trigger_create(&psi_system, buf, res);
        if (IS_ERR(new)) {
                mutex_unlock(&seq->lock);
                return PTR_ERR(new);
index d4788f810b55511d6c88ca626ffddf4b6c7864a3..0b1cd985dc2749a9db3aadf3dc13bd179a7a450f 100644 (file)
@@ -47,7 +47,7 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_
                prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
                if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
                        ret = (*action)(&wbq_entry->key, mode);
-       } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+       } while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
 
        finish_wait(wq_head, &wbq_entry->wq_entry);
 
index a492f159624fa2545dfc43b78337a6166bfece18..860b2dcf3ac465973493a911ecf4fedda79ba049 100644 (file)
@@ -277,6 +277,7 @@ COND_SYSCALL(landlock_restrict_self);
 
 /* mm/fadvise.c */
 COND_SYSCALL(fadvise64_64);
+COND_SYSCALL_COMPAT(fadvise64_64);
 
 /* mm/, CONFIG_MMU only */
 COND_SYSCALL(swapon);
index bc921a3f7ea894f975238c7d0319f1cbd94ec9c5..439e2ab6905ee1e05e6f68c508b7e759e87fc86a 100644 (file)
@@ -1861,8 +1861,6 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
        ftrace_hash_rec_update_modify(ops, filter_hash, 1);
 }
 
-static bool ops_references_ip(struct ftrace_ops *ops, unsigned long ip);
-
 /*
  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
  * or no-needed to update, -EBUSY if it detects a conflict of the flag
@@ -2974,6 +2972,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
 
        ftrace_startup_enable(command);
 
+       /*
+        * If ftrace is in an undefined state, we just remove ops from list
+        * to prevent the NULL pointer, instead of totally rolling it back and
+        * free trampoline, because those actions could cause further damage.
+        */
+       if (unlikely(ftrace_disabled)) {
+               __unregister_ftrace_function(ops);
+               return -ENODEV;
+       }
+
        ops->flags &= ~FTRACE_OPS_FL_ADDING;
 
        return 0;
@@ -3108,49 +3116,6 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
                ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
-/*
- * Check if the current ops references the given ip.
- *
- * If the ops traces all functions, then it was already accounted for.
- * If the ops does not trace the current record function, skip it.
- * If the ops ignores the function via notrace filter, skip it.
- */
-static bool
-ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
-{
-       /* If ops isn't enabled, ignore it */
-       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
-               return false;
-
-       /* If ops traces all then it includes this function */
-       if (ops_traces_mod(ops))
-               return true;
-
-       /* The function must be in the filter */
-       if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
-           !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
-               return false;
-
-       /* If in notrace hash, we ignore it too */
-       if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
-               return false;
-
-       return true;
-}
-
-/*
- * Check if the current ops references the record.
- *
- * If the ops traces all functions, then it was already accounted for.
- * If the ops does not trace the current record function, skip it.
- * If the ops ignores the function via notrace filter, skip it.
- */
-static bool
-ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
-{
-       return ops_references_ip(ops, rec->ip);
-}
-
 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
        bool init_nop = ftrace_need_init_nop();
@@ -6812,6 +6777,38 @@ static int ftrace_get_trampoline_kallsym(unsigned int symnum,
        return -ERANGE;
 }
 
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
+/*
+ * Check if the current ops references the given ip.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static bool
+ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
+{
+       /* If ops isn't enabled, ignore it */
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return false;
+
+       /* If ops traces all then it includes this function */
+       if (ops_traces_mod(ops))
+               return true;
+
+       /* The function must be in the filter */
+       if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+           !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
+               return false;
+
+       /* If in notrace hash, we ignore it too */
+       if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
+               return false;
+
+       return true;
+}
+#endif
+
 #ifdef CONFIG_MODULES
 
 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
@@ -6824,7 +6821,7 @@ static int referenced_filters(struct dyn_ftrace *rec)
        int cnt = 0;
 
        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
-               if (ops_references_rec(ops, rec)) {
+               if (ops_references_ip(ops, rec->ip)) {
                        if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
                                continue;
                        if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
index 4a0e9d927443c38fc9e009107e22d6f321ca6da6..1783e3478912499a86710cf0184c5be9250112c4 100644 (file)
@@ -227,6 +227,7 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
        struct probe_arg *parg = &ep->tp.args[i];
        struct ftrace_event_field *field;
        struct list_head *head;
+       int ret = -ENOENT;
 
        head = trace_get_fields(ep->event);
        list_for_each_entry(field, head, link) {
@@ -236,9 +237,20 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
                        return 0;
                }
        }
+
+       /*
+        * Argument not found on event. But allow for comm and COMM
+        * to be used to get the current->comm.
+        */
+       if (strcmp(parg->code->data, "COMM") == 0 ||
+           strcmp(parg->code->data, "comm") == 0) {
+               parg->code->op = FETCH_OP_COMM;
+               ret = 0;
+       }
+
        kfree(parg->code->data);
        parg->code->data = NULL;
-       return -ENOENT;
+       return ret;
 }
 
 static int eprobe_event_define_fields(struct trace_event_call *event_call)
@@ -311,6 +323,27 @@ static unsigned long get_event_field(struct fetch_insn *code, void *rec)
 
        addr = rec + field->offset;
 
+       if (is_string_field(field)) {
+               switch (field->filter_type) {
+               case FILTER_DYN_STRING:
+                       val = (unsigned long)(rec + (*(unsigned int *)addr & 0xffff));
+                       break;
+               case FILTER_RDYN_STRING:
+                       val = (unsigned long)(addr + (*(unsigned int *)addr & 0xffff));
+                       break;
+               case FILTER_STATIC_STRING:
+                       val = (unsigned long)addr;
+                       break;
+               case FILTER_PTR_STRING:
+                       val = (unsigned long)(*(char *)addr);
+                       break;
+               default:
+                       WARN_ON_ONCE(1);
+                       return 0;
+               }
+               return val;
+       }
+
        switch (field->size) {
        case 1:
                if (field->is_signed)
@@ -342,16 +375,38 @@ static unsigned long get_event_field(struct fetch_insn *code, void *rec)
 
 static int get_eprobe_size(struct trace_probe *tp, void *rec)
 {
+       struct fetch_insn *code;
        struct probe_arg *arg;
        int i, len, ret = 0;
 
        for (i = 0; i < tp->nr_args; i++) {
                arg = tp->args + i;
-               if (unlikely(arg->dynamic)) {
+               if (arg->dynamic) {
                        unsigned long val;
 
-                       val = get_event_field(arg->code, rec);
-                       len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL);
+                       code = arg->code;
+ retry:
+                       switch (code->op) {
+                       case FETCH_OP_TP_ARG:
+                               val = get_event_field(code, rec);
+                               break;
+                       case FETCH_OP_IMM:
+                               val = code->immediate;
+                               break;
+                       case FETCH_OP_COMM:
+                               val = (unsigned long)current->comm;
+                               break;
+                       case FETCH_OP_DATA:
+                               val = (unsigned long)code->data;
+                               break;
+                       case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
+                               code++;
+                               goto retry;
+                       default:
+                               continue;
+                       }
+                       code++;
+                       len = process_fetch_insn_bottom(code, val, NULL, NULL);
                        if (len > 0)
                                ret += len;
                }
@@ -369,8 +424,28 @@ process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
 {
        unsigned long val;
 
-       val = get_event_field(code, rec);
-       return process_fetch_insn_bottom(code + 1, val, dest, base);
+ retry:
+       switch (code->op) {
+       case FETCH_OP_TP_ARG:
+               val = get_event_field(code, rec);
+               break;
+       case FETCH_OP_IMM:
+               val = code->immediate;
+               break;
+       case FETCH_OP_COMM:
+               val = (unsigned long)current->comm;
+               break;
+       case FETCH_OP_DATA:
+               val = (unsigned long)code->data;
+               break;
+       case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
+               code++;
+               goto retry;
+       default:
+               return -EILSEQ;
+       }
+       code++;
+       return process_fetch_insn_bottom(code, val, dest, base);
 }
 NOKPROBE_SYMBOL(process_fetch_insn)
 
@@ -845,6 +920,10 @@ static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[
                        trace_probe_log_err(0, BAD_ATTACH_ARG);
        }
 
+       /* Handle symbols "@" */
+       if (!ret)
+               ret = traceprobe_update_arg(&ep->tp.args[i]);
+
        return ret;
 }
 
@@ -883,7 +962,7 @@ static int __trace_eprobe_create(int argc, const char *argv[])
        trace_probe_log_set_index(1);
        sys_event = argv[1];
        ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2, 0);
-       if (!sys_event || !sys_name) {
+       if (ret || !sys_event || !sys_name) {
                trace_probe_log_err(0, NO_EVENT_INFO);
                goto parse_error;
        }
index a114549720d632be1a76d2f83ce0921873aabf98..61e3a2620fa3c9417ac23cf5a18aeb86e7393dcc 100644 (file)
@@ -157,7 +157,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
        int i;
 
        if (--tp_event->perf_refcount > 0)
-               goto out;
+               return;
 
        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
 
@@ -176,8 +176,6 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
                        perf_trace_buf[i] = NULL;
                }
        }
-out:
-       trace_event_put_ref(tp_event);
 }
 
 static int perf_trace_event_open(struct perf_event *p_event)
@@ -241,6 +239,7 @@ void perf_trace_destroy(struct perf_event *p_event)
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
+       trace_event_put_ref(p_event->tp_event);
        mutex_unlock(&event_mutex);
 }
 
@@ -292,6 +291,7 @@ void perf_kprobe_destroy(struct perf_event *p_event)
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
+       trace_event_put_ref(p_event->tp_event);
        mutex_unlock(&event_mutex);
 
        destroy_local_trace_kprobe(p_event->tp_event);
@@ -347,6 +347,7 @@ void perf_uprobe_destroy(struct perf_event *p_event)
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
+       trace_event_put_ref(p_event->tp_event);
        mutex_unlock(&event_mutex);
        destroy_local_trace_uprobe(p_event->tp_event);
 }
index 181f08186d32c01663b00b2d43b4b8877d2588e8..0356cae0cf74e79075f607bc841df05568688baa 100644 (file)
@@ -176,6 +176,7 @@ static int trace_define_generic_fields(void)
 
        __generic_field(int, CPU, FILTER_CPU);
        __generic_field(int, cpu, FILTER_CPU);
+       __generic_field(int, common_cpu, FILTER_CPU);
        __generic_field(char *, COMM, FILTER_COMM);
        __generic_field(char *, comm, FILTER_COMM);
 
index 850a88abd33ba20c921e85380f171af008a5c89c..36dff277de464a785d28117aa220f6bc82c3ecd8 100644 (file)
@@ -283,7 +283,14 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
        int ret = 0;
        int len;
 
-       if (strcmp(arg, "retval") == 0) {
+       if (flags & TPARG_FL_TPOINT) {
+               if (code->data)
+                       return -EFAULT;
+               code->data = kstrdup(arg, GFP_KERNEL);
+               if (!code->data)
+                       return -ENOMEM;
+               code->op = FETCH_OP_TP_ARG;
+       } else if (strcmp(arg, "retval") == 0) {
                if (flags & TPARG_FL_RETURN) {
                        code->op = FETCH_OP_RETVAL;
                } else {
@@ -307,7 +314,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
                        }
                } else
                        goto inval_var;
-       } else if (strcmp(arg, "comm") == 0) {
+       } else if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) {
                code->op = FETCH_OP_COMM;
 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
        } else if (((flags & TPARG_FL_MASK) ==
@@ -323,13 +330,6 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
                code->op = FETCH_OP_ARG;
                code->param = (unsigned int)param - 1;
 #endif
-       } else if (flags & TPARG_FL_TPOINT) {
-               if (code->data)
-                       return -EFAULT;
-               code->data = kstrdup(arg, GFP_KERNEL);
-               if (!code->data)
-                       return -ENOMEM;
-               code->op = FETCH_OP_TP_ARG;
        } else
                goto inval_var;
 
@@ -384,6 +384,11 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
                break;
 
        case '%':       /* named register */
+               if (flags & TPARG_FL_TPOINT) {
+                       /* eprobes do not handle registers */
+                       trace_probe_log_err(offs, BAD_VAR);
+                       break;
+               }
                ret = regs_query_register_offset(arg + 1);
                if (ret >= 0) {
                        code->op = FETCH_OP_REG;
@@ -617,9 +622,11 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
 
        /*
         * Since $comm and immediate string can not be dereferenced,
-        * we can find those by strcmp.
+        * we can find those by strcmp. But ignore for eprobes.
         */
-       if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
+       if (!(flags & TPARG_FL_TPOINT) &&
+           (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
+            strncmp(arg, "\\\"", 2) == 0)) {
                /* The type of $comm must be "string", and not an array. */
                if (parg->count || (t && strcmp(t, "string")))
                        goto out;
index 072e4b289c13e2b312000092b59e12868c7cf301..bcbe60d6c80c1a7cfd3bf4f595b26b14c5579883 100644 (file)
@@ -2029,13 +2029,16 @@ config LKDTM
        Documentation on how to use the module can be found in
        Documentation/fault-injection/provoke-crashes.rst
 
-config TEST_CPUMASK
-       tristate "cpumask tests" if !KUNIT_ALL_TESTS
+config CPUMASK_KUNIT_TEST
+       tristate "KUnit test for cpumask" if !KUNIT_ALL_TESTS
        depends on KUNIT
        default KUNIT_ALL_TESTS
        help
          Enable to turn on cpumask tests, running at boot or module load time.
 
+         For more information on KUnit and unit tests in general, please refer
+         to the KUnit documentation in Documentation/dev-tools/kunit/.
+
          If unsure, say N.
 
 config TEST_LIST_SORT
index c952121419282fa83397341cce31edcca8b1b258..ffabc30a27d4e3bf9aef1389d576321bec95d743 100644 (file)
@@ -34,9 +34,10 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
         earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
         nmi_backtrace.o win_minmax.o memcat_p.o \
-        buildid.o cpumask.o
+        buildid.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
+lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y  += kobject.o klist.o
 obj-y  += lockref.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
 obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
 CFLAGS_test_bitops.o += -Werror
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
 obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
 obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o
 obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
@@ -99,7 +101,6 @@ obj-$(CONFIG_TEST_HMM) += test_hmm.o
 obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
-obj-$(CONFIG_TEST_CPUMASK) += test_cpumask.o
 CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
 obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
 #
index 8baeb37e23d34816a80548ffaad98ed74f8f0bd0..f0ae119be8c41e3e1f56233ed122f8e2590916b3 100644 (file)
@@ -109,7 +109,6 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 }
 #endif
 
-#if NR_CPUS > 1
 /**
  * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
@@ -197,4 +196,3 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp)
        return next;
 }
 EXPORT_SYMBOL(cpumask_any_distribute);
-#endif /* NR_CPUS */
similarity index 58%
rename from lib/test_cpumask.c
rename to lib/cpumask_kunit.c
index a31a1622f1f6e8b0fcd901ceb081a1a9445c6022..ecbeec72221ea5c1af15730127650653182f2c62 100644 (file)
@@ -9,6 +9,10 @@
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 
+#define MASK_MSG(m) \
+       "%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
+       nr_cpumask_bits, cpumask_bits(m)
+
 #define EXPECT_FOR_EACH_CPU_EQ(test, mask)                     \
        do {                                                    \
                const cpumask_t *m = (mask);                    \
@@ -16,7 +20,7 @@
                int cpu, iter = 0;                              \
                for_each_cpu(cpu, m)                            \
                        iter++;                                 \
-               KUNIT_EXPECT_EQ((test), mask_weight, iter);     \
+               KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
        } while (0)
 
 #define EXPECT_FOR_EACH_CPU_NOT_EQ(test, mask)                                 \
@@ -26,7 +30,7 @@
                int cpu, iter = 0;                                              \
                for_each_cpu_not(cpu, m)                                        \
                        iter++;                                                 \
-               KUNIT_EXPECT_EQ((test), nr_cpu_ids - mask_weight, iter);        \
+               KUNIT_EXPECT_EQ_MSG((test), nr_cpu_ids - mask_weight, iter, MASK_MSG(mask));    \
        } while (0)
 
 #define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask)                        \
@@ -36,7 +40,7 @@
                int cpu, iter = 0;                              \
                for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2)       \
                        iter++;                                 \
-               KUNIT_EXPECT_EQ((test), mask_weight, iter);     \
+               KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
        } while (0)
 
 #define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name)             \
@@ -45,7 +49,7 @@
                int cpu, iter = 0;                              \
                for_each_##name##_cpu(cpu)                      \
                        iter++;                                 \
-               KUNIT_EXPECT_EQ((test), mask_weight, iter);     \
+               KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask));    \
        } while (0)
 
 static cpumask_t mask_empty;
@@ -53,37 +57,43 @@ static cpumask_t mask_all;
 
 static void test_cpumask_weight(struct kunit *test)
 {
-       KUNIT_EXPECT_TRUE(test, cpumask_empty(&mask_empty));
-       KUNIT_EXPECT_TRUE(test, cpumask_full(cpu_possible_mask));
-       KUNIT_EXPECT_TRUE(test, cpumask_full(&mask_all));
+       KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
+       KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));
 
-       KUNIT_EXPECT_EQ(test, 0, cpumask_weight(&mask_empty));
-       KUNIT_EXPECT_EQ(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask));
-       KUNIT_EXPECT_EQ(test, nr_cpumask_bits, cpumask_weight(&mask_all));
+       KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
+       KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
+                           MASK_MSG(cpu_possible_mask));
+       KUNIT_EXPECT_EQ_MSG(test, nr_cpumask_bits, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
 }
 
 static void test_cpumask_first(struct kunit *test)
 {
-       KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_first(&mask_empty));
-       KUNIT_EXPECT_EQ(test, 0, cpumask_first(cpu_possible_mask));
+       KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
+       KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
 
-       KUNIT_EXPECT_EQ(test, 0, cpumask_first_zero(&mask_empty));
-       KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask));
+       KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
+       KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
+                           MASK_MSG(cpu_possible_mask));
 }
 
 static void test_cpumask_last(struct kunit *test)
 {
-       KUNIT_EXPECT_LE(test, nr_cpumask_bits, cpumask_last(&mask_empty));
-       KUNIT_EXPECT_EQ(test, nr_cpumask_bits - 1, cpumask_last(cpu_possible_mask));
+       KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
+                           MASK_MSG(&mask_empty));
+       KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
+                           MASK_MSG(cpu_possible_mask));
 }
 
 static void test_cpumask_next(struct kunit *test)
 {
-       KUNIT_EXPECT_EQ(test, 0, cpumask_next_zero(-1, &mask_empty));
-       KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask));
-
-       KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_next(-1, &mask_empty));
-       KUNIT_EXPECT_EQ(test, 0, cpumask_next(-1, cpu_possible_mask));
+       KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
+       KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
+                           MASK_MSG(cpu_possible_mask));
+
+       KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
+                           MASK_MSG(&mask_empty));
+       KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
+                           MASK_MSG(cpu_possible_mask));
 }
 
 static void test_cpumask_iterators(struct kunit *test)
index e01a93f46f833483680b876fe9df7573bf4c2281..ce945c17980b9b8addd4c044e1080f73f21a4f3d 100644 (file)
  */
 int ___ratelimit(struct ratelimit_state *rs, const char *func)
 {
+       /* Paired with WRITE_ONCE() in .proc_handler().
+        * Changing two values seperately could be inconsistent
+        * and some message could be lost.  (See: net_ratelimit_state).
+        */
+       int interval = READ_ONCE(rs->interval);
+       int burst = READ_ONCE(rs->burst);
        unsigned long flags;
        int ret;
 
-       if (!rs->interval)
+       if (!interval)
                return 1;
 
        /*
@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
        if (!rs->begin)
                rs->begin = jiffies;
 
-       if (time_is_before_jiffies(rs->begin + rs->interval)) {
+       if (time_is_before_jiffies(rs->begin + interval)) {
                if (rs->missed) {
                        if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
                                printk_deferred(KERN_WARNING
@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
                rs->begin   = jiffies;
                rs->printed = 0;
        }
-       if (rs->burst && rs->burst > rs->printed) {
+       if (burst && burst > rs->printed) {
                rs->printed++;
                ret = 1;
        } else {
index 95550b8fa7fe2e359b55eb3d4cb50001296e9f20..de65cb1e5f76117907d34eb3ad1b1555b7f2b13b 100644 (file)
@@ -260,10 +260,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
        unsigned long timeout;
 
        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
 }
 
 static void wb_update_bandwidth_workfn(struct work_struct *work)
@@ -334,12 +334,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 static void wb_shutdown(struct bdi_writeback *wb)
 {
        /* Make sure nobody queues further work */
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
-               spin_unlock_bh(&wb->work_lock);
+               spin_unlock_irq(&wb->work_lock);
                return;
        }
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
 
        cgwb_remove_from_bdi_list(wb);
        /*
index f18a631e74797b1f7c5902cca59a8e49a5f603d1..b1efebfcf94bb9a5282ca69826dd31063d9058d1 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem_info.h>
 #include <linux/memory_hotplug.h>
+#include <linux/kmemleak.h>
 
 void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
 {
@@ -33,6 +34,7 @@ void put_page_bootmem(struct page *page)
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
+               kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
                free_reserved_page(page);
        }
 }
index cb8a7e9926a40fe07b65ec23f61d9058b17d4718..cfdf63132d5add70bcaa4ff5e08039a1da79cd53 100644 (file)
@@ -818,6 +818,9 @@ static int dbgfs_mk_context(char *name)
                return -ENOENT;
 
        new_dir = debugfs_create_dir(name, root);
+       /* Below check is required for a potential duplicated name case */
+       if (IS_ERR(new_dir))
+               return PTR_ERR(new_dir);
        dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
 
        new_ctx = dbgfs_new_ctx();
index 7328251574307b17423d410c554e2879ffff85f2..5abdaf487460567800542482757f577dccad19ac 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -478,14 +478,42 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
        return -EEXIST;
 }
 
-/*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
+static inline bool can_follow_write_pte(pte_t pte, struct page *page,
+                                       struct vm_area_struct *vma,
+                                       unsigned int flags)
 {
-       return pte_write(pte) ||
-               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+       /* If the pte is writable, we can write to the page. */
+       if (pte_write(pte))
+               return true;
+
+       /* Maybe FOLL_FORCE is set to override it? */
+       if (!(flags & FOLL_FORCE))
+               return false;
+
+       /* But FOLL_FORCE has no effect on shared mappings */
+       if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+               return false;
+
+       /* ... or read-only private ones */
+       if (!(vma->vm_flags & VM_MAYWRITE))
+               return false;
+
+       /* ... or already writable ones that just need to take a write fault */
+       if (vma->vm_flags & VM_WRITE)
+               return false;
+
+       /*
+        * See can_change_pte_writable(): we broke COW and could map the page
+        * writable if we have an exclusive anonymous page ...
+        */
+       if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+               return false;
+
+       /* ... and a write-fault isn't required for other reasons. */
+       if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
+               return false;
+       return !userfaultfd_pte_wp(vma, pte);
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -528,12 +556,19 @@ retry:
        }
        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                goto no_page;
-       if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
-               pte_unmap_unlock(ptep, ptl);
-               return NULL;
-       }
 
        page = vm_normal_page(vma, address, pte);
+
+       /*
+        * We only care about anon pages in can_follow_write_pte() and don't
+        * have to worry about pte_devmap() because they are never anon.
+        */
+       if ((flags & FOLL_WRITE) &&
+           !can_follow_write_pte(pte, page, vma, flags)) {
+               page = NULL;
+               goto out;
+       }
+
        if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                /*
                 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
@@ -986,17 +1021,6 @@ static int faultin_page(struct vm_area_struct *vma,
                return -EBUSY;
        }
 
-       /*
-        * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
-        * necessary, even if maybe_mkwrite decided not to set pte_write. We
-        * can thus safely do subsequent page lookups as if they were reads.
-        * But only do so when looping for pte_write is futile: in some cases
-        * userspace may also be wanting to write to the gotten user page,
-        * which a read fault here might prevent (a readonly page might get
-        * reCOWed by userspace write).
-        */
-       if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
-               *flags |= FOLL_COW;
        return 0;
 }
 
index 8a7c1b344abefb4b1b3903ecc61d143d98d6a9bc..e9414ee57c5b149ac4d6434ba2ba3385630a03e9 100644 (file)
@@ -1040,12 +1040,6 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       /*
-        * When we COW a devmap PMD entry, we split it into PTEs, so we should
-        * not be in this function with `flags & FOLL_COW` set.
-        */
-       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
-
        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
@@ -1395,14 +1389,42 @@ fallback:
        return VM_FAULT_FALLBACK;
 }
 
-/*
- * FOLL_FORCE can write to even unwritable pmd's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+                                       struct vm_area_struct *vma,
+                                       unsigned int flags)
 {
-       return pmd_write(pmd) ||
-              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+       /* If the pmd is writable, we can write to the page. */
+       if (pmd_write(pmd))
+               return true;
+
+       /* Maybe FOLL_FORCE is set to override it? */
+       if (!(flags & FOLL_FORCE))
+               return false;
+
+       /* But FOLL_FORCE has no effect on shared mappings */
+       if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+               return false;
+
+       /* ... or read-only private ones */
+       if (!(vma->vm_flags & VM_MAYWRITE))
+               return false;
+
+       /* ... or already writable ones that just need to take a write fault */
+       if (vma->vm_flags & VM_WRITE)
+               return false;
+
+       /*
+        * See can_change_pte_writable(): we broke COW and could map the page
+        * writable if we have an exclusive anonymous page ...
+        */
+       if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+               return false;
+
+       /* ... and a write-fault isn't required for other reasons. */
+       if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+               return false;
+       return !userfaultfd_huge_pmd_wp(vma, pmd);
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1411,12 +1433,16 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned int flags)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page *page = NULL;
+       struct page *page;
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
-               goto out;
+       page = pmd_page(*pmd);
+       VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+
+       if ((flags & FOLL_WRITE) &&
+           !can_follow_write_pmd(*pmd, page, vma, flags))
+               return NULL;
 
        /* Avoid dumping huge zero page */
        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
@@ -1424,10 +1450,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
        /* Full NUMA hinting faults to serialise migration in fault paths */
        if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
-               goto out;
-
-       page = pmd_page(*pmd);
-       VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+               return NULL;
 
        if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
                return ERR_PTR(-EMLINK);
@@ -1444,7 +1467,6 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
 
-out:
        return page;
 }
 
index 0aee2f3ae15c8251338746659e1b9ea73b1f1807..e070b8593b3765c998bc4d742afc862a1d42adaf 100644 (file)
@@ -5241,6 +5241,21 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
        VM_BUG_ON(unshare && (flags & FOLL_WRITE));
        VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
 
+       /*
+        * hugetlb does not support FOLL_FORCE-style write faults that keep the
+        * PTE mapped R/O such as maybe_mkwrite() would do.
+        */
+       if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
+               return VM_FAULT_SIGSEGV;
+
+       /* Let's take out MAP_SHARED mappings first. */
+       if (vma->vm_flags & VM_MAYSHARE) {
+               if (unlikely(unshare))
+                       return 0;
+               set_huge_ptep_writable(vma, haddr, ptep);
+               return 0;
+       }
+
        pte = huge_ptep_get(ptep);
        old_page = pte_page(pte);
 
@@ -5781,12 +5796,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * If we are going to COW/unshare the mapping later, we examine the
         * pending reservations for this page now. This will ensure that any
         * allocations necessary to record that reservation occur outside the
-        * spinlock. For private mappings, we also lookup the pagecache
-        * page now as it is used to determine if a reservation has been
-        * consumed.
+        * spinlock. Also lookup the pagecache page now as it is used to
+        * determine if a reservation has been consumed.
         */
        if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
-           !huge_pte_write(entry)) {
+           !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
                if (vma_needs_reservation(h, vma, haddr) < 0) {
                        ret = VM_FAULT_OOM;
                        goto out_mutex;
@@ -5794,9 +5808,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Just decrements count, does not deallocate */
                vma_end_reservation(h, vma, haddr);
 
-               if (!(vma->vm_flags & VM_MAYSHARE))
-                       pagecache_page = hugetlbfs_pagecache_page(h,
-                                                               vma, haddr);
+               pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr);
        }
 
        ptl = huge_pte_lock(h, mm, ptep);
@@ -6029,7 +6041,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
                goto out_release_unlock;
 
-       if (vm_shared) {
+       if (page_in_pagecache) {
                page_dup_file_rmap(page, true);
        } else {
                ClearHPageRestoreReserve(page);
index c035020d0c896971488ead38bacbfb68743ceaf6..9d780f415be3cd02ee51a9548fcea76cc5b514da 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1646,8 +1646,11 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
            pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
                return 0;
 
-       /* Do we need to track softdirty? */
-       if (vma_soft_dirty_enabled(vma))
+       /*
+        * Do we need to track softdirty? hugetlb does not support softdirty
+        * tracking yet.
+        */
+       if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
                return 1;
 
        /* Specialty mapping? */
index 3a23dde73723bc8adb1847c403b99a3d66970b88..bc6bddd156ca65fef31cfc761f18ac9f6381e10b 100644 (file)
@@ -196,10 +196,11 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
                        pages++;
                } else if (is_swap_pte(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
-                       struct page *page = pfn_swap_entry_to_page(entry);
                        pte_t newpte;
 
                        if (is_writable_migration_entry(entry)) {
+                               struct page *page = pfn_swap_entry_to_page(entry);
+
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
index d0d466a5c804ca0fa8709dd991dcd62f738cd8a3..032a7bf8d25930fb2628701be30c3da576b6b078 100644 (file)
@@ -2892,6 +2892,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)
 
 static void wb_inode_writeback_end(struct bdi_writeback *wb)
 {
+       unsigned long flags;
        atomic_dec(&wb->writeback_inodes);
        /*
         * Make sure estimate of writeback throughput gets updated after
@@ -2900,7 +2901,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
         * that if multiple inodes end writeback at a similar time, they get
         * batched into one bandwidth update.
         */
-       queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+       spin_lock_irqsave(&wb->work_lock, flags);
+       if (test_bit(WB_registered, &wb->state))
+               queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+       spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
 bool __folio_end_writeback(struct folio *folio)
index 5783f11351bb0806c3828a4ada7690b8d5343552..42e5888bf84d8638dc19ce15ef0e81ed8291a910 100644 (file)
@@ -1659,7 +1659,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                new = page_folio(newpage);
                mem_cgroup_migrate(old, new);
                __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
+               __inc_lruvec_page_state(newpage, NR_SHMEM);
                __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
+               __dec_lruvec_page_state(oldpage, NR_SHMEM);
        }
        xa_unlock_irq(&swap_mapping->i_pages);
 
@@ -1780,6 +1782,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 
        if (shmem_should_replace_folio(folio, gfp)) {
                error = shmem_replace_page(&page, gfp, info, index);
+               folio = page_folio(page);
                if (error)
                        goto failed;
        }
@@ -2281,16 +2284,34 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
        return 0;
 }
 
-/* Mask out flags that are inappropriate for the given type of inode. */
-static unsigned shmem_mask_flags(umode_t mode, __u32 flags)
+#ifdef CONFIG_TMPFS_XATTR
+static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
+
+/*
+ * chattr's fsflags are unrelated to extended attributes,
+ * but tmpfs has chosen to enable them under the same config option.
+ */
+static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
+{
+       unsigned int i_flags = 0;
+
+       if (fsflags & FS_NOATIME_FL)
+               i_flags |= S_NOATIME;
+       if (fsflags & FS_APPEND_FL)
+               i_flags |= S_APPEND;
+       if (fsflags & FS_IMMUTABLE_FL)
+               i_flags |= S_IMMUTABLE;
+       /*
+        * But FS_NODUMP_FL does not require any action in i_flags.
+        */
+       inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
+}
+#else
+static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
 {
-       if (S_ISDIR(mode))
-               return flags;
-       else if (S_ISREG(mode))
-               return flags & SHMEM_REG_FLMASK;
-       else
-               return flags & SHMEM_OTHER_FLMASK;
 }
+#define shmem_initxattrs NULL
+#endif
 
 static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
                                     umode_t mode, dev_t dev, unsigned long flags)
@@ -2319,7 +2340,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
                info->i_crtime = inode->i_mtime;
                info->fsflags = (dir == NULL) ? 0 :
                        SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
-               info->fsflags = shmem_mask_flags(mode, info->fsflags);
+               if (info->fsflags)
+                       shmem_set_inode_flags(inode, info->fsflags);
                INIT_LIST_HEAD(&info->shrinklist);
                INIT_LIST_HEAD(&info->swaplist);
                simple_xattrs_init(&info->xattrs);
@@ -2468,12 +2490,6 @@ out_unacct_blocks:
 static const struct inode_operations shmem_symlink_inode_operations;
 static const struct inode_operations shmem_short_symlink_operations;
 
-#ifdef CONFIG_TMPFS_XATTR
-static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
-#else
-#define shmem_initxattrs NULL
-#endif
-
 static int
 shmem_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len,
@@ -2826,12 +2842,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 
        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
                i_size_write(inode, offset + len);
-       inode->i_ctime = current_time(inode);
 undone:
        spin_lock(&inode->i_lock);
        inode->i_private = NULL;
        spin_unlock(&inode->i_lock);
 out:
+       if (!error)
+               file_modified(file);
        inode_unlock(inode);
        return error;
 }
@@ -3179,18 +3196,13 @@ static int shmem_fileattr_set(struct user_namespace *mnt_userns,
 
        if (fileattr_has_fsx(fa))
                return -EOPNOTSUPP;
+       if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
+               return -EOPNOTSUPP;
 
        info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
                (fa->flags & SHMEM_FL_USER_MODIFIABLE);
 
-       inode->i_flags &= ~(S_APPEND | S_IMMUTABLE | S_NOATIME);
-       if (info->fsflags & FS_APPEND_FL)
-               inode->i_flags |= S_APPEND;
-       if (info->fsflags & FS_IMMUTABLE_FL)
-               inode->i_flags |= S_IMMUTABLE;
-       if (info->fsflags & FS_NOATIME_FL)
-               inode->i_flags |= S_NOATIME;
-
+       shmem_set_inode_flags(inode, info->fsflags);
        inode->i_ctime = current_time(inode);
        return 0;
 }
index 07d3befc80e4134dd6b54be127197ffaad5c10e2..7327b2573f7c2f83c0475109fe7b6e6479b270a5 100644 (file)
@@ -703,14 +703,29 @@ ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
                              mmap_changing, 0);
 }
 
+void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
+                  unsigned long start, unsigned long len, bool enable_wp)
+{
+       struct mmu_gather tlb;
+       pgprot_t newprot;
+
+       if (enable_wp)
+               newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+       else
+               newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+       tlb_gather_mmu(&tlb, dst_mm);
+       change_protection(&tlb, dst_vma, start, start + len, newprot,
+                         enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+       tlb_finish_mmu(&tlb);
+}
+
 int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                        unsigned long len, bool enable_wp,
                        atomic_t *mmap_changing)
 {
        struct vm_area_struct *dst_vma;
        unsigned long page_mask;
-       struct mmu_gather tlb;
-       pgprot_t newprot;
        int err;
 
        /*
@@ -750,15 +765,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                        goto out_unlock;
        }
 
-       if (enable_wp)
-               newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
-       else
-               newprot = vm_get_page_prot(dst_vma->vm_flags);
-
-       tlb_gather_mmu(&tlb, dst_mm);
-       change_protection(&tlb, dst_vma, start, start + len, newprot,
-                         enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
-       tlb_finish_mmu(&tlb);
+       uffd_wp_range(dst_mm, dst_vma, start, len, enable_wp);
 
        err = 0;
 out_unlock:
index 373d2730fcf2157562f343ec4b3160e8f4f49eac..90af9a8572f5a7073520ddaf2f4d1d3aaec2b7ac 100644 (file)
@@ -1168,8 +1168,15 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
+#ifdef CONFIG_ZONE_DEVICE
+#define TEXT_FOR_DEVICE(xx) xx "_device",
+#else
+#define TEXT_FOR_DEVICE(xx)
+#endif
+
 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
-                                       TEXT_FOR_HIGHMEM(xx) xx "_movable",
+                                       TEXT_FOR_HIGHMEM(xx) xx "_movable", \
+                                       TEXT_FOR_DEVICE(xx)
 
 const char * const vmstat_text[] = {
        /* enum zone_stat_item counters */
index 34f784a1604b7a60153567767f715fa3371eb675..907c9b1e1e6147d7c16441a3a0ee0ac9c9decd24 100644 (file)
@@ -1487,7 +1487,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
        struct size_class *class;
        enum fullness_group fullness;
 
-       if (unlikely(!handle))
+       if (IS_ERR_OR_NULL((void *)handle))
                return;
 
        /*
index 1a11064f9990719588c44d80a93c3269f4582c00..8f19253024b0aa4624bb7c8dac836d5c2fa3a01e 100644 (file)
@@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = {
        .entries        = (char *)&initial_chain,
 };
 
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
-       if (valid_hooks & ~(1 << NF_BR_BROUTING))
-               return -EINVAL;
-       return 0;
-}
-
 static const struct ebt_table broute_table = {
        .name           = "broute",
        .table          = &initial_table,
        .valid_hooks    = 1 << NF_BR_BROUTING,
-       .check          = check,
        .me             = THIS_MODULE,
 };
 
index cb949436bc0e34c2a721d5ca423c8db07e4bac2b..278f324e67524a8933345f48feeb267d0a9e2dfa 100644 (file)
@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
        .entries        = (char *)initial_chains,
 };
 
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
-       if (valid_hooks & ~FILTER_VALID_HOOKS)
-               return -EINVAL;
-       return 0;
-}
-
 static const struct ebt_table frame_filter = {
        .name           = "filter",
        .table          = &initial_table,
        .valid_hooks    = FILTER_VALID_HOOKS,
-       .check          = check,
        .me             = THIS_MODULE,
 };
 
index 5ee0531ae50610e456b07f051cf7769bb5cb004e..9066f7f376d57ea509f4af6cfc7d94cd157aae9b 100644 (file)
@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
        .entries        = (char *)initial_chains,
 };
 
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
-       if (valid_hooks & ~NAT_VALID_HOOKS)
-               return -EINVAL;
-       return 0;
-}
-
 static const struct ebt_table frame_nat = {
        .name           = "nat",
        .table          = &initial_table,
        .valid_hooks    = NAT_VALID_HOOKS,
-       .check          = check,
        .me             = THIS_MODULE,
 };
 
index f2dbefb61ce8368103ee4aa8b20f6b18edbd16ca..9a0ae59cdc500b5e0d5883b1cfd085399350e61d 100644 (file)
@@ -1040,8 +1040,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
                goto free_iterate;
        }
 
-       /* the table doesn't like it */
-       if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
+       if (repl->valid_hooks != t->valid_hooks)
                goto free_unlock;
 
        if (repl->num_counters && repl->num_counters != t->private->nentries) {
@@ -1231,11 +1230,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
        if (ret != 0)
                goto free_chainstack;
 
-       if (table->check && table->check(newinfo, table->valid_hooks)) {
-               ret = -EINVAL;
-               goto free_chainstack;
-       }
-
        table->private = newinfo;
        rwlock_init(&table->lock);
        mutex_lock(&ebt_mutex);
index 1b7f385643b4c90c02f0659e64d2f5d371f839dc..94374d529ea4211f516d2a0d10a3345fa3f3c924 100644 (file)
@@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
 static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
                                 void *owner, u32 size)
 {
+       int optmem_max = READ_ONCE(sysctl_optmem_max);
        struct sock *sk = (struct sock *)owner;
 
        /* same check as in sock_kmalloc() */
-       if (size <= sysctl_optmem_max &&
-           atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+       if (size <= optmem_max &&
+           atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
                atomic_add(size, &sk->sk_omem_alloc);
                return 0;
        }
index 716df64fcfa570c29101da230aeb4ec6e644d803..56c8b0921c9fdc9edfd4003f019693656813c1fa 100644 (file)
@@ -4624,7 +4624,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
        struct softnet_data *sd;
        unsigned int old_flow, new_flow;
 
-       if (qlen < (netdev_max_backlog >> 1))
+       if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
                return false;
 
        sd = this_cpu_ptr(&softnet_data);
@@ -4672,7 +4672,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
        if (!netif_running(skb->dev))
                goto drop;
        qlen = skb_queue_len(&sd->input_pkt_queue);
-       if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+       if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
                if (qlen) {
 enqueue:
                        __skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4928,7 +4928,7 @@ static int netif_rx_internal(struct sk_buff *skb)
 {
        int ret;
 
-       net_timestamp_check(netdev_tstamp_prequeue, skb);
+       net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 
        trace_netif_rx(skb);
 
@@ -5281,7 +5281,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
        int ret = NET_RX_DROP;
        __be16 type;
 
-       net_timestamp_check(!netdev_tstamp_prequeue, skb);
+       net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
 
        trace_netif_receive_skb(skb);
 
@@ -5664,7 +5664,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 {
        int ret;
 
-       net_timestamp_check(netdev_tstamp_prequeue, skb);
+       net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
@@ -5694,7 +5694,7 @@ void netif_receive_skb_list_internal(struct list_head *head)
 
        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
-               net_timestamp_check(netdev_tstamp_prequeue, skb);
+               net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
                skb_list_del_init(skb);
                if (!skb_defer_rx_timestamp(skb))
                        list_add_tail(&skb->list, &sublist);
@@ -5918,7 +5918,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
                net_rps_action_and_irq_enable(sd);
        }
 
-       napi->weight = dev_rx_weight;
+       napi->weight = READ_ONCE(dev_rx_weight);
        while (again) {
                struct sk_buff *skb;
 
@@ -6665,8 +6665,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 {
        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
        unsigned long time_limit = jiffies +
-               usecs_to_jiffies(netdev_budget_usecs);
-       int budget = netdev_budget;
+               usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
+       int budget = READ_ONCE(netdev_budget);
        LIST_HEAD(list);
        LIST_HEAD(repoll);
 
@@ -10284,7 +10284,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
                                return dev;
 
                if (time_after(jiffies, warning_time +
-                              netdev_unregister_timeout_secs * HZ)) {
+                              READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
                        list_for_each_entry(dev, list, todo_list) {
                                pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
                                         dev->name, netdev_refcnt_read(dev));
index e8508aaafd27d75c6cbf872eeeb60e608ecdcd6f..c191db80ce93c7efbe8a623d64c8485fb7591ca3 100644 (file)
@@ -1214,10 +1214,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
        u32 filter_size = bpf_prog_size(fp->prog->len);
+       int optmem_max = READ_ONCE(sysctl_optmem_max);
 
        /* same check as in sock_kmalloc() */
-       if (filter_size <= sysctl_optmem_max &&
-           atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+       if (filter_size <= optmem_max &&
+           atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
                atomic_add(filter_size, &sk->sk_omem_alloc);
                return true;
        }
@@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       if (bpf_prog_size(prog->len) > sysctl_optmem_max)
+       if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
                err = -ENOMEM;
        else
                err = reuseport_attach_prog(sk, prog);
@@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
                }
        } else {
                /* BPF_PROG_TYPE_SOCKET_FILTER */
-               if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
+               if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
                        err = -ENOMEM;
                        goto err_prog_put;
                }
@@ -5034,14 +5035,14 @@ static int __bpf_setsockopt(struct sock *sk, int level, int optname,
                /* Only some socketops are supported */
                switch (optname) {
                case SO_RCVBUF:
-                       val = min_t(u32, val, sysctl_rmem_max);
+                       val = min_t(u32, val, READ_ONCE(sysctl_rmem_max));
                        val = min_t(int, val, INT_MAX / 2);
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                        WRITE_ONCE(sk->sk_rcvbuf,
                                   max_t(int, val * 2, SOCK_MIN_RCVBUF));
                        break;
                case SO_SNDBUF:
-                       val = min_t(u32, val, sysctl_wmem_max);
+                       val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
                        val = min_t(int, val, INT_MAX / 2);
                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                        WRITE_ONCE(sk->sk_sndbuf,
index a10335b4ba2d0bf55fc5d4bbdf50b88743152f45..c8d137ef5980eaf6e1c3ea10417e096bf2a33849 100644 (file)
@@ -345,7 +345,7 @@ static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
        for_each_possible_cpu(i) {
                const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
 
-               qstats->qlen += qcpu->backlog;
+               qstats->qlen += qcpu->qlen;
                qstats->backlog += qcpu->backlog;
                qstats->drops += qcpu->drops;
                qstats->requeues += qcpu->requeues;
index 541c7a72a28a4b00e7e196eca01df42842ea103f..21619c70a82b78aa82bcf63db22e6403ca2b22bd 100644 (file)
@@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 
        cell = this_cpu_ptr(gcells->cells);
 
-       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+       if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
 drop:
                dev_core_stats_rx_dropped_inc(dev);
                kfree_skb(skb);
index 6a8c2596ebab9ea2963959914fe70dc9bc15736b..78cc8fb688140ff5ff045dacdcef746e741daf2f 100644 (file)
@@ -307,11 +307,35 @@ static int neigh_del_timer(struct neighbour *n)
        return 0;
 }
 
-static void pneigh_queue_purge(struct sk_buff_head *list)
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
 {
+       struct sk_buff_head tmp;
+       unsigned long flags;
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(list)) != NULL) {
+       skb_queue_head_init(&tmp);
+       spin_lock_irqsave(&list->lock, flags);
+       skb = skb_peek(list);
+       while (skb != NULL) {
+               struct sk_buff *skb_next = skb_peek_next(skb, list);
+               struct net_device *dev = skb->dev;
+
+               if (net == NULL || net_eq(dev_net(dev), net)) {
+                       struct in_device *in_dev;
+
+                       rcu_read_lock();
+                       in_dev = __in_dev_get_rcu(dev);
+                       if (in_dev)
+                               in_dev->arp_parms->qlen--;
+                       rcu_read_unlock();
+                       __skb_unlink(skb, list);
+                       __skb_queue_tail(&tmp, skb);
+               }
+               skb = skb_next;
+       }
+       spin_unlock_irqrestore(&list->lock, flags);
+
+       while ((skb = __skb_dequeue(&tmp))) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
@@ -385,9 +409,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, skip_perm);
        pneigh_ifdown_and_unlock(tbl, dev);
-
-       del_timer_sync(&tbl->proxy_timer);
-       pneigh_queue_purge(&tbl->proxy_queue);
+       pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
+       if (skb_queue_empty_lockless(&tbl->proxy_queue))
+               del_timer_sync(&tbl->proxy_timer);
        return 0;
 }
 
@@ -1597,8 +1621,15 @@ static void neigh_proxy_process(struct timer_list *t)
 
                if (tdif <= 0) {
                        struct net_device *dev = skb->dev;
+                       struct in_device *in_dev;
 
+                       rcu_read_lock();
+                       in_dev = __in_dev_get_rcu(dev);
+                       if (in_dev)
+                               in_dev->arp_parms->qlen--;
+                       rcu_read_unlock();
                        __skb_unlink(skb, &tbl->proxy_queue);
+
                        if (tbl->proxy_redo && netif_running(dev)) {
                                rcu_read_lock();
                                tbl->proxy_redo(skb);
@@ -1623,7 +1654,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
        unsigned long sched_next = jiffies +
                        prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
 
-       if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
+       if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
                kfree_skb(skb);
                return;
        }
@@ -1639,6 +1670,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
        skb_dst_drop(skb);
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
+       p->qlen++;
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
 }
@@ -1671,6 +1703,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                refcount_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+               p->qlen = 0;
                netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
                p->dev = dev;
                write_pnet(&p->net, net);
@@ -1736,6 +1769,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
        refcount_set(&tbl->parms.refcnt, 1);
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
+       tbl->parms.qlen = 0;
 
        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
@@ -1787,7 +1821,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
        cancel_delayed_work_sync(&tbl->managed_work);
        cancel_delayed_work_sync(&tbl->gc_work);
        del_timer_sync(&tbl->proxy_timer);
-       pneigh_queue_purge(&tbl->proxy_queue);
+       pneigh_queue_purge(&tbl->proxy_queue, NULL);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                pr_crit("neighbour leakage\n");
index ac45328607f77af33cf51f85f9918376a9fe8ae0..4b5b15c684ed63522325740dea0678a71cd07206 100644 (file)
@@ -6070,6 +6070,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
            !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
                NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
+               module_put(owner);
                goto err_unlock;
        }
 
index 974bbbbe7138a447e04d3a773d8c875574413516..84bb5e188d0d422a3cb8eef05e6c7dc1f85c5511 100644 (file)
@@ -4205,9 +4205,8 @@ normal:
                                SKB_GSO_CB(nskb)->csum_start =
                                        skb_headroom(nskb) + doffset;
                        } else {
-                               skb_copy_bits(head_skb, offset,
-                                             skb_put(nskb, len),
-                                             len);
+                               if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
+                                       goto err;
                        }
                        continue;
                }
@@ -4798,7 +4797,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
 {
        bool ret;
 
-       if (likely(sysctl_tstamp_allow_data || tsonly))
+       if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
                return true;
 
        read_lock_bh(&sk->sk_callback_lock);
index f47338d89d5d504ff46b91b1fdb596089cea06cc..59e75ffcc1f4085710c56c9461c4a82dd1b3882c 100644 (file)
@@ -1194,8 +1194,9 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
        }
-       if (sk_psock_verdict_apply(psock, skb, ret) < 0)
-               len = 0;
+       ret = sk_psock_verdict_apply(psock, skb, ret);
+       if (ret < 0)
+               len = ret;
 out:
        rcu_read_unlock();
        return len;
index 4cb957d934a252f00e4aabf8c5fa4631fd0b529a..788c1372663cbabdd3d2dd0d0274d60b7d63dd2c 100644 (file)
@@ -1101,7 +1101,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
                 * play 'guess the biggest size' games. RCVBUF/SNDBUF
                 * are treated in BSD as hints
                 */
-               val = min_t(u32, val, sysctl_wmem_max);
+               val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
 set_sndbuf:
                /* Ensure val * 2 fits into an int, to prevent max_t()
                 * from treating it as a negative value.
@@ -1133,7 +1133,7 @@ set_sndbuf:
                 * play 'guess the biggest size' games. RCVBUF/SNDBUF
                 * are treated in BSD as hints
                 */
-               __sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
+               __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
                break;
 
        case SO_RCVBUFFORCE:
@@ -2536,7 +2536,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
 
        /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
        if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
-           sysctl_optmem_max)
+           READ_ONCE(sysctl_optmem_max))
                return NULL;
 
        skb = alloc_skb(size, priority);
@@ -2554,8 +2554,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
  */
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 {
-       if ((unsigned int)size <= sysctl_optmem_max &&
-           atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+       int optmem_max = READ_ONCE(sysctl_optmem_max);
+
+       if ((unsigned int)size <= optmem_max &&
+           atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
                void *mem;
                /* First do the add, to avoid the race if kmalloc
                 * might sleep.
@@ -3309,8 +3311,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        timer_setup(&sk->sk_timer, NULL, 0);
 
        sk->sk_allocation       =       GFP_KERNEL;
-       sk->sk_rcvbuf           =       sysctl_rmem_default;
-       sk->sk_sndbuf           =       sysctl_wmem_default;
+       sk->sk_rcvbuf           =       READ_ONCE(sysctl_rmem_default);
+       sk->sk_sndbuf           =       READ_ONCE(sysctl_wmem_default);
        sk->sk_state            =       TCP_CLOSE;
        sk_set_socket(sk, sock);
 
@@ -3365,7 +3367,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
        sk->sk_napi_id          =       0;
-       sk->sk_ll_usec          =       sysctl_net_busy_read;
+       sk->sk_ll_usec          =       READ_ONCE(sysctl_net_busy_read);
 #endif
 
        sk->sk_max_pacing_rate = ~0UL;
index 71a13596ea2bffb64b48399d9aeb2dc04c3446c6..725891527814c4d6cc5709646c5c7e0ebc0cbf4c 100644 (file)
@@ -234,14 +234,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
 static int proc_do_dev_weight(struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
 {
-       int ret;
+       static DEFINE_MUTEX(dev_weight_mutex);
+       int ret, weight;
 
+       mutex_lock(&dev_weight_mutex);
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
-       if (ret != 0)
-               return ret;
-
-       dev_rx_weight = weight_p * dev_weight_rx_bias;
-       dev_tx_weight = weight_p * dev_weight_tx_bias;
+       if (!ret && write) {
+               weight = READ_ONCE(weight_p);
+               WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
+               WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+       }
+       mutex_unlock(&dev_weight_mutex);
 
        return ret;
 }
index 2dd76eb1621c74b514f3e0480a688da7ff44627b..a8895ee3cd600ca8cfd1cc88a6614a910cc76b7f 100644 (file)
@@ -145,11 +145,14 @@ int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
                                   bool do_fast_age)
 {
+       struct dsa_switch *ds = dp->ds;
        int err;
 
        err = dsa_port_set_state(dp, state, do_fast_age);
-       if (err)
-               pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
+       if (err && err != -EOPNOTSUPP) {
+               dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
+                       dp->index, state, ERR_PTR(err));
+       }
 }
 
 int dsa_port_set_mst_state(struct dsa_port *dp,
index ad6a6663feeb5ecce4559dcb783608edb82030c2..1291c2431d440586ec891bb450a7f645dd834461 100644 (file)
@@ -2484,7 +2484,7 @@ static int dsa_slave_changeupper(struct net_device *dev,
                        if (!err)
                                dsa_bridge_mtu_normalization(dp);
                        if (err == -EOPNOTSUPP) {
-                               if (!extack->_msg)
+                               if (extack && !extack->_msg)
                                        NL_SET_ERR_MSG_MOD(extack,
                                                           "Offloading not supported");
                                err = 0;
index 92b778e423df824d45ffbfa53a975af14c6c5713..e8b9a9202fecd913137f169f161dfdccc16f7edf 100644 (file)
@@ -2682,23 +2682,27 @@ static __net_init int devinet_init_net(struct net *net)
 #endif
 
        if (!net_eq(net, &init_net)) {
-               if (IS_ENABLED(CONFIG_SYSCTL) &&
-                   sysctl_devconf_inherit_init_net == 3) {
+               switch (net_inherit_devconf()) {
+               case 3:
                        /* copy from the current netns */
                        memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
                               sizeof(ipv4_devconf));
                        memcpy(dflt,
                               current->nsproxy->net_ns->ipv4.devconf_dflt,
                               sizeof(ipv4_devconf_dflt));
-               } else if (!IS_ENABLED(CONFIG_SYSCTL) ||
-                          sysctl_devconf_inherit_init_net != 2) {
-                       /* inherit == 0 or 1: copy from init_net */
+                       break;
+               case 0:
+               case 1:
+                       /* copy from init_net */
                        memcpy(all, init_net.ipv4.devconf_all,
                               sizeof(ipv4_devconf));
                        memcpy(dflt, init_net.ipv4.devconf_dflt,
                               sizeof(ipv4_devconf_dflt));
+                       break;
+               case 2:
+                       /* use compiled values */
+                       break;
                }
-               /* else inherit == 2: use compiled values */
        }
 
 #ifdef CONFIG_SYSCTL
index d7bd1daf022b5a6a2c025e83346d347714f672c8..04e2034f2f8eda79c1b0a9b67c8dba09bca34dba 100644 (file)
@@ -1730,7 +1730,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
-       sk->sk_sndbuf = sysctl_wmem_default;
+       sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
        ipc.sockc.mark = fl4.flowi4_mark;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
index a8a323ecbb54b702e0a744e44f34ddcef7e2d383..e49a61a053a68838d9fb7fc0b1a9012477e19fd6 100644 (file)
@@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
 
        if (optlen < GROUP_FILTER_SIZE(0))
                return -EINVAL;
-       if (optlen > sysctl_optmem_max)
+       if (optlen > READ_ONCE(sysctl_optmem_max))
                return -ENOBUFS;
 
        gsf = memdup_sockptr(optval, optlen);
@@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
        if (optlen < size0)
                return -EINVAL;
-       if (optlen > sysctl_optmem_max - 4)
+       if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
                return -ENOBUFS;
 
        p = kmalloc(optlen + 4, GFP_KERNEL);
@@ -1233,7 +1233,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
 
                if (optlen < IP_MSFILTER_SIZE(0))
                        goto e_inval;
-               if (optlen > sysctl_optmem_max) {
+               if (optlen > READ_ONCE(sysctl_optmem_max)) {
                        err = -ENOBUFS;
                        break;
                }
index 970e9a2cca4aedc1804b5062b8819ce552c4e9f6..e5011c136fdb7accd6b86cb38d0817f7b854f9fd 100644 (file)
@@ -1000,7 +1000,7 @@ new_segment:
 
        i = skb_shinfo(skb)->nr_frags;
        can_coalesce = skb_can_coalesce(skb, i, page, offset);
-       if (!can_coalesce && i >= sysctl_max_skb_frags) {
+       if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
                tcp_mark_push(tp, skb);
                goto new_segment;
        }
@@ -1354,7 +1354,7 @@ new_segment:
 
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
-                               if (i >= sysctl_max_skb_frags) {
+                               if (i >= READ_ONCE(sysctl_max_skb_frags)) {
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                }
@@ -1567,17 +1567,11 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-void tcp_cleanup_rbuf(struct sock *sk, int copied)
+static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        bool time_to_ack = false;
 
-       struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
-
-       WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
-            "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
-            tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
-
        if (inet_csk_ack_scheduled(sk)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -1623,6 +1617,17 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
                tcp_send_ack(sk);
 }
 
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
+{
+       struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+            "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+            tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
+       __tcp_cleanup_rbuf(sk, copied);
+}
+
 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
 {
        __skb_unlink(skb, &sk->sk_receive_queue);
@@ -1756,34 +1761,26 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
        if (sk->sk_state == TCP_LISTEN)
                return -ENOTCONN;
 
-       while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
-               int used;
-
-               __skb_unlink(skb, &sk->sk_receive_queue);
-               used = recv_actor(sk, skb);
-               if (used <= 0) {
-                       if (!copied)
-                               copied = used;
-                       break;
-               }
-               seq += used;
-               copied += used;
+       skb = tcp_recv_skb(sk, seq, &offset);
+       if (!skb)
+               return 0;
 
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
-                       consume_skb(skb);
+       __skb_unlink(skb, &sk->sk_receive_queue);
+       WARN_ON(!skb_set_owner_sk_safe(skb, sk));
+       copied = recv_actor(sk, skb);
+       if (copied >= 0) {
+               seq += copied;
+               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                        ++seq;
-                       break;
-               }
-               consume_skb(skb);
-               break;
        }
+       consume_skb(skb);
        WRITE_ONCE(tp->copied_seq, seq);
 
        tcp_rcv_space_adjust(sk);
 
        /* Clean up data we have read: This will do ACK frames. */
        if (copied > 0)
-               tcp_cleanup_rbuf(sk, copied);
+               __tcp_cleanup_rbuf(sk, copied);
 
        return copied;
 }
index 78b654ff421b19fc8b73d73ff2d358008856a498..290019de766dc2b8dae3bbfbcd5011d6d03f0f45 100644 (file)
@@ -239,7 +239,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
        if (wscale_ok) {
                /* Set window scaling on max possible window */
                space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
-               space = max_t(u32, space, sysctl_rmem_max);
+               space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
                space = min_t(u32, space, *window_clamp);
                *rcv_wscale = clamp_t(int, ilog2(space) - 15,
                                      0, TCP_MAX_WSCALE);
index b624e3d8c5f0a5062879d8cbfacfc37d138afc3a..e15f64f22fa8327a4e4211c299ecee9cbcf41d26 100644 (file)
@@ -7162,9 +7162,8 @@ static int __net_init addrconf_init_net(struct net *net)
        if (!dflt)
                goto err_alloc_dflt;
 
-       if (IS_ENABLED(CONFIG_SYSCTL) &&
-           !net_eq(net, &init_net)) {
-               switch (sysctl_devconf_inherit_init_net) {
+       if (!net_eq(net, &init_net)) {
+               switch (net_inherit_devconf()) {
                case 1:  /* copy from init_net */
                        memcpy(all, init_net.ipv6.devconf_all,
                               sizeof(ipv6_devconf));
index 3fda5634578ce672a90ca5977838071a66156aa3..79c6a827dea9fa102f917afd5b11283428d36dcc 100644 (file)
@@ -1517,7 +1517,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
  *   ip6_tnl_change() updates the tunnel parameters
  **/
 
-static int
+static void
 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 {
        t->parms.laddr = p->laddr;
@@ -1531,29 +1531,25 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
        t->parms.fwmark = p->fwmark;
        dst_cache_reset(&t->dst_cache);
        ip6_tnl_link_config(t);
-       return 0;
 }
 
-static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
 {
        struct net *net = t->net;
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-       int err;
 
        ip6_tnl_unlink(ip6n, t);
        synchronize_net();
-       err = ip6_tnl_change(t, p);
+       ip6_tnl_change(t, p);
        ip6_tnl_link(ip6n, t);
        netdev_state_change(t->dev);
-       return err;
 }
 
-static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static void ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
 {
        /* for default tnl0 device allow to change only the proto */
        t->parms.proto = p->proto;
        netdev_state_change(t->dev);
-       return 0;
 }
 
 static void
@@ -1667,9 +1663,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
                        } else
                                t = netdev_priv(dev);
                        if (dev == ip6n->fb_tnl_dev)
-                               err = ip6_tnl0_update(t, &p1);
+                               ip6_tnl0_update(t, &p1);
                        else
-                               err = ip6_tnl_update(t, &p1);
+                               ip6_tnl_update(t, &p1);
                }
                if (!IS_ERR(t)) {
                        err = 0;
@@ -2091,7 +2087,8 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
        } else
                t = netdev_priv(dev);
 
-       return ip6_tnl_update(t, &p);
+       ip6_tnl_update(t, &p);
+       return 0;
 }
 
 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
index 222f6bf220ba0d08bdde1464a1d383f819b3fe34..e0dcc7a193df2a9f70350ec448f810c06a9e5330 100644 (file)
@@ -210,7 +210,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
        if (optlen < GROUP_FILTER_SIZE(0))
                return -EINVAL;
-       if (optlen > sysctl_optmem_max)
+       if (optlen > READ_ONCE(sysctl_optmem_max))
                return -ENOBUFS;
 
        gsf = memdup_sockptr(optval, optlen);
@@ -244,7 +244,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
        if (optlen < size0)
                return -EINVAL;
-       if (optlen > sysctl_optmem_max - 4)
+       if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
                return -ENOBUFS;
 
        p = kmalloc(optlen + 4, GFP_KERNEL);
index 98453693e400973256d51f4e69f04b386c982263..3a553494ff16468401f88a384423729a673188b3 100644 (file)
@@ -1378,6 +1378,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        if (!rt && lifetime) {
                ND_PRINTK(3, info, "RA: adding default router\n");
 
+               if (neigh)
+                       neigh_release(neigh);
+
                rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
                                         skb->dev, pref, defrtr_usr_metric);
                if (!rt) {
index 7dd3629dd19e71a6db2add2265ca49ab9cceaf63..38db0064d6613a8472ec2835afdbf80071c1fcc2 100644 (file)
@@ -86,7 +86,6 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
        table[1].extra2 = &nf_frag->fqdir->high_thresh;
        table[2].data   = &nf_frag->fqdir->high_thresh;
        table[2].extra1 = &nf_frag->fqdir->low_thresh;
-       table[2].extra2 = &nf_frag->fqdir->high_thresh;
 
        hdr = register_net_sysctl(net, "net/netfilter", table);
        if (hdr == NULL)
index fda2dcc8a3831b46e57d361b59988a78b71271d4..c85df5b958d266ce37fb27caba1bad74e8ef70a7 100644 (file)
@@ -1697,9 +1697,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
                pfk->registered |= (1<<hdr->sadb_msg_satype);
        }
 
+       mutex_lock(&pfkey_mutex);
        xfrm_probe_algs();
 
        supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
+       mutex_unlock(&pfkey_mutex);
+
        if (!supp_skb) {
                if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
                        pfk->registered &= ~(1<<hdr->sadb_msg_satype);
index da4257504fad0d504c3229dfda54511667f2f056..d398f3810662ba3bb05b3450237772763204def5 100644 (file)
@@ -1263,7 +1263,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 
                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
-               if (!can_coalesce && i >= sysctl_max_skb_frags) {
+               if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
                        tcp_mark_push(tcp_sk(ssk), skb);
                        goto alloc_skb;
                }
index 22f15ebf6045b3a9ad699d67a98f4f47f7c8568c..4b8d04640ff322744e044436ef588a37b6bc6189 100644 (file)
@@ -144,7 +144,6 @@ config NF_CONNTRACK_ZONES
 
 config NF_CONNTRACK_PROCFS
        bool "Supply CT list in procfs (OBSOLETE)"
-       default y
        depends on PROC_FS
        help
        This option enables for the list of known conntrack entries
index 9d43277b8b4fec1bb3da9993c33a3605f966ae38..a56fd0b5a430af283d02e25a365a49edfd0e4d65 100644 (file)
@@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val)
        lock_sock(sk);
        if (mode) {
                val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
-                             sysctl_wmem_max);
+                             READ_ONCE(sysctl_wmem_max));
                sk->sk_sndbuf = val * 2;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        } else {
                val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
-                             sysctl_rmem_max);
+                             READ_ONCE(sysctl_rmem_max));
                sk->sk_rcvbuf = val * 2;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
index a414274338cff03efc477e28f2f43976b9adcd7b..0d9332e9cf71a8fae7a5e7b5a0bc904863c50072 100644 (file)
@@ -34,11 +34,6 @@ MODULE_DESCRIPTION("ftp connection tracking helper");
 MODULE_ALIAS("ip_conntrack_ftp");
 MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
 
-/* This is slow, but it's simple. --RR */
-static char *ftp_buffer;
-
-static DEFINE_SPINLOCK(nf_ftp_lock);
-
 #define MAX_PORTS 8
 static u_int16_t ports[MAX_PORTS];
 static unsigned int ports_c;
@@ -398,6 +393,9 @@ static int help(struct sk_buff *skb,
                return NF_ACCEPT;
        }
 
+       if (unlikely(skb_linearize(skb)))
+               return NF_DROP;
+
        th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
        if (th == NULL)
                return NF_ACCEPT;
@@ -411,12 +409,8 @@ static int help(struct sk_buff *skb,
        }
        datalen = skb->len - dataoff;
 
-       spin_lock_bh(&nf_ftp_lock);
-       fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer);
-       if (!fb_ptr) {
-               spin_unlock_bh(&nf_ftp_lock);
-               return NF_ACCEPT;
-       }
+       spin_lock_bh(&ct->lock);
+       fb_ptr = skb->data + dataoff;
 
        ends_in_nl = (fb_ptr[datalen - 1] == '\n');
        seq = ntohl(th->seq) + datalen;
@@ -544,7 +538,7 @@ out_update_nl:
        if (ends_in_nl)
                update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
  out:
-       spin_unlock_bh(&nf_ftp_lock);
+       spin_unlock_bh(&ct->lock);
        return ret;
 }
 
@@ -571,7 +565,6 @@ static const struct nf_conntrack_expect_policy ftp_exp_policy = {
 static void __exit nf_conntrack_ftp_fini(void)
 {
        nf_conntrack_helpers_unregister(ftp, ports_c * 2);
-       kfree(ftp_buffer);
 }
 
 static int __init nf_conntrack_ftp_init(void)
@@ -580,10 +573,6 @@ static int __init nf_conntrack_ftp_init(void)
 
        NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_ftp_master));
 
-       ftp_buffer = kmalloc(65536, GFP_KERNEL);
-       if (!ftp_buffer)
-               return -ENOMEM;
-
        if (ports_c == 0)
                ports[ports_c++] = FTP_PORT;
 
@@ -603,7 +592,6 @@ static int __init nf_conntrack_ftp_init(void)
        ret = nf_conntrack_helpers_register(ftp, ports_c * 2);
        if (ret < 0) {
                pr_err("failed to register helpers\n");
-               kfree(ftp_buffer);
                return ret;
        }
 
index bb76305bb7ff942a5af66ea866316b09eba56396..5a9bce24f3c3d94e2e433145c6ef0e320554fe58 100644 (file)
@@ -34,6 +34,8 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <linux/netfilter/nf_conntrack_h323.h>
 
+#define H323_MAX_SIZE 65535
+
 /* Parameters */
 static unsigned int default_rrq_ttl __read_mostly = 300;
 module_param(default_rrq_ttl, uint, 0600);
@@ -86,6 +88,9 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
        if (tcpdatalen <= 0)    /* No TCP data */
                goto clear_out;
 
+       if (tcpdatalen > H323_MAX_SIZE)
+               tcpdatalen = H323_MAX_SIZE;
+
        if (*data == NULL) {    /* first TPKT */
                /* Get first TPKT pointer */
                tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen,
@@ -1169,6 +1174,9 @@ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
        if (dataoff >= skb->len)
                return NULL;
        *datalen = skb->len - dataoff;
+       if (*datalen > H323_MAX_SIZE)
+               *datalen = H323_MAX_SIZE;
+
        return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
 }
 
@@ -1770,7 +1778,7 @@ static int __init nf_conntrack_h323_init(void)
 
        NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_h323_master));
 
-       h323_buffer = kmalloc(65536, GFP_KERNEL);
+       h323_buffer = kmalloc(H323_MAX_SIZE + 1, GFP_KERNEL);
        if (!h323_buffer)
                return -ENOMEM;
        ret = h323_helper_init();
index 08ee4e760a3d2551ad689dfb4167555e82a99737..1796c456ac98beb96753652934759a81e6980448 100644 (file)
@@ -39,6 +39,7 @@ unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
 EXPORT_SYMBOL_GPL(nf_nat_irc_hook);
 
 #define HELPER_NAME "irc"
+#define MAX_SEARCH_SIZE        4095
 
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
@@ -121,6 +122,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
        int i, ret = NF_ACCEPT;
        char *addr_beg_p, *addr_end_p;
        typeof(nf_nat_irc_hook) nf_nat_irc;
+       unsigned int datalen;
 
        /* If packet is coming from IRC server */
        if (dir == IP_CT_DIR_REPLY)
@@ -140,8 +142,12 @@ static int help(struct sk_buff *skb, unsigned int protoff,
        if (dataoff >= skb->len)
                return NF_ACCEPT;
 
+       datalen = skb->len - dataoff;
+       if (datalen > MAX_SEARCH_SIZE)
+               datalen = MAX_SEARCH_SIZE;
+
        spin_lock_bh(&irc_buffer_lock);
-       ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
+       ib_ptr = skb_header_pointer(skb, dataoff, datalen,
                                    irc_buffer);
        if (!ib_ptr) {
                spin_unlock_bh(&irc_buffer_lock);
@@ -149,7 +155,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
        }
 
        data = ib_ptr;
-       data_limit = ib_ptr + skb->len - dataoff;
+       data_limit = ib_ptr + datalen;
 
        /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
         * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
@@ -251,7 +257,7 @@ static int __init nf_conntrack_irc_init(void)
        irc_exp_policy.max_expected = max_dcc_channels;
        irc_exp_policy.timeout = dcc_timeout;
 
-       irc_buffer = kmalloc(65536, GFP_KERNEL);
+       irc_buffer = kmalloc(MAX_SEARCH_SIZE + 1, GFP_KERNEL);
        if (!irc_buffer)
                return -ENOMEM;
 
index a63b51dceaf2cc3624e8e23862072c203e0b911f..a634c72b1ffcfd6bb7c985f8eb0650bd9d1f8b27 100644 (file)
@@ -655,6 +655,37 @@ static bool tcp_in_window(struct nf_conn *ct,
                    tn->tcp_be_liberal)
                        res = true;
                if (!res) {
+                       bool seq_ok = before(seq, sender->td_maxend + 1);
+
+                       if (!seq_ok) {
+                               u32 overshot = end - sender->td_maxend + 1;
+                               bool ack_ok;
+
+                               ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
+
+                               if (in_recv_win &&
+                                   ack_ok &&
+                                   overshot <= receiver->td_maxwin &&
+                                   before(sack, receiver->td_end + 1)) {
+                                       /* Work around TCPs that send more bytes than allowed by
+                                        * the receive window.
+                                        *
+                                        * If the (marked as invalid) packet is allowed to pass by
+                                        * the ruleset and the peer acks this data, then its possible
+                                        * all future packets will trigger 'ACK is over upper bound' check.
+                                        *
+                                        * Thus if only the sequence check fails then do update td_end so
+                                        * possible ACK for this data can update internal state.
+                                        */
+                                       sender->td_end = end;
+                                       sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
+
+                                       nf_ct_l4proto_log_invalid(skb, ct, hook_state,
+                                                                 "%u bytes more than expected", overshot);
+                                       return res;
+                               }
+                       }
+
                        nf_ct_l4proto_log_invalid(skb, ct, hook_state,
                        "%s",
                        before(seq, sender->td_maxend + 1) ?
index fcb33b1d5456dd4548c24770f7dfeb8df68d5f56..13dc421fc4f5241c9af425137ec4a535868ebaf1 100644 (file)
@@ -34,10 +34,6 @@ MODULE_AUTHOR("Michal Schmidt <mschmidt@redhat.com>");
 MODULE_DESCRIPTION("SANE connection tracking helper");
 MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
 
-static char *sane_buffer;
-
-static DEFINE_SPINLOCK(nf_sane_lock);
-
 #define MAX_PORTS 8
 static u_int16_t ports[MAX_PORTS];
 static unsigned int ports_c;
@@ -67,14 +63,16 @@ static int help(struct sk_buff *skb,
        unsigned int dataoff, datalen;
        const struct tcphdr *th;
        struct tcphdr _tcph;
-       void *sb_ptr;
        int ret = NF_ACCEPT;
        int dir = CTINFO2DIR(ctinfo);
        struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct);
        struct nf_conntrack_expect *exp;
        struct nf_conntrack_tuple *tuple;
-       struct sane_request *req;
        struct sane_reply_net_start *reply;
+       union {
+               struct sane_request req;
+               struct sane_reply_net_start repl;
+       } buf;
 
        /* Until there's been traffic both ways, don't look in packets. */
        if (ctinfo != IP_CT_ESTABLISHED &&
@@ -92,59 +90,62 @@ static int help(struct sk_buff *skb,
                return NF_ACCEPT;
 
        datalen = skb->len - dataoff;
-
-       spin_lock_bh(&nf_sane_lock);
-       sb_ptr = skb_header_pointer(skb, dataoff, datalen, sane_buffer);
-       if (!sb_ptr) {
-               spin_unlock_bh(&nf_sane_lock);
-               return NF_ACCEPT;
-       }
-
        if (dir == IP_CT_DIR_ORIGINAL) {
+               const struct sane_request *req;
+
                if (datalen != sizeof(struct sane_request))
-                       goto out;
+                       return NF_ACCEPT;
+
+               req = skb_header_pointer(skb, dataoff, datalen, &buf.req);
+               if (!req)
+                       return NF_ACCEPT;
 
-               req = sb_ptr;
                if (req->RPC_code != htonl(SANE_NET_START)) {
                        /* Not an interesting command */
-                       ct_sane_info->state = SANE_STATE_NORMAL;
-                       goto out;
+                       WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL);
+                       return NF_ACCEPT;
                }
 
                /* We're interested in the next reply */
-               ct_sane_info->state = SANE_STATE_START_REQUESTED;
-               goto out;
+               WRITE_ONCE(ct_sane_info->state, SANE_STATE_START_REQUESTED);
+               return NF_ACCEPT;
        }
 
+       /* IP_CT_DIR_REPLY */
+
        /* Is it a reply to an uninteresting command? */
-       if (ct_sane_info->state != SANE_STATE_START_REQUESTED)
-               goto out;
+       if (READ_ONCE(ct_sane_info->state) != SANE_STATE_START_REQUESTED)
+               return NF_ACCEPT;
 
        /* It's a reply to SANE_NET_START. */
-       ct_sane_info->state = SANE_STATE_NORMAL;
+       WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL);
 
        if (datalen < sizeof(struct sane_reply_net_start)) {
                pr_debug("NET_START reply too short\n");
-               goto out;
+               return NF_ACCEPT;
        }
 
-       reply = sb_ptr;
+       datalen = sizeof(struct sane_reply_net_start);
+
+       reply = skb_header_pointer(skb, dataoff, datalen, &buf.repl);
+       if (!reply)
+               return NF_ACCEPT;
+
        if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
                /* saned refused the command */
                pr_debug("unsuccessful SANE_STATUS = %u\n",
                         ntohl(reply->status));
-               goto out;
+               return NF_ACCEPT;
        }
 
        /* Invalid saned reply? Ignore it. */
        if (reply->zero != 0)
-               goto out;
+               return NF_ACCEPT;
 
        exp = nf_ct_expect_alloc(ct);
        if (exp == NULL) {
                nf_ct_helper_log(skb, ct, "cannot alloc expectation");
-               ret = NF_DROP;
-               goto out;
+               return NF_DROP;
        }
 
        tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
@@ -162,9 +163,6 @@ static int help(struct sk_buff *skb,
        }
 
        nf_ct_expect_put(exp);
-
-out:
-       spin_unlock_bh(&nf_sane_lock);
        return ret;
 }
 
@@ -178,7 +176,6 @@ static const struct nf_conntrack_expect_policy sane_exp_policy = {
 static void __exit nf_conntrack_sane_fini(void)
 {
        nf_conntrack_helpers_unregister(sane, ports_c * 2);
-       kfree(sane_buffer);
 }
 
 static int __init nf_conntrack_sane_init(void)
@@ -187,10 +184,6 @@ static int __init nf_conntrack_sane_init(void)
 
        NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sane_master));
 
-       sane_buffer = kmalloc(65536, GFP_KERNEL);
-       if (!sane_buffer)
-               return -ENOMEM;
-
        if (ports_c == 0)
                ports[ports_c++] = SANE_PORT;
 
@@ -210,7 +203,6 @@ static int __init nf_conntrack_sane_init(void)
        ret = nf_conntrack_helpers_register(sane, ports_c * 2);
        if (ret < 0) {
                pr_err("failed to register helpers\n");
-               kfree(sane_buffer);
                return ret;
        }
 
index 765ac779bfc8f5fe89b6e75b65e4e1ecd01b9272..81c26a96c30bb68203e88cb3db674335d725fe16 100644 (file)
@@ -437,12 +437,17 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
        }
 }
 
+void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
+{
+       nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+}
+
 static void nf_flow_offload_work_gc(struct work_struct *work)
 {
        struct nf_flowtable *flow_table;
 
        flow_table = container_of(work, struct nf_flowtable, gc_work.work);
-       nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+       nf_flow_table_gc_run(flow_table);
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
 }
 
@@ -600,11 +605,11 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
        mutex_unlock(&flowtable_lock);
 
        cancel_delayed_work_sync(&flow_table->gc_work);
-       nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
-       nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
        nf_flow_table_offload_flush(flow_table);
-       if (nf_flowtable_hw_offload(flow_table))
-               nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+       /* ... no more pending work after this stage ... */
+       nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+       nf_flow_table_gc_run(flow_table);
+       nf_flow_table_offload_flush_cleanup(flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
index 103b6cbf257f2eb26f2eb09f6366d117f6822bf9..b04645ced89baabda0afd23d18dbb5e8b4ed793e 100644 (file)
@@ -1074,6 +1074,14 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
        flow_offload_queue_work(offload);
 }
 
+void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable)
+{
+       if (nf_flowtable_hw_offload(flowtable)) {
+               flush_workqueue(nf_flow_offload_del_wq);
+               nf_flow_table_gc_run(flowtable);
+       }
+}
+
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
 {
        if (nf_flowtable_hw_offload(flowtable)) {
index 3cc88998b8795079dd30c5def0e6e3c34dff9b4f..2ee50e23c9b716986fde758d6d821cfded021a01 100644 (file)
@@ -32,7 +32,6 @@ static LIST_HEAD(nf_tables_objects);
 static LIST_HEAD(nf_tables_flowtables);
 static LIST_HEAD(nf_tables_destroy_list);
 static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
-static u64 table_handle;
 
 enum {
        NFT_VALIDATE_SKIP       = 0,
@@ -889,7 +888,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
 
        rcu_read_lock();
        nft_net = nft_pernet(net);
-       cb->seq = nft_net->base_seq;
+       cb->seq = READ_ONCE(nft_net->base_seq);
 
        list_for_each_entry_rcu(table, &nft_net->tables, list) {
                if (family != NFPROTO_UNSPEC && family != table->family)
@@ -1235,7 +1234,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
        INIT_LIST_HEAD(&table->flowtables);
        table->family = family;
        table->flags = flags;
-       table->handle = ++table_handle;
+       table->handle = ++nft_net->table_handle;
        if (table->flags & NFT_TABLE_F_OWNER)
                table->nlpid = NETLINK_CB(skb).portid;
 
@@ -1705,7 +1704,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
 
        rcu_read_lock();
        nft_net = nft_pernet(net);
-       cb->seq = nft_net->base_seq;
+       cb->seq = READ_ONCE(nft_net->base_seq);
 
        list_for_each_entry_rcu(table, &nft_net->tables, list) {
                if (family != NFPROTO_UNSPEC && family != table->family)
@@ -2196,9 +2195,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                              struct netlink_ext_ack *extack)
 {
        const struct nlattr * const *nla = ctx->nla;
+       struct nft_stats __percpu *stats = NULL;
        struct nft_table *table = ctx->table;
        struct nft_base_chain *basechain;
-       struct nft_stats __percpu *stats;
        struct net *net = ctx->net;
        char name[NFT_NAME_MAXLEN];
        struct nft_rule_blob *blob;
@@ -2236,7 +2235,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                                return PTR_ERR(stats);
                        }
                        rcu_assign_pointer(basechain->stats, stats);
-                       static_branch_inc(&nft_counters_enabled);
                }
 
                err = nft_basechain_init(basechain, family, &hook, flags);
@@ -2319,6 +2317,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                goto err_unregister_hook;
        }
 
+       if (stats)
+               static_branch_inc(&nft_counters_enabled);
+
        table->use++;
 
        return 0;
@@ -2574,6 +2575,9 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
        nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
 
        if (chain != NULL) {
+               if (chain->flags & NFT_CHAIN_BINDING)
+                       return -EINVAL;
+
                if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
                        NL_SET_BAD_ATTR(extack, attr);
                        return -EEXIST;
@@ -3149,7 +3153,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 
        rcu_read_lock();
        nft_net = nft_pernet(net);
-       cb->seq = nft_net->base_seq;
+       cb->seq = READ_ONCE(nft_net->base_seq);
 
        list_for_each_entry_rcu(table, &nft_net->tables, list) {
                if (family != NFPROTO_UNSPEC && family != table->family)
@@ -3907,7 +3911,7 @@ cont:
                list_for_each_entry(i, &ctx->table->sets, list) {
                        int tmp;
 
-                       if (!nft_is_active_next(ctx->net, set))
+                       if (!nft_is_active_next(ctx->net, i))
                                continue;
                        if (!sscanf(i->name, name, &tmp))
                                continue;
@@ -4133,7 +4137,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        nft_net = nft_pernet(net);
-       cb->seq = nft_net->base_seq;
+       cb->seq = READ_ONCE(nft_net->base_seq);
 
        list_for_each_entry_rcu(table, &nft_net->tables, list) {
                if (ctx->family != NFPROTO_UNSPEC &&
@@ -4451,6 +4455,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
                if (err < 0)
                        return err;
+
+               if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
+                       return -EINVAL;
+       } else if (flags & NFT_SET_CONCAT) {
+               return -EINVAL;
        }
 
        if (nla[NFTA_SET_EXPR] || nla[NFTA_SET_EXPRESSIONS])
@@ -5061,6 +5070,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        nft_net = nft_pernet(net);
+       cb->seq = READ_ONCE(nft_net->base_seq);
+
        list_for_each_entry_rcu(table, &nft_net->tables, list) {
                if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
                    dump_ctx->ctx.family != table->family)
@@ -5196,6 +5207,9 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
        if (!(set->flags & NFT_SET_INTERVAL) &&
            *flags & NFT_SET_ELEM_INTERVAL_END)
                return -EINVAL;
+       if ((*flags & (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL)) ==
+           (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL))
+               return -EINVAL;
 
        return 0;
 }
@@ -5599,7 +5613,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
 
                err = nft_expr_clone(expr, set->exprs[i]);
                if (err < 0) {
-                       nft_expr_destroy(ctx, expr);
+                       kfree(expr);
                        goto err_expr;
                }
                expr_array[i] = expr;
@@ -5842,6 +5856,24 @@ static void nft_setelem_remove(const struct net *net,
                set->ops->remove(net, set, elem);
 }
 
+static bool nft_setelem_valid_key_end(const struct nft_set *set,
+                                     struct nlattr **nla, u32 flags)
+{
+       if ((set->flags & (NFT_SET_CONCAT | NFT_SET_INTERVAL)) ==
+                         (NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
+               if (flags & NFT_SET_ELEM_INTERVAL_END)
+                       return false;
+               if (!nla[NFTA_SET_ELEM_KEY_END] &&
+                   !(flags & NFT_SET_ELEM_CATCHALL))
+                       return false;
+       } else {
+               if (nla[NFTA_SET_ELEM_KEY_END])
+                       return false;
+       }
+
+       return true;
+}
+
 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                            const struct nlattr *attr, u32 nlmsg_flags)
 {
@@ -5892,6 +5924,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        return -EINVAL;
        }
 
+       if (set->flags & NFT_SET_OBJECT) {
+               if (!nla[NFTA_SET_ELEM_OBJREF] &&
+                   !(flags & NFT_SET_ELEM_INTERVAL_END))
+                       return -EINVAL;
+       } else {
+               if (nla[NFTA_SET_ELEM_OBJREF])
+                       return -EINVAL;
+       }
+
+       if (!nft_setelem_valid_key_end(set, nla, flags))
+               return -EINVAL;
+
        if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
             (nla[NFTA_SET_ELEM_DATA] ||
              nla[NFTA_SET_ELEM_OBJREF] ||
@@ -5899,6 +5943,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
              nla[NFTA_SET_ELEM_EXPIRATION] ||
              nla[NFTA_SET_ELEM_USERDATA] ||
              nla[NFTA_SET_ELEM_EXPR] ||
+             nla[NFTA_SET_ELEM_KEY_END] ||
              nla[NFTA_SET_ELEM_EXPRESSIONS]))
                return -EINVAL;
 
@@ -6029,10 +6074,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        }
 
        if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
-               if (!(set->flags & NFT_SET_OBJECT)) {
-                       err = -EINVAL;
-                       goto err_parse_key_end;
-               }
                obj = nft_obj_lookup(ctx->net, ctx->table,
                                     nla[NFTA_SET_ELEM_OBJREF],
                                     set->objtype, genmask);
@@ -6325,6 +6366,9 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
        if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
                return -EINVAL;
 
+       if (!nft_setelem_valid_key_end(set, nla, flags))
+               return -EINVAL;
+
        nft_set_ext_prepare(&tmpl);
 
        if (flags != 0) {
@@ -6941,7 +6985,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        nft_net = nft_pernet(net);
-       cb->seq = nft_net->base_seq;
+       cb->seq = READ_ONCE(nft_net->base_seq);
 
        list_for_each_entry_rcu(table, &nft_net->tables, list) {
                if (family != NFPROTO_UNSPEC && family != table->family)
@@ -7873,7 +7917,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
 
        rcu_read_lock();
        nft_net = nft_pernet(net);
-       cb->seq = nft_net->base_seq;
+       cb->seq = READ_ONCE(nft_net->base_seq);
 
        list_for_each_entry_rcu(table, &nft_net->tables, list) {
                if (family != NFPROTO_UNSPEC && family != table->family)
@@ -8806,6 +8850,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
        struct nft_trans_elem *te;
        struct nft_chain *chain;
        struct nft_table *table;
+       unsigned int base_seq;
        LIST_HEAD(adl);
        int err;
 
@@ -8855,9 +8900,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
         * Bump generation counter, invalidate any dump in progress.
         * Cannot fail after this point.
         */
-       while (++nft_net->base_seq == 0)
+       base_seq = READ_ONCE(nft_net->base_seq);
+       while (++base_seq == 0)
                ;
 
+       WRITE_ONCE(nft_net->base_seq, base_seq);
+
        /* step 3. Start new generation, rules_gen_X now in use. */
        net->nft.gencursor = nft_gencursor_next(net);
 
@@ -9419,13 +9467,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                                break;
                        }
                }
-
-               cond_resched();
        }
 
        list_for_each_entry(set, &ctx->table->sets, list) {
-               cond_resched();
-
                if (!nft_is_active_next(ctx->net, set))
                        continue;
                if (!(set->flags & NFT_SET_MAP) ||
@@ -9667,6 +9711,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
                        return PTR_ERR(chain);
                if (nft_is_base_chain(chain))
                        return -EOPNOTSUPP;
+               if (nft_chain_is_bound(chain))
+                       return -EINVAL;
                if (desc->flags & NFT_DATA_DESC_SETELEM &&
                    chain->flags & NFT_CHAIN_BINDING)
                        return -EINVAL;
index c24b1240908fdff7424b9459e3bc6e79e92b61a0..9c44518cb70ff74c3b0754c95b5d65fa2495f88e 100644 (file)
@@ -44,6 +44,10 @@ MODULE_DESCRIPTION("Netfilter messages via netlink socket");
 
 static unsigned int nfnetlink_pernet_id __read_mostly;
 
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+static DEFINE_SPINLOCK(nfnl_grp_active_lock);
+#endif
+
 struct nfnl_net {
        struct sock *nfnl;
 };
@@ -654,6 +658,44 @@ static void nfnetlink_rcv(struct sk_buff *skb)
                netlink_rcv_skb(skb, nfnetlink_rcv_msg);
 }
 
+static void nfnetlink_bind_event(struct net *net, unsigned int group)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       int type, group_bit;
+       u8 v;
+
+       /* All NFNLGRP_CONNTRACK_* group bits fit into u8.
+        * The other groups are not relevant and can be ignored.
+        */
+       if (group >= 8)
+               return;
+
+       type = nfnl_group2type[group];
+
+       switch (type) {
+       case NFNL_SUBSYS_CTNETLINK:
+               break;
+       case NFNL_SUBSYS_CTNETLINK_EXP:
+               break;
+       default:
+               return;
+       }
+
+       group_bit = (1 << group);
+
+       spin_lock(&nfnl_grp_active_lock);
+       v = READ_ONCE(net->ct.ctnetlink_has_listener);
+       if ((v & group_bit) == 0) {
+               v |= group_bit;
+
+               /* read concurrently without nfnl_grp_active_lock held. */
+               WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
+       }
+
+       spin_unlock(&nfnl_grp_active_lock);
+#endif
+}
+
 static int nfnetlink_bind(struct net *net, int group)
 {
        const struct nfnetlink_subsystem *ss;
@@ -670,28 +712,45 @@ static int nfnetlink_bind(struct net *net, int group)
        if (!ss)
                request_module_nowait("nfnetlink-subsys-%d", type);
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-       if (type == NFNL_SUBSYS_CTNETLINK) {
-               nfnl_lock(NFNL_SUBSYS_CTNETLINK);
-               WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
-               nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
-       }
-#endif
+       nfnetlink_bind_event(net, group);
        return 0;
 }
 
 static void nfnetlink_unbind(struct net *net, int group)
 {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
+       int type, group_bit;
+
        if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
                return;
 
-       if (nfnl_group2type[group] == NFNL_SUBSYS_CTNETLINK) {
-               nfnl_lock(NFNL_SUBSYS_CTNETLINK);
-               if (!nfnetlink_has_listeners(net, group))
-                       WRITE_ONCE(net->ct.ctnetlink_has_listener, false);
-               nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+       type = nfnl_group2type[group];
+
+       switch (type) {
+       case NFNL_SUBSYS_CTNETLINK:
+               break;
+       case NFNL_SUBSYS_CTNETLINK_EXP:
+               break;
+       default:
+               return;
+       }
+
+       /* ctnetlink_has_listener is u8 */
+       if (group >= 8)
+               return;
+
+       group_bit = (1 << group);
+
+       spin_lock(&nfnl_grp_active_lock);
+       if (!nfnetlink_has_listeners(net, group)) {
+               u8 v = READ_ONCE(net->ct.ctnetlink_has_listener);
+
+               v &= ~group_bit;
+
+               /* read concurrently without nfnl_grp_active_lock held. */
+               WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
        }
+       spin_unlock(&nfnl_grp_active_lock);
 #endif
 }
 
index 0053a697c9316364cb16f3312f30c0bafe26dfb1..89342ccccdccf6a333155e9b841826ff0b4ce7c0 100644 (file)
@@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nft_data **data)
 {
-       return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
-                                                   (1 << NF_INET_PRE_ROUTING) |
-                                                   (1 << NF_INET_FORWARD));
+       unsigned int hooks;
+
+       switch (ctx->family) {
+       case NFPROTO_IPV4:
+       case NFPROTO_IPV6:
+       case NFPROTO_INET:
+               hooks = (1 << NF_INET_LOCAL_IN) |
+                       (1 << NF_INET_PRE_ROUTING) |
+                       (1 << NF_INET_FORWARD);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return nft_chain_validate_hooks(ctx->chain, hooks);
 }
 
 static bool nft_osf_reduce(struct nft_regs_track *track,
index 2e7ac007cb30fe6c02429034d102b4f2d6df9ceb..eb0e40c29712189e95c14312b43f8c6f5bff49a9 100644 (file)
@@ -740,17 +740,23 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
                                const struct nlattr * const tb[])
 {
        struct nft_payload_set *priv = nft_expr_priv(expr);
+       u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
+       int err;
 
        priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
 
        if (tb[NFTA_PAYLOAD_CSUM_TYPE])
-               priv->csum_type =
-                       ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
-       if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
-               priv->csum_offset =
-                       ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
+               csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
+       if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
+               err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
+                                         &csum_offset);
+               if (err < 0)
+                       return err;
+
+               priv->csum_offset = csum_offset;
+       }
        if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
                u32 flags;
 
@@ -761,7 +767,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
                priv->csum_flags = flags;
        }
 
-       switch (priv->csum_type) {
+       switch (csum_type) {
        case NFT_PAYLOAD_CSUM_NONE:
        case NFT_PAYLOAD_CSUM_INET:
                break;
@@ -775,6 +781,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
        default:
                return -EOPNOTSUPP;
        }
+       priv->csum_type = csum_type;
 
        return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
                                       priv->len);
@@ -833,6 +840,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
 {
        enum nft_payload_bases base;
        unsigned int offset, len;
+       int err;
 
        if (tb[NFTA_PAYLOAD_BASE] == NULL ||
            tb[NFTA_PAYLOAD_OFFSET] == NULL ||
@@ -859,8 +867,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
        if (tb[NFTA_PAYLOAD_DREG] == NULL)
                return ERR_PTR(-EINVAL);
 
-       offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
-       len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+       err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
+       if (err < 0)
+               return ERR_PTR(err);
 
        if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
            base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
index 68b2eed742df907863fe237860798a0814a2df17..62da25ad264bcfe9566b86680b8271d68864fa1c 100644 (file)
@@ -312,6 +312,13 @@ static int nft_tproxy_dump(struct sk_buff *skb,
        return 0;
 }
 
+static int nft_tproxy_validate(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr,
+                              const struct nft_data **data)
+{
+       return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
+}
+
 static struct nft_expr_type nft_tproxy_type;
 static const struct nft_expr_ops nft_tproxy_ops = {
        .type           = &nft_tproxy_type,
@@ -321,6 +328,7 @@ static const struct nft_expr_ops nft_tproxy_ops = {
        .destroy        = nft_tproxy_destroy,
        .dump           = nft_tproxy_dump,
        .reduce         = NFT_REDUCE_READONLY,
+       .validate       = nft_tproxy_validate,
 };
 
 static struct nft_expr_type nft_tproxy_type __read_mostly = {
index 5edaaded706d9cf018ad39ef64184507c99a6778..983ade4be3b39b1cb55396a0f7ca8e2e0d0c4cac 100644 (file)
@@ -161,6 +161,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
 
 static struct nft_expr_type nft_tunnel_type __read_mostly = {
        .name           = "tunnel",
+       .family         = NFPROTO_NETDEV,
        .ops            = &nft_tunnel_get_ops,
        .policy         = nft_tunnel_policy,
        .maxattr        = NFTA_TUNNEL_MAX,
index 1afca2a6c2ac15f5107ae48509e4b282ff1297c3..57010927e20a805b003d0e31d6e0b24f0207c799 100644 (file)
@@ -1174,13 +1174,17 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb)
                                                             op.policy,
                                                             op.maxattr);
                        if (err)
-                               return err;
+                               goto err_free_state;
                }
        }
 
        if (!ctx->state)
                return -ENODATA;
        return 0;
+
+err_free_state:
+       netlink_policy_dump_free(ctx->state);
+       return err;
 }
 
 static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
index 8d7c900e27f4c5e8f31ed9759bd14ca0bd023a5d..87e3de0fde8963ec8ba967fe9c6610770b9e6475 100644 (file)
@@ -144,7 +144,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
 
        err = add_policy(&state, policy, maxtype);
        if (err)
-               return err;
+               goto err_try_undo;
 
        for (policy_idx = 0;
             policy_idx < state->n_alloc && state->policies[policy_idx].policy;
@@ -164,7 +164,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
                                                 policy[type].nested_policy,
                                                 policy[type].len);
                                if (err)
-                                       return err;
+                                       goto err_try_undo;
                                break;
                        default:
                                break;
@@ -174,6 +174,16 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
 
        *pstate = state;
        return 0;
+
+err_try_undo:
+       /* Try to preserve reasonable unwind semantics - if we're starting from
+        * scratch clean up fully, otherwise record what we got and caller will.
+        */
+       if (!*pstate)
+               netlink_policy_dump_free(state);
+       else
+               *pstate = state;
+       return err;
 }
 
 static bool
index 18196e1c8c2fd2f303bb3eb3c93c8c18bb4e2fa0..9ced13c0627a7f7e6cac0c8532eeb4155b6b4e93 100644 (file)
@@ -78,11 +78,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        struct qrtr_mhi_dev *qdev;
        int rc;
 
-       /* start channels */
-       rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
-       if (rc)
-               return rc;
-
        qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
        if (!qdev)
                return -ENOMEM;
@@ -96,6 +91,13 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        if (rc)
                return rc;
 
+       /* start channels */
+       rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
+       if (rc) {
+               qrtr_endpoint_unregister(&qdev->ep);
+               return rc;
+       }
+
        dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
 
        return 0;
index 6fdedd9dbbc28ff060010b966472d6d6a2decea8..cfbf0e129cba586b481c19a64a7ff8403274a96d 100644 (file)
@@ -363,6 +363,7 @@ static int acquire_refill(struct rds_connection *conn)
 static void release_refill(struct rds_connection *conn)
 {
        clear_bit(RDS_RECV_REFILL, &conn->c_flags);
+       smp_mb__after_atomic();
 
        /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
index 11c45c8c6c1641781dd733e93e5453c2be75340b..036d92c0ad794889e65aec901c39d97fbc1bb9f2 100644 (file)
@@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused)
                }
 
                if (frametype == ROSE_CALL_REQUEST) {
-                       if (!rose_loopback_neigh->dev) {
+                       if (!rose_loopback_neigh->dev &&
+                           !rose_loopback_neigh->loopback) {
                                kfree_skb(skb);
                                continue;
                        }
index 84d0a41096450cbe8a7aa70441025f4d52c025a6..6401cdf7a62469dad7d2b5b4deb532dafa28634a 100644 (file)
@@ -285,8 +285,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        _enter("%p,%lx", rx, p->user_call_ID);
 
        limiter = rxrpc_get_call_slot(p, gfp);
-       if (!limiter)
+       if (!limiter) {
+               release_sock(&rx->sk);
                return ERR_PTR(-ERESTARTSYS);
+       }
 
        call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
        if (IS_ERR(call)) {
index 1d38e279e2efaadc848fbf666ddf3b10816a5027..3c3a626459debb3e564ef6742d61d4fdbaada6e7 100644 (file)
@@ -51,10 +51,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
                        return sock_intr_errno(*timeo);
 
                trace_rxrpc_transmit(call, rxrpc_transmit_wait);
-               mutex_unlock(&call->user_mutex);
                *timeo = schedule_timeout(*timeo);
-               if (mutex_lock_interruptible(&call->user_mutex) < 0)
-                       return sock_intr_errno(*timeo);
        }
 }
 
@@ -290,37 +287,48 @@ out:
 static int rxrpc_send_data(struct rxrpc_sock *rx,
                           struct rxrpc_call *call,
                           struct msghdr *msg, size_t len,
-                          rxrpc_notify_end_tx_t notify_end_tx)
+                          rxrpc_notify_end_tx_t notify_end_tx,
+                          bool *_dropped_lock)
 {
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        struct sock *sk = &rx->sk;
+       enum rxrpc_call_state state;
        long timeo;
-       bool more;
-       int ret, copied;
+       bool more = msg->msg_flags & MSG_MORE;
+       int ret, copied = 0;
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
        /* this should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
+reload:
+       ret = -EPIPE;
        if (sk->sk_shutdown & SEND_SHUTDOWN)
-               return -EPIPE;
-
-       more = msg->msg_flags & MSG_MORE;
-
+               goto maybe_error;
+       state = READ_ONCE(call->state);
+       ret = -ESHUTDOWN;
+       if (state >= RXRPC_CALL_COMPLETE)
+               goto maybe_error;
+       ret = -EPROTO;
+       if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
+           state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+           state != RXRPC_CALL_SERVER_SEND_REPLY)
+               goto maybe_error;
+
+       ret = -EMSGSIZE;
        if (call->tx_total_len != -1) {
-               if (len > call->tx_total_len)
-                       return -EMSGSIZE;
-               if (!more && len != call->tx_total_len)
-                       return -EMSGSIZE;
+               if (len - copied > call->tx_total_len)
+                       goto maybe_error;
+               if (!more && len - copied != call->tx_total_len)
+                       goto maybe_error;
        }
 
        skb = call->tx_pending;
        call->tx_pending = NULL;
        rxrpc_see_skb(skb, rxrpc_skb_seen);
 
-       copied = 0;
        do {
                /* Check to see if there's a ping ACK to reply to. */
                if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
@@ -331,16 +339,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
                        _debug("alloc");
 
-                       if (!rxrpc_check_tx_space(call, NULL)) {
-                               ret = -EAGAIN;
-                               if (msg->msg_flags & MSG_DONTWAIT)
-                                       goto maybe_error;
-                               ret = rxrpc_wait_for_tx_window(rx, call,
-                                                              &timeo,
-                                                              msg->msg_flags & MSG_WAITALL);
-                               if (ret < 0)
-                                       goto maybe_error;
-                       }
+                       if (!rxrpc_check_tx_space(call, NULL))
+                               goto wait_for_space;
 
                        /* Work out the maximum size of a packet.  Assume that
                         * the security header is going to be in the padded
@@ -468,6 +468,27 @@ maybe_error:
 efault:
        ret = -EFAULT;
        goto out;
+
+wait_for_space:
+       ret = -EAGAIN;
+       if (msg->msg_flags & MSG_DONTWAIT)
+               goto maybe_error;
+       mutex_unlock(&call->user_mutex);
+       *_dropped_lock = true;
+       ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
+                                      msg->msg_flags & MSG_WAITALL);
+       if (ret < 0)
+               goto maybe_error;
+       if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
+               if (mutex_lock_interruptible(&call->user_mutex) < 0) {
+                       ret = sock_intr_errno(timeo);
+                       goto maybe_error;
+               }
+       } else {
+               mutex_lock(&call->user_mutex);
+       }
+       *_dropped_lock = false;
+       goto reload;
 }
 
 /*
@@ -629,6 +650,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
        enum rxrpc_call_state state;
        struct rxrpc_call *call;
        unsigned long now, j;
+       bool dropped_lock = false;
        int ret;
 
        struct rxrpc_send_params p = {
@@ -737,21 +759,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                        ret = rxrpc_send_abort_packet(call);
        } else if (p.command != RXRPC_CMD_SEND_DATA) {
                ret = -EINVAL;
-       } else if (rxrpc_is_client_call(call) &&
-                  state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
-               /* request phase complete for this client call */
-               ret = -EPROTO;
-       } else if (rxrpc_is_service_call(call) &&
-                  state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-                  state != RXRPC_CALL_SERVER_SEND_REPLY) {
-               /* Reply phase not begun or not complete for service call. */
-               ret = -EPROTO;
        } else {
-               ret = rxrpc_send_data(rx, call, msg, len, NULL);
+               ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
        }
 
 out_put_unlock:
-       mutex_unlock(&call->user_mutex);
+       if (!dropped_lock)
+               mutex_unlock(&call->user_mutex);
 error_put:
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = %d", ret);
@@ -779,6 +793,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
                           struct msghdr *msg, size_t len,
                           rxrpc_notify_end_tx_t notify_end_tx)
 {
+       bool dropped_lock = false;
        int ret;
 
        _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
@@ -796,7 +811,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
        case RXRPC_CALL_SERVER_ACK_REQUEST:
        case RXRPC_CALL_SERVER_SEND_REPLY:
                ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
-                                     notify_end_tx);
+                                     notify_end_tx, &dropped_lock);
                break;
        case RXRPC_CALL_COMPLETE:
                read_lock_bh(&call->state_lock);
@@ -810,7 +825,8 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
                break;
        }
 
-       mutex_unlock(&call->user_mutex);
+       if (!dropped_lock)
+               mutex_unlock(&call->user_mutex);
        _leave(" = %d", ret);
        return ret;
 }
index 3f935cbbaff66c58f96063a2cdc5230ebe0f7c93..48712bc51bda7ec737ebe79a6fea0f792bf78f5a 100644 (file)
@@ -424,6 +424,11 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
                        return -EINVAL;
        }
 
+       if (!nhandle) {
+               NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
+               return -EINVAL;
+       }
+
        h1 = to_hash(nhandle);
        b = rtnl_dereference(head->table[h1]);
        if (!b) {
@@ -477,6 +482,11 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
        int err;
        bool new = true;
 
+       if (!handle) {
+               NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
+               return -EINVAL;
+       }
+
        if (opt == NULL)
                return handle ? -EINVAL : 0;
 
index d47b9689eba6a4d5828aaad6e574c62d68aa4194..99b697ad2b983a929d8a194411ff013f2824c122 100644 (file)
@@ -409,7 +409,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 
 void __qdisc_run(struct Qdisc *q)
 {
-       int quota = dev_tx_weight;
+       int quota = READ_ONCE(dev_tx_weight);
        int packets;
 
        while (qdisc_restart(q, &packets)) {
index 9b27c5e4e5ba829c2fc31a74e3b38aa2c0b006a1..7378375d3a5b6d03b5deb06d7de0efbadc947182 100644 (file)
@@ -1801,7 +1801,7 @@ int __sys_listen(int fd, int backlog)
 
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (sock) {
-               somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+               somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
                if ((unsigned int)backlog > somaxconn)
                        backlog = somaxconn;
 
index b098e707ad4155f235046b02a355fb2e043d592a..7d268a291486ba54f692cbc8dfd1a8dd79529530 100644 (file)
@@ -1902,7 +1902,7 @@ call_encode(struct rpc_task *task)
                        break;
                case -EKEYEXPIRED:
                        if (!task->tk_cred_retry) {
-                               rpc_exit(task, task->tk_status);
+                               rpc_call_rpcerror(task, task->tk_status);
                        } else {
                                task->tk_action = call_refresh;
                                task->tk_cred_retry--;
index 7330eb9a70cf82ce1084f977106532b7d363949b..c65c90ad626ad046bb1b23ef6d640fd31b0f0349 100644 (file)
@@ -291,8 +291,10 @@ static ssize_t rpc_sysfs_xprt_state_change(struct kobject *kobj,
        int offline = 0, online = 0, remove = 0;
        struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj);
 
-       if (!xprt)
-               return 0;
+       if (!xprt || !xps) {
+               count = 0;
+               goto out_put;
+       }
 
        if (!strncmp(buf, "offline", 7))
                offline = 1;
index f76119f62f1b56b3fde5cc5204df2eccb3f6d7fa..fe27241cd13fcfc7bfab366dbf9dd6c1c0366bdf 100644 (file)
@@ -2702,7 +2702,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
                        crypto_info->version != TLS_1_3_VERSION &&
                        !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
 
-               tls_strp_init(&sw_ctx_rx->strp, sk);
+               rc = tls_strp_init(&sw_ctx_rx->strp, sk);
+               if (rc)
+                       goto free_aead;
        }
 
        goto out;
index 82d14eea1b5ad06a481137867ca070a465463b9a..974eb97b77d22936b5e272115d909c11f80f0d9b 100644 (file)
@@ -168,7 +168,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
 {
        struct espintcp_ctx *ctx = espintcp_getctx(sk);
 
-       if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog)
+       if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
                return -ENOBUFS;
 
        __skb_queue_tail(&ctx->out_queue, skb);
index 144238a50f3d4e1efc99eb8e893650e60f1b3bda..b2f4ec9c537f0037e3fa5b7d8ceef7beabc9e6aa 100644 (file)
@@ -669,7 +669,6 @@ resume:
 
                x->curlft.bytes += skb->len;
                x->curlft.packets++;
-               x->curlft.use_time = ktime_get_real_seconds();
 
                spin_unlock(&x->lock);
 
@@ -783,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 
        trans = this_cpu_ptr(&xfrm_trans_tasklet);
 
-       if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+       if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
                return -ENOBUFS;
 
        BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
index 555ab35cd119a71ba5828e90c3fc297383194d6e..9a5e79a38c6797e86648178090601471c2e68584 100644 (file)
@@ -534,7 +534,6 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                x->curlft.bytes += skb->len;
                x->curlft.packets++;
-               x->curlft.use_time = ktime_get_real_seconds();
 
                spin_unlock_bh(&x->lock);
 
index f1a0bab920a5580a507932974a86f96c08113759..cc6ab79609e29f398e85e159569f9d8103066bc1 100644 (file)
@@ -3162,7 +3162,7 @@ ok:
        return dst;
 
 nopol:
-       if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
+       if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
            net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
                err = -EPERM;
                goto error;
@@ -3599,6 +3599,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                if (pols[1]) {
                        if (IS_ERR(pols[1])) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
+                               xfrm_pol_put(pols[0]);
                                return 0;
                        }
                        pols[1]->curlft.use_time = ktime_get_real_seconds();
index 52e60e607f8ad5ff446ab4cc6ccba9d14acb43c7..91c32a3b692497fb2200b3b3c54a990a90c21c2d 100644 (file)
@@ -1592,6 +1592,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
        x->replay = orig->replay;
        x->preplay = orig->preplay;
        x->mapping_maxage = orig->mapping_maxage;
+       x->lastused = orig->lastused;
        x->new_mapping = 0;
        x->new_mapping_sport = 0;
 
index f5f0d6f09053f02f8ab22c520e2cf704b365e1f9..0621c39a3955674dc9dda8e54e605203800bcaaa 100644 (file)
@@ -49,7 +49,6 @@ ifdef CONFIG_CC_IS_CLANG
 KBUILD_CFLAGS += -Wno-initializer-overrides
 KBUILD_CFLAGS += -Wno-format
 KBUILD_CFLAGS += -Wno-sign-compare
-KBUILD_CFLAGS += -Wno-format-zero-length
 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
 KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
 KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
index 692d64a70542a299b0c29295e06b07def0f773c0..e4deaf5fa571d52073dffddfb77fbb6b4db63419 100644 (file)
@@ -4,7 +4,7 @@ gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY)  += latent_entropy_plugin.so
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY)          \
                += -DLATENT_ENTROPY_PLUGIN
 ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
-    DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
+    DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable -ULATENT_ENTROPY_PLUGIN
 endif
 export DISABLE_LATENT_ENTROPY_PLUGIN
 
index f754415af398b7eb97710931c1ce5761e664f879..1337cedca096ddfdc7c9b16cd56774e7d005f89b 100755 (executable)
@@ -51,6 +51,7 @@ def run_analysis(entry):
         checks += "linuxkernel-*"
     else:
         checks += "clang-analyzer-*"
+        checks += ",-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling"
     p = subprocess.run(["clang-tidy", "-p", args.path, checks, entry["file"]],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT,
index 7db82584343559b23925cdd68f773fb7cc542f14..1db1889f6d81e2947daf65cc9afc1b08f49a86d0 100755 (executable)
@@ -59,7 +59,7 @@ fi
 if arg_contain -E "$@"; then
        # For scripts/cc-version.sh; This emulates GCC 20.0.0
        if arg_contain - "$@"; then
-               sed -n '/^GCC/{s/__GNUC__/20/; s/__GNUC_MINOR__/0/; s/__GNUC_PATCHLEVEL__/0/; p;}'
+               sed -n '/^GCC/{s/__GNUC__/20/; s/__GNUC_MINOR__/0/; s/__GNUC_PATCHLEVEL__/0/; p;}; s/__LONG_DOUBLE_128__/1/ p'
                exit 0
        else
                echo "no input files" >&2
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
deleted file mode 100755 (executable)
index 8b980fb..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Test for gcc 'asm goto' support
-# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
-
-cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null
-int main(void)
-{
-#if defined(__arm__) || defined(__aarch64__)
-       /*
-        * Not related to asm goto, but used by jump label
-        * and broken on some ARM GCC versions (see GCC Bug 48637).
-        */
-       static struct { int dummy; int state; } tp;
-       asm (".long %c0" :: "i" (&tp.state));
-#endif
-
-entry:
-       asm goto ("" :::: entry);
-       return 0;
-}
-END
index 55e32af2e53f00ffee3eefde10b3ba3e94cb92ab..2c80da0220c326efbe4fb7598054cef04ad3566e 100644 (file)
@@ -2021,13 +2021,11 @@ static void add_exported_symbols(struct buffer *buf, struct module *mod)
        /* record CRCs for exported symbols */
        buf_printf(buf, "\n");
        list_for_each_entry(sym, &mod->exported_symbols, list) {
-               if (!sym->crc_valid) {
+               if (!sym->crc_valid)
                        warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n"
                             "Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n",
                             sym->name, mod->name, mod->is_vmlinux ? "" : ".ko",
                             sym->name);
-                       continue;
-               }
 
                buf_printf(buf, "SYMBOL_CRC(%s, 0x%08x, \"%s\");\n",
                           sym->name, sym->crc, sym->is_gpl_only ? "_gpl" : "");
index 6ab5f2bbf41f9b08a22d619812c065ec2299383b..44521582dcba2464de36fa4119293df724dd1c51 100644 (file)
@@ -356,13 +356,11 @@ static long dm_verity_ioctl(struct file *filp, unsigned int cmd, unsigned long a
 {
        void __user *uarg = (void __user *)arg;
        unsigned int fd;
-       int rc;
 
        switch (cmd) {
        case LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS:
-               rc = copy_from_user(&fd, uarg, sizeof(fd));
-               if (rc)
-                       return rc;
+               if (copy_from_user(&fd, uarg, sizeof(fd)))
+                       return -EFAULT;
 
                return read_trusted_verity_root_digests(fd);
 
index b8058b341178349184a5597a6b8cf1a673576f8e..0b2f04dcb58979bacdff7012c9f1044ef67c079e 100644 (file)
@@ -111,9 +111,9 @@ static loff_t snd_info_entry_llseek(struct file *file, loff_t offset, int orig)
        entry = data->entry;
        mutex_lock(&entry->access);
        if (entry->c.ops->llseek) {
-               offset = entry->c.ops->llseek(entry,
-                                             data->file_private_data,
-                                             file, offset, orig);
+               ret = entry->c.ops->llseek(entry,
+                                          data->file_private_data,
+                                          file, offset, orig);
                goto out;
        }
 
index 129bffb431c22e7f38c4e90dc7385a2899458870..15e2a0009080ee1ea1a22d518b7f95a729c645bf 100644 (file)
@@ -1163,6 +1163,11 @@ static int cs35l41_no_acpi_dsd(struct cs35l41_hda *cs35l41, struct device *physd
                hw_cfg->gpio1.func = CS35l41_VSPK_SWITCH;
                hw_cfg->gpio1.valid = true;
        } else {
+               /*
+                * Note: CLSA010(0/1) are special cases which use a slightly different design.
+                * All other HIDs e.g. CSC3551 require valid ACPI _DSD properties to be supported.
+                */
+               dev_err(cs35l41->dev, "Error: ACPI _DSD Properties are missing for HID %s.\n", hid);
                hw_cfg->valid = false;
                hw_cfg->gpio1.valid = false;
                hw_cfg->gpio2.valid = false;
index e0d3a8be2e38b56f63727250a541e9e07888d303..b288874e401e5ce58358c7eccd9e7b26f7c8219e 100644 (file)
@@ -546,6 +546,10 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0BD6, "Dolphin", CS8409_DOLPHIN),
        SND_PCI_QUIRK(0x1028, 0x0BD7, "Dolphin", CS8409_DOLPHIN),
        SND_PCI_QUIRK(0x1028, 0x0BD8, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0C43, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0C50, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0C51, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0C52, "Dolphin", CS8409_DOLPHIN),
        {} /* terminator */
 };
 
index fd630d62b5a0a3f9869ae70e704992bf6742a34d..47e72cf76608eb5e6297e2186c1a8f1fe14d322a 100644 (file)
@@ -9283,6 +9283,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
@@ -9303,6 +9304,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
@@ -9389,6 +9391,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9490,6 +9493,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
+       SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
index ecfe7a7907901533042b410378353fdf5264fa5d..e0b24e1daef3d56a85d71070c1c778a7e704fcff 100644 (file)
@@ -143,6 +143,34 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "21CL"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21EM"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21EN"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21J5"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+               }
+       },
        {}
 };
 
index 38ab8d4291c2d0bff9b68bf7386ec147b8a883f0..5a844329800f0ccb7209af1800322d0e2c96f244 100644 (file)
@@ -1986,7 +1986,7 @@ static int rt5640_set_bias_level(struct snd_soc_component *component,
                snd_soc_component_write(component, RT5640_PWR_MIXER, 0x0000);
                if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
                        snd_soc_component_write(component, RT5640_PWR_ANLG1,
-                               0x0018);
+                               0x2818);
                else
                        snd_soc_component_write(component, RT5640_PWR_ANLG1,
                                0x0000);
@@ -2600,7 +2600,8 @@ static void rt5640_enable_hda_jack_detect(
        snd_soc_component_update_bits(component, RT5640_DUMMY1, 0x400, 0x0);
 
        snd_soc_component_update_bits(component, RT5640_PWR_ANLG1,
-               RT5640_PWR_VREF2, RT5640_PWR_VREF2);
+               RT5640_PWR_VREF2 | RT5640_PWR_MB | RT5640_PWR_BG,
+               RT5640_PWR_VREF2 | RT5640_PWR_MB | RT5640_PWR_BG);
        usleep_range(10000, 15000);
        snd_soc_component_update_bits(component, RT5640_PWR_ANLG1,
                RT5640_PWR_FV2, RT5640_PWR_FV2);
index 3cb634c2826103252ce04a00554be9794e631b82..bb653b6641466c4c974d1bc26c4fd43fe415e0cf 100644 (file)
@@ -46,34 +46,22 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
        usleep_range(1000, 2000);
 }
 
-static int tas2770_set_bias_level(struct snd_soc_component *component,
-                                enum snd_soc_bias_level level)
+static int tas2770_update_pwr_ctrl(struct tas2770_priv *tas2770)
 {
-       struct tas2770_priv *tas2770 =
-                       snd_soc_component_get_drvdata(component);
+       struct snd_soc_component *component = tas2770->component;
+       unsigned int val;
+       int ret;
 
-       switch (level) {
-       case SND_SOC_BIAS_ON:
-               snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                             TAS2770_PWR_CTRL_MASK,
-                                             TAS2770_PWR_CTRL_ACTIVE);
-               break;
-       case SND_SOC_BIAS_STANDBY:
-       case SND_SOC_BIAS_PREPARE:
-               snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                             TAS2770_PWR_CTRL_MASK,
-                                             TAS2770_PWR_CTRL_MUTE);
-               break;
-       case SND_SOC_BIAS_OFF:
-               snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                             TAS2770_PWR_CTRL_MASK,
-                                             TAS2770_PWR_CTRL_SHUTDOWN);
-               break;
+       if (tas2770->dac_powered)
+               val = tas2770->unmuted ?
+                       TAS2770_PWR_CTRL_ACTIVE : TAS2770_PWR_CTRL_MUTE;
+       else
+               val = TAS2770_PWR_CTRL_SHUTDOWN;
 
-       default:
-               dev_err(tas2770->dev, "wrong power level setting %d\n", level);
-               return -EINVAL;
-       }
+       ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
+                                           TAS2770_PWR_CTRL_MASK, val);
+       if (ret < 0)
+               return ret;
 
        return 0;
 }
@@ -114,9 +102,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
                gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
                usleep_range(1000, 2000);
        } else {
-               ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                                   TAS2770_PWR_CTRL_MASK,
-                                                   TAS2770_PWR_CTRL_ACTIVE);
+               ret = tas2770_update_pwr_ctrl(tas2770);
                if (ret < 0)
                        return ret;
        }
@@ -152,24 +138,19 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
-               ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                                   TAS2770_PWR_CTRL_MASK,
-                                                   TAS2770_PWR_CTRL_MUTE);
+               tas2770->dac_powered = 1;
+               ret = tas2770_update_pwr_ctrl(tas2770);
                break;
        case SND_SOC_DAPM_PRE_PMD:
-               ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                                   TAS2770_PWR_CTRL_MASK,
-                                                   TAS2770_PWR_CTRL_SHUTDOWN);
+               tas2770->dac_powered = 0;
+               ret = tas2770_update_pwr_ctrl(tas2770);
                break;
        default:
                dev_err(tas2770->dev, "Not supported evevt\n");
                return -EINVAL;
        }
 
-       if (ret < 0)
-               return ret;
-
-       return 0;
+       return ret;
 }
 
 static const struct snd_kcontrol_new isense_switch =
@@ -203,21 +184,11 @@ static const struct snd_soc_dapm_route tas2770_audio_map[] = {
 static int tas2770_mute(struct snd_soc_dai *dai, int mute, int direction)
 {
        struct snd_soc_component *component = dai->component;
-       int ret;
-
-       if (mute)
-               ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                                   TAS2770_PWR_CTRL_MASK,
-                                                   TAS2770_PWR_CTRL_MUTE);
-       else
-               ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-                                                   TAS2770_PWR_CTRL_MASK,
-                                                   TAS2770_PWR_CTRL_ACTIVE);
-
-       if (ret < 0)
-               return ret;
+       struct tas2770_priv *tas2770 =
+                       snd_soc_component_get_drvdata(component);
 
-       return 0;
+       tas2770->unmuted = !mute;
+       return tas2770_update_pwr_ctrl(tas2770);
 }
 
 static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
@@ -337,7 +308,7 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        struct snd_soc_component *component = dai->component;
        struct tas2770_priv *tas2770 =
                        snd_soc_component_get_drvdata(component);
-       u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
+       u8 tdm_rx_start_slot = 0, invert_fpol = 0, fpol_preinv = 0, asi_cfg_1 = 0;
        int ret;
 
        switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
@@ -349,9 +320,15 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        }
 
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_IF:
+               invert_fpol = 1;
+               fallthrough;
        case SND_SOC_DAIFMT_NB_NF:
                asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_RSING;
                break;
+       case SND_SOC_DAIFMT_IB_IF:
+               invert_fpol = 1;
+               fallthrough;
        case SND_SOC_DAIFMT_IB_NF:
                asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_FALING;
                break;
@@ -369,15 +346,19 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
                tdm_rx_start_slot = 1;
+               fpol_preinv = 0;
                break;
        case SND_SOC_DAIFMT_DSP_A:
                tdm_rx_start_slot = 0;
+               fpol_preinv = 1;
                break;
        case SND_SOC_DAIFMT_DSP_B:
                tdm_rx_start_slot = 1;
+               fpol_preinv = 1;
                break;
        case SND_SOC_DAIFMT_LEFT_J:
                tdm_rx_start_slot = 0;
+               fpol_preinv = 1;
                break;
        default:
                dev_err(tas2770->dev,
@@ -391,6 +372,14 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        if (ret < 0)
                return ret;
 
+       ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG0,
+                                           TAS2770_TDM_CFG_REG0_FPOL_MASK,
+                                           (fpol_preinv ^ invert_fpol)
+                                            ? TAS2770_TDM_CFG_REG0_FPOL_RSING
+                                            : TAS2770_TDM_CFG_REG0_FPOL_FALING);
+       if (ret < 0)
+               return ret;
+
        return 0;
 }
 
@@ -489,7 +478,7 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
                .id = 0,
                .playback = {
                        .stream_name    = "ASI1 Playback",
-                       .channels_min   = 2,
+                       .channels_min   = 1,
                        .channels_max   = 2,
                        .rates      = TAS2770_RATES,
                        .formats    = TAS2770_FORMATS,
@@ -537,7 +526,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2770 = {
        .probe                  = tas2770_codec_probe,
        .suspend                = tas2770_codec_suspend,
        .resume                 = tas2770_codec_resume,
-       .set_bias_level = tas2770_set_bias_level,
        .controls               = tas2770_snd_controls,
        .num_controls           = ARRAY_SIZE(tas2770_snd_controls),
        .dapm_widgets           = tas2770_dapm_widgets,
index d156666bcc55253bc1af762a352e3655e099eeb5..f75f40781ab136cccbe1c272f7129ddd3e4a22a3 100644 (file)
@@ -41,6 +41,9 @@
 #define TAS2770_TDM_CFG_REG0_31_44_1_48KHZ  0x6
 #define TAS2770_TDM_CFG_REG0_31_88_2_96KHZ  0x8
 #define TAS2770_TDM_CFG_REG0_31_176_4_192KHZ  0xa
+#define TAS2770_TDM_CFG_REG0_FPOL_MASK  BIT(0)
+#define TAS2770_TDM_CFG_REG0_FPOL_RSING  0
+#define TAS2770_TDM_CFG_REG0_FPOL_FALING  1
     /* TDM Configuration Reg1 */
 #define TAS2770_TDM_CFG_REG1  TAS2770_REG(0X0, 0x0B)
 #define TAS2770_TDM_CFG_REG1_MASK      GENMASK(5, 1)
@@ -135,6 +138,8 @@ struct tas2770_priv {
        struct device *dev;
        int v_sense_slot;
        int i_sense_slot;
+       bool dac_powered;
+       bool unmuted;
 };
 
 #endif /* __TAS2770__ */
index 4b74805cdd2e5aae517e8a082d8e8aebab3d8157..ffe1828a4b7ed1af7ef7ca23b9ea0157437fc4c0 100644 (file)
@@ -49,6 +49,8 @@ struct aic32x4_priv {
        struct aic32x4_setup_data *setup;
        struct device *dev;
        enum aic32x4_type type;
+
+       unsigned int fmt;
 };
 
 static int aic32x4_reset_adc(struct snd_soc_dapm_widget *w,
@@ -611,6 +613,7 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 {
        struct snd_soc_component *component = codec_dai->component;
+       struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
        u8 iface_reg_1 = 0;
        u8 iface_reg_2 = 0;
        u8 iface_reg_3 = 0;
@@ -653,6 +656,8 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
                return -EINVAL;
        }
 
+       aic32x4->fmt = fmt;
+
        snd_soc_component_update_bits(component, AIC32X4_IFACE1,
                                AIC32X4_IFACE1_DATATYPE_MASK |
                                AIC32X4_IFACE1_MASTER_MASK, iface_reg_1);
@@ -757,6 +762,10 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
                return -EINVAL;
        }
 
+       /* PCM over I2S is always 2-channel */
+       if ((aic32x4->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S)
+               channels = 2;
+
        madc = DIV_ROUND_UP((32 * adc_resource_class), aosr);
        max_dosr = (AIC32X4_MAX_DOSR_FREQ / sample_rate / dosr_increment) *
                        dosr_increment;
index f21b0cdd320630cfca48253574e701470a1b8919..8fe5917b1e2637ece466f89cc5d9e7da123f8039 100644 (file)
@@ -636,8 +636,8 @@ static ssize_t topology_name_read(struct file *file, char __user *user_buf, size
        char buf[64];
        size_t len;
 
-       len = snprintf(buf, sizeof(buf), "%s/%s\n", component->driver->topology_name_prefix,
-                      mach->tplg_filename);
+       len = scnprintf(buf, sizeof(buf), "%s/%s\n", component->driver->topology_name_prefix,
+                       mach->tplg_filename);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
index c7f33c89588e746abfd89b1fbbcff4046100e162..606cc3242a60fbfe9f9d166713a23bf9ec9060a2 100644 (file)
@@ -759,6 +759,9 @@ static int sof_es8336_remove(struct platform_device *pdev)
 }
 
 static const struct platform_device_id board_ids[] = {
+       {
+               .name = "sof-essx8336", /* default quirk == 0 */
+       },
        {
                .name = "adl_es83x6_c1_h02",
                .driver_data = (kernel_ulong_t)(SOF_ES8336_SSP_CODEC(1) |
@@ -786,5 +789,4 @@ module_platform_driver(sof_es8336_driver);
 
 MODULE_DESCRIPTION("ASoC Intel(R) SOF + ES8336 Machine driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:sof-essx8336");
 MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
index 0d0594a0e4f6cdb986127e7b77e1bb1a53617900..7ace0c0db5b154b457415ece608296cb0e63f094 100644 (file)
@@ -1017,32 +1017,36 @@ static int rz_ssi_probe(struct platform_device *pdev)
 
        ssi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
        if (IS_ERR(ssi->rstc)) {
-               rz_ssi_release_dma_channels(ssi);
-               return PTR_ERR(ssi->rstc);
+               ret = PTR_ERR(ssi->rstc);
+               goto err_reset;
        }
 
        reset_control_deassert(ssi->rstc);
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0) {
-               rz_ssi_release_dma_channels(ssi);
-               pm_runtime_disable(ssi->dev);
-               reset_control_assert(ssi->rstc);
-               return dev_err_probe(ssi->dev, ret, "pm_runtime_resume_and_get failed\n");
+               dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
+               goto err_pm;
        }
 
        ret = devm_snd_soc_register_component(&pdev->dev, &rz_ssi_soc_component,
                                              rz_ssi_soc_dai,
                                              ARRAY_SIZE(rz_ssi_soc_dai));
        if (ret < 0) {
-               rz_ssi_release_dma_channels(ssi);
-
-               pm_runtime_put(ssi->dev);
-               pm_runtime_disable(ssi->dev);
-               reset_control_assert(ssi->rstc);
                dev_err(&pdev->dev, "failed to register snd component\n");
+               goto err_snd_soc;
        }
 
+       return 0;
+
+err_snd_soc:
+       pm_runtime_put(ssi->dev);
+err_pm:
+       pm_runtime_disable(ssi->dev);
+       reset_control_assert(ssi->rstc);
+err_reset:
+       rz_ssi_release_dma_channels(ssi);
+
        return ret;
 }
 
index 5b99bf2dbd08501d128b18badac2b39c40f6f475..4f60c0a833110fc41bc0424783660645c88cfb3e 100644 (file)
@@ -1317,6 +1317,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
                if (!be->dai_link->no_pcm)
                        continue;
 
+               if (!snd_soc_dpcm_get_substream(be, stream))
+                       continue;
+
                for_each_rtd_dais(be, i, dai) {
                        w = snd_soc_dai_get_widget(dai, stream);
 
index c5d797e97c0271d6b30dcc044257b87b068048b9..d9a3ce7b69e16cf03b73adc64d93a61293aed724 100644 (file)
@@ -252,9 +252,9 @@ static int memory_info_update(struct snd_sof_dev *sdev, char *buf, size_t buff_s
        }
 
        for (i = 0, len = 0; i < reply->num_elems; i++) {
-               ret = snprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
-                              reply->elems[i].zone, reply->elems[i].id,
-                              reply->elems[i].used, reply->elems[i].free);
+               ret = scnprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
+                               reply->elems[i].zone, reply->elems[i].id,
+                               reply->elems[i].used, reply->elems[i].free);
                if (ret < 0)
                        goto error;
                len += ret;
index 8639ea63a10dbb1baceef97ab2710b27ea9b4ad5..6d4ecbe14adf31583f1bfa73742f426bb5a618ef 100644 (file)
@@ -574,7 +574,7 @@ static void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *le
        chip = get_chip_info(sdev->pdata);
        for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
                value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
-               len += snprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
+               len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
        }
 
        dev_printk(level, sdev->dev, "extended rom status: %s", msg);
index b2cc046b9f606c84c566a423d88fc9d229e2a0ec..65923e7a5976f21e0df5c99c4799d54c5a190967 100644 (file)
@@ -2338,7 +2338,7 @@ static int sof_ipc3_parse_manifest(struct snd_soc_component *scomp, int index,
        }
 
        dev_info(scomp->dev,
-                "Topology: ABI %d:%d:%d Kernel ABI %hhu:%hhu:%hhu\n",
+                "Topology: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
                 man->priv.data[0], man->priv.data[1], man->priv.data[2],
                 SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
 
index 3bb134355874c8bf12e81da517aa661dbb60ea6c..316917b9870704de245f002cb4261c46f8a2fea4 100644 (file)
@@ -75,9 +75,11 @@ struct kvm_regs {
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT      0
-#define KVM_ARM_DEVICE_TYPE_MASK       (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK       GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+                                               KVM_ARM_DEVICE_TYPE_SHIFT)
 #define KVM_ARM_DEVICE_ID_SHIFT                16
-#define KVM_ARM_DEVICE_ID_MASK         (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK         GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+                                               KVM_ARM_DEVICE_ID_SHIFT)
 
 /* Supported device IDs */
 #define KVM_ARM_DEVICE_VGIC_V2         0
index 7a6b14874d65c486242982d3bccabc4891b8fbda..a73cf01a1606671bf77a995c665f90ca7428c9ab 100644 (file)
@@ -74,6 +74,7 @@ struct kvm_s390_io_adapter_req {
 #define KVM_S390_VM_CRYPTO             2
 #define KVM_S390_VM_CPU_MODEL          3
 #define KVM_S390_VM_MIGRATION          4
+#define KVM_S390_VM_CPU_TOPOLOGY       5
 
 /* kvm attributes for mem_ctrl */
 #define KVM_S390_VM_MEM_ENABLE_CMMA    0
index 8323ac5b7eee517209093b9df6a9ea7d73870300..235dc85c91c3e372980b8b428e9713869115ead4 100644 (file)
 #define X86_FEATURE_IBRS               ( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB               ( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP              ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
+#define X86_FEATURE_ZEN                        (7*32+28) /* "" CPU based on Zen microarchitecture */
 #define X86_FEATURE_L1TF_PTEINV                ( 7*32+29) /* "" L1TF workaround PTE inversion */
 #define X86_FEATURE_IBRS_ENHANCED      ( 7*32+30) /* Enhanced IBRS */
 #define X86_FEATURE_MSR_IA32_FEAT_CTL  ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
 #define X86_FEATURE_RETHUNK            (11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET              (11*32+15) /* "" AMD BTB untrain return */
 #define X86_FEATURE_USE_IBPB_FW                (11*32+16) /* "" Use IBPB during runtime firmware calls */
-#define X86_FEATURE_RSB_VMEXIT_LITE    (11*32+17) /* "" Fill RSB on VM-Exit when EIBRS is enabled */
+#define X86_FEATURE_RSB_VMEXIT_LITE    (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVIC               (15*32+13) /* Virtual Interrupt Controller */
 #define X86_FEATURE_V_VMSAVE_VMLOAD    (15*32+15) /* Virtual VMSAVE VMLOAD */
 #define X86_FEATURE_VGIF               (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_X2AVIC             (15*32+18) /* Virtual x2apic */
 #define X86_FEATURE_V_SPEC_CTRL                (15*32+20) /* Virtual SPEC_CTRL */
 #define X86_FEATURE_SVME_ADDR_CHK      (15*32+28) /* "" SVME addr check */
 
 #define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA                X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
 #define X86_BUG_RETBLEED               X86_BUG(26) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB            X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index e057e039173cb627baeb133052b4a4991b44e382..6674bdb096f346d940e353e3c4df7491fd5e0779 100644 (file)
 #define PERF_CAP_PT_IDX                        16
 
 #define MSR_PEBS_LD_LAT_THRESHOLD      0x000003f6
+#define PERF_CAP_PEBS_TRAP             BIT_ULL(6)
+#define PERF_CAP_ARCH_REG              BIT_ULL(7)
+#define PERF_CAP_PEBS_FORMAT           0xf00
+#define PERF_CAP_PEBS_BASELINE         BIT_ULL(14)
+#define PERF_CAP_PEBS_MASK     (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
+                                PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
 
 #define MSR_IA32_RTIT_CTL              0x00000570
 #define RTIT_CTL_TRACEEN               BIT(0)
 #define MSR_TURBO_ACTIVATION_RATIO     0x0000064C
 
 #define MSR_PLATFORM_ENERGY_STATUS     0x0000064D
+#define MSR_SECONDARY_TURBO_RATIO_LIMIT        0x00000650
 
 #define MSR_PKG_WEIGHTED_CORE_C0_RES   0x00000658
 #define MSR_PKG_ANY_CORE_C0_RES                0x00000659
 #define MSR_IA32_VMX_TRUE_EXIT_CTLS      0x0000048f
 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x00000490
 #define MSR_IA32_VMX_VMFUNC             0x00000491
+#define MSR_IA32_VMX_PROCBASED_CTLS3   0x00000492
 
 /* VMX_BASIC bits and bitmasks */
 #define VMX_BASIC_VMCS_SIZE_SHIFT      32
index fee7983a90b4f4ced4632e1a48a0164605bc7334..11ff975242cac7cff4dfaab3a4591dc4cb82eb1d 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef _TOOLS_LINUX_ASM_X86_RMWcc
 #define _TOOLS_LINUX_ASM_X86_RMWcc
 
-#ifdef CONFIG_CC_HAS_ASM_GOTO
-
 #define __GEN_RMWcc(fullop, var, cc, ...)                              \
 do {                                                                   \
        asm_volatile_goto (fullop "; j" cc " %l[cc_label]"              \
@@ -20,23 +18,4 @@ cc_label:                                                            \
 #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
        __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
 
-#else /* !CONFIG_CC_HAS_ASM_GOTO */
-
-#define __GEN_RMWcc(fullop, var, cc, ...)                              \
-do {                                                                   \
-       char c;                                                         \
-       asm volatile (fullop "; set" cc " %1"                           \
-                       : "+m" (var), "=qm" (c)                         \
-                       : __VA_ARGS__ : "memory");                      \
-       return c != 0;                                                  \
-} while (0)
-
-#define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
-       __GEN_RMWcc(op " " arg0, var, cc)
-
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
-       __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
-
-#endif /* CONFIG_CC_HAS_ASM_GOTO */
-
 #endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
index ec53c9fa1da967d628882c4c0817fc3cf88900e1..46de10a809ecbd81aa30d28afd78ebcc266e97a5 100644 (file)
@@ -306,7 +306,8 @@ struct kvm_pit_state {
        struct kvm_pit_channel_state channels[3];
 };
 
-#define KVM_PIT_FLAGS_HPET_LEGACY  0x00000001
+#define KVM_PIT_FLAGS_HPET_LEGACY     0x00000001
+#define KVM_PIT_FLAGS_SPEAKER_DATA_ON 0x00000002
 
 struct kvm_pit_state2 {
        struct kvm_pit_channel_state channels[3];
@@ -325,6 +326,7 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_SHADOW     0x00000004
 #define KVM_VCPUEVENT_VALID_SMM                0x00000008
 #define KVM_VCPUEVENT_VALID_PAYLOAD    0x00000010
+#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT       0x00000020
 
 /* Interrupt shadow states */
 #define KVM_X86_SHADOW_INT_MOV_SS      0x01
@@ -359,7 +361,10 @@ struct kvm_vcpu_events {
                __u8 smm_inside_nmi;
                __u8 latched_init;
        } smi;
-       __u8 reserved[27];
+       struct {
+               __u8 pending;
+       } triple_fault;
+       __u8 reserved[26];
        __u8 exception_has_payload;
        __u64 exception_payload;
 };
@@ -434,6 +439,7 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_OUT_7E_INC_RIP           (1 << 3)
 #define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT     (1 << 4)
 #define KVM_X86_QUIRK_FIX_HYPERCALL_INSN       (1 << 5)
+#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS    (1 << 6)
 
 #define KVM_STATE_NESTED_FORMAT_VMX    0
 #define KVM_STATE_NESTED_FORMAT_SVM    1
index 946d761adbd3df33ed49c2589cb4042d3709d617..a5faf6d88f1bf614a997e120bb364d8c695ab94a 100644 (file)
@@ -91,6 +91,7 @@
 #define EXIT_REASON_UMWAIT              67
 #define EXIT_REASON_TPAUSE              68
 #define EXIT_REASON_BUS_LOCK            74
+#define EXIT_REASON_NOTIFY              75
 
 #define VMX_EXIT_REASONS \
        { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
        { EXIT_REASON_XRSTORS,               "XRSTORS" }, \
        { EXIT_REASON_UMWAIT,                "UMWAIT" }, \
        { EXIT_REASON_TPAUSE,                "TPAUSE" }, \
-       { EXIT_REASON_BUS_LOCK,              "BUS_LOCK" }
+       { EXIT_REASON_BUS_LOCK,              "BUS_LOCK" }, \
+       { EXIT_REASON_NOTIFY,                "NOTIFY" }
 
 #define VMX_EXIT_REASON_FLAGS \
        { VMX_EXIT_REASONS_FAILED_VMENTRY,      "FAILED_VMENTRY" }
index 24ae3054f304f27432cf9cdc14503a86d480b9dc..1bdd834bdd57198059c91222036314403191cdbc 100644 (file)
@@ -36,4 +36,8 @@
 #include <linux/compiler-gcc.h>
 #endif
 
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
 #endif /* __LINUX_COMPILER_TYPES_H */
index b28ff5d881457af531096d8504ae007cfd6a78b3..520ad2691a99d166ffaff16c819b01bb1bda11f8 100644 (file)
@@ -751,14 +751,27 @@ typedef struct drm_i915_irq_wait {
 
 /* Must be kept compact -- no holes and well documented */
 
-typedef struct drm_i915_getparam {
+/**
+ * struct drm_i915_getparam - Driver parameter query structure.
+ */
+struct drm_i915_getparam {
+       /** @param: Driver parameter to query. */
        __s32 param;
-       /*
+
+       /**
+        * @value: Address of memory where queried value should be put.
+        *
         * WARNING: Using pointers instead of fixed-size u64 means we need to write
         * compat32 code. Don't repeat this mistake.
         */
        int __user *value;
-} drm_i915_getparam_t;
+};
+
+/**
+ * typedef drm_i915_getparam_t - Driver parameter query structure.
+ * See struct drm_i915_getparam.
+ */
+typedef struct drm_i915_getparam drm_i915_getparam_t;
 
 /* Ioctl to set kernel params:
  */
@@ -1239,76 +1252,119 @@ struct drm_i915_gem_exec_object2 {
        __u64 rsvd2;
 };
 
+/**
+ * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
+ * ioctl.
+ *
+ * The request will wait for input fence to signal before submission.
+ *
+ * The returned output fence will be signaled after the completion of the
+ * request.
+ */
 struct drm_i915_gem_exec_fence {
-       /**
-        * User's handle for a drm_syncobj to wait on or signal.
-        */
+       /** @handle: User's handle for a drm_syncobj to wait on or signal. */
        __u32 handle;
 
+       /**
+        * @flags: Supported flags are:
+        *
+        * I915_EXEC_FENCE_WAIT:
+        * Wait for the input fence before request submission.
+        *
+        * I915_EXEC_FENCE_SIGNAL:
+        * Return request completion fence as output
+        */
+       __u32 flags;
 #define I915_EXEC_FENCE_WAIT            (1<<0)
 #define I915_EXEC_FENCE_SIGNAL          (1<<1)
 #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
-       __u32 flags;
 };
 
-/*
- * See drm_i915_gem_execbuffer_ext_timeline_fences.
- */
-#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
-
-/*
+/**
+ * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
+ * for execbuf ioctl.
+ *
  * This structure describes an array of drm_syncobj and associated points for
  * timeline variants of drm_syncobj. It is invalid to append this structure to
  * the execbuf if I915_EXEC_FENCE_ARRAY is set.
  */
 struct drm_i915_gem_execbuffer_ext_timeline_fences {
+#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
+       /** @base: Extension link. See struct i915_user_extension. */
        struct i915_user_extension base;
 
        /**
-        * Number of element in the handles_ptr & value_ptr arrays.
+        * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+        * arrays.
         */
        __u64 fence_count;
 
        /**
-        * Pointer to an array of struct drm_i915_gem_exec_fence of length
-        * fence_count.
+        * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
+        * of length @fence_count.
         */
        __u64 handles_ptr;
 
        /**
-        * Pointer to an array of u64 values of length fence_count. Values
-        * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline
-        * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
+        * @values_ptr: Pointer to an array of u64 values of length
+        * @fence_count.
+        * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
+        * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+        * binary one.
         */
        __u64 values_ptr;
 };
 
+/**
+ * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
+ * ioctl.
+ */
 struct drm_i915_gem_execbuffer2 {
-       /**
-        * List of gem_exec_object2 structs
-        */
+       /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
        __u64 buffers_ptr;
+
+       /** @buffer_count: Number of elements in @buffers_ptr array */
        __u32 buffer_count;
 
-       /** Offset in the batchbuffer to start execution from. */
+       /**
+        * @batch_start_offset: Offset in the batchbuffer to start execution
+        * from.
+        */
        __u32 batch_start_offset;
-       /** Bytes used in batchbuffer from batch_start_offset */
+
+       /**
+        * @batch_len: Length in bytes of the batch buffer, starting from the
+        * @batch_start_offset. If 0, length is assumed to be the batch buffer
+        * object size.
+        */
        __u32 batch_len;
+
+       /** @DR1: deprecated */
        __u32 DR1;
+
+       /** @DR4: deprecated */
        __u32 DR4;
+
+       /** @num_cliprects: See @cliprects_ptr */
        __u32 num_cliprects;
+
        /**
-        * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
-        * & I915_EXEC_USE_EXTENSIONS are not set.
+        * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
+        *
+        * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
+        * I915_EXEC_USE_EXTENSIONS flags are not set.
         *
         * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
-        * of struct drm_i915_gem_exec_fence and num_cliprects is the length
-        * of the array.
+        * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
+        * array.
         *
         * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
-        * single struct i915_user_extension and num_cliprects is 0.
+        * single &i915_user_extension and num_cliprects is 0.
         */
        __u64 cliprects_ptr;
+
+       /** @flags: Execbuf flags */
+       __u64 flags;
 #define I915_EXEC_RING_MASK              (0x3f)
 #define I915_EXEC_DEFAULT                (0<<0)
 #define I915_EXEC_RENDER                 (1<<0)
@@ -1326,10 +1382,6 @@ struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
 #define I915_EXEC_CONSTANTS_ABSOLUTE   (1<<6)
 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
-       __u64 flags;
-       __u64 rsvd1; /* now used for context info */
-       __u64 rsvd2;
-};
 
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET       (1<<8)
@@ -1432,9 +1484,23 @@ struct drm_i915_gem_execbuffer2 {
  * drm_i915_gem_execbuffer_ext enum.
  */
 #define I915_EXEC_USE_EXTENSIONS       (1 << 21)
-
 #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
 
+       /** @rsvd1: Context id */
+       __u64 rsvd1;
+
+       /**
+        * @rsvd2: in and out sync_file file descriptors.
+        *
+        * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
+        * lower 32 bits of this field will have the in sync_file fd (input).
+        *
+        * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
+        * field will have the out sync_file fd (output).
+        */
+       __u64 rsvd2;
+};
+
 #define I915_EXEC_CONTEXT_ID_MASK      (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
        (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
@@ -1814,19 +1880,58 @@ struct drm_i915_gem_context_create {
        __u32 pad;
 };
 
+/**
+ * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
+ */
 struct drm_i915_gem_context_create_ext {
-       __u32 ctx_id; /* output: id of new context*/
+       /** @ctx_id: Id of the created context (output) */
+       __u32 ctx_id;
+
+       /**
+        * @flags: Supported flags are:
+        *
+        * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
+        *
+        * Extensions may be appended to this structure and driver must check
+        * for those. See @extensions.
+        *
+        * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
+        *
+        * Created context will have single timeline.
+        */
        __u32 flags;
 #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS       (1u << 0)
 #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE      (1u << 1)
 #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
        (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+
+       /**
+        * @extensions: Zero-terminated chain of extensions.
+        *
+        * I915_CONTEXT_CREATE_EXT_SETPARAM:
+        * Context parameter to set or query during context creation.
+        * See struct drm_i915_gem_context_create_ext_setparam.
+        *
+        * I915_CONTEXT_CREATE_EXT_CLONE:
+        * This extension has been removed. On the off chance someone somewhere
+        * has attempted to use it, never re-use this extension number.
+        */
        __u64 extensions;
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
 };
 
+/**
+ * struct drm_i915_gem_context_param - Context parameter to set or query.
+ */
 struct drm_i915_gem_context_param {
+       /** @ctx_id: Context id */
        __u32 ctx_id;
+
+       /** @size: Size of the parameter @value */
        __u32 size;
+
+       /** @param: Parameter to set or query */
        __u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD  0x1
 /* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
@@ -1973,6 +2078,7 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_PROTECTED_CONTENT    0xd
 /* Must be kept compact -- no holes and well documented */
 
+       /** @value: Context parameter value to be set or queried */
        __u64 value;
 };
 
@@ -2371,23 +2477,29 @@ struct i915_context_param_engines {
        struct i915_engine_class_instance engines[N__]; \
 } __attribute__((packed)) name__
 
+/**
+ * struct drm_i915_gem_context_create_ext_setparam - Context parameter
+ * to set or query during context creation.
+ */
 struct drm_i915_gem_context_create_ext_setparam {
-#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+       /** @base: Extension link. See struct i915_user_extension. */
        struct i915_user_extension base;
+
+       /**
+        * @param: Context parameter to set or query.
+        * See struct drm_i915_gem_context_param.
+        */
        struct drm_i915_gem_context_param param;
 };
 
-/* This API has been removed.  On the off chance someone somewhere has
- * attempted to use it, never re-use this extension number.
- */
-#define I915_CONTEXT_CREATE_EXT_CLONE 1
-
 struct drm_i915_gem_context_destroy {
        __u32 ctx_id;
        __u32 pad;
 };
 
-/*
+/**
+ * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
+ *
  * DRM_I915_GEM_VM_CREATE -
  *
  * Create a new virtual memory address space (ppGTT) for use within a context
@@ -2397,20 +2509,23 @@ struct drm_i915_gem_context_destroy {
  * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
  * returned in the outparam @id.
  *
- * No flags are defined, with all bits reserved and must be zero.
- *
  * An extension chain may be provided, starting with @extensions, and terminated
  * by the @next_extension being 0. Currently, no extensions are defined.
  *
  * DRM_I915_GEM_VM_DESTROY -
  *
- * Destroys a previously created VM id, specified in @id.
+ * Destroys a previously created VM id, specified in @vm_id.
  *
  * No extensions or flags are allowed currently, and so must be zero.
  */
 struct drm_i915_gem_vm_control {
+       /** @extensions: Zero-terminated chain of extensions. */
        __u64 extensions;
+
+       /** @flags: reserved for future usage, currently MBZ */
        __u32 flags;
+
+       /** @vm_id: Id of the VM created or to be destroyed */
        __u32 vm_id;
 };
 
@@ -3207,36 +3322,6 @@ struct drm_i915_gem_memory_class_instance {
  * struct drm_i915_memory_region_info - Describes one region as known to the
  * driver.
  *
- * Note that we reserve some stuff here for potential future work. As an example
- * we might want expose the capabilities for a given region, which could include
- * things like if the region is CPU mappable/accessible, what are the supported
- * mapping types etc.
- *
- * Note that to extend struct drm_i915_memory_region_info and struct
- * drm_i915_query_memory_regions in the future the plan is to do the following:
- *
- * .. code-block:: C
- *
- *     struct drm_i915_memory_region_info {
- *             struct drm_i915_gem_memory_class_instance region;
- *             union {
- *                     __u32 rsvd0;
- *                     __u32 new_thing1;
- *             };
- *             ...
- *             union {
- *                     __u64 rsvd1[8];
- *                     struct {
- *                             __u64 new_thing2;
- *                             __u64 new_thing3;
- *                             ...
- *                     };
- *             };
- *     };
- *
- * With this things should remain source compatible between versions for
- * userspace, even as we add new fields.
- *
  * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
  * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
  * at &drm_i915_query_item.query_id.
@@ -3248,14 +3333,81 @@ struct drm_i915_memory_region_info {
        /** @rsvd0: MBZ */
        __u32 rsvd0;
 
-       /** @probed_size: Memory probed by the driver (-1 = unknown) */
+       /**
+        * @probed_size: Memory probed by the driver
+        *
+        * Note that it should not be possible to ever encounter a zero value
+        * here; also note that no current region type will ever return -1 here.
+        * Although for future region types, this might be a possibility. The
+        * same applies to the other size fields.
+        */
        __u64 probed_size;
 
-       /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
+       /**
+        * @unallocated_size: Estimate of memory remaining
+        *
+        * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
+        * Without this (or if this is an older kernel) the value here will
+        * always equal the @probed_size. Note this is only currently tracked
+        * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
+        * will always equal the @probed_size).
+        */
        __u64 unallocated_size;
 
-       /** @rsvd1: MBZ */
-       __u64 rsvd1[8];
+       union {
+               /** @rsvd1: MBZ */
+               __u64 rsvd1[8];
+               struct {
+                       /**
+                        * @probed_cpu_visible_size: Memory probed by the driver
+                        * that is CPU accessible.
+                        *
+                        * This will be always be <= @probed_size, and the
+                        * remainder (if there is any) will not be CPU
+                        * accessible.
+                        *
+                        * On systems without small BAR, the @probed_size will
+                        * always equal the @probed_cpu_visible_size, since all
+                        * of it will be CPU accessible.
+                        *
+                        * Note this is only tracked for
+                        * I915_MEMORY_CLASS_DEVICE regions (for other types the
+                        * value here will always equal the @probed_size).
+                        *
+                        * Note that if the value returned here is zero, then
+                        * this must be an old kernel which lacks the relevant
+                        * small-bar uAPI support (including
+                        * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
+                        * such systems we should never actually end up with a
+                        * small BAR configuration, assuming we are able to load
+                        * the kernel module. Hence it should be safe to treat
+                        * this the same as when @probed_cpu_visible_size ==
+                        * @probed_size.
+                        */
+                       __u64 probed_cpu_visible_size;
+
+                       /**
+                        * @unallocated_cpu_visible_size: Estimate of CPU
+                        * visible memory remaining.
+                        *
+                        * Note this is only tracked for
+                        * I915_MEMORY_CLASS_DEVICE regions (for other types the
+                        * value here will always equal the
+                        * @probed_cpu_visible_size).
+                        *
+                        * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+                        * accounting.  Without this the value here will always
+                        * equal the @probed_cpu_visible_size. Note this is only
+                        * currently tracked for I915_MEMORY_CLASS_DEVICE
+                        * regions (for other types the value here will also
+                        * always equal the @probed_cpu_visible_size).
+                        *
+                        * If this is an older kernel the value here will be
+                        * zero, see also @probed_cpu_visible_size.
+                        */
+                       __u64 unallocated_cpu_visible_size;
+               };
+       };
 };
 
 /**
@@ -3329,11 +3481,11 @@ struct drm_i915_query_memory_regions {
  * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
  * extension support using struct i915_user_extension.
  *
- * Note that in the future we want to have our buffer flags here, at least for
- * the stuff that is immutable. Previously we would have two ioctls, one to
- * create the object with gem_create, and another to apply various parameters,
- * however this creates some ambiguity for the params which are considered
- * immutable. Also in general we're phasing out the various SET/GET ioctls.
+ * Note that new buffer flags should be added here, at least for the stuff that
+ * is immutable. Previously we would have two ioctls, one to create the object
+ * with gem_create, and another to apply various parameters, however this
+ * creates some ambiguity for the params which are considered immutable. Also in
+ * general we're phasing out the various SET/GET ioctls.
  */
 struct drm_i915_gem_create_ext {
        /**
@@ -3341,7 +3493,6 @@ struct drm_i915_gem_create_ext {
         *
         * The (page-aligned) allocated size for the object will be returned.
         *
-        *
         * DG2 64K min page size implications:
         *
         * On discrete platforms, starting from DG2, we have to contend with GTT
@@ -3353,7 +3504,9 @@ struct drm_i915_gem_create_ext {
         *
         * Note that the returned size here will always reflect any required
         * rounding up done by the kernel, i.e 4K will now become 64K on devices
-        * such as DG2.
+        * such as DG2. The kernel will always select the largest minimum
+        * page-size for the set of possible placements as the value to use when
+        * rounding up the @size.
         *
         * Special DG2 GTT address alignment requirement:
         *
@@ -3377,14 +3530,58 @@ struct drm_i915_gem_create_ext {
         * is deemed to be a good compromise.
         */
        __u64 size;
+
        /**
         * @handle: Returned handle for the object.
         *
         * Object handles are nonzero.
         */
        __u32 handle;
-       /** @flags: MBZ */
+
+       /**
+        * @flags: Optional flags.
+        *
+        * Supported values:
+        *
+        * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
+        * the object will need to be accessed via the CPU.
+        *
+        * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
+        * strictly required on configurations where some subset of the device
+        * memory is directly visible/mappable through the CPU (which we also
+        * call small BAR), like on some DG2+ systems. Note that this is quite
+        * undesirable, but due to various factors like the client CPU, BIOS etc
+        * it's something we can expect to see in the wild. See
+        * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
+        * determine if this system applies.
+        *
+        * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
+        * ensure the kernel can always spill the allocation to system memory,
+        * if the object can't be allocated in the mappable part of
+        * I915_MEMORY_CLASS_DEVICE.
+        *
+        * Also note that since the kernel only supports flat-CCS on objects
+        * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
+        * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
+        * flat-CCS.
+        *
+        * Without this hint, the kernel will assume that non-mappable
+        * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
+        * kernel can still migrate the object to the mappable part, as a last
+        * resort, if userspace ever CPU faults this object, but this might be
+        * expensive, and so ideally should be avoided.
+        *
+        * On older kernels which lack the relevant small-bar uAPI support (see
+        * also &drm_i915_memory_region_info.probed_cpu_visible_size),
+        * usage of the flag will result in an error, but it should NEVER be
+        * possible to end up with a small BAR configuration, assuming we can
+        * also successfully load the i915 kernel module. In such cases the
+        * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
+        * such there are zero restrictions on where the object can be placed.
+        */
+#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
        __u32 flags;
+
        /**
         * @extensions: The chain of extensions to apply to this object.
         *
@@ -3443,6 +3640,22 @@ struct drm_i915_gem_create_ext {
  * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
  * along with the final object size in &drm_i915_gem_create_ext.size, which
  * should account for any rounding up, if required.
+ *
+ * Note that userspace has no means of knowing the current backing region
+ * for objects where @num_regions is larger than one. The kernel will only
+ * ensure that the priority order of the @regions array is honoured, either
+ * when initially placing the object, or when moving memory around due to
+ * memory pressure.
+ *
+ * On Flat-CCS capable HW, compression is supported for the objects residing
+ * in I915_MEMORY_CLASS_DEVICE. When such objects (compressed) have other
+ * memory class in @regions and migrated (by i915, due to memory
+ * constraints) to the non I915_MEMORY_CLASS_DEVICE region, then i915 needs to
+ * decompress the content. But i915 doesn't have the required information to
+ * decompress the userspace compressed objects.
+ *
+ * So i915 supports Flat-CCS, on the objects which can reside only on
+ * I915_MEMORY_CLASS_DEVICE regions.
  */
 struct drm_i915_gem_create_ext_memory_regions {
        /** @base: Extension link. See struct i915_user_extension. */
index 9f4428be3e36266808bf0185df775bb49e963771..a756b29afcc23749f4102902a22d445d6fad9628 100644 (file)
@@ -27,7 +27,8 @@
 #define FSCRYPT_MODE_AES_128_CBC               5
 #define FSCRYPT_MODE_AES_128_CTS               6
 #define FSCRYPT_MODE_ADIANTUM                  9
-/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */
+#define FSCRYPT_MODE_AES_256_HCTR2             10
+/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */
 
 /*
  * Legacy policy version; ad-hoc KDF and no key verification.
index cb6e3846d27b9a1408ef851352a7ebd4e5736b7e..eed0315a77a6db675d639e6ed9ef26b5f9081280 100644 (file)
@@ -270,6 +270,8 @@ struct kvm_xen_exit {
 #define KVM_EXIT_X86_BUS_LOCK     33
 #define KVM_EXIT_XEN              34
 #define KVM_EXIT_RISCV_SBI        35
+#define KVM_EXIT_RISCV_CSR        36
+#define KVM_EXIT_NOTIFY           37
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -496,6 +498,18 @@ struct kvm_run {
                        unsigned long args[6];
                        unsigned long ret[2];
                } riscv_sbi;
+               /* KVM_EXIT_RISCV_CSR */
+               struct {
+                       unsigned long csr_num;
+                       unsigned long new_value;
+                       unsigned long write_mask;
+                       unsigned long ret_value;
+               } riscv_csr;
+               /* KVM_EXIT_NOTIFY */
+               struct {
+#define KVM_NOTIFY_CONTEXT_INVALID     (1 << 0)
+                       __u32 flags;
+               } notify;
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -1157,6 +1171,12 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_VM_TSC_CONTROL 214
 #define KVM_CAP_SYSTEM_EVENT_DATA 215
 #define KVM_CAP_ARM_SYSTEM_SUSPEND 216
+#define KVM_CAP_S390_PROTECTED_DUMP 217
+#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218
+#define KVM_CAP_X86_NOTIFY_VMEXIT 219
+#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
+#define KVM_CAP_S390_ZPCI_OP 221
+#define KVM_CAP_S390_CPU_TOPOLOGY 222
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1660,6 +1680,55 @@ struct kvm_s390_pv_unp {
        __u64 tweak;
 };
 
+enum pv_cmd_dmp_id {
+       KVM_PV_DUMP_INIT,
+       KVM_PV_DUMP_CONFIG_STOR_STATE,
+       KVM_PV_DUMP_COMPLETE,
+       KVM_PV_DUMP_CPU,
+};
+
+struct kvm_s390_pv_dmp {
+       __u64 subcmd;
+       __u64 buff_addr;
+       __u64 buff_len;
+       __u64 gaddr;            /* For dump storage state */
+       __u64 reserved[4];
+};
+
+enum pv_cmd_info_id {
+       KVM_PV_INFO_VM,
+       KVM_PV_INFO_DUMP,
+};
+
+struct kvm_s390_pv_info_dump {
+       __u64 dump_cpu_buffer_len;
+       __u64 dump_config_mem_buffer_per_1m;
+       __u64 dump_config_finalize_len;
+};
+
+struct kvm_s390_pv_info_vm {
+       __u64 inst_calls_list[4];
+       __u64 max_cpus;
+       __u64 max_guests;
+       __u64 max_guest_addr;
+       __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+       __u32 id;
+       __u32 len_max;
+       __u32 len_written;
+       __u32 reserved;
+};
+
+struct kvm_s390_pv_info {
+       struct kvm_s390_pv_info_header header;
+       union {
+               struct kvm_s390_pv_info_dump dump;
+               struct kvm_s390_pv_info_vm vm;
+       };
+};
+
 enum pv_cmd_id {
        KVM_PV_ENABLE,
        KVM_PV_DISABLE,
@@ -1668,6 +1737,8 @@ enum pv_cmd_id {
        KVM_PV_VERIFY,
        KVM_PV_PREP_RESET,
        KVM_PV_UNSHARE_ALL,
+       KVM_PV_INFO,
+       KVM_PV_DUMP,
 };
 
 struct kvm_pv_cmd {
@@ -2119,4 +2190,41 @@ struct kvm_stats_desc {
 /* Available with KVM_CAP_XSAVE2 */
 #define KVM_GET_XSAVE2           _IOR(KVMIO,  0xcf, struct kvm_xsave)
 
+/* Available with KVM_CAP_S390_PROTECTED_DUMP */
+#define KVM_S390_PV_CPU_COMMAND        _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
+
+/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */
+#define KVM_X86_NOTIFY_VMEXIT_ENABLED          (1ULL << 0)
+#define KVM_X86_NOTIFY_VMEXIT_USER             (1ULL << 1)
+
+/* Available with KVM_CAP_S390_ZPCI_OP */
+#define KVM_S390_ZPCI_OP         _IOW(KVMIO,  0xd1, struct kvm_s390_zpci_op)
+
+struct kvm_s390_zpci_op {
+       /* in */
+       __u32 fh;               /* target device */
+       __u8  op;               /* operation to perform */
+       __u8  pad[3];
+       union {
+               /* for KVM_S390_ZPCIOP_REG_AEN */
+               struct {
+                       __u64 ibv;      /* Guest addr of interrupt bit vector */
+                       __u64 sb;       /* Guest addr of summary bit */
+                       __u32 flags;
+                       __u32 noi;      /* Number of interrupts */
+                       __u8 isc;       /* Guest interrupt subclass */
+                       __u8 sbo;       /* Offset of guest summary bit vector */
+                       __u16 pad;
+               } reg_aen;
+               __u64 reserved[8];
+       } u;
+};
+
+/* types for kvm_s390_zpci_op->op */
+#define KVM_S390_ZPCIOP_REG_AEN                0
+#define KVM_S390_ZPCIOP_DEREG_AEN      1
+
+/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
+#define KVM_S390_ZPCIOP_REGAEN_HOST    (1 << 0)
+
 #endif /* __LINUX_KVM_H */
index e2b77fbca91e983a43815cccb33b1fc1e6ab4b56..581ed4bdc06219ee7c42516ba48b436b8abcf6c8 100644 (file)
@@ -301,6 +301,7 @@ enum {
  *       { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
  *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *       { u64         id;           } && PERF_FORMAT_ID
+ *       { u64         lost;         } && PERF_FORMAT_LOST
  *     } && !PERF_FORMAT_GROUP
  *
  *     { u64           nr;
@@ -308,6 +309,7 @@ enum {
  *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *       { u64         value;
  *         { u64       id;           } && PERF_FORMAT_ID
+ *         { u64       lost;         } && PERF_FORMAT_LOST
  *       }             cntr[nr];
  *     } && PERF_FORMAT_GROUP
  * };
@@ -317,8 +319,9 @@ enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,
+       PERF_FORMAT_LOST                        = 1U << 4,
 
-       PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
+       PERF_FORMAT_MAX = 1U << 5,              /* non-ABI */
 };
 
 #define PERF_ATTR_SIZE_VER0    64      /* sizeof first published struct */
index cab645d4a64555641833eba4e08d60b6163f3512..f9f115a7c75b8a3060f0678599e7662d0015b878 100644 (file)
 #define VHOST_VDPA_SET_GROUP_ASID      _IOW(VHOST_VIRTIO, 0x7C, \
                                             struct vhost_vring_state)
 
+/* Suspend a device so it does not process virtqueue requests anymore
+ *
+ * After the return of ioctl the device must preserve all the necessary state
+ * (the virtqueue vring base plus the possible device specific states) that is
+ * required for restoring in the future. The device must not change its
+ * configuration after that point.
+ */
+#define VHOST_VDPA_SUSPEND             _IO(VHOST_VIRTIO, 0x7D)
+
 #endif
index 384d5e076ee436ac2905c6572dbbdd0164bc2eb8..6cd0be7c1bb438e50011600ec5e1d898bba9059b 100644 (file)
@@ -309,7 +309,7 @@ bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
        return perf_cpu_map__idx(cpus, cpu) != -1;
 }
 
-struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map)
+struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
 {
        struct perf_cpu result = {
                .cpu = -1
index 952f3520d5c261bf827fcf551168c91c4b1e259f..8ce5bbd096666cb9e8ba23d075be99c6719a30f6 100644 (file)
@@ -305,6 +305,9 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
        if (read_format & PERF_FORMAT_ID)
                entry += sizeof(u64);
 
+       if (read_format & PERF_FORMAT_LOST)
+               entry += sizeof(u64);
+
        if (read_format & PERF_FORMAT_GROUP) {
                nr = evsel->nr_members;
                size += sizeof(u64);
@@ -314,24 +317,98 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
        return size;
 }
 
+/* This only reads values for the leader */
+static int perf_evsel__read_group(struct perf_evsel *evsel, int cpu_map_idx,
+                                 int thread, struct perf_counts_values *count)
+{
+       size_t size = perf_evsel__read_size(evsel);
+       int *fd = FD(evsel, cpu_map_idx, thread);
+       u64 read_format = evsel->attr.read_format;
+       u64 *data;
+       int idx = 1;
+
+       if (fd == NULL || *fd < 0)
+               return -EINVAL;
+
+       data = calloc(1, size);
+       if (data == NULL)
+               return -ENOMEM;
+
+       if (readn(*fd, data, size) <= 0) {
+               free(data);
+               return -errno;
+       }
+
+       /*
+        * This reads only the leader event intentionally since we don't have
+        * perf counts values for sibling events.
+        */
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               count->ena = data[idx++];
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               count->run = data[idx++];
+
+       /* value is always available */
+       count->val = data[idx++];
+       if (read_format & PERF_FORMAT_ID)
+               count->id = data[idx++];
+       if (read_format & PERF_FORMAT_LOST)
+               count->lost = data[idx++];
+
+       free(data);
+       return 0;
+}
+
+/*
+ * The perf read format is very flexible.  It needs to set the proper
+ * values according to the read format.
+ */
+static void perf_evsel__adjust_values(struct perf_evsel *evsel, u64 *buf,
+                                     struct perf_counts_values *count)
+{
+       u64 read_format = evsel->attr.read_format;
+       int n = 0;
+
+       count->val = buf[n++];
+
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               count->ena = buf[n++];
+
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               count->run = buf[n++];
+
+       if (read_format & PERF_FORMAT_ID)
+               count->id = buf[n++];
+
+       if (read_format & PERF_FORMAT_LOST)
+               count->lost = buf[n++];
+}
+
 int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
                     struct perf_counts_values *count)
 {
        size_t size = perf_evsel__read_size(evsel);
        int *fd = FD(evsel, cpu_map_idx, thread);
+       u64 read_format = evsel->attr.read_format;
+       struct perf_counts_values buf;
 
        memset(count, 0, sizeof(*count));
 
        if (fd == NULL || *fd < 0)
                return -EINVAL;
 
+       if (read_format & PERF_FORMAT_GROUP)
+               return perf_evsel__read_group(evsel, cpu_map_idx, thread, count);
+
        if (MMAP(evsel, cpu_map_idx, thread) &&
+           !(read_format & (PERF_FORMAT_ID | PERF_FORMAT_LOST)) &&
            !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
                return 0;
 
-       if (readn(*fd, count->values, size) <= 0)
+       if (readn(*fd, buf.values, size) <= 0)
                return -errno;
 
+       perf_evsel__adjust_values(evsel, buf.values, count);
        return 0;
 }
 
index 24de795b09bb3c3d55fcf8fce85dfdaaa8592258..03aceb72a783c43a816415b747c011ba99e2e95a 100644 (file)
@@ -23,7 +23,7 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
 LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
 LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
-LIBPERF_API struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map);
+LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
 LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
 
 #define perf_cpu_map__for_each_cpu(cpu, idx, cpus)             \
index 556bb06798f27b5eb8839a1cb720f15e07dd141a..93bf93a59c99b600cb30d758b7f3acbd4d9955f6 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/limits.h>
 #include <linux/bpf.h>
+#include <linux/compiler.h>
 #include <sys/types.h> /* pid_t */
 
 #define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
@@ -76,7 +77,7 @@ struct perf_record_lost_samples {
 };
 
 /*
- * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
  */
 struct perf_record_read {
        struct perf_event_header header;
@@ -85,6 +86,7 @@ struct perf_record_read {
        __u64                    time_enabled;
        __u64                    time_running;
        __u64                    id;
+       __u64                    lost;
 };
 
 struct perf_record_throttle {
@@ -153,22 +155,60 @@ enum {
        PERF_CPU_MAP__MASK = 1,
 };
 
+/*
+ * Array encoding of a perf_cpu_map where nr is the number of entries in cpu[]
+ * and each entry is a value for a CPU in the map.
+ */
 struct cpu_map_entries {
        __u16                    nr;
        __u16                    cpu[];
 };
 
-struct perf_record_record_cpu_map {
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 32-bit. */
+struct perf_record_mask_cpu_map32 {
+       /* Number of mask values. */
+       __u16                    nr;
+       /* Constant 4. */
+       __u16                    long_size;
+       /* Bitmap data. */
+       __u32                    mask[];
+};
+
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 64-bit. */
+struct perf_record_mask_cpu_map64 {
+       /* Number of mask values. */
        __u16                    nr;
+       /* Constant 8. */
        __u16                    long_size;
-       unsigned long            mask[];
+       /* Legacy padding. */
+       char                     __pad[4];
+       /* Bitmap data. */
+       __u64                    mask[];
 };
 
-struct perf_record_cpu_map_data {
+/*
+ * 'struct perf_record_cpu_map_data' is packed as unfortunately an earlier
+ * version had unaligned data and we wish to retain file format compatibility.
+ * -irogers
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+
+struct __packed perf_record_cpu_map_data {
        __u16                    type;
-       char                     data[];
+       union {
+               /* Used when type == PERF_CPU_MAP__CPUS. */
+               struct cpu_map_entries cpus_data;
+               /* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
+               struct perf_record_mask_cpu_map32 mask32_data;
+               /* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
+               struct perf_record_mask_cpu_map64 mask64_data;
+       };
 };
 
+#pragma GCC diagnostic pop
+
 struct perf_record_cpu_map {
        struct perf_event_header         header;
        struct perf_record_cpu_map_data  data;
index 699c0ed97d34ed051524f38f97a174124bdfb56b..6f92204075c244bc623b26dc2c97fa4c835a4228 100644 (file)
@@ -18,8 +18,10 @@ struct perf_counts_values {
                        uint64_t val;
                        uint64_t ena;
                        uint64_t run;
+                       uint64_t id;
+                       uint64_t lost;
                };
-               uint64_t values[3];
+               uint64_t values[5];
        };
 };
 
index 89be89afb24d93d0440a6a8688fa68992673578f..a11fc51bfb688304e764f9166ebeddb11c902938 100644 (file)
@@ -1,10 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <stdarg.h>
 #include <stdio.h>
+#include <string.h>
 #include <linux/perf_event.h>
+#include <linux/kernel.h>
 #include <perf/cpumap.h>
 #include <perf/threadmap.h>
 #include <perf/evsel.h>
+#include <internal/evsel.h>
 #include <internal/tests.h>
 #include "tests.h"
 
@@ -189,6 +192,163 @@ static int test_stat_user_read(int event)
        return 0;
 }
 
+static int test_stat_read_format_single(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+       struct perf_evsel *evsel;
+       struct perf_counts_values counts;
+       volatile int count = 0x100000;
+       int err;
+
+       evsel = perf_evsel__new(attr);
+       __T("failed to create evsel", evsel);
+
+       /* skip old kernels that don't support the format */
+       err = perf_evsel__open(evsel, NULL, threads);
+       if (err < 0)
+               return 0;
+
+       while (count--) ;
+
+       memset(&counts, -1, sizeof(counts));
+       perf_evsel__read(evsel, 0, 0, &counts);
+
+       __T("failed to read value", counts.val);
+       if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               __T("failed to read TOTAL_TIME_ENABLED", counts.ena);
+       if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               __T("failed to read TOTAL_TIME_RUNNING", counts.run);
+       if (attr->read_format & PERF_FORMAT_ID)
+               __T("failed to read ID", counts.id);
+       if (attr->read_format & PERF_FORMAT_LOST)
+               __T("failed to read LOST", counts.lost == 0);
+
+       perf_evsel__close(evsel);
+       perf_evsel__delete(evsel);
+       return 0;
+}
+
+static int test_stat_read_format_group(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+       struct perf_evsel *leader, *member;
+       struct perf_counts_values counts;
+       volatile int count = 0x100000;
+       int err;
+
+       attr->read_format |= PERF_FORMAT_GROUP;
+       leader = perf_evsel__new(attr);
+       __T("failed to create leader", leader);
+
+       attr->read_format &= ~PERF_FORMAT_GROUP;
+       member = perf_evsel__new(attr);
+       __T("failed to create member", member);
+
+       member->leader = leader;
+       leader->nr_members = 2;
+
+       /* skip old kernels that don't support the format */
+       err = perf_evsel__open(leader, NULL, threads);
+       if (err < 0)
+               return 0;
+       err = perf_evsel__open(member, NULL, threads);
+       if (err < 0)
+               return 0;
+
+       while (count--) ;
+
+       memset(&counts, -1, sizeof(counts));
+       perf_evsel__read(leader, 0, 0, &counts);
+
+       __T("failed to read leader value", counts.val);
+       if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               __T("failed to read leader TOTAL_TIME_ENABLED", counts.ena);
+       if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               __T("failed to read leader TOTAL_TIME_RUNNING", counts.run);
+       if (attr->read_format & PERF_FORMAT_ID)
+               __T("failed to read leader ID", counts.id);
+       if (attr->read_format & PERF_FORMAT_LOST)
+               __T("failed to read leader LOST", counts.lost == 0);
+
+       memset(&counts, -1, sizeof(counts));
+       perf_evsel__read(member, 0, 0, &counts);
+
+       __T("failed to read member value", counts.val);
+       if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               __T("failed to read member TOTAL_TIME_ENABLED", counts.ena);
+       if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               __T("failed to read member TOTAL_TIME_RUNNING", counts.run);
+       if (attr->read_format & PERF_FORMAT_ID)
+               __T("failed to read member ID", counts.id);
+       if (attr->read_format & PERF_FORMAT_LOST)
+               __T("failed to read member LOST", counts.lost == 0);
+
+       perf_evsel__close(member);
+       perf_evsel__close(leader);
+       perf_evsel__delete(member);
+       perf_evsel__delete(leader);
+       return 0;
+}
+
+static int test_stat_read_format(void)
+{
+       struct perf_thread_map *threads;
+       struct perf_event_attr attr = {
+               .type   = PERF_TYPE_SOFTWARE,
+               .config = PERF_COUNT_SW_TASK_CLOCK,
+       };
+       int err, i;
+
+#define FMT(_fmt)  PERF_FORMAT_ ## _fmt
+#define FMT_TIME  (FMT(TOTAL_TIME_ENABLED) | FMT(TOTAL_TIME_RUNNING))
+
+       uint64_t test_formats [] = {
+               0,
+               FMT_TIME,
+               FMT(ID),
+               FMT(LOST),
+               FMT_TIME | FMT(ID),
+               FMT_TIME | FMT(LOST),
+               FMT_TIME | FMT(ID) | FMT(LOST),
+               FMT(ID) | FMT(LOST),
+       };
+
+#undef FMT
+#undef FMT_TIME
+
+       threads = perf_thread_map__new_dummy();
+       __T("failed to create threads", threads);
+
+       perf_thread_map__set_pid(threads, 0, 0);
+
+       for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+               attr.read_format = test_formats[i];
+               __T_VERBOSE("testing single read with read_format: %lx\n",
+                           (unsigned long)test_formats[i]);
+
+               err = test_stat_read_format_single(&attr, threads);
+               __T("failed to read single format", err == 0);
+       }
+
+       perf_thread_map__put(threads);
+
+       threads = perf_thread_map__new_array(2, NULL);
+       __T("failed to create threads", threads);
+
+       perf_thread_map__set_pid(threads, 0, 0);
+       perf_thread_map__set_pid(threads, 1, 0);
+
+       for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+               attr.read_format = test_formats[i];
+               __T_VERBOSE("testing group read with read_format: %lx\n",
+                           (unsigned long)test_formats[i]);
+
+               err = test_stat_read_format_group(&attr, threads);
+               __T("failed to read group format", err == 0);
+       }
+
+       perf_thread_map__put(threads);
+       return 0;
+}
+
 int test_evsel(int argc, char **argv)
 {
        __T_START;
@@ -200,6 +360,7 @@ int test_evsel(int argc, char **argv)
        test_stat_thread_enable();
        test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
        test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+       test_stat_read_format();
 
        __T_END;
        return tests_failed == 0 ? 0 : -1;
index 0cec74da7ffea42da25d5423207995e1a0579ad0..e55fdf952a3a15c2af189315c90f102e0bf6c8df 100644 (file)
@@ -162,32 +162,34 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 
        /*
         * Unfortunately these have to be hard coded because the noreturn
-        * attribute isn't provided in ELF data.
+        * attribute isn't provided in ELF data. Keep 'em sorted.
         */
        static const char * const global_noreturns[] = {
+               "__invalid_creds",
+               "__module_put_and_kthread_exit",
+               "__reiserfs_panic",
                "__stack_chk_fail",
-               "panic",
+               "__ubsan_handle_builtin_unreachable",
+               "cpu_bringup_and_idle",
+               "cpu_startup_entry",
                "do_exit",
+               "do_group_exit",
                "do_task_dead",
-               "kthread_exit",
-               "make_task_dead",
-               "__module_put_and_kthread_exit",
+               "ex_handler_msr_mce",
+               "fortify_panic",
                "kthread_complete_and_exit",
-               "__reiserfs_panic",
+               "kthread_exit",
+               "kunit_try_catch_throw",
                "lbug_with_loc",
-               "fortify_panic",
-               "usercopy_abort",
                "machine_real_restart",
+               "make_task_dead",
+               "panic",
                "rewind_stack_and_make_dead",
-               "kunit_try_catch_throw",
-               "xen_start_kernel",
-               "cpu_bringup_and_idle",
-               "do_group_exit",
+               "sev_es_terminate",
+               "snp_abort",
                "stop_this_cpu",
-               "__invalid_creds",
-               "cpu_startup_entry",
-               "__ubsan_handle_builtin_unreachable",
-               "ex_handler_msr_mce",
+               "usercopy_abort",
+               "xen_start_kernel",
        };
 
        if (!func)
@@ -4096,7 +4098,8 @@ static int validate_ibt(struct objtool_file *file)
                 * These sections can reference text addresses, but not with
                 * the intent to indirect branch to them.
                 */
-               if (!strncmp(sec->name, ".discard", 8)                  ||
+               if ((!strncmp(sec->name, ".discard", 8) &&
+                    strcmp(sec->name, ".discard.ibt_endbr_noseal"))    ||
                    !strncmp(sec->name, ".debug", 6)                    ||
                    !strcmp(sec->name, ".altinstructions")              ||
                    !strcmp(sec->name, ".ibt_endbr_seal")               ||
index c9302096dc461e82bf9a3a3d2c7e6430f1417675..e7a776ad25d719fa6af1115f2e75f48e8ae63948 100644 (file)
@@ -21,11 +21,6 @@ cat /sys/devices/cpu_atom/cpus
 
 It indicates cpu0-cpu15 are core cpus and cpu16-cpu23 are atom cpus.
 
-Quickstart
-
-List hybrid event
------------------
-
 As before, use perf-list to list the symbolic event.
 
 perf list
@@ -40,7 +35,6 @@ the event is belong to. Same event name but with different pmu can
 be supported.
 
 Enable hybrid event with a specific pmu
----------------------------------------
 
 To enable a core only event or atom only event, following syntax is supported:
 
@@ -53,7 +47,6 @@ For example, count the 'cycles' event on core cpus.
        perf stat -e cpu_core/cycles/
 
 Create two events for one hardware event automatically
-------------------------------------------------------
 
 When creating one event and the event is available on both atom and core,
 two events are created automatically. One is for atom, the other is for
@@ -132,7 +125,6 @@ For perf-stat result, it displays two events:
 The first 'cycles' is core event, the second 'cycles' is atom event.
 
 Thread mode example:
---------------------
 
 perf-stat reports the scaled counts for hybrid event and with a percentage
 displayed. The percentage is the event's running time/enabling time.
@@ -176,14 +168,12 @@ perf_event_attr:
        604,097,080      cpu_atom/cycles/                                              (99.57%)
 
 perf-record:
-------------
 
 If there is no '-e' specified in perf record, on hybrid platform,
 it creates two default 'cycles' and adds them to event list. One
 is for core, the other is for atom.
 
 perf-stat:
-----------
 
 If there is no '-e' specified in perf stat, on hybrid platform,
 besides of software events, following events are created and
index 099817ef5150d7eee7c172e1b056694ea93a58d4..0228efc96686a46b1665e7216a49746374a06614 100644 (file)
@@ -397,6 +397,9 @@ following filters are defined:
        - abort_tx: only when the target is a hardware transaction abort
        - cond: conditional branches
        - save_type: save branch type during sampling in case binary is not available later
+                    For the platforms with Intel Arch LBR support (12th-Gen+ client or
+                    4th-Gen Xeon+ server), the save branch type is unconditionally enabled
+                    when the taken branch stack sampling is enabled.
 
 +
 The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
@@ -757,8 +760,6 @@ events in data directory files. Option specified with no or empty value
 defaults to CPU layout. Masks defined or provided by the option value are
 filtered through the mask provided by -C option.
 
-include::intel-hybrid.txt[]
-
 --debuginfod[=URLs]::
        Specify debuginfod URL to be used when cacheing perf.data binaries,
        it follows the same syntax as the DEBUGINFOD_URLS variable, like:
@@ -778,6 +779,8 @@ include::intel-hybrid.txt[]
        only, as of now.  So the applications built without the frame
        pointer might see bogus addresses.
 
+include::intel-hybrid.txt[]
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]
index 0661a1cf98556ed38f068c981c7f58c61f8119c2..2171f02daf59d5018682ae0f670e8d310191395f 100644 (file)
@@ -265,7 +265,7 @@ endif
 # defined. get-executable-or-default fails with an error if the first argument is supplied but
 # doesn't exist.
 override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO))
-override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_AUTO)))
+override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_CONFIG)))
 
 grep-libs  = $(filter -l%,$(1))
 strip-libs  = $(filter-out -l%,$(1))
index 2f6cd1b8b66273fd7a80c584026c486e1dfa543f..a5cf243c337f108c67e42ca13c671b4c1b609fc4 100644 (file)
@@ -3355,7 +3355,8 @@ static bool schedstat_events_exposed(void)
 static int __cmd_record(int argc, const char **argv)
 {
        unsigned int rec_argc, i, j;
-       const char **rec_argv;
+       char **rec_argv;
+       const char **rec_argv_copy;
        const char * const record_args[] = {
                "record",
                "-a",
@@ -3384,6 +3385,7 @@ static int __cmd_record(int argc, const char **argv)
                ARRAY_SIZE(schedstat_args) : 0;
 
        struct tep_event *waking_event;
+       int ret;
 
        /*
         * +2 for either "-e", "sched:sched_wakeup" or
@@ -3391,14 +3393,18 @@ static int __cmd_record(int argc, const char **argv)
         */
        rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
-
        if (rec_argv == NULL)
                return -ENOMEM;
+       rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
+       if (rec_argv_copy == NULL) {
+               free(rec_argv);
+               return -ENOMEM;
+       }
 
        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);
 
-       rec_argv[i++] = "-e";
+       rec_argv[i++] = strdup("-e");
        waking_event = trace_event__tp_format("sched", "sched_waking");
        if (!IS_ERR(waking_event))
                rec_argv[i++] = strdup("sched:sched_waking");
@@ -3409,11 +3415,19 @@ static int __cmd_record(int argc, const char **argv)
                rec_argv[i++] = strdup(schedstat_args[j]);
 
        for (j = 1; j < (unsigned int)argc; j++, i++)
-               rec_argv[i] = argv[j];
+               rec_argv[i] = strdup(argv[j]);
 
        BUG_ON(i != rec_argc);
 
-       return cmd_record(i, rec_argv);
+       memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
+       ret = cmd_record(rec_argc, rec_argv_copy);
+
+       for (i = 0; i < rec_argc; i++)
+               free(rec_argv[i]);
+       free(rec_argv);
+       free(rec_argv_copy);
+
+       return ret;
 }
 
 int cmd_sched(int argc, const char **argv)
index 7fb81a44672d76e116b67207e1d80d69afafefcd..54cd29d07ca8d4cce18816795e7896e2925bec47 100644 (file)
@@ -826,6 +826,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
        }
 
        evlist__for_each_entry(evsel_list, counter) {
+               counter->reset_group = false;
                if (bpf_counter__load(counter, &target))
                        return -1;
                if (!evsel__is_bpf(counter))
index f94929ebb54bdf5c16a0ecf1c60ba7d4d59aca47..7ea150cdc137d3a121118021f15d87b19c6db537 100644 (file)
@@ -17,21 +17,23 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused,
                         struct machine *machine __maybe_unused)
 {
        struct perf_record_cpu_map *map_event = &event->cpu_map;
-       struct perf_record_record_cpu_map *mask;
        struct perf_record_cpu_map_data *data;
        struct perf_cpu_map *map;
        int i;
+       unsigned int long_size;
 
        data = &map_event->data;
 
        TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);
 
-       mask = (struct perf_record_record_cpu_map *)data->data;
+       long_size = data->mask32_data.long_size;
 
-       TEST_ASSERT_VAL("wrong nr",   mask->nr == 1);
+       TEST_ASSERT_VAL("wrong long_size", long_size == 4 || long_size == 8);
+
+       TEST_ASSERT_VAL("wrong nr",   data->mask32_data.nr == 1);
 
        for (i = 0; i < 20; i++) {
-               TEST_ASSERT_VAL("wrong cpu", test_bit(i, mask->mask));
+               TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(i, data));
        }
 
        map = cpu_map__new_data(data);
@@ -51,7 +53,6 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
                         struct machine *machine __maybe_unused)
 {
        struct perf_record_cpu_map *map_event = &event->cpu_map;
-       struct cpu_map_entries *cpus;
        struct perf_record_cpu_map_data *data;
        struct perf_cpu_map *map;
 
@@ -59,11 +60,9 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
 
        TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);
 
-       cpus = (struct cpu_map_entries *)data->data;
-
-       TEST_ASSERT_VAL("wrong nr",   cpus->nr == 2);
-       TEST_ASSERT_VAL("wrong cpu",  cpus->cpu[0] == 1);
-       TEST_ASSERT_VAL("wrong cpu",  cpus->cpu[1] == 256);
+       TEST_ASSERT_VAL("wrong nr",   data->cpus_data.nr == 2);
+       TEST_ASSERT_VAL("wrong cpu",  data->cpus_data.cpu[0] == 1);
+       TEST_ASSERT_VAL("wrong cpu",  data->cpus_data.cpu[1] == 256);
 
        map = cpu_map__new_data(data);
        TEST_ASSERT_VAL("wrong nr",  perf_cpu_map__nr(map) == 2);
index 07f2411b0ad45553581189682fb53913481fdffa..20930dd48ee03e43848f351f4c5010207c456af4 100644 (file)
@@ -86,10 +86,15 @@ static bool samples_same(const struct perf_sample *s1,
                        COMP(read.time_running);
                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
-                       for (i = 0; i < s1->read.group.nr; i++)
-                               MCOMP(read.group.values[i]);
+                       for (i = 0; i < s1->read.group.nr; i++) {
+                               /* FIXME: check values without LOST */
+                               if (read_format & PERF_FORMAT_LOST)
+                                       MCOMP(read.group.values[i]);
+                       }
                } else {
                        COMP(read.one.id);
+                       if (read_format & PERF_FORMAT_LOST)
+                               COMP(read.one.lost);
                }
        }
 
@@ -263,7 +268,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
                        .data   = (void *)aux_data,
                },
        };
-       struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
+       struct sample_read_value values[] = {{1, 5, 0}, {9, 3, 0}, {2, 7, 0}, {6, 4, 1},};
        struct perf_sample sample_out, sample_out_endian;
        size_t i, sz, bufsz;
        int err, ret = -1;
@@ -286,6 +291,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
        } else {
                sample.read.one.value = 0x08789faeb786aa87ULL;
                sample.read.one.id    = 99;
+               sample.read.one.lost  = 1;
        }
 
        sz = perf_event__sample_event_size(&sample, sample_type, read_format);
@@ -370,7 +376,7 @@ out_free:
  */
 static int test__sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
-       const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
+       const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 28, 29, 30, 31};
        u64 sample_type;
        u64 sample_regs;
        size_t i;
index 9313ef2739e07d12afbd18a750205ee6d3036622..26a51b48aee464e3b18938736872f3aa6f00cf70 100755 (executable)
@@ -28,6 +28,24 @@ test_stat_record_report() {
   echo "stat record and report test [Success]"
 }
 
+test_stat_repeat_weak_groups() {
+  echo "stat repeat weak groups test"
+  if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \
+     true 2>&1 | grep -q 'seconds time elapsed'
+  then
+    echo "stat repeat weak groups test [Skipped event parsing failed]"
+    return
+  fi
+  if ! perf stat -r2 -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}:W' \
+    true > /dev/null 2>&1
+  then
+    echo "stat repeat weak groups test [Failed]"
+    err=1
+    return
+  fi
+  echo "stat repeat weak groups test [Success]"
+}
+
 test_topdown_groups() {
   # Topdown events must be grouped with the slots event first. Test that
   # parse-events reorders this.
@@ -75,6 +93,7 @@ test_topdown_weak_groups() {
 
 test_default_stat
 test_stat_record_report
+test_stat_repeat_weak_groups
 test_topdown_groups
 test_topdown_weak_groups
 exit $err
index 17311ad9f9af247967d0704aef7f84fd2bce1a07..de3701a2a2129dd6357f1910ec4878cb1005eaf9 100644 (file)
@@ -14,6 +14,8 @@ struct file;
 struct pid;
 struct cred;
 struct socket;
+struct sock;
+struct sk_buff;
 
 #define __sockaddr_check_size(size)    \
        BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -69,6 +71,9 @@ struct msghdr {
        unsigned int    msg_flags;      /* flags on received message */
        __kernel_size_t msg_controllen; /* ancillary data buffer length */
        struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
+       struct ubuf_info *msg_ubuf;
+       int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb,
+                           struct iov_iter *from, size_t length);
 };
 
 struct user_msghdr {
@@ -416,10 +421,9 @@ extern int recvmsg_copy_msghdr(struct msghdr *msg,
                               struct user_msghdr __user *umsg, unsigned flags,
                               struct sockaddr __user **uaddr,
                               struct iovec **iov);
-extern int __copy_msghdr_from_user(struct msghdr *kmsg,
-                                  struct user_msghdr __user *umsg,
-                                  struct sockaddr __user **save_addr,
-                                  struct iovec __user **uiov, size_t *nsegs);
+extern int __copy_msghdr(struct msghdr *kmsg,
+                        struct user_msghdr *umsg,
+                        struct sockaddr __user **save_addr);
 
 /* helpers which do the actual work for syscalls */
 extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
@@ -428,10 +432,6 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
 extern int __sys_sendto(int fd, void __user *buff, size_t len,
                        unsigned int flags, struct sockaddr __user *addr,
                        int addr_len);
-extern int __sys_accept4_file(struct file *file, unsigned file_flags,
-                       struct sockaddr __user *upeer_sockaddr,
-                        int __user *upeer_addrlen, int flags,
-                        unsigned long nofile);
 extern struct file *do_accept(struct file *file, unsigned file_flags,
                              struct sockaddr __user *upeer_sockaddr,
                              int __user *upeer_addrlen, int flags);
index 12b2243222b0e68dbd9ee91eea27754d9a657e70..ae43fb88f444e89004f65126e30801877d0ffa33 100644 (file)
@@ -22,54 +22,102 @@ static int max_node_num;
  */
 static int *cpunode_map;
 
-static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
+bool perf_record_cpu_map_data__test_bit(int i,
+                                       const struct perf_record_cpu_map_data *data)
+{
+       int bit_word32 = i / 32;
+       __u32 bit_mask32 = 1U << (i & 31);
+       int bit_word64 = i / 64;
+       __u64 bit_mask64 = ((__u64)1) << (i & 63);
+
+       return (data->mask32_data.long_size == 4)
+               ? (bit_word32 < data->mask32_data.nr) &&
+               (data->mask32_data.mask[bit_word32] & bit_mask32) != 0
+               : (bit_word64 < data->mask64_data.nr) &&
+               (data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
+}
+
+/* Read ith mask value from data into the given 64-bit sized bitmap */
+static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
+                                                   int i, unsigned long *bitmap)
+{
+#if __SIZEOF_LONG__ == 8
+       if (data->mask32_data.long_size == 4)
+               bitmap[0] = data->mask32_data.mask[i];
+       else
+               bitmap[0] = data->mask64_data.mask[i];
+#else
+       if (data->mask32_data.long_size == 4) {
+               bitmap[0] = data->mask32_data.mask[i];
+               bitmap[1] = 0;
+       } else {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+               bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
+               bitmap[1] = (unsigned long)data->mask64_data.mask[i];
+#else
+               bitmap[0] = (unsigned long)data->mask64_data.mask[i];
+               bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
+#endif
+       }
+#endif
+}
+static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
 {
        struct perf_cpu_map *map;
 
-       map = perf_cpu_map__empty_new(cpus->nr);
+       map = perf_cpu_map__empty_new(data->cpus_data.nr);
        if (map) {
                unsigned i;
 
-               for (i = 0; i < cpus->nr; i++) {
+               for (i = 0; i < data->cpus_data.nr; i++) {
                        /*
                         * Special treatment for -1, which is not real cpu number,
                         * and we need to use (int) -1 to initialize map[i],
                         * otherwise it would become 65535.
                         */
-                       if (cpus->cpu[i] == (u16) -1)
+                       if (data->cpus_data.cpu[i] == (u16) -1)
                                map->map[i].cpu = -1;
                        else
-                               map->map[i].cpu = (int) cpus->cpu[i];
+                               map->map[i].cpu = (int) data->cpus_data.cpu[i];
                }
        }
 
        return map;
 }
 
-static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
+static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
 {
+       DECLARE_BITMAP(local_copy, 64);
+       int weight = 0, mask_nr = data->mask32_data.nr;
        struct perf_cpu_map *map;
-       int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
 
-       nr = bitmap_weight(mask->mask, nbits);
+       for (int i = 0; i < mask_nr; i++) {
+               perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
+               weight += bitmap_weight(local_copy, 64);
+       }
+
+       map = perf_cpu_map__empty_new(weight);
+       if (!map)
+               return NULL;
 
-       map = perf_cpu_map__empty_new(nr);
-       if (map) {
-               int cpu, i = 0;
+       for (int i = 0, j = 0; i < mask_nr; i++) {
+               int cpus_per_i = (i * data->mask32_data.long_size  * BITS_PER_BYTE);
+               int cpu;
 
-               for_each_set_bit(cpu, mask->mask, nbits)
-                       map->map[i++].cpu = cpu;
+               perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
+               for_each_set_bit(cpu, local_copy, 64)
+                       map->map[j++].cpu = cpu + cpus_per_i;
        }
        return map;
 
 }
 
-struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
+struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
 {
        if (data->type == PERF_CPU_MAP__CPUS)
-               return cpu_map__from_entries((struct cpu_map_entries *)data->data);
+               return cpu_map__from_entries(data);
        else
-               return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
+               return cpu_map__from_mask(data);
 }
 
 size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
index 703ae6d3386e3934baf489679242ba9a09086ce9..fa8a5acdcae128c2d829a80f2d92d4755d4f5cc3 100644 (file)
@@ -37,9 +37,11 @@ struct cpu_aggr_map {
 
 struct perf_record_cpu_map_data;
 
+bool perf_record_cpu_map_data__test_bit(int i, const struct perf_record_cpu_map_data *data);
+
 struct perf_cpu_map *perf_cpu_map__empty_new(int nr);
 
-struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data);
+struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data);
 size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size);
 size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size);
 size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp);
index a7b0931d51379dd8db73ac4294d07b05be9cca2c..12eae69170225206670548754f685af575498339 100644 (file)
@@ -65,7 +65,8 @@ struct stack_dump {
 
 struct sample_read_value {
        u64 value;
-       u64 id;
+       u64 id;   /* only if PERF_FORMAT_ID */
+       u64 lost; /* only if PERF_FORMAT_LOST */
 };
 
 struct sample_read {
@@ -80,6 +81,24 @@ struct sample_read {
        };
 };
 
+static inline size_t sample_read_value_size(u64 read_format)
+{
+       /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+       if (read_format & PERF_FORMAT_LOST)
+               return sizeof(struct sample_read_value);
+       else
+               return offsetof(struct sample_read_value, lost);
+}
+
+static inline struct sample_read_value *
+next_sample_read_value(struct sample_read_value *v, u64 read_format)
+{
+       return (void *)v + sample_read_value_size(read_format);
+}
+
+#define sample_read_group__for_each(v, nr, rf)         \
+       for (int __i = 0; __i < (int)nr; v = next_sample_read_value(v, rf), __i++)
+
 struct ip_callchain {
        u64 nr;
        u64 ips[];
@@ -463,10 +482,6 @@ size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FIL
 int kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name, u64 *addr);
 
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max);
-void  cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
-                              u16 type, int max);
-
 void event_attr_init(struct perf_event_attr *attr);
 
 int perf_event_paranoid(void);
index 4852089e1d79f205d2bba3ee6dbedb4c73304a5f..18c3eb864d5587a016221de5e1dae094c7b6be9d 100644 (file)
@@ -1541,7 +1541,7 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
 }
 
 static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
-                            u64 val, u64 ena, u64 run)
+                            u64 val, u64 ena, u64 run, u64 lost)
 {
        struct perf_counts_values *count;
 
@@ -1550,6 +1550,7 @@ static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
        count->val    = val;
        count->ena    = ena;
        count->run    = run;
+       count->lost   = lost;
 
        perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
 }
@@ -1558,7 +1559,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int
 {
        u64 read_format = leader->core.attr.read_format;
        struct sample_read_value *v;
-       u64 nr, ena = 0, run = 0, i;
+       u64 nr, ena = 0, run = 0, lost = 0;
 
        nr = *data++;
 
@@ -1571,18 +1572,18 @@ static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                run = *data++;
 
-       v = (struct sample_read_value *) data;
-
-       evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run);
-
-       for (i = 1; i < nr; i++) {
+       v = (void *)data;
+       sample_read_group__for_each(v, nr, read_format) {
                struct evsel *counter;
 
-               counter = evlist__id2evsel(leader->evlist, v[i].id);
+               counter = evlist__id2evsel(leader->evlist, v->id);
                if (!counter)
                        return -EINVAL;
 
-               evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run);
+               if (read_format & PERF_FORMAT_LOST)
+                       lost = v->lost;
+
+               evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
        }
 
        return 0;
@@ -2475,8 +2476,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
 
                        if (data->read.group.nr > max_group_nr)
                                return -EFAULT;
-                       sz = data->read.group.nr *
-                            sizeof(struct sample_read_value);
+
+                       sz = data->read.group.nr * sample_read_value_size(read_format);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->read.group.values =
                                        (struct sample_read_value *)array;
@@ -2485,6 +2486,12 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
                        OVERFLOW_CHECK_u64(array);
                        data->read.one.id = *array;
                        array++;
+
+                       if (read_format & PERF_FORMAT_LOST) {
+                               OVERFLOW_CHECK_u64(array);
+                               data->read.one.lost = *array;
+                               array++;
+                       }
                }
        }
 
index 9ef2406e0ede714c8a00abb81c95d6a66b9a1959..1f2040f36d4e937193d26cca1b2d9b5afa4e8b81 100644 (file)
@@ -642,15 +642,19 @@ exit:
        return pylist;
 }
 
-static PyObject *get_sample_value_as_tuple(struct sample_read_value *value)
+static PyObject *get_sample_value_as_tuple(struct sample_read_value *value,
+                                          u64 read_format)
 {
        PyObject *t;
 
-       t = PyTuple_New(2);
+       t = PyTuple_New(3);
        if (!t)
                Py_FatalError("couldn't create Python tuple");
        PyTuple_SetItem(t, 0, PyLong_FromUnsignedLongLong(value->id));
        PyTuple_SetItem(t, 1, PyLong_FromUnsignedLongLong(value->value));
+       if (read_format & PERF_FORMAT_LOST)
+               PyTuple_SetItem(t, 2, PyLong_FromUnsignedLongLong(value->lost));
+
        return t;
 }
 
@@ -681,12 +685,17 @@ static void set_sample_read_in_dict(PyObject *dict_sample,
                Py_FatalError("couldn't create Python list");
 
        if (read_format & PERF_FORMAT_GROUP) {
-               for (i = 0; i < sample->read.group.nr; i++) {
-                       PyObject *t = get_sample_value_as_tuple(&sample->read.group.values[i]);
+               struct sample_read_value *v = sample->read.group.values;
+
+               i = 0;
+               sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+                       PyObject *t = get_sample_value_as_tuple(v, read_format);
                        PyList_SET_ITEM(values, i, t);
+                       i++;
                }
        } else {
-               PyObject *t = get_sample_value_as_tuple(&sample->read.one);
+               PyObject *t = get_sample_value_as_tuple(&sample->read.one,
+                                                       read_format);
                PyList_SET_ITEM(values, 0, t);
        }
        pydict_set_item_string_decref(dict_sample, "values", values);
index 98e16659a149509fd06ac9a61b810b57ad683d34..192c9274f7ade92fbfc6ffe572dea19eae1285c9 100644 (file)
@@ -916,30 +916,30 @@ static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
 {
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
-       struct cpu_map_entries *cpus;
-       struct perf_record_record_cpu_map *mask;
-       unsigned i;
 
        data->type = bswap_16(data->type);
 
        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
-               cpus = (struct cpu_map_entries *)data->data;
-
-               cpus->nr = bswap_16(cpus->nr);
+               data->cpus_data.nr = bswap_16(data->cpus_data.nr);
 
-               for (i = 0; i < cpus->nr; i++)
-                       cpus->cpu[i] = bswap_16(cpus->cpu[i]);
+               for (unsigned i = 0; i < data->cpus_data.nr; i++)
+                       data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
-               mask = (struct perf_record_record_cpu_map *)data->data;
-
-               mask->nr = bswap_16(mask->nr);
-               mask->long_size = bswap_16(mask->long_size);
+               data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
 
-               switch (mask->long_size) {
-               case 4: mem_bswap_32(&mask->mask, mask->nr); break;
-               case 8: mem_bswap_64(&mask->mask, mask->nr); break;
+               switch (data->mask32_data.long_size) {
+               case 4:
+                       data->mask32_data.nr = bswap_16(data->mask32_data.nr);
+                       for (unsigned i = 0; i < data->mask32_data.nr; i++)
+                               data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
+                       break;
+               case 8:
+                       data->mask64_data.nr = bswap_16(data->mask64_data.nr);
+                       for (unsigned i = 0; i < data->mask64_data.nr; i++)
+                               data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
+                       break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
@@ -1283,21 +1283,25 @@ static void sample_read__printf(struct perf_sample *sample, u64 read_format)
                       sample->read.time_running);
 
        if (read_format & PERF_FORMAT_GROUP) {
-               u64 i;
+               struct sample_read_value *value = sample->read.group.values;
 
                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
 
-               for (i = 0; i < sample->read.group.nr; i++) {
-                       struct sample_read_value *value;
-
-                       value = &sample->read.group.values[i];
+               sample_read_group__for_each(value, sample->read.group.nr, read_format) {
                        printf("..... id %016" PRIx64
-                              ", value %016" PRIx64 "\n",
+                              ", value %016" PRIx64,
                               value->id, value->value);
+                       if (read_format & PERF_FORMAT_LOST)
+                               printf(", lost %" PRIu64, value->lost);
+                       printf("\n");
                }
-       } else
-               printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
+       } else {
+               printf("..... id %016" PRIx64 ", value %016" PRIx64,
                        sample->read.one.id, sample->read.one.value);
+               if (read_format & PERF_FORMAT_LOST)
+                       printf(", lost %" PRIu64, sample->read.one.lost);
+               printf("\n");
+       }
 }
 
 static void dump_event(struct evlist *evlist, union perf_event *event,
@@ -1411,6 +1415,9 @@ static void dump_read(struct evsel *evsel, union perf_event *event)
 
        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
+
+       if (read_format & PERF_FORMAT_LOST)
+               printf("... lost         : %" PRI_lu64 "\n", read_event->lost);
 }
 
 static struct machine *machines__find_for_cpumode(struct machines *machines,
@@ -1479,14 +1486,14 @@ static int deliver_sample_group(struct evlist *evlist,
                                struct perf_tool *tool,
                                union  perf_event *event,
                                struct perf_sample *sample,
-                               struct machine *machine)
+                               struct machine *machine,
+                               u64 read_format)
 {
        int ret = -EINVAL;
-       u64 i;
+       struct sample_read_value *v = sample->read.group.values;
 
-       for (i = 0; i < sample->read.group.nr; i++) {
-               ret = deliver_sample_value(evlist, tool, event, sample,
-                                          &sample->read.group.values[i],
+       sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+               ret = deliver_sample_value(evlist, tool, event, sample, v,
                                           machine);
                if (ret)
                        break;
@@ -1510,7 +1517,7 @@ static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
-                                           machine);
+                                           machine, read_format);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
index 979c8cb918f724f3ff71b15b1200667a7363ba38..788ce5e46470a4ad16666c20c67c5bd1e2f16722 100644 (file)
@@ -1193,7 +1193,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                                  &rsd);
                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
-               print_metric(config, ctxp, color, "%8.1f%%", "retiring",
+               print_metric(config, ctxp, color, "%8.1f%%", "Retiring",
                                retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
                   full_td(cpu_map_idx, st, &rsd)) {
@@ -1202,7 +1202,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                                  &rsd);
                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
-               print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
+               print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound",
                                fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
                   full_td(cpu_map_idx, st, &rsd)) {
@@ -1211,7 +1211,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                                  &rsd);
                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
-               print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
+               print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound",
                                be_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
                   full_td(cpu_map_idx, st, &rsd)) {
@@ -1220,7 +1220,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                                  &rsd);
                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
-               print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
+               print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation",
                                bad_spec * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
                        full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1234,13 +1234,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
                if (retiring > 0.7 && heavy_ops > 0.1)
                        color = PERF_COLOR_GREEN;
-               print_metric(config, ctxp, color, "%8.1f%%", "heavy operations",
+               print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations",
                                heavy_ops * 100.);
                if (retiring > 0.7 && light_ops > 0.6)
                        color = PERF_COLOR_GREEN;
                else
                        color = NULL;
-               print_metric(config, ctxp, color, "%8.1f%%", "light operations",
+               print_metric(config, ctxp, color, "%8.1f%%", "Light Operations",
                                light_ops * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
                        full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1254,13 +1254,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
                if (bad_spec > 0.1 && br_mis > 0.05)
                        color = PERF_COLOR_RED;
-               print_metric(config, ctxp, color, "%8.1f%%", "branch mispredict",
+               print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict",
                                br_mis * 100.);
                if (bad_spec > 0.1 && m_clears > 0.05)
                        color = PERF_COLOR_RED;
                else
                        color = NULL;
-               print_metric(config, ctxp, color, "%8.1f%%", "machine clears",
+               print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears",
                                m_clears * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
                        full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1274,13 +1274,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
                if (fe_bound > 0.2 && fetch_lat > 0.15)
                        color = PERF_COLOR_RED;
-               print_metric(config, ctxp, color, "%8.1f%%", "fetch latency",
+               print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency",
                                fetch_lat * 100.);
                if (fe_bound > 0.2 && fetch_bw > 0.1)
                        color = PERF_COLOR_RED;
                else
                        color = NULL;
-               print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth",
+               print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth",
                                fetch_bw * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
                        full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1294,13 +1294,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
                if (be_bound > 0.2 && mem_bound > 0.2)
                        color = PERF_COLOR_RED;
-               print_metric(config, ctxp, color, "%8.1f%%", "memory bound",
+               print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound",
                                mem_bound * 100.);
                if (be_bound > 0.2 && core_bound > 0.1)
                        color = PERF_COLOR_RED;
                else
                        color = NULL;
-               print_metric(config, ctxp, color, "%8.1f%%", "Core bound",
+               print_metric(config, ctxp, color, "%8.1f%%", "Core Bound",
                                core_bound * 100.);
        } else if (evsel->metric_expr) {
                generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
index 2ae59c03ae774713f9425b1d2ff3cf4162eb369e..812424dbf2d5b95f1b19df7f704f37643a87dbd6 100644 (file)
@@ -1184,52 +1184,48 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
        return err;
 }
 
-static void synthesize_cpus(struct cpu_map_entries *cpus,
-                           struct perf_cpu_map *map)
+static void synthesize_cpus(struct perf_record_cpu_map_data *data,
+                           const struct perf_cpu_map *map)
 {
        int i, map_nr = perf_cpu_map__nr(map);
 
-       cpus->nr = map_nr;
+       data->cpus_data.nr = map_nr;
 
        for (i = 0; i < map_nr; i++)
-               cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
+               data->cpus_data.cpu[i] = perf_cpu_map__cpu(map, i).cpu;
 }
 
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
-                           struct perf_cpu_map *map, int max)
+static void synthesize_mask(struct perf_record_cpu_map_data *data,
+                           const struct perf_cpu_map *map, int max)
 {
-       int i;
+       int idx;
+       struct perf_cpu cpu;
+
+       /* Due to padding, the 4bytes per entry mask variant is always smaller. */
+       data->mask32_data.nr = BITS_TO_U32(max);
+       data->mask32_data.long_size = 4;
 
-       mask->nr = BITS_TO_LONGS(max);
-       mask->long_size = sizeof(long);
+       perf_cpu_map__for_each_cpu(cpu, idx, map) {
+               int bit_word = cpu.cpu / 32;
+               __u32 bit_mask = 1U << (cpu.cpu & 31);
 
-       for (i = 0; i < perf_cpu_map__nr(map); i++)
-               set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
+               data->mask32_data.mask[bit_word] |= bit_mask;
+       }
 }
 
-static size_t cpus_size(struct perf_cpu_map *map)
+static size_t cpus_size(const struct perf_cpu_map *map)
 {
        return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
 }
 
-static size_t mask_size(struct perf_cpu_map *map, int *max)
+static size_t mask_size(const struct perf_cpu_map *map, int *max)
 {
-       int i;
-
-       *max = 0;
-
-       for (i = 0; i < perf_cpu_map__nr(map); i++) {
-               /* bit position of the cpu is + 1 */
-               int bit = perf_cpu_map__cpu(map, i).cpu + 1;
-
-               if (bit > *max)
-                       *max = bit;
-       }
-
-       return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+       *max = perf_cpu_map__max(map).cpu;
+       return sizeof(struct perf_record_mask_cpu_map32) + BITS_TO_U32(*max) * sizeof(__u32);
 }
 
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+static void *cpu_map_data__alloc(const struct perf_cpu_map *map, size_t *size,
+                                u16 *type, int *max)
 {
        size_t size_cpus, size_mask;
        bool is_dummy = perf_cpu_map__empty(map);
@@ -1258,30 +1254,31 @@ void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int
                *type  = PERF_CPU_MAP__MASK;
        }
 
-       *size += sizeof(struct perf_record_cpu_map_data);
+       *size += sizeof(__u16); /* For perf_record_cpu_map_data.type. */
        *size = PERF_ALIGN(*size, sizeof(u64));
        return zalloc(*size);
 }
 
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
-                             u16 type, int max)
+static void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data,
+                                    const struct perf_cpu_map *map,
+                                    u16 type, int max)
 {
        data->type = type;
 
        switch (type) {
        case PERF_CPU_MAP__CPUS:
-               synthesize_cpus((struct cpu_map_entries *) data->data, map);
+               synthesize_cpus(data, map);
                break;
        case PERF_CPU_MAP__MASK:
-               synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+               synthesize_mask(data, map, max);
        default:
                break;
        }
 }
 
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
 {
-       size_t size = sizeof(struct perf_record_cpu_map);
+       size_t size = sizeof(struct perf_event_header);
        struct perf_record_cpu_map *event;
        int max;
        u16 type;
@@ -1299,7 +1296,7 @@ static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
 }
 
 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
-                                  struct perf_cpu_map *map,
+                                  const struct perf_cpu_map *map,
                                   perf_event__handler_t process,
                                   struct machine *machine)
 {
@@ -1432,11 +1429,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                        result += sizeof(u64);
                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
-                       sz = sample->read.group.nr *
-                            sizeof(struct sample_read_value);
-                       result += sz;
+                       sz = sample_read_value_size(read_format);
+                       result += sz * sample->read.group.nr;
                } else {
                        result += sizeof(u64);
+                       if (read_format & PERF_FORMAT_LOST)
+                               result += sizeof(u64);
                }
        }
 
@@ -1521,6 +1519,20 @@ void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
        *array = data->weight;
 }
 
+static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
+                                    const struct perf_sample *sample)
+{
+       size_t sz = sample_read_value_size(read_format);
+       struct sample_read_value *v = sample->read.group.values;
+
+       sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+               /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+               memcpy(array, v, sz);
+               array = (void *)array + sz;
+       }
+       return array;
+}
+
 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
                                  const struct perf_sample *sample)
 {
@@ -1602,13 +1614,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
 
                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
-                       sz = sample->read.group.nr *
-                            sizeof(struct sample_read_value);
-                       memcpy(array, sample->read.group.values, sz);
-                       array = (void *)array + sz;
+                       array = copy_read_group_values(array, read_format,
+                                                      sample);
                } else {
                        *array = sample->read.one.id;
                        array++;
+
+                       if (read_format & PERF_FORMAT_LOST) {
+                               *array = sample->read.one.lost;
+                               array++;
+                       }
                }
        }
 
index 81cb3d6af0b9685f68af70fad8fc6e7c62265d5f..53737d1619a411221626e998a02ae954f36d6593 100644 (file)
@@ -46,7 +46,7 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *e
 int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process);
 int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process);
 int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_cpu_map(struct perf_tool *tool, struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_cpu_map(struct perf_tool *tool, const struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
 int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
 int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
 int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
index 10b34bb03bc1b8af48c4c056e609154ce2b14a23..c2064a35688b081d41457f21eaa2d53e2f046f87 100644 (file)
@@ -12,6 +12,7 @@ TARGETS += cpu-hotplug
 TARGETS += damon
 TARGETS += drivers/dma-buf
 TARGETS += drivers/s390x/uvdevice
+TARGETS += drivers/net/bonding
 TARGETS += efivarfs
 TARGETS += exec
 TARGETS += filesystems
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
new file mode 100644 (file)
index 0000000..ab6c54b
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for net selftests
+
+TEST_PROGS := bond-break-lacpdu-tx.sh
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
new file mode 100755 (executable)
index 0000000..47ab905
--- /dev/null
@@ -0,0 +1,81 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Regression Test:
+#   Verify LACPDUs get transmitted after setting the MAC address of
+#   the bond.
+#
+# https://bugzilla.redhat.com/show_bug.cgi?id=2020773
+#
+#       +---------+
+#       | fab-br0 |
+#       +---------+
+#            |
+#       +---------+
+#       |  fbond  |
+#       +---------+
+#        |       |
+#    +------+ +------+
+#    |veth1 | |veth2 |
+#    +------+ +------+
+#
+# We use veths instead of physical interfaces
+
+set -e
+tmp=$(mktemp -q dump.XXXXXX)
+cleanup() {
+       ip link del fab-br0 >/dev/null 2>&1 || :
+       ip link del fbond  >/dev/null 2>&1 || :
+       ip link del veth1-bond  >/dev/null 2>&1 || :
+       ip link del veth2-bond  >/dev/null 2>&1 || :
+       modprobe -r bonding  >/dev/null 2>&1 || :
+       rm -f -- ${tmp}
+}
+
+trap cleanup 0 1 2
+cleanup
+sleep 1
+
+# create the bridge
+ip link add fab-br0 address 52:54:00:3B:7C:A6 mtu 1500 type bridge \
+       forward_delay 15
+
+# create the bond
+ip link add fbond type bond mode 4 miimon 200 xmit_hash_policy 1 \
+       ad_actor_sys_prio 65535 lacp_rate fast
+
+# set bond address
+ip link set fbond address 52:54:00:3B:7C:A6
+ip link set fbond up
+
+# set again bond sysfs parameters
+ip link set fbond type bond ad_actor_sys_prio 65535
+
+# create veths
+ip link add name veth1-bond type veth peer name veth1-end
+ip link add name veth2-bond type veth peer name veth2-end
+
+# add ports
+ip link set fbond master fab-br0
+ip link set veth1-bond down master fbond
+ip link set veth2-bond down master fbond
+
+# bring up
+ip link set veth1-end up
+ip link set veth2-end up
+ip link set fab-br0 up
+ip link set fbond up
+ip addr add dev fab-br0 10.0.0.3
+
+tcpdump -n -i veth1-end -e ether proto 0x8809 >${tmp} 2>&1 &
+sleep 15
+pkill tcpdump >/dev/null 2>&1
+rc=0
+num=$(grep "packets captured" ${tmp} | awk '{print $1}')
+if test "$num" -gt 0; then
+       echo "PASS, captured ${num}"
+else
+       echo "FAIL"
+       rc=1
+fi
+exit $rc
diff --git a/tools/testing/selftests/drivers/net/bonding/config b/tools/testing/selftests/drivers/net/bonding/config
new file mode 100644 (file)
index 0000000..dc1c22d
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_BONDING=y
diff --git a/tools/testing/selftests/drivers/net/bonding/settings b/tools/testing/selftests/drivers/net/bonding/settings
new file mode 100644 (file)
index 0000000..867e118
--- /dev/null
@@ -0,0 +1 @@
+timeout=60
index a6959df28eb0f23eb9625f13df271f8dd939549d..02868ac3bc717f341029489ef9ef67eda5ff417c 100644 (file)
@@ -9,10 +9,13 @@ TEST_GEN_PROGS := $(src_test:.c=)
 TEST_GEN_PROGS_EXTENDED := true
 
 OVERRIDE_TARGETS := 1
+top_srcdir := ../../../..
 include ../lib.mk
 
+khdr_dir = $(top_srcdir)/usr/include
+
 $(OUTPUT)/true: true.c
        $(LINK.c) $< $(LDLIBS) -o $@ -static
 
-$(OUTPUT)/%_test: %_test.c ../kselftest_harness.h common.h
-       $(LINK.c) $< $(LDLIBS) -o $@ -lcap
+$(OUTPUT)/%_test: %_test.c $(khdr_dir)/linux/landlock.h ../kselftest_harness.h common.h
+       $(LINK.c) $< $(LDLIBS) -o $@ -lcap -I$(khdr_dir)
index 947fc72413e9ae46ea95a1e8f1a8faa0a6ffcdd5..d44c72b3abe36d6d174807ee9679ff33e676205a 100644 (file)
@@ -40,6 +40,7 @@ ifeq (0,$(MAKELEVEL))
     endif
 endif
 selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
+top_srcdir = $(selfdir)/../../..
 
 # The following are built by lib.mk common compile rules.
 # TEST_CUSTOM_PROGS should be used by tests that require
index d4ffebb989f88d2e63b14f2ccd9fe6644d3f21b7..7060bae04ec87d9ecd59d6643becb6452fdf22d5 100755 (executable)
 # nft_flowtable.sh -o8000 -l1500 -r2000
 #
 
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsr1="nsr1-$sfx"
+nsr2="nsr2-$sfx"
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 ret=0
 
-ns1in=""
-ns2in=""
+nsin=""
 ns1out=""
 ns2out=""
 
@@ -36,21 +40,19 @@ checktool (){
 checktool "nft --version" "run test without nft tool"
 checktool "ip -Version" "run test without ip tool"
 checktool "which nc" "run test without nc (netcat)"
-checktool "ip netns add nsr1" "create net namespace"
+checktool "ip netns add $nsr1" "create net namespace $nsr1"
 
-ip netns add ns1
-ip netns add ns2
-
-ip netns add nsr2
+ip netns add $ns1
+ip netns add $ns2
+ip netns add $nsr2
 
 cleanup() {
-       for i in 1 2; do
-               ip netns del ns$i
-               ip netns del nsr$i
-       done
+       ip netns del $ns1
+       ip netns del $ns2
+       ip netns del $nsr1
+       ip netns del $nsr2
 
-       rm -f "$ns1in" "$ns1out"
-       rm -f "$ns2in" "$ns2out"
+       rm -f "$nsin" "$ns1out" "$ns2out"
 
        [ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
 }
@@ -59,22 +61,21 @@ trap cleanup EXIT
 
 sysctl -q net.netfilter.nf_log_all_netns=1
 
-ip link add veth0 netns nsr1 type veth peer name eth0 netns ns1
-ip link add veth1 netns nsr1 type veth peer name veth0 netns nsr2
+ip link add veth0 netns $nsr1 type veth peer name eth0 netns $ns1
+ip link add veth1 netns $nsr1 type veth peer name veth0 netns $nsr2
 
-ip link add veth1 netns nsr2 type veth peer name eth0 netns ns2
+ip link add veth1 netns $nsr2 type veth peer name eth0 netns $ns2
 
 for dev in lo veth0 veth1; do
-  for i in 1 2; do
-    ip -net nsr$i link set $dev up
-  done
+    ip -net $nsr1 link set $dev up
+    ip -net $nsr2 link set $dev up
 done
 
-ip -net nsr1 addr add 10.0.1.1/24 dev veth0
-ip -net nsr1 addr add dead:1::1/64 dev veth0
+ip -net $nsr1 addr add 10.0.1.1/24 dev veth0
+ip -net $nsr1 addr add dead:1::1/64 dev veth0
 
-ip -net nsr2 addr add 10.0.2.1/24 dev veth1
-ip -net nsr2 addr add dead:2::1/64 dev veth1
+ip -net $nsr2 addr add 10.0.2.1/24 dev veth1
+ip -net $nsr2 addr add dead:2::1/64 dev veth1
 
 # set different MTUs so we need to push packets coming from ns1 (large MTU)
 # to ns2 (smaller MTU) to stack either to perform fragmentation (ip_no_pmtu_disc=1),
@@ -106,85 +107,76 @@ do
        esac
 done
 
-if ! ip -net nsr1 link set veth0 mtu $omtu; then
+if ! ip -net $nsr1 link set veth0 mtu $omtu; then
        exit 1
 fi
 
-ip -net ns1 link set eth0 mtu $omtu
+ip -net $ns1 link set eth0 mtu $omtu
 
-if ! ip -net nsr2 link set veth1 mtu $rmtu; then
+if ! ip -net $nsr2 link set veth1 mtu $rmtu; then
        exit 1
 fi
 
-ip -net ns2 link set eth0 mtu $rmtu
+ip -net $ns2 link set eth0 mtu $rmtu
 
 # transfer-net between nsr1 and nsr2.
 # these addresses are not used for connections.
-ip -net nsr1 addr add 192.168.10.1/24 dev veth1
-ip -net nsr1 addr add fee1:2::1/64 dev veth1
-
-ip -net nsr2 addr add 192.168.10.2/24 dev veth0
-ip -net nsr2 addr add fee1:2::2/64 dev veth0
-
-for i in 1 2; do
-  ip netns exec nsr$i sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
-  ip netns exec nsr$i sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
-
-  ip -net ns$i link set lo up
-  ip -net ns$i link set eth0 up
-  ip -net ns$i addr add 10.0.$i.99/24 dev eth0
-  ip -net ns$i route add default via 10.0.$i.1
-  ip -net ns$i addr add dead:$i::99/64 dev eth0
-  ip -net ns$i route add default via dead:$i::1
-  if ! ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null; then
+ip -net $nsr1 addr add 192.168.10.1/24 dev veth1
+ip -net $nsr1 addr add fee1:2::1/64 dev veth1
+
+ip -net $nsr2 addr add 192.168.10.2/24 dev veth0
+ip -net $nsr2 addr add fee1:2::2/64 dev veth0
+
+for i in 0 1; do
+  ip netns exec $nsr1 sysctl net.ipv4.conf.veth$i.forwarding=1 > /dev/null
+  ip netns exec $nsr2 sysctl net.ipv4.conf.veth$i.forwarding=1 > /dev/null
+done
+
+for ns in $ns1 $ns2;do
+  ip -net $ns link set lo up
+  ip -net $ns link set eth0 up
+
+  if ! ip netns exec $ns sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null; then
        echo "ERROR: Check Originator/Responder values (problem during address addition)"
        exit 1
   fi
-
   # don't set ip DF bit for first two tests
-  ip netns exec ns$i sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null
+  ip netns exec $ns sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null
 done
 
-ip -net nsr1 route add default via 192.168.10.2
-ip -net nsr2 route add default via 192.168.10.1
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns2 addr add 10.0.2.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns2 route add default via 10.0.2.1
+ip -net $ns1 addr add dead:1::99/64 dev eth0
+ip -net $ns2 addr add dead:2::99/64 dev eth0
+ip -net $ns1 route add default via dead:1::1
+ip -net $ns2 route add default via dead:2::1
+
+ip -net $nsr1 route add default via 192.168.10.2
+ip -net $nsr2 route add default via 192.168.10.1
 
-ip netns exec nsr1 nft -f - <<EOF
+ip netns exec $nsr1 nft -f - <<EOF
 table inet filter {
   flowtable f1 {
      hook ingress priority 0
      devices = { veth0, veth1 }
    }
 
+   counter routed_orig { }
+   counter routed_repl { }
+
    chain forward {
       type filter hook forward priority 0; policy drop;
 
       # flow offloaded? Tag ct with mark 1, so we can detect when it fails.
-      meta oif "veth1" tcp dport 12345 flow offload @f1 counter
-
-      # use packet size to trigger 'should be offloaded by now'.
-      # otherwise, if 'flow offload' expression never offloads, the
-      # test will pass.
-      tcp dport 12345 meta length gt 200 ct mark set 1 counter
+      meta oif "veth1" tcp dport 12345 ct mark set 1 flow add @f1 counter name routed_orig accept
 
-      # this turns off flow offloading internally, so expect packets again
-      tcp flags fin,rst ct mark set 0 accept
-
-      # this allows large packets from responder, we need this as long
-      # as PMTUd is off.
-      # This rule is deleted for the last test, when we expect PMTUd
-      # to kick in and ensure all packets meet mtu requirements.
-      meta length gt $lmtu accept comment something-to-grep-for
-
-      # next line blocks connection w.o. working offload.
-      # we only do this for reverse dir, because we expect packets to
-      # enter slow path due to MTU mismatch of veth0 and veth1.
-      tcp sport 12345 ct mark 1 counter log prefix "mark failure " drop
+      # count packets supposedly offloaded as per direction.
+      ct mark 1 counter name ct direction map { original : routed_orig, reply : routed_repl } accept
 
       ct state established,related accept
 
-      # for packets that we can't offload yet, i.e. SYN (any ct that is not confirmed)
-      meta length lt 200 oif "veth1" tcp dport 12345 counter accept
-
       meta nfproto ipv4 meta l4proto icmp accept
       meta nfproto ipv6 meta l4proto icmpv6 accept
    }
@@ -197,30 +189,30 @@ if [ $? -ne 0 ]; then
 fi
 
 # test basic connectivity
-if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
-  echo "ERROR: ns1 cannot reach ns2" 1>&2
+if ! ip netns exec $ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
+  echo "ERROR: $ns1 cannot reach ns2" 1>&2
   exit 1
 fi
 
-if ! ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
-  echo "ERROR: ns2 cannot reach ns1" 1>&2
+if ! ip netns exec $ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
+  echo "ERROR: $ns2 cannot reach $ns1" 1>&2
   exit 1
 fi
 
 if [ $ret -eq 0 ];then
-       echo "PASS: netns routing/connectivity: ns1 can reach ns2"
+       echo "PASS: netns routing/connectivity: $ns1 can reach $ns2"
 fi
 
-ns1in=$(mktemp)
+nsin=$(mktemp)
 ns1out=$(mktemp)
-ns2in=$(mktemp)
 ns2out=$(mktemp)
 
 make_file()
 {
        name=$1
 
-       SIZE=$((RANDOM % (1024 * 8)))
+       SIZE=$((RANDOM % (1024 * 128)))
+       SIZE=$((SIZE + (1024 * 8)))
        TSIZE=$((SIZE * 1024))
 
        dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null
@@ -231,6 +223,38 @@ make_file()
        dd if=/dev/urandom conf=notrunc of="$name" bs=1 count=$SIZE 2> /dev/null
 }
 
+check_counters()
+{
+       local what=$1
+       local ok=1
+
+       local orig=$(ip netns exec $nsr1 nft reset counter inet filter routed_orig | grep packets)
+       local repl=$(ip netns exec $nsr1 nft reset counter inet filter routed_repl | grep packets)
+
+       local orig_cnt=${orig#*bytes}
+       local repl_cnt=${repl#*bytes}
+
+       local fs=$(du -sb $nsin)
+       local max_orig=${fs%%/*}
+       local max_repl=$((max_orig/4))
+
+       if [ $orig_cnt -gt $max_orig ];then
+               echo "FAIL: $what: original counter $orig_cnt exceeds expected value $max_orig" 1>&2
+               ret=1
+               ok=0
+       fi
+
+       if [ $repl_cnt -gt $max_repl ];then
+               echo "FAIL: $what: reply counter $repl_cnt exceeds expected value $max_repl" 1>&2
+               ret=1
+               ok=0
+       fi
+
+       if [ $ok -eq 1 ]; then
+               echo "PASS: $what"
+       fi
+}
+
 check_transfer()
 {
        in=$1
@@ -255,11 +279,11 @@ test_tcp_forwarding_ip()
        local dstport=$4
        local lret=0
 
-       ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" &
+       ip netns exec $nsb nc -w 5 -l -p 12345 < "$nsin" > "$ns2out" &
        lpid=$!
 
        sleep 1
-       ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$ns1in" > "$ns1out" &
+       ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$nsin" > "$ns1out" &
        cpid=$!
 
        sleep 3
@@ -274,11 +298,11 @@ test_tcp_forwarding_ip()
 
        wait
 
-       if ! check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"; then
+       if ! check_transfer "$nsin" "$ns2out" "ns1 -> ns2"; then
                lret=1
        fi
 
-       if ! check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"; then
+       if ! check_transfer "$nsin" "$ns1out" "ns1 <- ns2"; then
                lret=1
        fi
 
@@ -295,41 +319,59 @@ test_tcp_forwarding()
 test_tcp_forwarding_nat()
 {
        local lret
+       local pmtu
 
        test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
        lret=$?
 
+       pmtu=$3
+       what=$4
+
        if [ $lret -eq 0 ] ; then
+               if [ $pmtu -eq 1 ] ;then
+                       check_counters "flow offload for ns1/ns2 with masquerade and pmtu discovery $what"
+               else
+                       echo "PASS: flow offload for ns1/ns2 with masquerade $what"
+               fi
+
                test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666
                lret=$?
+               if [ $pmtu -eq 1 ] ;then
+                       check_counters "flow offload for ns1/ns2 with dnat and pmtu discovery $what"
+               elif [ $lret -eq 0 ] ; then
+                       echo "PASS: flow offload for ns1/ns2 with dnat $what"
+               fi
        fi
 
        return $lret
 }
 
-make_file "$ns1in"
-make_file "$ns2in"
+make_file "$nsin"
 
 # First test:
 # No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
-if test_tcp_forwarding ns1 ns2; then
+# Due to MTU mismatch in both directions, all packets (except small packets like pure
+# acks) have to be handled by normal forwarding path.  Therefore, packet counters
+# are not checked.
+if test_tcp_forwarding $ns1 $ns2; then
        echo "PASS: flow offloaded for ns1/ns2"
 else
        echo "FAIL: flow offload for ns1/ns2:" 1>&2
-       ip netns exec nsr1 nft list ruleset
+       ip netns exec $nsr1 nft list ruleset
        ret=1
 fi
 
 # delete default route, i.e. ns2 won't be able to reach ns1 and
 # will depend on ns1 being masqueraded in nsr1.
 # expect ns1 has nsr1 address.
-ip -net ns2 route del default via 10.0.2.1
-ip -net ns2 route del default via dead:2::1
-ip -net ns2 route add 192.168.10.1 via 10.0.2.1
+ip -net $ns2 route del default via 10.0.2.1
+ip -net $ns2 route del default via dead:2::1
+ip -net $ns2 route add 192.168.10.1 via 10.0.2.1
 
 # Second test:
-# Same, but with NAT enabled.
-ip netns exec nsr1 nft -f - <<EOF
+# Same, but with NAT enabled.  Same as in first test: we expect normal forward path
+# to handle most packets.
+ip netns exec $nsr1 nft -f - <<EOF
 table ip nat {
    chain prerouting {
       type nat hook prerouting priority 0; policy accept;
@@ -343,47 +385,45 @@ table ip nat {
 }
 EOF
 
-if test_tcp_forwarding_nat ns1 ns2; then
-       echo "PASS: flow offloaded for ns1/ns2 with NAT"
-else
+if ! test_tcp_forwarding_nat $ns1 $ns2 0 ""; then
        echo "FAIL: flow offload for ns1/ns2 with NAT" 1>&2
-       ip netns exec nsr1 nft list ruleset
+       ip netns exec $nsr1 nft list ruleset
        ret=1
 fi
 
 # Third test:
-# Same as second test, but with PMTU discovery enabled.
-handle=$(ip netns exec nsr1 nft -a list table inet filter | grep something-to-grep-for | cut -d \# -f 2)
-
-if ! ip netns exec nsr1 nft delete rule inet filter forward $handle; then
-       echo "FAIL: Could not delete large-packet accept rule"
-       exit 1
-fi
-
-ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
-ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
-
-if test_tcp_forwarding_nat ns1 ns2; then
-       echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
-else
+# Same as second test, but with PMTU discovery enabled. This
+# means that we expect the fastpath to handle packets as soon
+# as the endpoints adjust the packet size.
+ip netns exec $ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
+ip netns exec $ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
+
+# reset counters.
+# With pmtu in-place we'll also check that nft counters
+# are lower than file size and packets were forwarded via flowtable layer.
+# For earlier tests (large mtus), packets cannot be handled via flowtable
+# (except pure acks and other small packets).
+ip netns exec $nsr1 nft reset counters table inet filter >/dev/null
+
+if ! test_tcp_forwarding_nat $ns1 $ns2 1 ""; then
        echo "FAIL: flow offload for ns1/ns2 with NAT and pmtu discovery" 1>&2
-       ip netns exec nsr1 nft list ruleset
+       ip netns exec $nsr1 nft list ruleset
 fi
 
 # Another test:
 # Add bridge interface br0 to Router1, with NAT enabled.
-ip -net nsr1 link add name br0 type bridge
-ip -net nsr1 addr flush dev veth0
-ip -net nsr1 link set up dev veth0
-ip -net nsr1 link set veth0 master br0
-ip -net nsr1 addr add 10.0.1.1/24 dev br0
-ip -net nsr1 addr add dead:1::1/64 dev br0
-ip -net nsr1 link set up dev br0
+ip -net $nsr1 link add name br0 type bridge
+ip -net $nsr1 addr flush dev veth0
+ip -net $nsr1 link set up dev veth0
+ip -net $nsr1 link set veth0 master br0
+ip -net $nsr1 addr add 10.0.1.1/24 dev br0
+ip -net $nsr1 addr add dead:1::1/64 dev br0
+ip -net $nsr1 link set up dev br0
 
-ip netns exec nsr1 sysctl net.ipv4.conf.br0.forwarding=1 > /dev/null
+ip netns exec $nsr1 sysctl net.ipv4.conf.br0.forwarding=1 > /dev/null
 
 # br0 with NAT enabled.
-ip netns exec nsr1 nft -f - <<EOF
+ip netns exec $nsr1 nft -f - <<EOF
 flush table ip nat
 table ip nat {
    chain prerouting {
@@ -398,59 +438,56 @@ table ip nat {
 }
 EOF
 
-if test_tcp_forwarding_nat ns1 ns2; then
-       echo "PASS: flow offloaded for ns1/ns2 with bridge NAT"
-else
+if ! test_tcp_forwarding_nat $ns1 $ns2 1 "on bridge"; then
        echo "FAIL: flow offload for ns1/ns2 with bridge NAT" 1>&2
-       ip netns exec nsr1 nft list ruleset
+       ip netns exec $nsr1 nft list ruleset
        ret=1
 fi
 
+
 # Another test:
 # Add bridge interface br0 to Router1, with NAT and VLAN.
-ip -net nsr1 link set veth0 nomaster
-ip -net nsr1 link set down dev veth0
-ip -net nsr1 link add link veth0 name veth0.10 type vlan id 10
-ip -net nsr1 link set up dev veth0
-ip -net nsr1 link set up dev veth0.10
-ip -net nsr1 link set veth0.10 master br0
-
-ip -net ns1 addr flush dev eth0
-ip -net ns1 link add link eth0 name eth0.10 type vlan id 10
-ip -net ns1 link set eth0 up
-ip -net ns1 link set eth0.10 up
-ip -net ns1 addr add 10.0.1.99/24 dev eth0.10
-ip -net ns1 route add default via 10.0.1.1
-ip -net ns1 addr add dead:1::99/64 dev eth0.10
-
-if test_tcp_forwarding_nat ns1 ns2; then
-       echo "PASS: flow offloaded for ns1/ns2 with bridge NAT and VLAN"
-else
+ip -net $nsr1 link set veth0 nomaster
+ip -net $nsr1 link set down dev veth0
+ip -net $nsr1 link add link veth0 name veth0.10 type vlan id 10
+ip -net $nsr1 link set up dev veth0
+ip -net $nsr1 link set up dev veth0.10
+ip -net $nsr1 link set veth0.10 master br0
+
+ip -net $ns1 addr flush dev eth0
+ip -net $ns1 link add link eth0 name eth0.10 type vlan id 10
+ip -net $ns1 link set eth0 up
+ip -net $ns1 link set eth0.10 up
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0.10
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns1 addr add dead:1::99/64 dev eth0.10
+
+if ! test_tcp_forwarding_nat $ns1 $ns2 1 "bridge and VLAN"; then
        echo "FAIL: flow offload for ns1/ns2 with bridge NAT and VLAN" 1>&2
-       ip netns exec nsr1 nft list ruleset
+       ip netns exec $nsr1 nft list ruleset
        ret=1
 fi
 
 # restore test topology (remove bridge and VLAN)
-ip -net nsr1 link set veth0 nomaster
-ip -net nsr1 link set veth0 down
-ip -net nsr1 link set veth0.10 down
-ip -net nsr1 link delete veth0.10 type vlan
-ip -net nsr1 link delete br0 type bridge
-ip -net ns1 addr flush dev eth0.10
-ip -net ns1 link set eth0.10 down
-ip -net ns1 link set eth0 down
-ip -net ns1 link delete eth0.10 type vlan
+ip -net $nsr1 link set veth0 nomaster
+ip -net $nsr1 link set veth0 down
+ip -net $nsr1 link set veth0.10 down
+ip -net $nsr1 link delete veth0.10 type vlan
+ip -net $nsr1 link delete br0 type bridge
+ip -net $ns1 addr flush dev eth0.10
+ip -net $ns1 link set eth0.10 down
+ip -net $ns1 link set eth0 down
+ip -net $ns1 link delete eth0.10 type vlan
 
 # restore address in ns1 and nsr1
-ip -net ns1 link set eth0 up
-ip -net ns1 addr add 10.0.1.99/24 dev eth0
-ip -net ns1 route add default via 10.0.1.1
-ip -net ns1 addr add dead:1::99/64 dev eth0
-ip -net ns1 route add default via dead:1::1
-ip -net nsr1 addr add 10.0.1.1/24 dev veth0
-ip -net nsr1 addr add dead:1::1/64 dev veth0
-ip -net nsr1 link set up dev veth0
+ip -net $ns1 link set eth0 up
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns1 addr add dead:1::99/64 dev eth0
+ip -net $ns1 route add default via dead:1::1
+ip -net $nsr1 addr add 10.0.1.1/24 dev veth0
+ip -net $nsr1 addr add dead:1::1/64 dev veth0
+ip -net $nsr1 link set up dev veth0
 
 KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
 KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
@@ -480,23 +517,23 @@ do_esp() {
 
 }
 
-do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
+do_esp $nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
 
-do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
+do_esp $nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
 
-ip netns exec nsr1 nft delete table ip nat
+ip netns exec $nsr1 nft delete table ip nat
 
 # restore default routes
-ip -net ns2 route del 192.168.10.1 via 10.0.2.1
-ip -net ns2 route add default via 10.0.2.1
-ip -net ns2 route add default via dead:2::1
+ip -net $ns2 route del 192.168.10.1 via 10.0.2.1
+ip -net $ns2 route add default via 10.0.2.1
+ip -net $ns2 route add default via dead:2::1
 
-if test_tcp_forwarding ns1 ns2; then
-       echo "PASS: ipsec tunnel mode for ns1/ns2"
+if test_tcp_forwarding $ns1 $ns2; then
+       check_counters "ipsec tunnel mode for ns1/ns2"
 else
        echo "FAIL: ipsec tunnel mode for ns1/ns2"
-       ip netns exec nsr1 nft list ruleset 1>&2
-       ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2
+       ip netns exec $nsr1 nft list ruleset 1>&2
+       ip netns exec $nsr1 cat /proc/net/xfrm_stat 1>&2
 fi
 
 exit $ret
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore b/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore
new file mode 100644 (file)
index 0000000..5710683
--- /dev/null
@@ -0,0 +1,20 @@
+blacklisted_events_test
+event_alternatives_tests_p10
+event_alternatives_tests_p9
+generic_events_valid_test
+group_constraint_cache_test
+group_constraint_l2l3_sel_test
+group_constraint_mmcra_sample_test
+group_constraint_pmc56_test
+group_constraint_pmc_count_test
+group_constraint_radix_scope_qual_test
+group_constraint_repeat_test
+group_constraint_thresh_cmp_test
+group_constraint_thresh_ctl_test
+group_constraint_thresh_sel_test
+group_constraint_unit_test
+group_pmc56_exclude_constraints_test
+hw_cache_event_type_test
+invalid_event_code_test
+reserved_bits_mmcra_sample_elig_mode_test
+reserved_bits_mmcra_thresh_ctl_test
index 0fce5a694684b07c54e71cc7b613d48a97bdd404..f93b4c7c3a8ad5401551350c939f0180a2e054ba 100644 (file)
@@ -1,11 +1,21 @@
-mmcr0_exceptionbits_test
+bhrb_filter_map_test
+bhrb_no_crash_wo_pmu_test
+intr_regs_no_crash_wo_pmu_test
 mmcr0_cc56run_test
-mmcr0_pmccext_test
-mmcr0_pmcjce_test
+mmcr0_exceptionbits_test
 mmcr0_fc56_pmc1ce_test
 mmcr0_fc56_pmc56_test
+mmcr0_pmccext_test
+mmcr0_pmcjce_test
 mmcr1_comb_test
-mmcr2_l2l3_test
+mmcr1_sel_unit_cache_test
 mmcr2_fcs_fch_test
+mmcr2_l2l3_test
 mmcr3_src_test
+mmcra_bhrb_any_test
+mmcra_bhrb_cond_test
+mmcra_bhrb_disable_no_branch_test
+mmcra_bhrb_disable_test
+mmcra_bhrb_ind_call_test
+mmcra_thresh_cmp_test
 mmcra_thresh_marked_sample_test
index 50c5ab1aa6fa1a3918546e84df23682c9f846153..a07896a463643d3bd8e6e29c1dfc7f6b44e5f49e 100644 (file)
 #include "defines.h"
 #include "main.h"
 
+/*
+ * FIXME: OpenSSL 3.0 has deprecated some functions. For now just ignore
+ * the warnings.
+ */
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
 struct q1q2_ctx {
        BN_CTX *bn_ctx;
        BIGNUM *m;
index 1bea2d16d4c115ff46134c2266e07267dd759055..22e28b76f80048f12302d034576a85e1a38f912a 100644 (file)
@@ -30,8 +30,8 @@ WOPTS :=      -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_A
 
 TRACEFS_HEADERS        := $$($(PKG_CONFIG) --cflags libtracefs)
 
-CFLAGS :=      -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS)
-LDFLAGS        :=      -ggdb
+CFLAGS :=      -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS)
+LDFLAGS        :=      -ggdb $(EXTRA_LDFLAGS)
 LIBS   :=      $$($(PKG_CONFIG) --libs libtracefs)
 
 SRC    :=      $(wildcard src/*.c)
@@ -61,40 +61,50 @@ endif
 LIBTRACEEVENT_MIN_VERSION = 1.5
 LIBTRACEFS_MIN_VERSION = 1.3
 
+.PHONY:        all warnings show_warnings
+all:   warnings rtla
+
 TEST_LIBTRACEEVENT = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEEVENT_MIN_VERSION) libtraceevent > /dev/null 2>&1 || echo n")
 ifeq ("$(TEST_LIBTRACEEVENT)", "n")
-.PHONY: warning_traceevent
-warning_traceevent:
-       @echo "********************************************"
-       @echo "** NOTICE: libtraceevent version $(LIBTRACEEVENT_MIN_VERSION) or higher not found"
-       @echo "**"
-       @echo "** Consider installing the latest libtraceevent from your"
-       @echo "** distribution, e.g., 'dnf install libtraceevent' on Fedora,"
-       @echo "** or from source:"
-       @echo "**"
-       @echo "**  https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/ "
-       @echo "**"
-       @echo "********************************************"
+WARNINGS = show_warnings
+MISSING_LIBS += echo "**   libtraceevent version $(LIBTRACEEVENT_MIN_VERSION) or higher";
+MISSING_PACKAGES += "libtraceevent-devel"
+MISSING_SOURCE += echo "**  https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/ ";
 endif
 
 TEST_LIBTRACEFS = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEFS_MIN_VERSION) libtracefs > /dev/null 2>&1 || echo n")
 ifeq ("$(TEST_LIBTRACEFS)", "n")
-.PHONY: warning_tracefs
-warning_tracefs:
-       @echo "********************************************"
-       @echo "** NOTICE: libtracefs version $(LIBTRACEFS_MIN_VERSION) or higher not found"
-       @echo "**"
-       @echo "** Consider installing the latest libtracefs from your"
-       @echo "** distribution, e.g., 'dnf install libtracefs' on Fedora,"
-       @echo "** or from source:"
-       @echo "**"
-       @echo "**  https://git.kernel.org/pub/scm/libs/libtrace/libtracefs.git/ "
-       @echo "**"
-       @echo "********************************************"
+WARNINGS = show_warnings
+MISSING_LIBS += echo "**   libtracefs version $(LIBTRACEFS_MIN_VERSION) or higher";
+MISSING_PACKAGES += "libtracefs-devel"
+MISSING_SOURCE += echo "**  https://git.kernel.org/pub/scm/libs/libtrace/libtracefs.git/ ";
 endif
 
-.PHONY:        all
-all:   rtla
+define show_dependencies
+       @echo "********************************************";                           \
+       echo "** NOTICE: Failed build dependencies";                                    \
+       echo "**";                                                                      \
+       echo "** Required Libraries:";                                                  \
+       $(MISSING_LIBS)                                                                 \
+       echo "**";                                                                      \
+       echo "** Consider installing the latest libtracefs from your";                  \
+       echo "** distribution, e.g., 'dnf install $(MISSING_PACKAGES)' on Fedora,";     \
+       echo "** or from source:";                                                      \
+       echo "**";                                                                      \
+       $(MISSING_SOURCE)                                                               \
+       echo "**";                                                                      \
+       echo "********************************************"
+endef
+
+show_warnings:
+       $(call show_dependencies);
+
+ifneq ("$(WARNINGS)", "")
+ERROR_OUT = $(error Please add the necessary dependencies)
+
+warnings: $(WARNINGS)
+       $(ERROR_OUT)
+endif
 
 rtla: $(OBJ)
        $(CC) -o rtla $(LDFLAGS) $(OBJ) $(LIBS)
@@ -108,9 +118,9 @@ install: doc_install
        $(INSTALL) rtla -m 755 $(DESTDIR)$(BINDIR)
        $(STRIP) $(DESTDIR)$(BINDIR)/rtla
        @test ! -f $(DESTDIR)$(BINDIR)/osnoise || rm $(DESTDIR)$(BINDIR)/osnoise
-       ln -s $(DESTDIR)$(BINDIR)/rtla $(DESTDIR)$(BINDIR)/osnoise
+       ln -s rtla $(DESTDIR)$(BINDIR)/osnoise
        @test ! -f $(DESTDIR)$(BINDIR)/timerlat || rm $(DESTDIR)$(BINDIR)/timerlat
-       ln -s $(DESTDIR)$(BINDIR)/rtla $(DESTDIR)$(BINDIR)/timerlat
+       ln -s rtla $(DESTDIR)$(BINDIR)/timerlat
 
 .PHONY: clean tarball
 clean: doc_clean
index f3ec628f5e5196ffb47461d0234aa680a4982f68..4b48af8a8309614f94451e5597a7d7f686f7981d 100644 (file)
@@ -892,7 +892,7 @@ int timerlat_hist_main(int argc, char *argv[])
        return_value = 0;
 
        if (trace_is_off(&tool->trace, &record->trace)) {
-               printf("rtla timelat hit stop tracing\n");
+               printf("rtla timerlat hit stop tracing\n");
                if (params->trace_output) {
                        printf("  Saving trace to %s\n", params->trace_output);
                        save_trace_to_file(record->trace.inst, params->trace_output);
index 35452a1d45e9fbff6a97b3c3de4b71842661748b..3342719352222e0911be9c914ca339e2121b6691 100644 (file)
@@ -687,7 +687,7 @@ int timerlat_top_main(int argc, char *argv[])
        return_value = 0;
 
        if (trace_is_off(&top->trace, &record->trace)) {
-               printf("rtla timelat hit stop tracing\n");
+               printf("rtla timerlat hit stop tracing\n");
                if (params->trace_output) {
                        printf("  Saving trace to %s\n", params->trace_output);
                        save_trace_to_file(record->trace.inst, params->trace_output);
index 515dfe9d3bcfb09b091f6af83dae7d76210f60dd..584a5bab3af395e392b4f4d83f37c75c25859bdb 100644 (file)
@@ -702,30 +702,31 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 
        /*
         * .change_pte() must be surrounded by .invalidate_range_{start,end}().
-        * If mmu_notifier_count is zero, then no in-progress invalidations,
-        * including this one, found a relevant memslot at start(); rechecking
-        * memslots here is unnecessary.  Note, a false positive (count elevated
-        * by a different invalidation) is sub-optimal but functionally ok.
+        * If mmu_invalidate_in_progress is zero, then no in-progress
+        * invalidations, including this one, found a relevant memslot at
+        * start(); rechecking memslots here is unnecessary.  Note, a false
+        * positive (count elevated by a different invalidation) is sub-optimal
+        * but functionally ok.
         */
        WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
-       if (!READ_ONCE(kvm->mmu_notifier_count))
+       if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
                return;
 
        kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
 }
 
-void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
-                                  unsigned long end)
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+                             unsigned long end)
 {
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
-       kvm->mmu_notifier_count++;
-       if (likely(kvm->mmu_notifier_count == 1)) {
-               kvm->mmu_notifier_range_start = start;
-               kvm->mmu_notifier_range_end = end;
+       kvm->mmu_invalidate_in_progress++;
+       if (likely(kvm->mmu_invalidate_in_progress == 1)) {
+               kvm->mmu_invalidate_range_start = start;
+               kvm->mmu_invalidate_range_end = end;
        } else {
                /*
                 * Fully tracking multiple concurrent ranges has diminishing
@@ -736,10 +737,10 @@ void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
                 * accumulate and persist until all outstanding invalidates
                 * complete.
                 */
-               kvm->mmu_notifier_range_start =
-                       min(kvm->mmu_notifier_range_start, start);
-               kvm->mmu_notifier_range_end =
-                       max(kvm->mmu_notifier_range_end, end);
+               kvm->mmu_invalidate_range_start =
+                       min(kvm->mmu_invalidate_range_start, start);
+               kvm->mmu_invalidate_range_end =
+                       max(kvm->mmu_invalidate_range_end, end);
        }
 }
 
@@ -752,7 +753,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                .end            = range->end,
                .pte            = __pte(0),
                .handler        = kvm_unmap_gfn_range,
-               .on_lock        = kvm_inc_notifier_count,
+               .on_lock        = kvm_mmu_invalidate_begin,
                .on_unlock      = kvm_arch_guest_memory_reclaimed,
                .flush_on_ret   = true,
                .may_block      = mmu_notifier_range_blockable(range),
@@ -763,7 +764,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        /*
         * Prevent memslot modification between range_start() and range_end()
         * so that conditionally locking provides the same result in both
-        * functions.  Without that guarantee, the mmu_notifier_count
+        * functions.  Without that guarantee, the mmu_invalidate_in_progress
         * adjustments will be imbalanced.
         *
         * Pairs with the decrement in range_end().
@@ -779,7 +780,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
         * any given time, and the caches themselves can check for hva overlap,
         * i.e. don't need to rely on memslot overlap checks for performance.
         * Because this runs without holding mmu_lock, the pfn caches must use
-        * mn_active_invalidate_count (see above) instead of mmu_notifier_count.
+        * mn_active_invalidate_count (see above) instead of
+        * mmu_invalidate_in_progress.
         */
        gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
                                          hva_range.may_block);
@@ -789,22 +791,22 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        return 0;
 }
 
-void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
-                                  unsigned long end)
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+                           unsigned long end)
 {
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
-       kvm->mmu_notifier_seq++;
+       kvm->mmu_invalidate_seq++;
        smp_wmb();
        /*
         * The above sequence increase must be visible before the
         * below count decrease, which is ensured by the smp_wmb above
-        * in conjunction with the smp_rmb in mmu_notifier_retry().
+        * in conjunction with the smp_rmb in mmu_invalidate_retry().
         */
-       kvm->mmu_notifier_count--;
+       kvm->mmu_invalidate_in_progress--;
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -816,7 +818,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                .end            = range->end,
                .pte            = __pte(0),
                .handler        = (void *)kvm_null_fn,
-               .on_lock        = kvm_dec_notifier_count,
+               .on_lock        = kvm_mmu_invalidate_end,
                .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = mmu_notifier_range_blockable(range),
@@ -837,7 +839,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
        if (wake)
                rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
 
-       BUG_ON(kvm->mmu_notifier_count < 0);
+       BUG_ON(kvm->mmu_invalidate_in_progress < 0);
 }
 
 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
@@ -1134,6 +1136,9 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
        if (!kvm)
                return ERR_PTR(-ENOMEM);
 
+       /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
+       __module_get(kvm_chardev_ops.owner);
+
        KVM_MMU_LOCK_INIT(kvm);
        mmgrab(current->mm);
        kvm->mm = current->mm;
@@ -1211,9 +1216,17 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
        if (r)
                goto out_err_no_mmu_notifier;
 
+       r = kvm_coalesced_mmio_init(kvm);
+       if (r < 0)
+               goto out_no_coalesced_mmio;
+
+       r = kvm_create_vm_debugfs(kvm, fdname);
+       if (r)
+               goto out_err_no_debugfs;
+
        r = kvm_arch_post_init_vm(kvm);
        if (r)
-               goto out_err_mmu_notifier;
+               goto out_err;
 
        mutex_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
@@ -1222,25 +1235,13 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
        preempt_notifier_inc();
        kvm_init_pm_notifier(kvm);
 
-       /*
-        * When the fd passed to this ioctl() is opened it pins the module,
-        * but try_module_get() also prevents getting a reference if the module
-        * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
-        */
-       if (!try_module_get(kvm_chardev_ops.owner)) {
-               r = -ENODEV;
-               goto out_err_mmu_notifier;
-       }
-
-       r = kvm_create_vm_debugfs(kvm, fdname);
-       if (r)
-               goto out_err;
-
        return kvm;
 
 out_err:
-       module_put(kvm_chardev_ops.owner);
-out_err_mmu_notifier:
+       kvm_destroy_vm_debugfs(kvm);
+out_err_no_debugfs:
+       kvm_coalesced_mmio_free(kvm);
+out_no_coalesced_mmio:
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        if (kvm->mmu_notifier.ops)
                mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
@@ -1259,6 +1260,7 @@ out_err_no_irq_srcu:
 out_err_no_srcu:
        kvm_arch_free_vm(kvm);
        mmdrop(current->mm);
+       module_put(kvm_chardev_ops.owner);
        return ERR_PTR(r);
 }
 
@@ -2516,7 +2518,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 {
        unsigned int flags = FOLL_HWPOISON;
        struct page *page;
-       int npages = 0;
+       int npages;
 
        might_sleep();
 
@@ -4378,7 +4380,7 @@ void kvm_unregister_device_ops(u32 type)
 static int kvm_ioctl_create_device(struct kvm *kvm,
                                   struct kvm_create_device *cd)
 {
-       const struct kvm_device_ops *ops = NULL;
+       const struct kvm_device_ops *ops;
        struct kvm_device *dev;
        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
        int type;
@@ -4913,11 +4915,6 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
                goto put_fd;
        }
 
-#ifdef CONFIG_KVM_MMIO
-       r = kvm_coalesced_mmio_init(kvm);
-       if (r < 0)
-               goto put_kvm;
-#endif
        file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
        if (IS_ERR(file)) {
                r = PTR_ERR(file);
index ab519f72f2cd030452fdb91e10319845d8fd74cd..68ff41d39545277c5f8b7143fbeea50d77743e0c 100644 (file)
@@ -112,27 +112,28 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
 {
        /*
         * mn_active_invalidate_count acts for all intents and purposes
-        * like mmu_notifier_count here; but the latter cannot be used
-        * here because the invalidation of caches in the mmu_notifier
-        * event occurs _before_ mmu_notifier_count is elevated.
+        * like mmu_invalidate_in_progress here; but the latter cannot
+        * be used here because the invalidation of caches in the
+        * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
+        * is elevated.
         *
         * Note, it does not matter that mn_active_invalidate_count
         * is not protected by gpc->lock.  It is guaranteed to
         * be elevated before the mmu_notifier acquires gpc->lock, and
-        * isn't dropped until after mmu_notifier_seq is updated.
+        * isn't dropped until after mmu_invalidate_seq is updated.
         */
        if (kvm->mn_active_invalidate_count)
                return true;
 
        /*
         * Ensure mn_active_invalidate_count is read before
-        * mmu_notifier_seq.  This pairs with the smp_wmb() in
+        * mmu_invalidate_seq.  This pairs with the smp_wmb() in
         * mmu_notifier_invalidate_range_end() to guarantee either the
         * old (non-zero) value of mn_active_invalidate_count or the
-        * new (incremented) value of mmu_notifier_seq is observed.
+        * new (incremented) value of mmu_invalidate_seq is observed.
         */
        smp_rmb();
-       return kvm->mmu_notifier_seq != mmu_seq;
+       return kvm->mmu_invalidate_seq != mmu_seq;
 }
 
 static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
@@ -155,7 +156,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
        gpc->valid = false;
 
        do {
-               mmu_seq = kvm->mmu_notifier_seq;
+               mmu_seq = kvm->mmu_invalidate_seq;
                smp_rmb();
 
                write_unlock_irq(&gpc->lock);