git.ipfire.org Git - thirdparty/linux.git/commitdiff
Merge tag 'arc-6.7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 19 Dec 2023 20:19:25 +0000 (12:19 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 19 Dec 2023 20:19:25 +0000 (12:19 -0800)
Pull ARC fixes from Vineet Gupta:

 - build error for hugetlb, sparse and smatch fixes

 - removal of VIPT aliasing cache code

* tag 'arc-6.7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: add hugetlb definitions
  ARC: fix smatch warning
  ARC: fix spare error
  ARC: mm: retire support for aliasing VIPT D$
  ARC: entry: move ARCompact specific bits out of entry.h
  ARC: entry: SAVE_ABI_CALLEE_REG: ISA/ABI specific helper

649 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/sysfs-bus-optee-devices
Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
Documentation/devicetree/bindings/display/fsl,lcdif.yaml
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml
Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
Documentation/devicetree/bindings/perf/riscv,pmu.yaml
Documentation/devicetree/bindings/pwm/imx-pwm.yaml
Documentation/devicetree/bindings/soc/rockchip/grf.yaml
Documentation/filesystems/fuse-io.rst
Documentation/networking/tcp_ao.rst
Documentation/trace/coresight/coresight.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts
arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
arch/arm/boot/dts/nxp/imx/imx7s.dtsi
arch/arm/boot/dts/nxp/mxs/imx28-xea.dts
arch/arm/boot/dts/rockchip/rk3128.dtsi
arch/arm/boot/dts/rockchip/rk322x.dtsi
arch/arm/include/asm/kexec.h
arch/arm/kernel/Makefile
arch/arm/mach-imx/mmdc.c
arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
arch/arm64/boot/dts/freescale/imx8ulp.dtsi
arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
arch/arm64/boot/dts/freescale/imx93.dtsi
arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
arch/arm64/boot/dts/mediatek/mt7986a.dtsi
arch/arm64/boot/dts/mediatek/mt8173-evb.dts
arch/arm64/boot/dts/mediatek/mt8183-evb.dts
arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
arch/arm64/boot/dts/mediatek/mt8183.dtsi
arch/arm64/boot/dts/mediatek/mt8186.dtsi
arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
arch/arm64/boot/dts/mediatek/mt8195.dtsi
arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/boot/dts/rockchip/rk356x.dtsi
arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
arch/arm64/boot/dts/rockchip/rk3588s.dtsi
arch/arm64/include/asm/pgtable.h
arch/arm64/kvm/vgic/vgic-v4.c
arch/loongarch/Makefile
arch/loongarch/include/asm/efi.h
arch/loongarch/include/asm/elf.h
arch/loongarch/include/asm/loongarch.h
arch/loongarch/kernel/Makefile
arch/loongarch/kernel/stacktrace.c
arch/loongarch/kernel/unwind.c
arch/loongarch/kernel/unwind_prologue.c
arch/loongarch/net/bpf_jit.c
arch/m68k/include/asm/kexec.h
arch/m68k/kernel/Makefile
arch/mips/Kconfig
arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
arch/mips/boot/dts/loongson/ls7a-pch.dtsi
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/kexec.h
arch/mips/include/asm/mach-loongson64/boot_param.h
arch/mips/include/asm/smp-ops.h
arch/mips/include/asm/smp.h
arch/mips/kernel/Makefile
arch/mips/kernel/process.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/smp.c
arch/mips/loongson64/env.c
arch/mips/loongson64/init.c
arch/mips/loongson64/reset.c
arch/mips/loongson64/smp.c
arch/parisc/include/asm/bug.h
arch/powerpc/configs/skiroot_defconfig
arch/powerpc/kernel/trace/ftrace_entry.S
arch/powerpc/platforms/pseries/vas.c
arch/powerpc/platforms/pseries/vas.h
arch/riscv/Kconfig
arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts
arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts
arch/riscv/boot/dts/microchip/mpfs-tysom-m.dts
arch/riscv/boot/dts/microchip/mpfs.dtsi
arch/riscv/boot/dts/sophgo/cv1800b.dtsi
arch/riscv/errata/andes/errata.c
arch/riscv/include/asm/pgtable.h
arch/riscv/kernel/crash_core.c
arch/riscv/kernel/head.S
arch/riscv/kernel/module.c
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/tests/module_test/test_uleb128.S
arch/riscv/kernel/traps_misaligned.c
arch/s390/configs/debug_defconfig
arch/s390/kvm/vsie.c
arch/s390/mm/pgtable.c
arch/sh/include/asm/kexec.h
arch/sh/kernel/Makefile
arch/sh/kernel/reboot.c
arch/sh/kernel/setup.c
arch/x86/boot/compressed/acpi.c
arch/x86/coco/tdx/tdx.c
arch/x86/entry/common.c
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/ia32.h
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/proto.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/idt.c
arch/x86/kernel/sev.c
arch/x86/kvm/debugfs.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/x86.c
arch/x86/mm/mem_encrypt_amd.c
arch/x86/net/bpf_jit_comp.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/xen-asm.S
drivers/accel/ivpu/ivpu_hw_37xx.c
drivers/acpi/utils.c
drivers/atm/solos-pci.c
drivers/base/cpu.c
drivers/base/devcoredump.c
drivers/base/memory.c
drivers/base/regmap/regcache.c
drivers/clk/qcom/Kconfig
drivers/clk/rockchip/clk-rk3128.c
drivers/clk/rockchip/clk-rk3568.c
drivers/cxl/core/hdm.c
drivers/cxl/core/memdev.c
drivers/cxl/core/pci.c
drivers/cxl/core/pmu.c
drivers/cxl/core/port.c
drivers/cxl/core/region.c
drivers/dma/fsl-edma-common.c
drivers/dma/fsl-edma-main.c
drivers/dma/idxd/registers.h
drivers/dma/idxd/submit.c
drivers/dma/stm32-dma.c
drivers/dma/ti/k3-psil-am62.c
drivers/dma/ti/k3-psil-am62a.c
drivers/dpll/dpll_netlink.c
drivers/edac/versal_edac.c
drivers/firmware/arm_ffa/driver.c
drivers/firmware/arm_scmi/perf.c
drivers/firmware/efi/libstub/loongarch-stub.c
drivers/firmware/efi/libstub/loongarch.c
drivers/firmware/efi/libstub/x86-stub.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_crt.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dsb.c
drivers/gpu/drm/i915/display/intel_dvo.c
drivers/gpu/drm/i915/display/intel_fb.c
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_lvds.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/intel_tv.c
drivers/gpu/drm/i915/display/skl_scaler.c
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/selftests/igt_live_test.c
drivers/gpu/drm/i915/selftests/igt_live_test.h
drivers/gpu/drm/mediatek/mtk_disp_gamma.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
drivers/gpu/drm/panfrost/panfrost_devfreq.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/greybus/Kconfig
drivers/hid/hid-apple.c
drivers/hid/hid-ids.h
drivers/hid/hid-lenovo.c
drivers/hid/hid-nintendo.c
drivers/hid/hid-quirks.c
drivers/hid/i2c-hid/i2c-hid-acpi.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/corsair-psu.c
drivers/hwmon/ltc2991.c
drivers/hwmon/max31827.c
drivers/hwmon/nzxt-kraken2.c
drivers/hwtracing/coresight/coresight-etm-perf.c
drivers/hwtracing/coresight/coresight-etm4x-core.c
drivers/hwtracing/coresight/ultrasoc-smb.c
drivers/hwtracing/coresight/ultrasoc-smb.h
drivers/hwtracing/ptt/hisi_ptt.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/main.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/irdma/verbs.h
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/infiniband/ulp/rtrs/rtrs-srv.c
drivers/iommu/iommufd/device.c
drivers/iommu/iommufd/hw_pagetable.c
drivers/iommu/iommufd/ioas.c
drivers/iommu/iommufd/iommufd_private.h
drivers/iommu/iommufd/main.c
drivers/iommu/iommufd/selftest.c
drivers/iommu/iommufd/vfio_compat.c
drivers/leds/trigger/ledtrig-netdev.c
drivers/md/md.c
drivers/md/raid5.c
drivers/misc/mei/client.c
drivers/misc/mei/pxp/mei_pxp.c
drivers/net/arcnet/arcdevice.h
drivers/net/arcnet/com20020-pci.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mv88e6xxx/pcs-639x.c
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_enet.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_fdir.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_txrx.h
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
drivers/net/ethernet/marvell/octeontx2/af/rpm.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/pensando/ionic/ionic_dev.h
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qualcomm/qca_debug.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/hyperv/Kconfig
drivers/net/team/team.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/Kconfig
drivers/nvme/target/configfs.c
drivers/nvmem/core.c
drivers/of/dynamic.c
drivers/parport/parport_pc.c
drivers/pci/controller/dwc/pcie-qcom.c
drivers/pci/controller/pci-loongson.c
drivers/pci/controller/vmd.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pcie/aspm.c
drivers/perf/arm-cmn.c
drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
drivers/phy/sunplus/phy-sunplus-usb2.c
drivers/phy/ti/phy-gmii-sel.c
drivers/platform/mellanox/mlxbf-bootctl.c
drivers/platform/mellanox/mlxbf-pmc.c
drivers/platform/surface/aggregator/core.c
drivers/platform/x86/Kconfig
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/asus-wmi.h
drivers/platform/x86/intel/vbtn.c
drivers/platform/x86/intel_ips.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/wmi.c
drivers/powercap/dtpm_cpu.c
drivers/pwm/pwm-bcm2835.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aacraid/src.c
drivers/scsi/be2iscsi/be_main.c
drivers/soundwire/intel_ace2x.c
drivers/soundwire/stream.c
drivers/spi/spi-atmel.c
drivers/spi/spi-cadence.c
drivers/spi/spi-imx.c
drivers/tee/optee/device.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/ma35d1_serial.c
drivers/tty/serial/sc16is7xx.c
drivers/ufs/host/ufshcd-pltfrm.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/udc/core.c
drivers/usb/host/xhci-pci.c
drivers/usb/typec/class.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/pds/debugfs.c
drivers/vdpa/pds/vdpa_dev.c
fs/Kconfig
fs/afs/rxrpc.c
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_io.h
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_update.c
fs/bcachefs/btree_update_interior.c
fs/bcachefs/data_update.c
fs/bcachefs/dirent.c
fs/bcachefs/dirent.h
fs/bcachefs/extents.c
fs/bcachefs/fs-ioctl.c
fs/bcachefs/fs.c
fs/bcachefs/inode.c
fs/bcachefs/journal.c
fs/bcachefs/journal.h
fs/bcachefs/journal_io.c
fs/bcachefs/journal_reclaim.c
fs/bcachefs/recovery.c
fs/bcachefs/reflink.c
fs/bcachefs/sysfs.c
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/debugfs/file.c
fs/debugfs/inode.c
fs/debugfs/internal.h
fs/ext4/file.c
fs/ext4/mballoc.c
fs/fuse/dax.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/nfsd/auth.c
fs/nfsd/nfssvc.c
fs/nfsd/vfs.c
fs/nilfs2/sufile.c
fs/nilfs2/the_nilfs.c
fs/open.c
fs/proc/task_mmu.c
fs/smb/client/cached_dir.c
fs/smb/client/cifsfs.c
fs/smb/client/connect.c
fs/smb/client/smb2misc.c
fs/smb/client/smb2ops.c
fs/smb/client/smb2pdu.c
fs/smb/client/smb2proto.h
fs/smb/common/smb2pdu.h
fs/smb/server/oplock.c
fs/smb/server/oplock.h
fs/smb/server/smb2ops.c
fs/smb/server/smb2pdu.c
fs/smb/server/vfs.c
fs/smb/server/vfs_cache.c
fs/smb/server/vfs_cache.h
fs/squashfs/block.c
fs/tracefs/event_inode.c
fs/ufs/util.c
include/drm/drm_atomic_helper.h
include/linux/arm_ffa.h
include/linux/bpf.h
include/linux/cred.h
include/linux/damon.h
include/linux/highmem.h
include/linux/hugetlb.h
include/linux/io_uring_types.h
include/linux/jbd2.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mmzone.h
include/linux/pci.h
include/linux/platform_data/x86/asus-wmi.h
include/linux/stmmac.h
include/linux/tcp.h
include/linux/units.h
include/linux/usb/r8152.h
include/net/addrconf.h
include/net/genetlink.h
include/net/if_inet6.h
include/net/netfilter/nf_flow_table.h
include/net/tcp.h
include/net/tcp_ao.h
include/rdma/ib_umem.h
include/rdma/ib_verbs.h
include/uapi/linux/fuse.h
io_uring/io_uring.c
io_uring/kbuf.c
io_uring/poll.c
io_uring/rsrc.h
io_uring/uring_cmd.c
kernel/Kconfig.kexec
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/cgroup/legacy_freezer.c
kernel/crash_core.c
kernel/cred.c
kernel/events/core.c
kernel/exit.c
kernel/freezer.c
kernel/resource.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_hist.c
kernel/trace/trace_output.c
kernel/workqueue.c
lib/Kconfig.debug
lib/group_cpus.c
mm/Kconfig
mm/damon/core.c
mm/damon/sysfs-schemes.c
mm/filemap.c
mm/hugetlb.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/shmem.c
mm/vmscan.c
mm/workingset.c
net/appletalk/ddp.c
net/atm/ioctl.c
net/core/drop_monitor.c
net/core/filter.c
net/core/neighbour.c
net/core/scm.c
net/core/skbuff.c
net/ipv4/ip_gre.c
net/ipv4/tcp.c
net/ipv4/tcp_ao.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/ip6_fib.c
net/ipv6/tcp_ipv6.c
net/netfilter/nf_bpf_link.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_dynset.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_fib.c
net/netfilter/nft_set_pipapo.c
net/netfilter/xt_owner.c
net/netlink/genetlink.c
net/packet/af_packet.c
net/packet/internal.h
net/psample/psample.c
net/rose/af_rose.c
net/sched/act_ct.c
net/smc/af_smc.c
net/smc/smc_clc.c
net/smc/smc_clc.h
net/sunrpc/auth.c
net/tls/tls_sw.c
net/vmw_vsock/virtio_transport_common.c
net/xdp/xsk.c
scripts/checkstack.pl
scripts/dtc/dt-extract-compatibles
scripts/gdb/linux/device.py
scripts/gdb/linux/tasks.py
scripts/sign-file.c
security/selinux/hooks.c
sound/core/pcm.c
sound/drivers/pcmtest.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/tas2781_hda_i2c.c
sound/soc/amd/acp-config.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/cs43130.c
sound/soc/codecs/da7219-aad.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/nau8822.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/wm8974.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl_sai.c
sound/soc/fsl/fsl_xcvr.c
sound/soc/intel/boards/skl_hda_dsp_generic.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/intel/skylake/skl-sst-ipc.c
sound/soc/qcom/sc8280xp.c
sound/soc/soc-ops.c
sound/soc/soc-pcm.c
sound/soc/sof/ipc3-topology.c
sound/soc/sof/ipc4-control.c
sound/soc/sof/ipc4-topology.c
sound/soc/sof/ipc4-topology.h
sound/soc/sof/mediatek/mt8186/mt8186.c
sound/soc/sof/sof-audio.c
sound/soc/sof/sof-audio.h
sound/soc/sof/topology.c
sound/usb/mixer_quirks.c
tools/objtool/noreturns.h
tools/perf/builtin-list.c
tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
tools/perf/util/metricgroup.c
tools/testing/cxl/Kbuild
tools/testing/cxl/cxl_core_exports.c [new file with mode: 0644]
tools/testing/cxl/test/cxl.c
tools/testing/nvdimm/test/ndtest.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/config.x86_64
tools/testing/selftests/bpf/prog_tests/tailcalls.c
tools/testing/selftests/bpf/progs/tailcall_poke.c [new file with mode: 0644]
tools/testing/selftests/hid/config.common
tools/testing/selftests/iommu/iommufd_utils.h
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
tools/testing/selftests/lib.mk
tools/testing/selftests/mm/Makefile
tools/testing/selftests/mm/cow.c
tools/testing/selftests/mm/pagemap_ioctl.c
virt/kvm/kvm_main.c

index 43031441b2d922b3126b26ba754ea748a3f63540..3ac1c12545f20109885881b96eb0752d45a809a8 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -117,6 +117,7 @@ Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chester Lin <chester62515@gmail.com> <clin@suse.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Chris Lew <quic_clew@quicinc.com> <clew@codeaurora.org>
@@ -265,6 +266,9 @@ Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
 Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
 Jilai Wang <quic_jilaiw@quicinc.com> <jilaiw@codeaurora.org>
+Jiri Kosina <jikos@kernel.org> <jikos@jikos.cz>
+Jiri Kosina <jikos@kernel.org> <jkosina@suse.cz>
+Jiri Kosina <jikos@kernel.org> <jkosina@suse.com>
 Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com>
 Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com>
 Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com>
diff --git a/CREDITS b/CREDITS
index f33a33fd2371707b5b22e99408dc18cae59653de..81845c39e3cf3755394488ec44a507095d952a03 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2944,6 +2944,14 @@ D: IPX development and support
 N: Venkatesh Pallipadi (Venki)
 D: x86/HPET
 
+N: Antti Palosaari
+E: crope@iki.fi
+D: Various DVB drivers
+W: https://palosaari.fi/linux/
+S: Yliopistokatu 1 D 513
+S: FI-90570 Oulu
+S: FINLAND
+
 N: Kyungmin Park
 E: kyungmin.park@samsung.com
 D: Samsung S5Pv210 and Exynos4210 mobile platforms
index 0f58701367b66a57201e4a1bf1520cc069cc4122..af31e5a22d89fcc0b07413c8722fa64af3e1a5f0 100644 (file)
@@ -6,3 +6,12 @@ Description:
                OP-TEE bus provides reference to registered drivers under this directory. The <uuid>
                matches Trusted Application (TA) driver and corresponding TA in secure OS. Drivers
                are free to create needed API under optee-ta-<uuid> directory.
+
+What:          /sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant
+Date:          November 2023
+KernelVersion: 6.7
+Contact:       op-tee@lists.trustedfirmware.org
+Description:
+               Allows to distinguish whether an OP-TEE based TA/device requires user-space
+               tee-supplicant to function properly or not. This attribute will be present for
+               devices which depend on tee-supplicant to be running.
index 987aa83c2649436f23ccfaf2a3e38ab77a8b6f7d..df20a3c9c74479afcc219f7445801998464e6762 100644 (file)
@@ -9,6 +9,9 @@ title: Analog Devices ADV7533/35 HDMI Encoders
 maintainers:
   - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 
+allOf:
+  - $ref: /schemas/sound/dai-common.yaml#
+
 description: |
   The ADV7533 and ADV7535 are HDMI audio and video transmitters
   compatible with HDMI 1.4 and DVI 1.0. They support color space
@@ -89,6 +92,9 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
     enum: [ 1, 2, 3, 4 ]
 
+  "#sound-dai-cells":
+    const: 0
+
   ports:
     description:
       The ADV7533/35 has two video ports and one audio port.
index fc11ab5fc4654e555202b51ebf029982f57f0469..1c2be8d6f6334052058b203ea79ef89cbf50a049 100644 (file)
@@ -51,7 +51,10 @@ properties:
     minItems: 1
 
   interrupts:
-    maxItems: 1
+    items:
+      - description: LCDIF DMA interrupt
+      - description: LCDIF Error interrupt
+    minItems: 1
 
   power-domains:
     maxItems: 1
@@ -131,6 +134,21 @@ allOf:
     then:
       required:
         - power-domains
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - fsl,imx23-lcdif
+    then:
+      properties:
+        interrupts:
+          minItems: 2
+          maxItems: 2
+    else:
+      properties:
+        interrupts:
+          maxItems: 1
 
 examples:
   - |
index 537e5304b73006062596cf40af749b885301d05e..ed24b617090b065ff000d265aacf0e2547e7fd5f 100644 (file)
@@ -10,7 +10,6 @@ maintainers:
   - Chun-Kuang Hu <chunkuang.hu@kernel.org>
   - Philipp Zabel <p.zabel@pengutronix.de>
   - Jitao Shi <jitao.shi@mediatek.com>
-  - Xinlei Lee <xinlei.lee@mediatek.com>
 
 description: |
   The MediaTek DSI function block is a sink of the display subsystem and can
index 73674baea75d329bfa949c9de1e71f8c9cf58562..f9160d7bac3caa5e12c4de6b19063cd3247d17b3 100644 (file)
@@ -42,6 +42,8 @@ properties:
       - lg,acx467akm-7
         # LG Corporation 7" WXGA TFT LCD panel
       - lg,ld070wx3-sl01
+        # LG Corporation 5" HD TFT LCD panel
+      - lg,lh500wx1-sd03
         # One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
       - osddisplays,osd101t2587-53ts
         # Panasonic 10" WUXGA TFT LCD panel
index 3ec9ee95045fbce8db9f623753b2f67fb79ccd45..11422af3477e58749fca386610186f9c714dfff0 100644 (file)
@@ -208,8 +208,6 @@ properties:
       - lemaker,bl035-rgb-002
         # LG 7" (800x480 pixels) TFT LCD panel
       - lg,lb070wv8
-        # LG Corporation 5" HD TFT LCD panel
-      - lg,lh500wx1-sd03
         # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
       - lg,lp079qx1-sp0v
         # LG 9.7" (2048x1536 pixels) TFT LCD panel
index 509d20c091af8231f728b1639b872db862a4e375..6a206111d4e0f0a737a71b0615ffd3084c3d9a7d 100644 (file)
@@ -62,6 +62,9 @@ properties:
         - description: MPM pin number
         - description: GIC SPI number for the MPM pin
 
+  '#power-domain-cells':
+    const: 0
+
 required:
   - compatible
   - reg
@@ -93,4 +96,5 @@ examples:
                            <86 183>,
                            <90 260>,
                            <91 260>;
+        #power-domain-cells = <0>;
     };
index c8448de2f2a07c8999d936ea7584058d1823a319..d01c677ad3c765e52cf69c9c93423daaeff13831 100644 (file)
@@ -90,7 +90,7 @@ properties:
             bitmap of all MHPMCOUNTERx that can monitor the range of events
 
 dependencies:
-  "riscv,event-to-mhpmevent": [ "riscv,event-to-mhpmcounters" ]
+  riscv,event-to-mhpmevent: [ "riscv,event-to-mhpmcounters" ]
 
 required:
   - compatible
index c01dff3b7f843eba22f21d556f9f316bbffa757a..a84a240a61dc1f92c403dfb973420751a2c00068 100644 (file)
@@ -14,12 +14,10 @@ allOf:
 
 properties:
   "#pwm-cells":
-    description: |
-      Should be 2 for i.MX1 and 3 for i.MX27 and newer SoCs. See pwm.yaml
-      in this directory for a description of the cells format.
-    enum:
-      - 2
-      - 3
+    description:
+      The only third cell flag supported by this binding is
+      PWM_POLARITY_INVERTED. fsl,imx1-pwm does not support this flags.
+    const: 3
 
   compatible:
     oneOf:
index e4fa6a07b4fa2c6fc5f2d94d15e0c57b1bf51783..1309bf5ae0cdd1c68a5fad255edb9d21c2a17317 100644 (file)
@@ -233,6 +233,7 @@ allOf:
               - rockchip,rk3399-grf
               - rockchip,rk3399-pmugrf
               - rockchip,rk3568-pmugrf
+              - rockchip,rk3588-pmugrf
               - rockchip,rv1108-grf
               - rockchip,rv1108-pmugrf
 
index 255a368fe534b4582c9be673523330e962803123..6464de4266ad504f8bdc89c834230f3c9f5219dc 100644 (file)
@@ -15,7 +15,8 @@ The direct-io mode can be selected with the FOPEN_DIRECT_IO flag in the
 FUSE_OPEN reply.
 
 In direct-io mode the page cache is completely bypassed for reads and writes.
-No read-ahead takes place. Shared mmap is disabled.
+No read-ahead takes place. Shared mmap is disabled by default. To allow shared
+mmap, the FUSE_DIRECT_IO_ALLOW_MMAP flag may be enabled in the FUSE_INIT reply.
 
 In cached mode reads may be satisfied from the page cache, and data may be
 read-ahead by the kernel to fill the cache.  The cache is always kept consistent
index cfa5bf1cc5423cd68ad7d83a9d7e734744481085..8a58321acce72fc23c7ab56ffa05e738c79e0c74 100644 (file)
@@ -99,7 +99,7 @@ also [6.1]::
    when it is no longer considered permitted.
 
 Linux TCP-AO will try its best to prevent you from removing a key that's
-being used, considering it a key management failure. But sine keeping
+being used, considering it a key management failure. But since keeping
 an outdated key may become a security issue and as a peer may
 unintentionally prevent the removal of an old key by always setting
 it as RNextKeyID - a forced key removal mechanism is provided, where
index 4a71ea6cb390763083ace1d6eaac5baa9d0413e3..826e59a698da197235986b567521732de8a34287 100644 (file)
@@ -130,7 +130,7 @@ Misc:
 Device Tree Bindings
 --------------------
 
-See Documentation/devicetree/bindings/arm/arm,coresight-\*.yaml for details.
+See ``Documentation/devicetree/bindings/arm/arm,coresight-*.yaml`` for details.
 
 As of this writing drivers for ITM, STMs and CTIs are not provided but are
 expected to be added as the solution matures.
index 788be9ab5b733a151bbd1ef85fe07414f374016c..9104430e148e6d4717f3c32c663914697dfe16cc 100644 (file)
@@ -171,13 +171,10 @@ S:        Supported
 F:     drivers/soc/fujitsu/a64fx-diag.c
 
 A8293 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/a8293*
 
 AACRAID SCSI RAID DRIVER
@@ -576,23 +573,17 @@ F:        drivers/iio/accel/adxl372_i2c.c
 F:     drivers/iio/accel/adxl372_spi.c
 
 AF9013 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/af9013*
 
 AF9033 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/af9033*
 
 AFFS FILE SYSTEM
@@ -650,13 +641,10 @@ F:        fs/aio.c
 F:     include/linux/*aio*.h
 
 AIRSPY MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/airspy/
 
 ALACRITECH GIGABIT ETHERNET DRIVER
@@ -2155,6 +2143,7 @@ S:        Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
 F:     arch/arm/boot/dts/nxp/imx/
 F:     arch/arm/boot/dts/nxp/mxs/
+F:     arch/arm64/boot/dts/freescale/
 X:     arch/arm64/boot/dts/freescale/fsl-*
 X:     arch/arm64/boot/dts/freescale/qoriq-*
 X:     drivers/media/i2c/
@@ -2535,7 +2524,7 @@ F:        drivers/*/*/*wpcm*
 F:     drivers/*/*wpcm*
 
 ARM/NXP S32G ARCHITECTURE
-M:     Chester Lin <clin@suse.com>
+M:     Chester Lin <chester62515@gmail.com>
 R:     Andreas Färber <afaerber@suse.de>
 R:     Matthias Brugger <mbrugger@suse.com>
 R:     NXP S32 Linux Team <s32@nxp.com>
@@ -5604,13 +5593,10 @@ F:      Documentation/driver-api/media/drivers/cx88*
 F:     drivers/media/pci/cx88/
 
 CXD2820R MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/cxd2820r*
 
 CXGB3 ETHERNET DRIVER (CXGB3)
@@ -5723,13 +5709,10 @@ F:      Documentation/devicetree/bindings/input/cypress-sf.yaml
 F:     drivers/input/keyboard/cypress-sf.c
 
 CYPRESS_FIRMWARE MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/common/cypress_firmware*
 
 CYTTSP TOUCHSCREEN DRIVER
@@ -7319,53 +7302,38 @@ T:      git git://linuxtv.org/media_tree.git
 F:     drivers/media/pci/dt3155/
 
 DVB_USB_AF9015 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/af9015*
 
 DVB_USB_AF9035 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/af9035*
 
 DVB_USB_ANYSEE MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/anysee*
 
 DVB_USB_AU6610 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/au6610*
 
 DVB_USB_CE6230 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/ce6230*
 
 DVB_USB_CXUSB MEDIA DRIVER
@@ -7379,22 +7347,17 @@ T:      git git://linuxtv.org/media_tree.git
 F:     drivers/media/usb/dvb-usb/cxusb*
 
 DVB_USB_EC168 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/ec168*
 
 DVB_USB_GL861 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/gl861*
 
 DVB_USB_MXL111SF MEDIA DRIVER
@@ -7408,23 +7371,18 @@ T:      git git://linuxtv.org/mkrufky/mxl111sf.git
 F:     drivers/media/usb/dvb-usb-v2/mxl111sf*
 
 DVB_USB_RTL28XXU MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/rtl28xxu*
 
 DVB_USB_V2 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
 W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/dvb-usb-v2/dvb_usb*
 F:     drivers/media/usb/dvb-usb-v2/usb_urb.c
 
@@ -7466,13 +7424,10 @@ F:      Documentation/devicetree/bindings/input/e3x0-button.txt
 F:     drivers/input/misc/e3x0-button.c
 
 E4000 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/e4000*
 
 EARTH_PT1 MEDIA DRIVER
@@ -7488,13 +7443,10 @@ S:      Odd Fixes
 F:     drivers/media/pci/pt3/
 
 EC100 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/ec100*
 
 ECRYPT FILE SYSTEM
@@ -8112,13 +8064,10 @@ F:      drivers/media/tuners/fc0011.c
 F:     drivers/media/tuners/fc0011.h
 
 FC2580 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/fc2580*
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
@@ -9248,13 +9197,10 @@ F:      include/trace/events/habanalabs.h
 F:     include/uapi/drm/habanalabs_accel.h
 
 HACKRF MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/hackrf/
 
 HANDSHAKE UPCALL FOR TRANSPORT LAYER SECURITY
@@ -9627,6 +9573,7 @@ F:        drivers/crypto/hisilicon/sgl.c
 F:     include/linux/hisi_acc_qm.h
 
 HISILICON ROCE DRIVER
+M:     Chengchang Tang <tangchengchang@huawei.com>
 M:     Junxian Huang <huangjunxian6@hisilicon.com>
 L:     linux-rdma@vger.kernel.org
 S:     Maintained
@@ -11327,13 +11274,10 @@ F:    Documentation/hwmon/it87.rst
 F:     drivers/hwmon/it87.c
 
 IT913X MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/it913x*
 
 ITE IT66121 HDMI BRIDGE DRIVER
@@ -12206,6 +12150,13 @@ F:     include/linux/nd.h
 F:     include/uapi/linux/ndctl.h
 F:     tools/testing/nvdimm/
 
+LIBRARY CODE
+M:     Andrew Morton <akpm@linux-foundation.org>
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-nonmm-unstable
+F:     lib/*
+
 LICENSES and SPDX stuff
 M:     Thomas Gleixner <tglx@linutronix.de>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -12238,6 +12189,8 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
 M:     Michael Ellerman <mpe@ellerman.id.au>
 R:     Nicholas Piggin <npiggin@gmail.com>
 R:     Christophe Leroy <christophe.leroy@csgroup.eu>
+R:     Aneesh Kumar K.V <aneesh.kumar@kernel.org>
+R:     Naveen N. Rao <naveen.n.rao@linux.ibm.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
 W:     https://github.com/linuxppc/wiki/wiki
@@ -12685,13 +12638,10 @@ W:    http://www.tazenda.demon.co.uk/phil/linux-hp
 F:     arch/m68k/hp300/
 
 M88DS3103 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/m88ds3103*
 
 M88RS2000 MEDIA DRIVER
@@ -14585,20 +14535,16 @@ F:    include/asm-generic/tlb.h
 F:     mm/mmu_gather.c
 
 MN88472 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
 F:     drivers/media/dvb-frontends/mn88472*
 
 MN88473 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
 F:     drivers/media/dvb-frontends/mn88473*
 
@@ -14686,23 +14632,17 @@ S:    Orphan
 F:     drivers/platform/x86/msi-wmi.c
 
 MSI001 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/msi001*
 
 MSI2500 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/usb/msi2500/
 
 MSTAR INTERRUPT CONTROLLER DRIVER
@@ -15066,6 +15006,7 @@ F:      lib/random32.c
 F:     net/
 F:     tools/net/
 F:     tools/testing/selftests/net/
+X:     net/9p/
 X:     net/bluetooth/
 
 NETWORKING [IPSEC]
@@ -17773,13 +17714,10 @@ F:    drivers/bus/fsl-mc/
 F:     include/uapi/linux/fsl_mc.h
 
 QT1010 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/qt1010*
 
 QUALCOMM ATH12K WIRELESS DRIVER
@@ -18834,33 +18772,24 @@ S:    Maintained
 F:     drivers/tty/rpmsg_tty.c
 
 RTL2830 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/rtl2830*
 
 RTL2832 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/rtl2832*
 
 RTL2832_SDR MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/rtl2832_sdr*
 
 RTL8180 WIRELESS DRIVER
@@ -19637,7 +19566,6 @@ S:      Maintained
 F:     drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
-M:     Karsten Graul <kgraul@linux.ibm.com>
 M:     Wenjia Zhang <wenjia@linux.ibm.com>
 M:     Jan Karcher <jaka@linux.ibm.com>
 R:     D. Wythe <alibuda@linux.alibaba.com>
@@ -19670,13 +19598,10 @@ F:    drivers/media/platform/renesas/sh_vou.c
 F:     include/media/drv-intf/sh_vou.h
 
 SI2157 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/si2157*
 
 SI2165 MEDIA DRIVER
@@ -19688,13 +19613,10 @@ Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 F:     drivers/media/dvb-frontends/si2165*
 
 SI2168 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/si2168*
 
 SI470X FM RADIO RECEIVER I2C DRIVER
@@ -21196,33 +21118,24 @@ W:    http://tcp-lp-mod.sourceforge.net/
 F:     net/ipv4/tcp_lp.c
 
 TDA10071 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/dvb-frontends/tda10071*
 
 TDA18212 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/tda18212*
 
 TDA18218 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/tda18218*
 
 TDA18250 MEDIA DRIVER
@@ -22158,13 +22071,10 @@ F:    include/uapi/linux/serial_core.h
 F:     include/uapi/linux/tty.h
 
 TUA9001 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org
-W:     http://palosaari.fi/linux/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
-T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/tua9001*
 
 TULIP NETWORK DRIVERS
@@ -24109,20 +24019,16 @@ S:    Orphan
 F:     drivers/net/wireless/zydas/zd1211rw/
 
 ZD1301 MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org/
-W:     http://palosaari.fi/linux/
 Q:     https://patchwork.linuxtv.org/project/linux-media/list/
 F:     drivers/media/usb/dvb-usb-v2/zd1301*
 
 ZD1301_DEMOD MEDIA DRIVER
-M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linuxtv.org/
-W:     http://palosaari.fi/linux/
 Q:     https://patchwork.linuxtv.org/project/linux-media/list/
 F:     drivers/media/dvb-frontends/zd1301_demod*
 
index 511b5616aa411c264c03d4793c077efb3df62704..e78ee7db0729b6634084058f826a9270656109ee 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index 1ab8184302db448c2407fdc8929096dd1072cfa3..5a2869a18bd555cbacdb92c5a7cdee18cf6ef842 100644 (file)
@@ -36,9 +36,7 @@
        gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;
 };
 
-&leds {
-       /delete-node/ led_act;
-};
+/delete-node/ &led_act;
 
 &pm {
        /delete-property/ system-power-controller;
index a3f247c722b438bcb91d6a22bee2633158b5fde7..0342a79ccd5db2c6e450121a9a157f2d0aaf77e5 100644 (file)
@@ -37,9 +37,9 @@
 
 &clks {
        assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-                         <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+                         <&clks IMX6QDL_CLK_LDB_DI1_SEL>, <&clks IMX6QDL_CLK_ENET_REF_SEL>;
        assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
-                                <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>;
+                                <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, <&clk50m_phy>;
 };
 
 &hdmi {
index 4ffe99ed55ca2c000efd480ee54a914cb9c2b14b..07dcecbe485dca41b66f3deef932f750d31f5544 100644 (file)
                        max-speed = <100>;
                        interrupt-parent = <&gpio5>;
                        interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+                       clocks = <&clks IMX6UL_CLK_ENET_REF>;
+                       clock-names = "rmii-ref";
                };
        };
 };
index 29b8fd03567a54431e16f239d2f3202317f16451..5387da8a2a0a37f4d9662a5498d4612f379b8e4b 100644 (file)
                        };
 
                        gpt1: timer@302d0000 {
-                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
                                reg = <0x302d0000 0x10000>;
                                interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
                        };
 
                        gpt2: timer@302e0000 {
-                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
                                reg = <0x302e0000 0x10000>;
                                interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
                        };
 
                        gpt3: timer@302f0000 {
-                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
                                reg = <0x302f0000 0x10000>;
                                interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
                        };
 
                        gpt4: timer@30300000 {
-                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
                                reg = <0x30300000 0x10000>;
                                interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
index a400c108f66a2d33f7e9ca3cf74421ad506ac75c..6c5e6856648af94099e3064081c1adb0fc792022 100644 (file)
@@ -8,6 +8,7 @@
 #include "imx28-lwe.dtsi"
 
 / {
+       model = "Liebherr XEA board";
        compatible = "lwn,imx28-xea", "fsl,imx28";
 };
 
index 7bf557c995614980a513a44915e579be6abe0304..01edf244ddeef6d6120a397cb40601a70b2fdc1a 100644 (file)
                        };
 
                        sdmmc_pwren: sdmmc-pwren {
-                               rockchip,pins = <1 RK_PB6 1 &pcfg_pull_default>;
+                               rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_default>;
                        };
 
                        sdmmc_bus4: sdmmc-bus4 {
index ffc16d6b97e1bd139957928889b5a21c6288407b..a721744cbfd17f76d6abac5c879311d12d0aaa87 100644 (file)
 
                        power-domain@RK3228_PD_VOP {
                                reg = <RK3228_PD_VOP>;
-                               clocks =<&cru ACLK_VOP>,
-                                       <&cru DCLK_VOP>,
-                                       <&cru HCLK_VOP>;
+                               clocks = <&cru ACLK_VOP>,
+                                        <&cru DCLK_VOP>,
+                                        <&cru HCLK_VOP>;
                                pm_qos = <&qos_vop>;
                                #power-domain-cells = <0>;
                        };
index e62832dcba7600d0780cec8462ff8e432ec788d9..a8287e7ab9d41ac88f2f30630c428bc18429eeff 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef _ARM_KEXEC_H
 #define _ARM_KEXEC_H
 
-#ifdef CONFIG_KEXEC
-
 /* Maximum physical address we can use pages from */
 #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
 /* Maximum address we can reach in physical address mode */
@@ -82,6 +80,4 @@ static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* CONFIG_KEXEC */
-
 #endif /* _ARM_KEXEC_H */
index d53f56d6f840857a838517586f10ceb12f05412b..771264d4726a732030c9af167ab535a3395a532b 100644 (file)
@@ -59,7 +59,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o insn.o patch.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o insn.o patch.o
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o insn.o patch.o
-obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE)       += machine_kexec.o relocate_kernel.o
 # Main staffs in KPROBES are in arch/arm/probes/ .
 obj-$(CONFIG_KPROBES)          += patch.o insn.o
 obj-$(CONFIG_OABI_COMPAT)      += sys_oabi-compat.o
index 2157493b78a9bd3cbb98508b29d504ca5d196281..df69af9323754f06527458b1d7bb37dbe75f158b 100644 (file)
@@ -501,6 +501,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
 
        name = devm_kasprintf(&pdev->dev,
                                GFP_KERNEL, "mmdc%d", ret);
+       if (!name) {
+               ret = -ENOMEM;
+               goto pmu_release_id;
+       }
 
        pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
        pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
@@ -523,9 +527,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
 
 pmu_register_err:
        pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
-       ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        hrtimer_cancel(&pmu_mmdc->hrtimer);
+pmu_release_id:
+       ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
 pmu_free:
        kfree(pmu_mmdc);
        return ret;
index 5ce5fbf2b38e4a0d2e15802ce9638e6e28ebba7e..f69b0c17560aee381f5384928b3d6071f3f141bd 100644 (file)
                pinctrl-0 = <&pinctrl_wifi_pdn>;
                gpio = <&lsio_gpio1 28 GPIO_ACTIVE_HIGH>;
                enable-active-high;
+               regulator-always-on;
                regulator-name = "wifi_pwrdn_fake_regulator";
                regulator-settling-time-us = <100>;
-
-               regulator-state-mem {
-                       regulator-off-in-suspend;
-               };
        };
 
        reg_pcie_switch: regulator-pcie-switch {
index ce66d30a4839b1562a25fafb4c7e24676077be78..b0bb77150adccb6c9610c1b0dcf510100495a772 100644 (file)
@@ -149,7 +149,7 @@ dma_subsys: bus@5a000000 {
                clock-names = "ipg", "per";
                assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
-               #pwm-cells = <2>;
+               #pwm-cells = <3>;
                power-domains = <&pd IMX_SC_R_LCD_0_PWM_0>;
        };
 
index 49ad3413db9487c6635d8c8f30ea93d6ded497c0..7e510b21bbac555b38cede99f97b4edc177bf520 100644 (file)
@@ -29,7 +29,7 @@ lsio_subsys: bus@5d000000 {
                         <&pwm0_lpcg 1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
-               #pwm-cells = <2>;
+               #pwm-cells = <3>;
                interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
        };
@@ -42,7 +42,7 @@ lsio_subsys: bus@5d000000 {
                         <&pwm1_lpcg 1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
-               #pwm-cells = <2>;
+               #pwm-cells = <3>;
                interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
        };
@@ -55,7 +55,7 @@ lsio_subsys: bus@5d000000 {
                         <&pwm2_lpcg 1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
-               #pwm-cells = <2>;
+               #pwm-cells = <3>;
                interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
        };
@@ -68,7 +68,7 @@ lsio_subsys: bus@5d000000 {
                         <&pwm3_lpcg 1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
-               #pwm-cells = <2>;
+               #pwm-cells = <3>;
                interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
        };
index c9a610ba483689f8e595ff1e1bfab3b4cbc97fa4..1264da6012f9296be3fd062cdcced456a6b7e997 100644 (file)
                                phys = <&usb3_phy0>, <&usb3_phy0>;
                                phy-names = "usb2-phy", "usb3-phy";
                                snps,gfladj-refclk-lpm-sel-quirk;
+                               snps,parkmode-disable-ss-quirk;
                        };
 
                };
                                phys = <&usb3_phy1>, <&usb3_phy1>;
                                phy-names = "usb2-phy", "usb3-phy";
                                snps,gfladj-refclk-lpm-sel-quirk;
+                               snps,parkmode-disable-ss-quirk;
                        };
                };
 
index 4b1ce9fc1758474b4bff5cca6473e5f9eb4cacbc..c6dc3ba0d43b23f88af1656a28f1b3f1ccab0b98 100644 (file)
                        phys = <&usb3_phy0>, <&usb3_phy0>;
                        phy-names = "usb2-phy", "usb3-phy";
                        power-domains = <&pgc_otg1>;
+                       snps,parkmode-disable-ss-quirk;
                        status = "disabled";
                };
 
                        phys = <&usb3_phy1>, <&usb3_phy1>;
                        phy-names = "usb2-phy", "usb3-phy";
                        power-domains = <&pgc_otg2>;
+                       snps,parkmode-disable-ss-quirk;
                        status = "disabled";
                };
 
index 01539df335f8c2ff82ed88ff46b16f7684600293..8439dd6b3935344a903762f29826a30ebc3e799a 100644 (file)
        status = "okay";
 };
 
+&edma3 {
+       power-domains = <&pd IMX_SC_R_DMA_1_CH0>,
+                    <&pd IMX_SC_R_DMA_1_CH1>,
+                    <&pd IMX_SC_R_DMA_1_CH2>,
+                    <&pd IMX_SC_R_DMA_1_CH3>,
+                    <&pd IMX_SC_R_DMA_1_CH4>,
+                    <&pd IMX_SC_R_DMA_1_CH5>,
+                    <&pd IMX_SC_R_DMA_1_CH6>,
+                    <&pd IMX_SC_R_DMA_1_CH7>;
+};
+
 &flexcan1 {
        fsl,clk-source = /bits/ 8 <1>;
 };
index f22c1ac391c9b97b2f80a3a5108eb3a343ba3bfb..c4a0082f30d3164456e1a1bc8856cb6a378709e0 100644 (file)
                        };
                };
 
-               gpioe: gpio@2d000080 {
+               gpioe: gpio@2d000000 {
                                compatible = "fsl,imx8ulp-gpio";
                                reg = <0x2d000000 0x1000>;
                                gpio-controller;
                                gpio-ranges = <&iomuxc1 0 32 24>;
                };
 
-               gpiof: gpio@2d010080 {
+               gpiof: gpio@2d010000 {
                                compatible = "fsl,imx8ulp-gpio";
                                reg = <0x2d010000 0x1000>;
                                gpio-controller;
                        };
                };
 
-               gpiod: gpio@2e200080 {
+               gpiod: gpio@2e200000 {
                        compatible = "fsl,imx8ulp-gpio";
                        reg = <0x2e200000 0x1000>;
                        gpio-controller;
index f06139bdff97e383bc3729abbd7116eff48e23ce..3c5c67ebee5d306e47277439532c55b62d7a1c3c 100644 (file)
                fsl,pins = <
                        MX93_PAD_UART2_TXD__LPUART2_TX          0x31e
                        MX93_PAD_UART2_RXD__LPUART2_RX          0x31e
-                       MX93_PAD_SAI1_TXD0__LPUART2_RTS_B       0x31e
+                       MX93_PAD_SAI1_TXD0__LPUART2_RTS_B       0x51e
                >;
        };
 
index ceccf476644072156319648f406610c827ee0792..34c0540276d1668a2ba568f76bbb0984af38bade 100644 (file)
                                        compatible = "fsl,imx93-src-slice";
                                        reg = <0x44462400 0x400>, <0x44465800 0x400>;
                                        #power-domain-cells = <0>;
-                                       clocks = <&clk IMX93_CLK_MEDIA_AXI>,
+                                       clocks = <&clk IMX93_CLK_NIC_MEDIA_GATE>,
                                                 <&clk IMX93_CLK_MEDIA_APB>;
                                };
                        };
                        };
                };
 
-               gpio2: gpio@43810080 {
+               gpio2: gpio@43810000 {
                        compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
                        reg = <0x43810000 0x1000>;
                        gpio-controller;
                        gpio-ranges = <&iomuxc 0 4 30>;
                };
 
-               gpio3: gpio@43820080 {
+               gpio3: gpio@43820000 {
                        compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
                        reg = <0x43820000 0x1000>;
                        gpio-controller;
                                      <&iomuxc 26 34 2>, <&iomuxc 28 0 4>;
                };
 
-               gpio4: gpio@43830080 {
+               gpio4: gpio@43830000 {
                        compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
                        reg = <0x43830000 0x1000>;
                        gpio-controller;
                        gpio-ranges = <&iomuxc 0 38 28>, <&iomuxc 28 36 2>;
                };
 
-               gpio1: gpio@47400080 {
+               gpio1: gpio@47400000 {
                        compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
                        reg = <0x47400000 0x1000>;
                        gpio-controller;
index 3b7a176b79047d489b91f632357e6a0b57db7a25..c46682150e502abb2b62cc1d3170e81d475cf1b0 100644 (file)
@@ -73,7 +73,7 @@
                };
        };
 
-       memory {
+       memory@40000000 {
                reg = <0 0x40000000 0 0x40000000>;
        };
 
index a885a3fbe4562228b6f56da0fa9d5a55549d8979..2dc1bdc74e2124224d5810b4f255453605bd4999 100644 (file)
@@ -55,7 +55,7 @@
                };
        };
 
-       memory {
+       memory@40000000 {
                reg = <0 0x40000000 0 0x20000000>;
        };
 
index af4a4309bda4b93191601f6e38fc6044211278a9..b876e501216be8e19176d458a06183749015bc70 100644 (file)
                compatible = "sff,sfp";
                i2c-bus = <&i2c_sfp1>;
                los-gpios = <&pio 46 GPIO_ACTIVE_HIGH>;
+               maximum-power-milliwatt = <3000>;
                mod-def0-gpios = <&pio 49 GPIO_ACTIVE_LOW>;
                tx-disable-gpios = <&pio 20 GPIO_ACTIVE_HIGH>;
                tx-fault-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
                i2c-bus = <&i2c_sfp2>;
                los-gpios = <&pio 31 GPIO_ACTIVE_HIGH>;
                mod-def0-gpios = <&pio 47 GPIO_ACTIVE_LOW>;
+               maximum-power-milliwatt = <3000>;
                tx-disable-gpios = <&pio 15 GPIO_ACTIVE_HIGH>;
                tx-fault-gpios = <&pio 48 GPIO_ACTIVE_HIGH>;
        };
                        trip = <&cpu_trip_active_high>;
                };
 
-               cpu-active-low {
+               cpu-active-med {
                        /* active: set fan to cooling level 1 */
                        cooling-device = <&fan 1 1>;
-                       trip = <&cpu_trip_active_low>;
+                       trip = <&cpu_trip_active_med>;
                };
 
-               cpu-passive {
-                       /* passive: set fan to cooling level 0 */
+               cpu-active-low {
+                       /* active: set fan to cooling level 0 */
                        cooling-device = <&fan 0 0>;
-                       trip = <&cpu_trip_passive>;
+                       trip = <&cpu_trip_active_low>;
                };
        };
 };
index 24eda00e320d3a8873a702966f8b674b737425a3..fc751e049953c27ff9df7787642d0bd6016ad458 100644 (file)
                        reg = <0 0x11230000 0 0x1000>,
                              <0 0x11c20000 0 0x1000>;
                        interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+                       assigned-clocks = <&topckgen CLK_TOP_EMMC_416M_SEL>,
+                                         <&topckgen CLK_TOP_EMMC_250M_SEL>;
+                       assigned-clock-parents = <&apmixedsys CLK_APMIXED_MPLL>,
+                                                <&topckgen CLK_TOP_NET1PLL_D5_D2>;
                        clocks = <&topckgen CLK_TOP_EMMC_416M_SEL>,
                                 <&infracfg CLK_INFRA_MSDC_HCK_CK>,
                                 <&infracfg CLK_INFRA_MSDC_CK>,
                        thermal-sensors = <&thermal 0>;
 
                        trips {
+                               cpu_trip_crit: crit {
+                                       temperature = <125000>;
+                                       hysteresis = <2000>;
+                                       type = "critical";
+                               };
+
+                               cpu_trip_hot: hot {
+                                       temperature = <120000>;
+                                       hysteresis = <2000>;
+                                       type = "hot";
+                               };
+
                                cpu_trip_active_high: active-high {
                                        temperature = <115000>;
                                        hysteresis = <2000>;
                                        type = "active";
                                };
 
-                               cpu_trip_active_low: active-low {
+                               cpu_trip_active_med: active-med {
                                        temperature = <85000>;
                                        hysteresis = <2000>;
                                        type = "active";
                                };
 
-                               cpu_trip_passive: passive {
-                                       temperature = <40000>;
+                               cpu_trip_active_low: active-low {
+                                       temperature = <60000>;
                                        hysteresis = <2000>;
-                                       type = "passive";
+                                       type = "active";
                                };
                        };
                };
index 5122963d8743ab3fd5049032369976099e1a26d6..d258c80213b26420bb8c4590e35153f2fb4c9db0 100644 (file)
@@ -44,7 +44,7 @@
                id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
        };
 
-       usb_p1_vbus: regulator@0 {
+       usb_p1_vbus: regulator-usb-p1 {
                compatible = "regulator-fixed";
                regulator-name = "usb_vbus";
                regulator-min-microvolt = <5000000>;
@@ -53,7 +53,7 @@
                enable-active-high;
        };
 
-       usb_p0_vbus: regulator@1 {
+       usb_p0_vbus: regulator-usb-p0 {
                compatible = "regulator-fixed";
                regulator-name = "vbus";
                regulator-min-microvolt = <5000000>;
index ce336a48c897329ca045a7e81655985f9a4795b5..77f9ab94c00bd98e791df6067f2e0e8bfdf5c7cb 100644 (file)
                #address-cells = <2>;
                #size-cells = <2>;
                ranges;
-               scp_mem_reserved: scp_mem_region {
+               scp_mem_reserved: memory@50000000 {
                        compatible = "shared-dma-pool";
                        reg = <0 0x50000000 0 0x2900000>;
                        no-map;
                };
        };
 
-       ntc@0 {
+       thermal-sensor {
                compatible = "murata,ncp03wf104";
                pullup-uv = <1800000>;
                pullup-ohm = <390000>;
index bf97b60ae4d17eaf3902db1ca31dac496aa0f5be..820260348de9b655f051b0d9fc1eb78721e73fd6 100644 (file)
@@ -91,6 +91,8 @@
 
 &dsi0 {
        status = "okay";
+       /delete-property/#size-cells;
+       /delete-property/#address-cells;
        /delete-node/panel@0;
        ports {
                port {
        };
 
        touchscreen_pins: touchscreen-pins {
-               touch_int_odl {
+               touch-int-odl {
                        pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
                        input-enable;
                        bias-pull-up;
                };
 
-               touch_rst_l {
+               touch-rst-l {
                        pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
                        output-high;
                };
        };
 
        trackpad_pins: trackpad-pins {
-               trackpad_int {
+               trackpad-int {
                        pinmux = <PINMUX_GPIO7__FUNC_GPIO7>;
                        input-enable;
                        bias-disable; /* pulled externally */
index bf7de35ffcbc8ae440c34761d0a8578b9b69bc73..7881a27be0297096c6e633825b63ed01ddfd2970 100644 (file)
                #size-cells = <2>;
                ranges;
 
-               scp_mem_reserved: scp_mem_region {
+               scp_mem_reserved: memory@50000000 {
                        compatible = "shared-dma-pool";
                        reg = <0 0x50000000 0 0x2900000>;
                        no-map;
 
 &pio {
        aud_pins_default: audiopins {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO97__FUNC_I2S2_MCK>,
                                <PINMUX_GPIO98__FUNC_I2S2_BCK>,
                                <PINMUX_GPIO101__FUNC_I2S2_LRCK>,
        };
 
        aud_pins_tdm_out_on: audiotdmouton {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO169__FUNC_TDM_BCK_2ND>,
                                <PINMUX_GPIO170__FUNC_TDM_LRCK_2ND>,
                                <PINMUX_GPIO171__FUNC_TDM_DATA0_2ND>,
        };
 
        aud_pins_tdm_out_off: audiotdmoutoff {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO169__FUNC_GPIO169>,
                                <PINMUX_GPIO170__FUNC_GPIO170>,
                                <PINMUX_GPIO171__FUNC_GPIO171>,
        };
 
        bt_pins: bt-pins {
-               pins_bt_en {
+               pins-bt-en {
                        pinmux = <PINMUX_GPIO120__FUNC_GPIO120>;
                        output-low;
                };
        };
 
-       ec_ap_int_odl: ec_ap_int_odl {
+       ec_ap_int_odl: ec-ap-int-odl {
                pins1 {
                        pinmux = <PINMUX_GPIO151__FUNC_GPIO151>;
                        input-enable;
                };
        };
 
-       h1_int_od_l: h1_int_od_l {
+       h1_int_od_l: h1-int-od-l {
                pins1 {
                        pinmux = <PINMUX_GPIO153__FUNC_GPIO153>;
                        input-enable;
        };
 
        i2c0_pins: i2c0 {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
                                 <PINMUX_GPIO83__FUNC_SCL0>;
                        mediatek,pull-up-adv = <3>;
        };
 
        i2c1_pins: i2c1 {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
                                 <PINMUX_GPIO84__FUNC_SCL1>;
                        mediatek,pull-up-adv = <3>;
        };
 
        i2c2_pins: i2c2 {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
                                 <PINMUX_GPIO104__FUNC_SDA2>;
                        bias-disable;
        };
 
        i2c3_pins: i2c3 {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
                                 <PINMUX_GPIO51__FUNC_SDA3>;
                        mediatek,pull-up-adv = <3>;
        };
 
        i2c4_pins: i2c4 {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
                                 <PINMUX_GPIO106__FUNC_SDA4>;
                        bias-disable;
        };
 
        i2c5_pins: i2c5 {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
                                 <PINMUX_GPIO49__FUNC_SDA5>;
                        mediatek,pull-up-adv = <3>;
        };
 
        i2c6_pins: i2c6 {
-               pins_bus {
+               pins-bus {
                        pinmux = <PINMUX_GPIO11__FUNC_SCL6>,
                                 <PINMUX_GPIO12__FUNC_SDA6>;
                        bias-disable;
        };
 
        mmc0_pins_default: mmc0-pins-default {
-               pins_cmd_dat {
+               pins-cmd-dat {
                        pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
                                 <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
                                 <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
                        mediatek,pull-up-adv = <01>;
                };
 
-               pins_clk {
+               pins-clk {
                        pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
                        drive-strength = <MTK_DRIVE_14mA>;
                        mediatek,pull-down-adv = <10>;
                };
 
-               pins_rst {
+               pins-rst {
                        pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
                        drive-strength = <MTK_DRIVE_14mA>;
                        mediatek,pull-down-adv = <01>;
        };
 
        mmc0_pins_uhs: mmc0-pins-uhs {
-               pins_cmd_dat {
+               pins-cmd-dat {
                        pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
                                 <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
                                 <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
                        mediatek,pull-up-adv = <01>;
                };
 
-               pins_clk {
+               pins-clk {
                        pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
                        drive-strength = <MTK_DRIVE_14mA>;
                        mediatek,pull-down-adv = <10>;
                };
 
-               pins_ds {
+               pins-ds {
                        pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
                        drive-strength = <MTK_DRIVE_14mA>;
                        mediatek,pull-down-adv = <10>;
                };
 
-               pins_rst {
+               pins-rst {
                        pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
                        drive-strength = <MTK_DRIVE_14mA>;
                        mediatek,pull-up-adv = <01>;
        };
 
        mmc1_pins_default: mmc1-pins-default {
-               pins_cmd_dat {
+               pins-cmd-dat {
                        pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
                                 <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
                                 <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
                        mediatek,pull-up-adv = <10>;
                };
 
-               pins_clk {
+               pins-clk {
                        pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
                        input-enable;
                        mediatek,pull-down-adv = <10>;
        };
 
        mmc1_pins_uhs: mmc1-pins-uhs {
-               pins_cmd_dat {
+               pins-cmd-dat {
                        pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
                                 <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
                                 <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
                        mediatek,pull-up-adv = <10>;
                };
 
-               pins_clk {
+               pins-clk {
                        pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
                        drive-strength = <MTK_DRIVE_8mA>;
                        mediatek,pull-down-adv = <10>;
                };
        };
 
-       panel_pins_default: panel_pins_default {
-               panel_reset {
+       panel_pins_default: panel-pins-default {
+               panel-reset {
                        pinmux = <PINMUX_GPIO45__FUNC_GPIO45>;
                        output-low;
                        bias-pull-up;
                };
        };
 
-       pwm0_pin_default: pwm0_pin_default {
+       pwm0_pin_default: pwm0-pin-default {
                pins1 {
                        pinmux = <PINMUX_GPIO176__FUNC_GPIO176>;
                        output-high;
        };
 
        scp_pins: scp {
-               pins_scp_uart {
+               pins-scp-uart {
                        pinmux = <PINMUX_GPIO110__FUNC_TP_URXD1_AO>,
                                 <PINMUX_GPIO112__FUNC_TP_UTXD1_AO>;
                };
        };
 
        spi0_pins: spi0 {
-               pins_spi {
+               pins-spi {
                        pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
                                 <PINMUX_GPIO86__FUNC_GPIO86>,
                                 <PINMUX_GPIO87__FUNC_SPI0_MO>,
        };
 
        spi1_pins: spi1 {
-               pins_spi {
+               pins-spi {
                        pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
                                 <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
                                 <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
        };
 
        spi2_pins: spi2 {
-               pins_spi {
+               pins-spi {
                        pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
                                 <PINMUX_GPIO1__FUNC_SPI2_MO>,
                                 <PINMUX_GPIO2__FUNC_SPI2_CLK>;
                        bias-disable;
                };
-               pins_spi_mi {
+               pins-spi-mi {
                        pinmux = <PINMUX_GPIO94__FUNC_SPI2_MI>;
                        mediatek,pull-down-adv = <00>;
                };
        };
 
        spi3_pins: spi3 {
-               pins_spi {
+               pins-spi {
                        pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
                                 <PINMUX_GPIO22__FUNC_SPI3_CSB>,
                                 <PINMUX_GPIO23__FUNC_SPI3_MO>,
        };
 
        spi4_pins: spi4 {
-               pins_spi {
+               pins-spi {
                        pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
                                 <PINMUX_GPIO18__FUNC_SPI4_CSB>,
                                 <PINMUX_GPIO19__FUNC_SPI4_MO>,
        };
 
        spi5_pins: spi5 {
-               pins_spi {
+               pins-spi {
                        pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
                                 <PINMUX_GPIO14__FUNC_SPI5_CSB>,
                                 <PINMUX_GPIO15__FUNC_SPI5_MO>,
        };
 
        uart0_pins_default: uart0-pins-default {
-               pins_rx {
+               pins-rx {
                        pinmux = <PINMUX_GPIO95__FUNC_URXD0>;
                        input-enable;
                        bias-pull-up;
                };
-               pins_tx {
+               pins-tx {
                        pinmux = <PINMUX_GPIO96__FUNC_UTXD0>;
                };
        };
 
        uart1_pins_default: uart1-pins-default {
-               pins_rx {
+               pins-rx {
                        pinmux = <PINMUX_GPIO121__FUNC_URXD1>;
                        input-enable;
                        bias-pull-up;
                };
-               pins_tx {
+               pins-tx {
                        pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
                };
-               pins_rts {
+               pins-rts {
                        pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
                        output-enable;
                };
-               pins_cts {
+               pins-cts {
                        pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
                        input-enable;
                };
        };
 
        uart1_pins_sleep: uart1-pins-sleep {
-               pins_rx {
+               pins-rx {
                        pinmux = <PINMUX_GPIO121__FUNC_GPIO121>;
                        input-enable;
                        bias-pull-up;
                };
-               pins_tx {
+               pins-tx {
                        pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
                };
-               pins_rts {
+               pins-rts {
                        pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
                        output-enable;
                };
-               pins_cts {
+               pins-cts {
                        pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
                        input-enable;
                };
        };
 
        wifi_pins_pwrseq: wifi-pins-pwrseq {
-               pins_wifi_enable {
+               pins-wifi-enable {
                        pinmux = <PINMUX_GPIO119__FUNC_GPIO119>;
                        output-low;
                };
        };
 
        wifi_pins_wakeup: wifi-pins-wakeup {
-               pins_wifi_wakeup {
+               pins-wifi-wakeup {
                        pinmux = <PINMUX_GPIO113__FUNC_GPIO113>;
                        input-enable;
                };
index 5169779d01dfb418e9f6306d4533c57ed6bcfcfe..976dc968b3ca14de97798bd5e823dbf552ca8300 100644 (file)
                        nvmem-cell-names = "calibration-data";
                };
 
-               thermal_zones: thermal-zones {
-                       cpu_thermal: cpu-thermal {
-                               polling-delay-passive = <100>;
-                               polling-delay = <500>;
-                               thermal-sensors = <&thermal 0>;
-                               sustainable-power = <5000>;
-
-                               trips {
-                                       threshold: trip-point0 {
-                                               temperature = <68000>;
-                                               hysteresis = <2000>;
-                                               type = "passive";
-                                       };
-
-                                       target: trip-point1 {
-                                               temperature = <80000>;
-                                               hysteresis = <2000>;
-                                               type = "passive";
-                                       };
-
-                                       cpu_crit: cpu-crit {
-                                               temperature = <115000>;
-                                               hysteresis = <2000>;
-                                               type = "critical";
-                                       };
-                               };
-
-                               cooling-maps {
-                                       map0 {
-                                               trip = <&target>;
-                                               cooling-device = <&cpu0
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>,
-                                                                <&cpu1
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>,
-                                                                <&cpu2
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>,
-                                                                <&cpu3
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>;
-                                               contribution = <3072>;
-                                       };
-                                       map1 {
-                                               trip = <&target>;
-                                               cooling-device = <&cpu4
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>,
-                                                                <&cpu5
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>,
-                                                                <&cpu6
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>,
-                                                                <&cpu7
-                                                       THERMAL_NO_LIMIT
-                                                       THERMAL_NO_LIMIT>;
-                                               contribution = <1024>;
-                                       };
-                               };
-                       };
-
-                       /* The tzts1 ~ tzts6 don't need to polling */
-                       /* The tzts1 ~ tzts6 don't need to thermal throttle */
-
-                       tzts1: tzts1 {
-                               polling-delay-passive = <0>;
-                               polling-delay = <0>;
-                               thermal-sensors = <&thermal 1>;
-                               sustainable-power = <5000>;
-                               trips {};
-                               cooling-maps {};
-                       };
-
-                       tzts2: tzts2 {
-                               polling-delay-passive = <0>;
-                               polling-delay = <0>;
-                               thermal-sensors = <&thermal 2>;
-                               sustainable-power = <5000>;
-                               trips {};
-                               cooling-maps {};
-                       };
-
-                       tzts3: tzts3 {
-                               polling-delay-passive = <0>;
-                               polling-delay = <0>;
-                               thermal-sensors = <&thermal 3>;
-                               sustainable-power = <5000>;
-                               trips {};
-                               cooling-maps {};
-                       };
-
-                       tzts4: tzts4 {
-                               polling-delay-passive = <0>;
-                               polling-delay = <0>;
-                               thermal-sensors = <&thermal 4>;
-                               sustainable-power = <5000>;
-                               trips {};
-                               cooling-maps {};
-                       };
-
-                       tzts5: tzts5 {
-                               polling-delay-passive = <0>;
-                               polling-delay = <0>;
-                               thermal-sensors = <&thermal 5>;
-                               sustainable-power = <5000>;
-                               trips {};
-                               cooling-maps {};
-                       };
-
-                       tztsABB: tztsABB {
-                               polling-delay-passive = <0>;
-                               polling-delay = <0>;
-                               thermal-sensors = <&thermal 6>;
-                               sustainable-power = <5000>;
-                               trips {};
-                               cooling-maps {};
-                       };
-               };
-
                pwm0: pwm@1100e000 {
                        compatible = "mediatek,mt8183-disp-pwm";
                        reg = <0 0x1100e000 0 0x1000>;
                        power-domains = <&spm MT8183_POWER_DOMAIN_CAM>;
                };
        };
+
+       thermal_zones: thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       polling-delay-passive = <100>;
+                       polling-delay = <500>;
+                       thermal-sensors = <&thermal 0>;
+                       sustainable-power = <5000>;
+
+                       trips {
+                               threshold: trip-point0 {
+                                       temperature = <68000>;
+                                       hysteresis = <2000>;
+                                       type = "passive";
+                               };
+
+                               target: trip-point1 {
+                                       temperature = <80000>;
+                                       hysteresis = <2000>;
+                                       type = "passive";
+                               };
+
+                               cpu_crit: cpu-crit {
+                                       temperature = <115000>;
+                                       hysteresis = <2000>;
+                                       type = "critical";
+                               };
+                       };
+
+                       cooling-maps {
+                               map0 {
+                                       trip = <&target>;
+                                       cooling-device = <&cpu0
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>,
+                                                        <&cpu1
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>,
+                                                        <&cpu2
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>,
+                                                        <&cpu3
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>;
+                                       contribution = <3072>;
+                               };
+                               map1 {
+                                       trip = <&target>;
+                                       cooling-device = <&cpu4
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>,
+                                                        <&cpu5
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>,
+                                                        <&cpu6
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>,
+                                                        <&cpu7
+                                               THERMAL_NO_LIMIT
+                                               THERMAL_NO_LIMIT>;
+                                       contribution = <1024>;
+                               };
+                       };
+               };
+
+               /* The tzts1 ~ tzts6 don't need to polling */
+               /* The tzts1 ~ tzts6 don't need to thermal throttle */
+
+               tzts1: tzts1 {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&thermal 1>;
+                       sustainable-power = <5000>;
+                       trips {};
+                       cooling-maps {};
+               };
+
+               tzts2: tzts2 {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&thermal 2>;
+                       sustainable-power = <5000>;
+                       trips {};
+                       cooling-maps {};
+               };
+
+               tzts3: tzts3 {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&thermal 3>;
+                       sustainable-power = <5000>;
+                       trips {};
+                       cooling-maps {};
+               };
+
+               tzts4: tzts4 {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&thermal 4>;
+                       sustainable-power = <5000>;
+                       trips {};
+                       cooling-maps {};
+               };
+
+               tzts5: tzts5 {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&thermal 5>;
+                       sustainable-power = <5000>;
+                       trips {};
+                       cooling-maps {};
+               };
+
+               tztsABB: tztsABB {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&thermal 6>;
+                       sustainable-power = <5000>;
+                       trips {};
+                       cooling-maps {};
+               };
+       };
 };
index f04ae70c470aa3f409579187c12ac9eac77b4cd9..df0c04f2ba1da9c934e08c76bf5146ff20d694e0 100644 (file)
                                        reg = <MT8186_POWER_DOMAIN_CSIRX_TOP>;
                                        clocks = <&topckgen CLK_TOP_SENINF>,
                                                 <&topckgen CLK_TOP_SENINF1>;
-                                       clock-names = "csirx_top0", "csirx_top1";
+                                       clock-names = "subsys-csirx-top0",
+                                                     "subsys-csirx-top1";
                                        #power-domain-cells = <0>;
                                };
 
                                        reg = <MT8186_POWER_DOMAIN_ADSP_AO>;
                                        clocks = <&topckgen CLK_TOP_AUDIODSP>,
                                                 <&topckgen CLK_TOP_ADSP_BUS>;
-                                       clock-names = "audioadsp", "adsp_bus";
+                                       clock-names = "audioadsp",
+                                                     "subsys-adsp-bus";
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                        #power-domain-cells = <1>;
                                                 <&mmsys CLK_MM_SMI_COMMON>,
                                                 <&mmsys CLK_MM_SMI_GALS>,
                                                 <&mmsys CLK_MM_SMI_IOMMU>;
-                                       clock-names = "disp", "mdp", "smi_infra", "smi_common",
-                                                    "smi_gals", "smi_iommu";
+                                       clock-names = "disp", "mdp",
+                                                     "subsys-smi-infra",
+                                                     "subsys-smi-common",
+                                                     "subsys-smi-gals",
+                                                     "subsys-smi-iommu";
                                        mediatek,infracfg = <&infracfg_ao>;
                                        #address-cells = <1>;
                                        #size-cells = <0>;
 
                                        power-domain@MT8186_POWER_DOMAIN_CAM {
                                                reg = <MT8186_POWER_DOMAIN_CAM>;
-                                               clocks = <&topckgen CLK_TOP_CAM>,
-                                                        <&topckgen CLK_TOP_SENINF>,
+                                               clocks = <&topckgen CLK_TOP_SENINF>,
                                                         <&topckgen CLK_TOP_SENINF1>,
                                                         <&topckgen CLK_TOP_SENINF2>,
                                                         <&topckgen CLK_TOP_SENINF3>,
+                                                        <&camsys CLK_CAM2MM_GALS>,
                                                         <&topckgen CLK_TOP_CAMTM>,
-                                                        <&camsys CLK_CAM2MM_GALS>;
-                                               clock-names = "cam-top", "cam0", "cam1", "cam2",
-                                                            "cam3", "cam-tm", "gals";
+                                                        <&topckgen CLK_TOP_CAM>;
+                                               clock-names = "cam0", "cam1", "cam2",
+                                                             "cam3", "gals",
+                                                             "subsys-cam-tm",
+                                                             "subsys-cam-top";
                                                mediatek,infracfg = <&infracfg_ao>;
                                                #address-cells = <1>;
                                                #size-cells = <0>;
 
                                        power-domain@MT8186_POWER_DOMAIN_IMG {
                                                reg = <MT8186_POWER_DOMAIN_IMG>;
-                                               clocks = <&topckgen CLK_TOP_IMG1>,
-                                                        <&imgsys1 CLK_IMG1_GALS_IMG1>;
-                                               clock-names = "img-top", "gals";
+                                               clocks = <&imgsys1 CLK_IMG1_GALS_IMG1>,
+                                                        <&topckgen CLK_TOP_IMG1>;
+                                               clock-names = "gals", "subsys-img-top";
                                                mediatek,infracfg = <&infracfg_ao>;
                                                #address-cells = <1>;
                                                #size-cells = <0>;
                                                         <&ipesys CLK_IPE_LARB20>,
                                                         <&ipesys CLK_IPE_SMI_SUBCOM>,
                                                         <&ipesys CLK_IPE_GALS_IPE>;
-                                               clock-names = "ipe-top", "ipe-larb0", "ipe-larb1",
-                                                             "ipe-smi", "ipe-gals";
+                                               clock-names = "subsys-ipe-top",
+                                                             "subsys-ipe-larb0",
+                                                             "subsys-ipe-larb1",
+                                                             "subsys-ipe-smi",
+                                                             "subsys-ipe-gals";
                                                mediatek,infracfg = <&infracfg_ao>;
                                                #power-domain-cells = <0>;
                                        };
                                                clocks = <&topckgen CLK_TOP_WPE>,
                                                         <&wpesys CLK_WPE_SMI_LARB8_CK_EN>,
                                                         <&wpesys CLK_WPE_SMI_LARB8_PCLK_EN>;
-                                               clock-names = "wpe0", "larb-ck", "larb-pclk";
+                                               clock-names = "wpe0",
+                                                             "subsys-larb-ck",
+                                                             "subsys-larb-pclk";
                                                mediatek,infracfg = <&infracfg_ao>;
                                                #power-domain-cells = <0>;
                                        };
                        #address-cells = <1>;
                        #size-cells = <1>;
 
-                       gpu_speedbin: gpu-speed-bin@59c {
+                       gpu_speedbin: gpu-speedbin@59c {
                                reg = <0x59c 0x4>;
                                bits = <0 3>;
                        };
index dd5b89b73190392cedeb7ecb7822fa9613c18b1d..5a7cab489ff3ace4d45807378c6bc9973af994cf 100644 (file)
        pinctrl-0 = <&i2c7_pins>;
 
        pmic@34 {
-               #interrupt-cells = <1>;
+               #interrupt-cells = <2>;
                compatible = "mediatek,mt6360";
                reg = <0x34>;
                interrupt-controller;
index 54c674c45b49a27c223b4240db452284f01f5f15..e0ac2e9f5b7204a646514f1793b880b7adeb36fe 100644 (file)
 
                                        power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
                                                reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
+                                               clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
+                                               clock-names = "venc1-larb";
                                                mediatek,infracfg = <&infracfg_ao>;
                                                #power-domain-cells = <0>;
                                        };
 
                                                power-domain@MT8195_POWER_DOMAIN_VENC {
                                                        reg = <MT8195_POWER_DOMAIN_VENC>;
+                                                       clocks = <&vencsys CLK_VENC_LARB>;
+                                                       clock-names = "venc0-larb";
                                                        mediatek,infracfg = <&infracfg_ao>;
                                                        #power-domain-cells = <0>;
                                                };
                        reg = <0 0x1b010000 0 0x1000>;
                        mediatek,larb-id = <20>;
                        mediatek,smi = <&smi_common_vpp>;
-                       clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>,
+                       clocks = <&vencsys_core1 CLK_VENC_CORE1_VENC>,
                                 <&vencsys_core1 CLK_VENC_CORE1_GALS>,
                                 <&vppsys0 CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1>;
                        clock-names = "apb", "smi", "gals";
index de0a1f2af983bee33e127f87e2cdba16b544e54c..7d4c5324c61bced0e082a366fbf8d27ea213bf14 100644 (file)
@@ -86,7 +86,7 @@
        sgtl5000_clk: sgtl5000-oscillator {
                compatible = "fixed-clock";
                #clock-cells = <0>;
-               clock-frequency  = <24576000>;
+               clock-frequency = <24576000>;
        };
 
        dc_12v: dc-12v-regulator {
index e729e7a22b23a6a2e93374665d6f1ce2d03e9596..cc8209795c3e53b7be5ce1e5a2f7262469767b71 100644 (file)
 
        vdec: video-codec@ff360000 {
                compatible = "rockchip,rk3328-vdec", "rockchip,rk3399-vdec";
-               reg = <0x0 0xff360000 0x0 0x400>;
+               reg = <0x0 0xff360000 0x0 0x480>;
                interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru ACLK_RKVDEC>, <&cru HCLK_RKVDEC>,
                         <&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>;
index 5c1929d41cc0b700998f7aa048cc815306b0f97c..cacbad35cfc854ce6823fe9901165f42cfbd2aef 100644 (file)
@@ -509,8 +509,7 @@ ap_i2c_tp: &i2c5 {
 &pci_rootport {
        mvl_wifi: wifi@0,0 {
                compatible = "pci1b4b,2b42";
-               reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
-                      0x83010000 0x0 0x00100000 0x0 0x00100000>;
+               reg = <0x0000 0x0 0x0 0x0 0x0>;
                interrupt-parent = <&gpio0>;
                interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
                pinctrl-names = "default";
index 853e88455e750ec1738c10cb42d75e0f2eed3985..9e4b12ed62cbed9f1bc8e1970c4d3b57d253ac32 100644 (file)
@@ -34,8 +34,8 @@
 &pci_rootport {
        wifi@0,0 {
                compatible = "qcom,ath10k";
-               reg = <0x00010000 0x0 0x00000000 0x0 0x00000000>,
-                     <0x03010010 0x0 0x00000000 0x0 0x00200000>;
+               reg = <0x00000000 0x0 0x00000000 0x0 0x00000000>,
+                     <0x03000010 0x0 0x00000000 0x0 0x00200000>;
                qcom,ath10k-calibration-variant = "GO_DUMO";
        };
 };
index c9bf1d5c3a426418f97e34c0043c8013de690e0f..789fd0dcc88baadb05367b16d80b6c7019941160 100644 (file)
@@ -489,6 +489,7 @@ ap_i2c_audio: &i2c8 {
                #address-cells = <3>;
                #size-cells = <2>;
                ranges;
+               device_type = "pci";
        };
 };
 
index faf02e59d6c73ccc573dd52f357953bc17d68742..da0dfb237f853f9403f2acfa007e82c0418375b8 100644 (file)
                        power-domain@RK3399_PD_VDU {
                                reg = <RK3399_PD_VDU>;
                                clocks = <&cru ACLK_VDU>,
-                                        <&cru HCLK_VDU>;
+                                        <&cru HCLK_VDU>,
+                                        <&cru SCLK_VDU_CA>,
+                                        <&cru SCLK_VDU_CORE>;
                                pm_qos = <&qos_video_m1_r>,
                                         <&qos_video_m1_w>;
                                #power-domain-cells = <0>;
 
        vdec: video-codec@ff660000 {
                compatible = "rockchip,rk3399-vdec";
-               reg = <0x0 0xff660000 0x0 0x400>;
+               reg = <0x0 0xff660000 0x0 0x480>;
                interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
                clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
                         <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
index 0964761e3ce9eb6a650fe3f038bfd169b4c1c94e..c19c0f1b3778fe79f68d3657cb2b6512f70913f2 100644 (file)
                             <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-names = "sys", "pmc", "msi", "legacy", "err";
+               interrupt-names = "sys", "pmc", "msg", "legacy", "err";
                bus-range = <0x0 0xf>;
                clocks = <&cru ACLK_PCIE20_MST>, <&cru ACLK_PCIE20_SLV>,
                         <&cru ACLK_PCIE20_DBI>, <&cru PCLK_PCIE20>,
index 9570b34aca2e9308b63fb49bd44af8415454b515..d88c0e852356518a95f9dd9d8bb1c5bccd999384 100644 (file)
 &pinctrl {
        fan {
                fan_int: fan-int {
-                       rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+                       rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
                };
        };
 
        hym8563 {
                hym8563_int: hym8563-int {
-                       rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+                       rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
                };
        };
 
index 8f399c4317bdadb54540b2099d1e869fa94d979d..e3a839a12dc6f07bb4247fc30bb2af18b15d4ac2 100644 (file)
@@ -38,7 +38,7 @@
        leds {
                compatible = "gpio-leds";
                pinctrl-names = "default";
-               pinctrl-0 =<&leds_gpio>;
+               pinctrl-0 = <&leds_gpio>;
 
                led-1 {
                        gpios = <&gpio1 RK_PA2 GPIO_ACTIVE_HIGH>;
index 63151d9d237755f4c471e96e18dd90e09675de47..30db12c4fc82b54ca90eefafa4720bda57e0f9e4 100644 (file)
                emmc_data_strobe: emmc-data-strobe {
                        rockchip,pins =
                                /* emmc_data_strobe */
-                               <2 RK_PA2 1 &pcfg_pull_none>;
+                               <2 RK_PA2 1 &pcfg_pull_down>;
                };
        };
 
index 7064c0e9179f1d868c5ebf28645f1f83a205098e..8aa0499f9b032d3a2da92d416421e56df15e9f06 100644 (file)
                             <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>,
                             <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH 0>,
                             <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
-               interrupt-names = "ch0", "ch1", "ch2", "ch3";
                rockchip,pmu = <&pmu1grf>;
        };
 
index b19a8aee684c873f2cea80947548af6a358b57ee..79ce70fbb751c616074fe24f0ea9772391a12ea9 100644 (file)
@@ -834,6 +834,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
                pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
 
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+       /*
+        * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
+        * dirtiness again.
+        */
+       if (pte_sw_dirty(pte))
+               pte = pte_mkdirty(pte);
        return pte;
 }
 
index 339a55194b2c63e78a6c8083fe7acb34a3cfa5af..74a67ad87f29de261a712c275573395e02d3351d 100644 (file)
@@ -436,6 +436,10 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
        if (ret)
                goto out;
 
+       /* Silently exit if the vLPI is already mapped */
+       if (irq->hw)
+               goto out;
+
        /*
         * Emit the mapping request. If it fails, the ITS probably
         * isn't v4 compatible, so let's silently bail out. Holding
index 204b94b2e6aaa6e3afc71b292004fb3b329256b2..4ba8d67ddb097743be4e68493604579142eee2d5 100644 (file)
@@ -83,7 +83,7 @@ endif
 
 ifeq ($(CONFIG_RELOCATABLE),y)
 KBUILD_CFLAGS_KERNEL           += -fPIE
-LDFLAGS_vmlinux                        += -static -pie --no-dynamic-linker -z notext
+LDFLAGS_vmlinux                        += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
 endif
 
 cflags-y += $(call cc-option, -mno-check-zero-division)
index 091897d40b0375758b4822ae1a95719013314709..91d81f9730ab3f81c8e6cba875a6fdadc4b0657a 100644 (file)
@@ -32,6 +32,6 @@ static inline unsigned long efi_get_kimg_min_align(void)
 
 #define EFI_KIMG_PREFERRED_ADDRESS     PHYSADDR(VMLINUX_LOAD_ADDRESS)
 
-unsigned long kernel_entry_address(void);
+unsigned long kernel_entry_address(unsigned long kernel_addr);
 
 #endif /* _ASM_LOONGARCH_EFI_H */
index b9a4ab54285c114360c05f420b023121de44de79..9b16a3b8e70608c8765f838cfd21925d4fe51145 100644 (file)
@@ -293,7 +293,7 @@ extern const char *__elf_platform;
 #define ELF_PLAT_INIT(_r, load_addr)   do { \
        _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0;      \
        _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0;      \
-       _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0;   \
+       _r->regs[9] = _r->regs[10] /* syscall n */ = _r->regs[12] = 0;  \
        _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0;  \
        _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0;  \
        _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0;  \
index 9b4957cefa8ad24cdd792629240d5cc08bed264a..46366e783c84112f40d965d10793bd14f5d4221e 100644 (file)
 
 static __always_inline u64 drdtime(void)
 {
-       int rID = 0;
        u64 val = 0;
 
        __asm__ __volatile__(
-               "rdtime.d %0, %1 \n\t"
-               : "=r"(val), "=r"(rID)
+               "rdtime.d %0, $zero\n\t"
+               : "=r"(val)
                :
                );
        return val;
index 4fcc168f07323154b4d7fc6712ab7a2298bb0d3e..3c808c6803703c931d5bb52b6eefd71464da726c 100644 (file)
@@ -57,7 +57,7 @@ obj-$(CONFIG_MAGIC_SYSRQ)     += sysrq.o
 
 obj-$(CONFIG_RELOCATABLE)      += relocate.o
 
-obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE)       += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 
 obj-$(CONFIG_UNWINDER_GUESS)   += unwind_guess.o
index 92270f14db948271b00167ef0887703620b50c31..f623feb2129f12829b623d77c84d2ba6f677a689 100644 (file)
@@ -32,7 +32,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
        }
 
        for (unwind_start(&state, task, regs);
-            !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
+            !unwind_done(&state); unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || !consume_entry(cookie, addr))
                        break;
index ba324ba76fa15605d9ada6c26101ce78aa108df1..a463d6961344c0899aacd72a8951b9cc5d18ea83 100644 (file)
@@ -28,6 +28,5 @@ bool default_next_frame(struct unwind_state *state)
 
        } while (!get_stack_info(state->sp, state->task, info));
 
-       state->error = true;
        return false;
 }
index 55afc27320e12a1c52fd98445ef32f383f1d2bf1..929ae240280a5fb67cb9b34fde0f84045279a3eb 100644 (file)
@@ -227,7 +227,7 @@ static bool next_frame(struct unwind_state *state)
        } while (!get_stack_info(state->sp, state->task, info));
 
 out:
-       state->error = true;
+       state->stack_info.type = STACK_TYPE_UNKNOWN;
        return false;
 }
 
index 169ff8b3915e6cc955cccf839c1b717b7f3e1bfd..4fcd6cd6da234d4dc4120cb2c4b95a69e2577358 100644 (file)
@@ -480,10 +480,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
                case 8:
                        move_reg(ctx, t1, src);
                        emit_insn(ctx, extwb, dst, t1);
+                       emit_zext_32(ctx, dst, is32);
                        break;
                case 16:
                        move_reg(ctx, t1, src);
                        emit_insn(ctx, extwh, dst, t1);
+                       emit_zext_32(ctx, dst, is32);
                        break;
                case 32:
                        emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
@@ -772,8 +774,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
                        break;
                case 32:
                        emit_insn(ctx, revb2w, dst, dst);
-                       /* zero-extend 32 bits into 64 bits */
-                       emit_zext_32(ctx, dst, is32);
+                       /* clear the upper 32 bits */
+                       emit_zext_32(ctx, dst, true);
                        break;
                case 64:
                        emit_insn(ctx, revbd, dst, dst);
@@ -911,8 +913,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
        /* function return */
        case BPF_JMP | BPF_EXIT:
-               emit_sext_32(ctx, regmap[BPF_REG_0], true);
-
                if (i == ctx->prog->len - 1)
                        break;
 
@@ -988,14 +988,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
                        }
                        break;
                case BPF_DW:
-                       if (is_signed_imm12(off)) {
-                               emit_insn(ctx, ldd, dst, src, off);
-                       } else if (is_signed_imm14(off)) {
-                               emit_insn(ctx, ldptrd, dst, src, off);
-                       } else {
-                               move_imm(ctx, t1, off, is32);
-                               emit_insn(ctx, ldxd, dst, src, t1);
-                       }
+                       move_imm(ctx, t1, off, is32);
+                       emit_insn(ctx, ldxd, dst, src, t1);
                        break;
                }
 
index f5a8b2defa4bcee418aea24ab85b23c9d6fd1b06..3b0b64f0a353170ecac839677f6a8ae02bc8feb0 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _ASM_M68K_KEXEC_H
 #define _ASM_M68K_KEXEC_H
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 
 /* Maximum physical address we can use pages from */
 #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
@@ -25,6 +25,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 
 #endif /* _ASM_M68K_KEXEC_H */
index 01fb69a5095f4375d641a4f0f7958b907d7b716c..f335bf3268a108a45bab079fbf0a1c8ead9beb71 100644 (file)
@@ -25,7 +25,7 @@ obj-$(CONFIG_PCI) += pcibios.o
 
 obj-$(CONFIG_M68K_NONCOHERENT_DMA) += dma.o
 
-obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE)       += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_BOOTINFO_PROC)    += bootinfo_proc.o
 obj-$(CONFIG_UBOOT)            += uboot.o
 
index 76db82542519c5555f384fb2aaa5e5587a6ee820..797ae590ebdba505c313b448720c7207b29673f8 100644 (file)
@@ -460,6 +460,7 @@ config MACH_LOONGSON2EF
 
 config MACH_LOONGSON64
        bool "Loongson 64-bit family of machines"
+       select ARCH_DMA_DEFAULT_COHERENT
        select ARCH_SPARSEMEM_ENABLE
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
@@ -1251,6 +1252,7 @@ config CPU_LOONGSON64
        select CPU_SUPPORTS_MSA
        select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
        select CPU_MIPSR2_IRQ_VI
+       select DMA_NONCOHERENT
        select WEAK_ORDERING
        select WEAK_REORDERING_BEYOND_LLSC
        select MIPS_ASID_BITS_VARIABLE
index f878f47e4501bcd90e6e6faac10e50ef44b4c2ae..ee3e2153dd13fb78894f25d9efd476075c9d86d8 100644 (file)
                                compatible = "pci0014,7a03.0",
                                                   "pci0014,7a03",
                                                   "pciclass0c0320",
-                                                  "pciclass0c03",
-                                                  "loongson, pci-gmac";
+                                                  "pciclass0c03";
 
                                reg = <0x1800 0x0 0x0 0x0 0x0>;
                                interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
index 7c69e8245c2f10aa30a98fef4601e271ac640d34..cce9428afc41fc3ec1e347a344f6e48b9b345ca4 100644 (file)
                                compatible = "pci0014,7a03.0",
                                                   "pci0014,7a03",
                                                   "pciclass020000",
-                                                  "pciclass0200",
-                                                  "loongson, pci-gmac";
+                                                  "pciclass0200";
 
                                reg = <0x1800 0x0 0x0 0x0 0x0>;
                                interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
index 33c09688210fff313b48e438986b95da0f53be6c..08ea2cde1eb5b3af1ccec1badec2f665b1a4bc92 100644 (file)
@@ -422,7 +422,7 @@ static const struct plat_smp_ops octeon_smp_ops = {
        .cpu_disable            = octeon_cpu_disable,
        .cpu_die                = octeon_cpu_die,
 #endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        .kexec_nonboot_cpu      = kexec_nonboot_cpu_jump,
 #endif
 };
@@ -502,7 +502,7 @@ static const struct plat_smp_ops octeon_78xx_smp_ops = {
        .cpu_disable            = octeon_cpu_disable,
        .cpu_die                = octeon_cpu_die,
 #endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        .kexec_nonboot_cpu      = kexec_nonboot_cpu_jump,
 #endif
 };
index d6d5fa5cc31dd7f20f9a729690d4ef40af4c545f..69e579e41e6623e8c2fd26d6ca9daf8438e4469c 100644 (file)
@@ -31,7 +31,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
                prepare_frametrace(newregs);
 }
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 struct kimage;
 extern unsigned long kexec_args[4];
 extern int (*_machine_kexec_prepare)(struct kimage *);
index 035b1a69e2d00dca84bc2a9978978da575dfc991..e007edd6b60a7ebfa59da9633ab9e68738d5404d 100644 (file)
 #define ADAPTER_ROM            8
 #define ACPI_TABLE             9
 #define SMBIOS_TABLE           10
-#define MAX_MEMORY_TYPE                11
+#define UMA_VIDEO_RAM          11
+#define VUMA_VIDEO_RAM         12
+#define MAX_MEMORY_TYPE                13
+
+#define MEM_SIZE_IS_IN_BYTES   (1 << 31)
 
 #define LOONGSON3_BOOT_MEM_MAP_MAX 128
 struct efi_memory_map_loongson {
@@ -117,7 +121,8 @@ struct irq_source_routing_table {
        u64 pci_io_start_addr;
        u64 pci_io_end_addr;
        u64 pci_config_addr;
-       u32 dma_mask_bits;
+       u16 dma_mask_bits;
+       u16 dma_noncoherent;
 } __packed;
 
 struct interface_info {
index 5719ff49eff1c88f0e3b44ee6f23287c6c4862e8..0c59e168f8008ca88685cf298f44053e097af1df 100644 (file)
@@ -35,7 +35,7 @@ struct plat_smp_ops {
        void (*cpu_die)(unsigned int cpu);
        void (*cleanup_dead_cpu)(unsigned cpu);
 #endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        void (*kexec_nonboot_cpu)(void);
 #endif
 };
index a40d8c0e4b879b2a12ad055c1940b0f63f3112fb..901bc61fa7ae922619728b29ecf066bb19a6755b 100644 (file)
@@ -93,7 +93,7 @@ static inline void __cpu_die(unsigned int cpu)
 extern void __noreturn play_dead(void);
 #endif
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 static inline void kexec_nonboot_cpu(void)
 {
        extern const struct plat_smp_ops *mp_ops;       /* private */
index 853a43ee4b446ebaf323480d0fab337b462ce2c5..ecf3278a32f7077f29bee3b5f1b535eb8b2c5d06 100644 (file)
@@ -90,7 +90,7 @@ obj-$(CONFIG_GPIO_TXX9)               += gpio_txx9.o
 
 obj-$(CONFIG_RELOCATABLE)      += relocate.o
 
-obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_KEXEC_CORE)       += machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
 obj-$(CONFIG_EARLY_PRINTK_8250)        += early_printk_8250.o
index 5387ed0a51862b66609d990cb039b27261a5c234..b630604c577f9ff3f2493b0f254363e499c8318c 100644 (file)
@@ -121,6 +121,19 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        /*  Put the stack after the struct pt_regs.  */
        childksp = (unsigned long) childregs;
        p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
+
+       /*
+        * New tasks lose permission to use the fpu. This accelerates context
+        * switching for most programs since they don't use the fpu.
+        */
+       clear_tsk_thread_flag(p, TIF_USEDFPU);
+       clear_tsk_thread_flag(p, TIF_USEDMSA);
+       clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+       clear_tsk_thread_flag(p, TIF_FPUBOUND);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
        if (unlikely(args->fn)) {
                /* kernel thread */
                unsigned long status = p->thread.cp0_status;
@@ -149,20 +162,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;
 
-       /*
-        * New tasks lose permission to use the fpu. This accelerates context
-        * switching for most programs since they don't use the fpu.
-        */
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
 
-       clear_tsk_thread_flag(p, TIF_USEDFPU);
-       clear_tsk_thread_flag(p, TIF_USEDMSA);
-       clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-       clear_tsk_thread_flag(p, TIF_FPUBOUND);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
 #ifdef CONFIG_MIPS_FP_SUPPORT
        atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
 #endif
index c074ecce3fbf29a313c1ddc9573967d21ce5d7e3..b3dbf9ecb0d63ea81102114b1886b0cd88b23cd8 100644 (file)
@@ -434,7 +434,7 @@ const struct plat_smp_ops bmips43xx_smp_ops = {
        .cpu_disable            = bmips_cpu_disable,
        .cpu_die                = bmips_cpu_die,
 #endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        .kexec_nonboot_cpu      = kexec_nonboot_cpu_jump,
 #endif
 };
@@ -451,7 +451,7 @@ const struct plat_smp_ops bmips5000_smp_ops = {
        .cpu_disable            = bmips_cpu_disable,
        .cpu_die                = bmips_cpu_die,
 #endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        .kexec_nonboot_cpu      = kexec_nonboot_cpu_jump,
 #endif
 };
index dd55d59b88db34e07924e1f64b7e139a8e56274b..f6c37d407f365fc3df4dbaf807a17550856a8c36 100644 (file)
@@ -392,7 +392,7 @@ static void cps_smp_finish(void)
        local_irq_enable();
 }
 
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
 
 enum cpu_death {
        CPU_DEATH_HALT,
@@ -429,7 +429,7 @@ static void cps_shutdown_this_cpu(enum cpu_death death)
        }
 }
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 
 static void cps_kexec_nonboot_cpu(void)
 {
@@ -439,9 +439,9 @@ static void cps_kexec_nonboot_cpu(void)
                cps_shutdown_this_cpu(CPU_DEATH_POWER);
 }
 
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 
-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -610,7 +610,7 @@ static const struct plat_smp_ops cps_smp_ops = {
        .cpu_die                = cps_cpu_die,
        .cleanup_dead_cpu       = cps_cleanup_dead_cpu,
 #endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        .kexec_nonboot_cpu      = cps_kexec_nonboot_cpu,
 #endif
 };
index 8fbef537fb8859099da21db3d79c4379e27e5e88..82e2e051b4161c679602fd7f0ade283c81d1795e 100644 (file)
@@ -351,10 +351,11 @@ early_initcall(mips_smp_ipi_init);
  */
 asmlinkage void start_secondary(void)
 {
-       unsigned int cpu;
+       unsigned int cpu = raw_smp_processor_id();
 
        cpu_probe();
        per_cpu_trap_init(false);
+       rcutree_report_cpu_starting(cpu);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
@@ -366,7 +367,6 @@ asmlinkage void start_secondary(void)
         */
 
        calibrate_delay();
-       cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;
 
        set_cpu_sibling_map(cpu);
index c961e2999f15ac83fa0eed2541564799813a94f2..ef3750a6ffacf861ad8c9cabf97ae2deacbd2188 100644 (file)
@@ -13,6 +13,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  */
+
+#include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <linux/pci_ids.h>
 #include <asm/bootinfo.h>
@@ -147,8 +149,14 @@ void __init prom_lefi_init_env(void)
 
        loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
        if (loongson_sysconf.dma_mask_bits < 32 ||
-               loongson_sysconf.dma_mask_bits > 64)
+                       loongson_sysconf.dma_mask_bits > 64) {
                loongson_sysconf.dma_mask_bits = 32;
+               dma_default_coherent = true;
+       } else {
+               dma_default_coherent = !eirq_source->dma_noncoherent;
+       }
+
+       pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
 
        loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
        loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
index ee8de1735b7c04095fc345e3bbc330b62612ac26..f25caa6aa9d306e84d719e97ea54f7b8faa449c1 100644 (file)
@@ -49,8 +49,7 @@ void virtual_early_config(void)
 void __init szmem(unsigned int node)
 {
        u32 i, mem_type;
-       static unsigned long num_physpages;
-       u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
+       phys_addr_t node_id, mem_start, mem_size;
 
        /* Otherwise come from DTB */
        if (loongson_sysconf.fw_interface != LOONGSON_LEFI)
@@ -64,30 +63,46 @@ void __init szmem(unsigned int node)
 
                mem_type = loongson_memmap->map[i].mem_type;
                mem_size = loongson_memmap->map[i].mem_size;
-               mem_start = loongson_memmap->map[i].mem_start;
+
+               /* Memory size comes in MB if MEM_SIZE_IS_IN_BYTES not set */
+               if (mem_size & MEM_SIZE_IS_IN_BYTES)
+                       mem_size &= ~MEM_SIZE_IS_IN_BYTES;
+               else
+                       mem_size = mem_size << 20;
+
+               mem_start = (node_id << 44) | loongson_memmap->map[i].mem_start;
 
                switch (mem_type) {
                case SYSTEM_RAM_LOW:
                case SYSTEM_RAM_HIGH:
-                       start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
-                       node_psize = (mem_size << 20) >> PAGE_SHIFT;
-                       end_pfn  = start_pfn + node_psize;
-                       num_physpages += node_psize;
-                       pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
-                               (u32)node_id, mem_type, mem_start, mem_size);
-                       pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
-                               start_pfn, end_pfn, num_physpages);
-                       memblock_add_node(PFN_PHYS(start_pfn),
-                                         PFN_PHYS(node_psize), node,
+               case UMA_VIDEO_RAM:
+                       pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes usable\n",
+                               (u32)node_id, mem_type, &mem_start, &mem_size);
+                       memblock_add_node(mem_start, mem_size, node,
                                          MEMBLOCK_NONE);
                        break;
                case SYSTEM_RAM_RESERVED:
-                       pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
-                               (u32)node_id, mem_type, mem_start, mem_size);
-                       memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
+               case VIDEO_ROM:
+               case ADAPTER_ROM:
+               case ACPI_TABLE:
+               case SMBIOS_TABLE:
+                       pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes reserved\n",
+                               (u32)node_id, mem_type, &mem_start, &mem_size);
+                       memblock_reserve(mem_start, mem_size);
+                       break;
+               /* We should not reserve VUMA_VIDEO_RAM as it overlaps with MMIO */
+               case VUMA_VIDEO_RAM:
+               default:
+                       pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes unhandled\n",
+                               (u32)node_id, mem_type, &mem_start, &mem_size);
                        break;
                }
        }
+
+       /* Reserve vgabios if it comes from firmware */
+       if (loongson_sysconf.vgabios_addr)
+               memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
+                               SZ_256K);
 }
 
 #ifndef CONFIG_NUMA
index e420800043b0897b70f0775a452233db6f50d810..e01c8d4a805a91586e9ba09a686fa1ca89672280 100644 (file)
@@ -53,7 +53,7 @@ static void loongson_halt(void)
        }
 }
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 
 /* 0X80000000~0X80200000 is safe */
 #define MAX_ARGS       64
@@ -158,7 +158,7 @@ static int __init mips_reboot_setup(void)
        _machine_halt = loongson_halt;
        pm_power_off = loongson_poweroff;
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
        if (WARN_ON(!kexec_argv))
                return -ENOMEM;
index e015a26a40f7a55388da15033ad0b2ff08cf4777..498bdc1bb0ede8aafbf1767065a9002fb2e4b26f 100644 (file)
@@ -864,7 +864,7 @@ const struct plat_smp_ops loongson3_smp_ops = {
        .cpu_disable = loongson3_cpu_disable,
        .cpu_die = loongson3_cpu_die,
 #endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
 #endif
 };
index 1641ff9a8b83e0bab486f45d398a026d2bc83acb..833555f74ffa7241a44af41f58b32164ba381169 100644 (file)
@@ -71,7 +71,7 @@
                asm volatile("\n"                                       \
                             "1:\t" PARISC_BUG_BREAK_ASM "\n"           \
                             "\t.pushsection __bug_table,\"a\"\n"       \
-                            "\t.align %2\n"                            \
+                            "\t.align 4\n"                             \
                             "2:\t" __BUG_REL(1b) "\n"                  \
                             "\t.short %0\n"                            \
                             "\t.blockz %1-4-2\n"                       \
index 8d3eacb50d56013c49ededc48f77c26d245d2ea9..9d44e6630908d2657e102d81501b574671fb0fd4 100644 (file)
@@ -301,7 +301,6 @@ CONFIG_WQ_WATCHDOG=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_DEBUG_CREDENTIALS=y
 # CONFIG_FTRACE is not set
 CONFIG_XMON=y
 # CONFIG_RUNTIME_TESTING_MENU is not set
index 90701885762cf1ccbd739f93d8a2179811dce4a1..40677416d7b2622da03574fdbdbb86057fff95ac 100644 (file)
@@ -62,7 +62,7 @@
        .endif
 
        /* Save previous stack pointer (r1) */
-       addi    r8, r1, SWITCH_FRAME_SIZE
+       addi    r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
        PPC_STL r8, GPR1(r1)
 
        .if \allregs == 1
@@ -182,7 +182,7 @@ ftrace_no_trace:
        mflr    r3
        mtctr   r3
        REST_GPR(3, r1)
-       addi    r1, r1, SWITCH_FRAME_SIZE
+       addi    r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
        mtlr    r0
        bctr
 #endif
index b1f25bac280b4e824e68315265a2dabab8164b15..71d52a670d951be33d7fb0e3e3219ef3e21ec971 100644 (file)
@@ -385,11 +385,15 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
         * same fault IRQ is not freed by the OS before.
         */
        mutex_lock(&vas_pseries_mutex);
-       if (migration_in_progress)
+       if (migration_in_progress) {
                rc = -EBUSY;
-       else
+       } else {
                rc = allocate_setup_window(txwin, (u64 *)&domain[0],
                                   cop_feat_caps->win_type);
+               if (!rc)
+                       caps->nr_open_wins_progress++;
+       }
+
        mutex_unlock(&vas_pseries_mutex);
        if (rc)
                goto out;
@@ -404,8 +408,17 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
                goto out_free;
 
        txwin->win_type = cop_feat_caps->win_type;
-       mutex_lock(&vas_pseries_mutex);
+
        /*
+        * The migration SUSPEND thread sets migration_in_progress and
+        * closes all open windows from the list. But the window is
+        * added to the list after open and modify HCALLs. So possible
+        * that migration_in_progress is set before modify HCALL which
+        * may cause some windows are still open when the hypervisor
+        * initiates the migration.
+        * So checks the migration_in_progress flag again and close all
+        * open windows.
+        *
         * Possible to lose the acquired credit with DLPAR core
         * removal after the window is opened. So if there are any
         * closed windows (means with lost credits), do not give new
@@ -413,9 +426,11 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
         * after the existing windows are reopened when credits are
         * available.
         */
-       if (!caps->nr_close_wins) {
+       mutex_lock(&vas_pseries_mutex);
+       if (!caps->nr_close_wins && !migration_in_progress) {
                list_add(&txwin->win_list, &caps->list);
                caps->nr_open_windows++;
+               caps->nr_open_wins_progress--;
                mutex_unlock(&vas_pseries_mutex);
                vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
                return &txwin->vas_win;
@@ -433,6 +448,12 @@ out_free:
         */
        free_irq_setup(txwin);
        h_deallocate_vas_window(txwin->vas_win.winid);
+       /*
+        * Hold mutex and reduce nr_open_wins_progress counter.
+        */
+       mutex_lock(&vas_pseries_mutex);
+       caps->nr_open_wins_progress--;
+       mutex_unlock(&vas_pseries_mutex);
 out:
        atomic_dec(&cop_feat_caps->nr_used_credits);
        kfree(txwin);
@@ -937,14 +958,14 @@ int vas_migration_handler(int action)
        struct vas_caps *vcaps;
        int i, rc = 0;
 
+       pr_info("VAS migration event %d\n", action);
+
        /*
         * NX-GZIP is not enabled. Nothing to do for migration.
         */
        if (!copypaste_feat)
                return rc;
 
-       mutex_lock(&vas_pseries_mutex);
-
        if (action == VAS_SUSPEND)
                migration_in_progress = true;
        else
@@ -990,12 +1011,27 @@ int vas_migration_handler(int action)
 
                switch (action) {
                case VAS_SUSPEND:
+                       mutex_lock(&vas_pseries_mutex);
                        rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows,
                                                        true);
+                       /*
+                        * Windows are included in the list after successful
+                        * open. So wait for closing these in-progress open
+                        * windows in vas_allocate_window() which will be
+                        * done if the migration_in_progress is set.
+                        */
+                       while (vcaps->nr_open_wins_progress) {
+                               mutex_unlock(&vas_pseries_mutex);
+                               msleep(10);
+                               mutex_lock(&vas_pseries_mutex);
+                       }
+                       mutex_unlock(&vas_pseries_mutex);
                        break;
                case VAS_RESUME:
+                       mutex_lock(&vas_pseries_mutex);
                        atomic_set(&caps->nr_total_credits, new_nr_creds);
                        rc = reconfig_open_windows(vcaps, new_nr_creds, true);
+                       mutex_unlock(&vas_pseries_mutex);
                        break;
                default:
                        /* should not happen */
@@ -1011,8 +1047,9 @@ int vas_migration_handler(int action)
                        goto out;
        }
 
+       pr_info("VAS migration event (%d) successful\n", action);
+
 out:
-       mutex_unlock(&vas_pseries_mutex);
        return rc;
 }
 
index 7115043ec488307658e9a9a8fdb9b653848abaad..45567cd1317837ac069be2b20b44ac789494be7b 100644 (file)
@@ -91,6 +91,8 @@ struct vas_cop_feat_caps {
 struct vas_caps {
        struct vas_cop_feat_caps caps;
        struct list_head list;  /* List of open windows */
+       int nr_open_wins_progress;      /* Number of open windows in */
+                                       /* progress. Used in migration */
        int nr_close_wins;      /* closed windows in the hypervisor for DLPAR */
        int nr_open_windows;    /* Number of successful open windows */
        u8 feat;                /* Feature type */
index 95a2a06acc6a62412894e491c3bfd5d4a161d15b..24c1799e2ec4905dc9c38148a66b6866fe20e59b 100644 (file)
@@ -685,7 +685,7 @@ config RISCV_BOOT_SPINWAIT
          If unsure what to do here, say N.
 
 config ARCH_SUPPORTS_KEXEC
-       def_bool MMU
+       def_bool y
 
 config ARCH_SELECTS_KEXEC
        def_bool y
@@ -693,7 +693,7 @@ config ARCH_SELECTS_KEXEC
        select HOTPLUG_CPU if SMP
 
 config ARCH_SUPPORTS_KEXEC_FILE
-       def_bool 64BIT && MMU
+       def_bool 64BIT
 
 config ARCH_SELECTS_KEXEC_FILE
        def_bool y
index 90b261114763753565001398edcb4c86e3631ab1..dce96f27cc89a4af44c61209be59a105c49d05ca 100644 (file)
@@ -8,9 +8,6 @@
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/leds/common.h>
 
-/* Clock frequency (in Hz) of the rtcclk */
-#define RTCCLK_FREQ            1000000
-
 / {
        model = "Microchip PolarFire-SoC Icicle Kit";
        compatible = "microchip,mpfs-icicle-reference-rtlv2210", "microchip,mpfs-icicle-kit",
                stdout-path = "serial1:115200n8";
        };
 
-       cpus {
-               timebase-frequency = <RTCCLK_FREQ>;
-       };
-
        leds {
                compatible = "gpio-leds";
 
index 184cb36a175e40742763510b8b1e7eca6d9c784a..a8d623ee9fa4cedacb177fbe7ebf94a5eb3df3d3 100644 (file)
@@ -10,9 +10,6 @@
 #include "mpfs.dtsi"
 #include "mpfs-m100pfs-fabric.dtsi"
 
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ    1000000
-
 / {
        model = "Aries Embedded M100PFEVPS";
        compatible = "aries,m100pfsevp", "microchip,mpfs";
                stdout-path = "serial1:115200n8";
        };
 
-       cpus {
-               timebase-frequency = <MTIMER_FREQ>;
-       };
-
        ddrc_cache_lo: memory@80000000 {
                device_type = "memory";
                reg = <0x0 0x80000000 0x0 0x40000000>;
index c87cc2d8fe29fa2174bbedca08c272c7331283b8..ea0808ab104255ea6cfcff1a1bc99adbbdc81905 100644 (file)
@@ -6,9 +6,6 @@
 #include "mpfs.dtsi"
 #include "mpfs-polarberry-fabric.dtsi"
 
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ    1000000
-
 / {
        model = "Sundance PolarBerry";
        compatible = "sundance,polarberry", "microchip,mpfs";
                stdout-path = "serial0:115200n8";
        };
 
-       cpus {
-               timebase-frequency = <MTIMER_FREQ>;
-       };
-
        ddrc_cache_lo: memory@80000000 {
                device_type = "memory";
                reg = <0x0 0x80000000 0x0 0x2e000000>;
index 013cb666c72da8e539a4bfbc8c5ddeb560272160..f9a89057943834766cb60b14a70a855df416566f 100644 (file)
@@ -6,9 +6,6 @@
 #include "mpfs.dtsi"
 #include "mpfs-sev-kit-fabric.dtsi"
 
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ            1000000
-
 / {
        #address-cells = <2>;
        #size-cells = <2>;
                stdout-path = "serial1:115200n8";
        };
 
-       cpus {
-               timebase-frequency = <MTIMER_FREQ>;
-       };
-
        reserved-memory {
                #address-cells = <2>;
                #size-cells = <2>;
index e0797c7e1b3553a9f5623f730bb974c2101c6812..d1120f5f2c0153bbc9b967dd360159b6c6ee50e1 100644 (file)
@@ -11,9 +11,6 @@
 #include "mpfs.dtsi"
 #include "mpfs-tysom-m-fabric.dtsi"
 
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ            1000000
-
 / {
        model = "Aldec TySOM-M-MPFS250T-REV2";
        compatible = "aldec,tysom-m-mpfs250t-rev2", "microchip,mpfs";
                stdout-path = "serial1:115200n8";
        };
 
-       cpus {
-               timebase-frequency = <MTIMER_FREQ>;
-       };
-
        ddrc_cache_lo: memory@80000000 {
                device_type = "memory";
                reg = <0x0 0x80000000 0x0 0x30000000>;
index a6faf24f1dbaf659eb9f0df6baadc34fa3246f52..266489d43912fc4457cb2bad3cc8d9d67b8d4dcd 100644 (file)
@@ -13,6 +13,7 @@
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
+               timebase-frequency = <1000000>;
 
                cpu0: cpu@0 {
                        compatible = "sifive,e51", "sifive,rocket0", "riscv";
index df40e87ee063292417622cd89c71a117ce38cb7c..aec6401a467b02a17d2bd25a369222bca815d83b 100644 (file)
@@ -34,7 +34,6 @@
                        cpu0_intc: interrupt-controller {
                                compatible = "riscv,cpu-intc";
                                interrupt-controller;
-                               #address-cells = <0>;
                                #interrupt-cells = <1>;
                        };
                };
index 197db68cc8daf711ad05a1469697ef50dcd9acd7..17a90486972468fce5e85c9287f091da14a77e66 100644 (file)
@@ -38,29 +38,35 @@ static long ax45mp_iocp_sw_workaround(void)
        return ret.error ? 0 : ret.value;
 }
 
-static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
+static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
 {
+       static bool done;
+
        if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
-               return false;
+               return;
+
+       if (done)
+               return;
+
+       done = true;
 
        if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
-               return false;
+               return;
 
        if (!ax45mp_iocp_sw_workaround())
-               return false;
+               return;
 
        /* Set this just to make core cbo code happy */
        riscv_cbom_block_size = 1;
        riscv_noncoherent_supported();
-
-       return true;
 }
 
 void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
                                              unsigned long archid, unsigned long impid,
                                              unsigned int stage)
 {
-       errata_probe_iocp(stage, archid, impid);
+       if (stage == RISCV_ALTERNATIVES_BOOT)
+               errata_probe_iocp(stage, archid, impid);
 
        /* we have nothing to patch here ATM so just return back */
 }
index 294044429e8e15d9230f3b96c7c5579be68857f2..ab00235b018f899e0ca0d0de297a4f14cc9d5c13 100644 (file)
@@ -899,7 +899,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 #define PAGE_KERNEL            __pgprot(0)
 #define swapper_pg_dir         NULL
 #define TASK_SIZE              0xffffffffUL
-#define VMALLOC_START          0
+#define VMALLOC_START          _AC(0, UL)
 #define VMALLOC_END            TASK_SIZE
 
 #endif /* !CONFIG_MMU */
index 55f1d7856b5448c9242df00be2d49d061d4d15b2..8706736fd4e2dca53d096d1caa5117da9fc08873 100644 (file)
@@ -5,17 +5,19 @@
 
 void arch_crash_save_vmcoreinfo(void)
 {
-       VMCOREINFO_NUMBER(VA_BITS);
        VMCOREINFO_NUMBER(phys_ram_base);
 
        vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET);
        vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
        vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
+#ifdef CONFIG_MMU
+       VMCOREINFO_NUMBER(VA_BITS);
        vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
        vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
 #ifdef CONFIG_64BIT
        vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
        vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
+#endif
 #endif
        vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
        vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
index b77397432403d9ef028fea6855cdc97aea143d00..76ace1e0b46f623a119cc07a08ad8923079e1c81 100644 (file)
@@ -154,7 +154,6 @@ secondary_start_sbi:
        XIP_FIXUP_OFFSET a3
        add a3, a3, a1
        REG_L sp, (a3)
-       scs_load_current
 
 .Lsecondary_start_common:
 
@@ -165,6 +164,7 @@ secondary_start_sbi:
        call relocate_enable_mmu
 #endif
        call .Lsetup_trap_vector
+       scs_load_current
        tail smp_callin
 #endif /* CONFIG_SMP */
 
index 56a8c78e9e215eab146fac7ae9b645723f75a063..aac019ed63b1bdaa766262a5266deb4c8c5a4bdf 100644 (file)
@@ -40,15 +40,6 @@ struct relocation_handlers {
                                  long buffer);
 };
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
-void process_accumulated_relocations(struct module *me);
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-                                unsigned int hashtable_bits, Elf_Addr v);
-
-struct hlist_head *relocation_hashtable;
-
-struct list_head used_buckets_list;
-
 /*
  * The auipc+jalr instruction pair can reach any PC-relative offset
  * in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -64,7 +55,7 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
 
 static int riscv_insn_rmw(void *location, u32 keep, u32 set)
 {
-       u16 *parcel = location;
+       __le16 *parcel = location;
        u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
 
        insn &= keep;
@@ -77,7 +68,7 @@ static int riscv_insn_rmw(void *location, u32 keep, u32 set)
 
 static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
 {
-       u16 *parcel = location;
+       __le16 *parcel = location;
        u16 insn = le16_to_cpu(*parcel);
 
        insn &= keep;
@@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = {
        /* 192-255 nonstandard ABI extensions  */
 };
 
-void process_accumulated_relocations(struct module *me)
+static void
+process_accumulated_relocations(struct module *me,
+                               struct hlist_head **relocation_hashtable,
+                               struct list_head *used_buckets_list)
 {
        /*
         * Only ADD/SUB/SET/ULEB128 should end up here.
@@ -624,18 +618,25 @@ void process_accumulated_relocations(struct module *me)
         *      - Each relocation entry for a location address
         */
        struct used_bucket *bucket_iter;
+       struct used_bucket *bucket_iter_tmp;
        struct relocation_head *rel_head_iter;
+       struct hlist_node *rel_head_iter_tmp;
        struct relocation_entry *rel_entry_iter;
+       struct relocation_entry *rel_entry_iter_tmp;
        int curr_type;
        void *location;
        long buffer;
 
-       list_for_each_entry(bucket_iter, &used_buckets_list, head) {
-               hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+       list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
+                                used_buckets_list, head) {
+               hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
+                                         bucket_iter->bucket, node) {
                        buffer = 0;
                        location = rel_head_iter->location;
-                       list_for_each_entry(rel_entry_iter,
-                                           rel_head_iter->rel_entry, head) {
+                       list_for_each_entry_safe(rel_entry_iter,
+                                                rel_entry_iter_tmp,
+                                                rel_head_iter->rel_entry,
+                                                head) {
                                curr_type = rel_entry_iter->type;
                                reloc_handlers[curr_type].reloc_handler(
                                        me, &buffer, rel_entry_iter->value);
@@ -648,11 +649,14 @@ void process_accumulated_relocations(struct module *me)
                kfree(bucket_iter);
        }
 
-       kfree(relocation_hashtable);
+       kfree(*relocation_hashtable);
 }
 
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-                                unsigned int hashtable_bits, Elf_Addr v)
+static int add_relocation_to_accumulate(struct module *me, int type,
+                                       void *location,
+                                       unsigned int hashtable_bits, Elf_Addr v,
+                                       struct hlist_head *relocation_hashtable,
+                                       struct list_head *used_buckets_list)
 {
        struct relocation_entry *entry;
        struct relocation_head *rel_head;
@@ -661,6 +665,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
        unsigned long hash;
 
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+       if (!entry)
+               return -ENOMEM;
+
        INIT_LIST_HEAD(&entry->head);
        entry->type = type;
        entry->value = v;
@@ -669,7 +677,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
 
        current_head = &relocation_hashtable[hash];
 
-       /* Find matching location (if any) */
+       /*
+        * Search for the relocation_head for the relocations that happen at the
+        * provided location
+        */
        bool found = false;
        struct relocation_head *rel_head_iter;
 
@@ -681,19 +692,45 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
                }
        }
 
+       /*
+        * If there has not yet been any relocations at the provided location,
+        * create a relocation_head for that location and populate it with this
+        * relocation_entry.
+        */
        if (!found) {
                rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+
+               if (!rel_head) {
+                       kfree(entry);
+                       return -ENOMEM;
+               }
+
                rel_head->rel_entry =
                        kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+               if (!rel_head->rel_entry) {
+                       kfree(entry);
+                       kfree(rel_head);
+                       return -ENOMEM;
+               }
+
                INIT_LIST_HEAD(rel_head->rel_entry);
                rel_head->location = location;
                INIT_HLIST_NODE(&rel_head->node);
                if (!current_head->first) {
                        bucket =
                                kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+
+                       if (!bucket) {
+                               kfree(entry);
+                               kfree(rel_head);
+                               kfree(rel_head->rel_entry);
+                               return -ENOMEM;
+                       }
+
                        INIT_LIST_HEAD(&bucket->head);
                        bucket->bucket = current_head;
-                       list_add(&bucket->head, &used_buckets_list);
+                       list_add(&bucket->head, used_buckets_list);
                }
                hlist_add_head(&rel_head->node, current_head);
        }
@@ -704,7 +741,9 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
        return 0;
 }
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+static unsigned int
+initialize_relocation_hashtable(unsigned int num_relocations,
+                               struct hlist_head **relocation_hashtable)
 {
        /* Can safely assume that bits is not greater than sizeof(long) */
        unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
@@ -720,12 +759,13 @@ unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
 
        hashtable_size <<= should_double_size;
 
-       relocation_hashtable = kmalloc_array(hashtable_size,
-                                            sizeof(*relocation_hashtable),
-                                            GFP_KERNEL);
-       __hash_init(relocation_hashtable, hashtable_size);
+       *relocation_hashtable = kmalloc_array(hashtable_size,
+                                             sizeof(*relocation_hashtable),
+                                             GFP_KERNEL);
+       if (!*relocation_hashtable)
+               return -ENOMEM;
 
-       INIT_LIST_HEAD(&used_buckets_list);
+       __hash_init(*relocation_hashtable, hashtable_size);
 
        return hashtable_bits;
 }
@@ -742,7 +782,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
        Elf_Addr v;
        int res;
        unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
-       unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
+       struct hlist_head *relocation_hashtable;
+       struct list_head used_buckets_list;
+       unsigned int hashtable_bits;
+
+       hashtable_bits = initialize_relocation_hashtable(num_relocations,
+                                                        &relocation_hashtable);
+
+       if (hashtable_bits < 0)
+               return hashtable_bits;
+
+       INIT_LIST_HEAD(&used_buckets_list);
 
        pr_debug("Applying relocate section %u to %u\n", relsec,
               sechdrs[relsec].sh_info);
@@ -823,14 +873,18 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                }
 
                if (reloc_handlers[type].accumulate_handler)
-                       res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+                       res = add_relocation_to_accumulate(me, type, location,
+                                                          hashtable_bits, v,
+                                                          relocation_hashtable,
+                                                          &used_buckets_list);
                else
                        res = handler(me, location, v);
                if (res)
                        return res;
        }
 
-       process_accumulated_relocations(me);
+       process_accumulated_relocations(me, &relocation_hashtable,
+                                       &used_buckets_list);
 
        return 0;
 }
index c712037dbe10ec88b9ee8f5d8559f9b73c6608fa..a2ca5b7756a5b0ca716de4fe9ae8bd51693bdb97 100644 (file)
@@ -169,7 +169,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
        pair->value &= ~missing;
 }
 
-static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
 {
        struct riscv_hwprobe pair;
 
index 90f22049d553e0e99b73ccc1291a57a2fb48aa0e..8515ed7cd8c120f75d592c401aa383da629c5729 100644 (file)
@@ -6,13 +6,13 @@
 .text
 .global test_uleb_basic
 test_uleb_basic:
-       ld      a0, second
+       lw      a0, second
        addi    a0, a0, -127
        ret
 
 .global test_uleb_large
 test_uleb_large:
-       ld      a0, fourth
+       lw      a0, fourth
        addi    a0, a0, -0x07e8
        ret
 
@@ -22,10 +22,10 @@ first:
 second:
        .reloc second, R_RISCV_SET_ULEB128, second
        .reloc second, R_RISCV_SUB_ULEB128, first
-       .dword 0
+       .word 0
 third:
        .space 1000
 fourth:
        .reloc fourth, R_RISCV_SET_ULEB128, fourth
        .reloc fourth, R_RISCV_SUB_ULEB128, third
-       .dword 0
+       .word 0
index 5eba37147caa96c077eb9ffb89233e1f679fed6d..5255f8134aeff5484e24d83b727ff9908a1b1c44 100644 (file)
@@ -550,16 +550,14 @@ int handle_misaligned_store(struct pt_regs *regs)
        } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
                len = 8;
                val.data_ulong = GET_RS2S(insn, regs);
-       } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
-                  ((insn >> SH_RD) & 0x1f)) {
+       } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
                len = 8;
                val.data_ulong = GET_RS2C(insn, regs);
 #endif
        } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
                len = 4;
                val.data_ulong = GET_RS2S(insn, regs);
-       } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
-                  ((insn >> SH_RD) & 0x1f)) {
+       } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
                len = 4;
                val.data_ulong = GET_RS2C(insn, regs);
        } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
index 438cd92e60801bd3c12cd1f891c5a9009fe115f8..dd06086293106ed963268d0c6c95fdc8c58ef694 100644 (file)
@@ -834,7 +834,6 @@ CONFIG_DEBUG_IRQFLAGS=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_REF_SCALE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
index 02dcbe82a8e512af4bd0f09f974d67d89f938d80..8207a892bbe22f37d4f9a98d3e2f0cc7210292e9 100644 (file)
@@ -587,10 +587,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 
        if (!gmap_is_shadow(gmap))
                return;
-       if (start >= 1UL << 31)
-               /* We are only interested in prefix pages */
-               return;
-
        /*
         * Only new shadow blocks are added to the list during runtime,
         * therefore we can safely reference them all the time.
index 3bd2ab2a9a3449a411145a938d65a0b1f9a5ad62..5cb92941540b32bea8e908863c3364a0f61230c5 100644 (file)
@@ -756,7 +756,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
                pte_clear(mm, addr, ptep);
        }
        if (reset)
-               pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+               pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
 }
index 927d80ba2332a73dd5ebfb3760b2b2b5137c86bb..76631714673ca8540985b0a1c5cd4bea18ad697c 100644 (file)
@@ -28,7 +28,7 @@
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_SH
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 /* arch/sh/kernel/machine_kexec.c */
 void reserve_crashkernel(void);
 
@@ -67,6 +67,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 }
 #else
 static inline void reserve_crashkernel(void) { }
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 
 #endif /* __ASM_SH_KEXEC_H */
index 69cd9ac4b2ab8a55c290a42d52bb4cfc0f4926ba..2d7e70537de04c9b731de0ffcadc807c62238754 100644 (file)
@@ -33,7 +33,7 @@ obj-$(CONFIG_SMP)             += smp.o
 obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
 obj-$(CONFIG_KGDB)             += kgdb.o
 obj-$(CONFIG_MODULES)          += sh_ksyms_32.o module.o
-obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE)       += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
 obj-$(CONFIG_IO_TRAPPED)       += io_trapped.o
index e8eeedc9b1822d5963cb2df4ce17439835f88e81..1de006b1c3393463632dd66d9a929c9a30d166ef 100644 (file)
@@ -63,7 +63,7 @@ struct machine_ops machine_ops = {
        .shutdown       = native_machine_shutdown,
        .restart        = native_machine_restart,
        .halt           = native_machine_halt,
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        .crash_shutdown = native_machine_crash_shutdown,
 #endif
 };
@@ -88,7 +88,7 @@ void machine_halt(void)
        machine_ops.halt();
 }
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 void machine_crash_shutdown(struct pt_regs *regs)
 {
        machine_ops.crash_shutdown(regs);
index 3d80515298d268e77f4684bb37782c1c589fc182..d3175f09b3aad9579fc16ceceb800a3d4af7320d 100644 (file)
@@ -220,7 +220,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
        request_resource(res, &code_resource);
        request_resource(res, &data_resource);
        request_resource(res, &bss_resource);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        request_resource(res, &crashk_res);
 #endif
 
index 55c98fdd67d2b7f599ab333d5f1fa1c517c3e4eb..18d15d1ce87d5993946c31579b006594e9c4e9da 100644 (file)
@@ -178,7 +178,7 @@ static unsigned long get_cmdline_acpi_rsdp(void)
 {
        unsigned long addr = 0;
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        char val[MAX_ADDR_LEN] = { };
        int ret;
 
index 1b5d17a9f70dde9f711c53ea2db97762d73eadf3..cf1f13c8217569f6bd82eb6d513c37210628ebd2 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/coco.h>
 #include <asm/tdx.h>
 #include <asm/vmx.h>
+#include <asm/ia32.h>
 #include <asm/insn.h>
 #include <asm/insn-eval.h>
 #include <asm/pgtable.h>
index d813160b14d85172c307b9436a6703ab9b6b1768..6356060caaf311af8370ccaeb69aab85847b62d1 100644 (file)
@@ -26,6 +26,7 @@
 #include <xen/events.h>
 #endif
 
+#include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/traps.h>
 #include <asm/vdso.h>
@@ -167,7 +168,96 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
        }
 }
 
-/* Handles int $0x80 */
+#ifdef CONFIG_IA32_EMULATION
+static __always_inline bool int80_is_external(void)
+{
+       const unsigned int offs = (0x80 / 32) * 0x10;
+       const u32 bit = BIT(0x80 % 32);
+
+       /* The local APIC on XENPV guests is fake */
+       if (cpu_feature_enabled(X86_FEATURE_XENPV))
+               return false;
+
+       /*
+        * If vector 0x80 is set in the APIC ISR then this is an external
+        * interrupt. Either from broken hardware or injected by a VMM.
+        *
+        * Note: In guest mode this is only valid for secure guests where
+        * the secure module fully controls the vAPIC exposed to the guest.
+        */
+       return apic_read(APIC_ISR + offs) & bit;
+}
+
+/**
+ * int80_emulation - 32-bit legacy syscall entry
+ *
+ * This entry point can be used by 32-bit and 64-bit programs to perform
+ * 32-bit system calls.  Instances of INT $0x80 can be found inline in
+ * various programs and libraries.  It is also used by the vDSO's
+ * __kernel_vsyscall fallback for hardware that doesn't support a faster
+ * entry method.  Restarted 32-bit system calls also fall back to INT
+ * $0x80 regardless of what instruction was originally used to do the
+ * system call.
+ *
+ * This is considered a slow path.  It is not used by most libc
+ * implementations on modern hardware except during process startup.
+ *
+ * The arguments for the INT $0x80 based syscall are on stack in the
+ * pt_regs structure:
+ *   eax:                              system call number
+ *   ebx, ecx, edx, esi, edi, ebp:     arg1 - arg 6
+ */
+DEFINE_IDTENTRY_RAW(int80_emulation)
+{
+       int nr;
+
+       /* Kernel does not use INT $0x80! */
+       if (unlikely(!user_mode(regs))) {
+               irqentry_enter(regs);
+               instrumentation_begin();
+               panic("Unexpected external interrupt 0x80\n");
+       }
+
+       /*
+        * Establish kernel context for instrumentation, including for
+        * int80_is_external() below which calls into the APIC driver.
+        * Identical for soft and external interrupts.
+        */
+       enter_from_user_mode(regs);
+
+       instrumentation_begin();
+       add_random_kstack_offset();
+
+       /* Validate that this is a soft interrupt to the extent possible */
+       if (unlikely(int80_is_external()))
+               panic("Unexpected external interrupt 0x80\n");
+
+       /*
+        * The low level idtentry code pushed -1 into regs::orig_ax
+        * and regs::ax contains the syscall number.
+        *
+        * User tracing code (ptrace or signal handlers) might assume
+        * that the regs::orig_ax contains a 32-bit number on invoking
+        * a 32-bit syscall.
+        *
+        * Establish the syscall convention by saving the 32bit truncated
+        * syscall number in regs::orig_ax and by invalidating regs::ax.
+        */
+       regs->orig_ax = regs->ax & GENMASK(31, 0);
+       regs->ax = -ENOSYS;
+
+       nr = syscall_32_enter(regs);
+
+       local_irq_enable();
+       nr = syscall_enter_from_user_mode_work(regs, nr);
+       do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
+       syscall_exit_to_user_mode(regs);
+}
+#else /* CONFIG_IA32_EMULATION */
+
+/* Handles int $0x80 on a 32bit kernel */
 __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 {
        int nr = syscall_32_enter(regs);
@@ -186,6 +276,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
        instrumentation_end();
        syscall_exit_to_user_mode(regs);
 }
+#endif /* !CONFIG_IA32_EMULATION */
 
 static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 {
index 27c05d08558aaadec8dea7ed8568036e277efd2c..de94e2e84ecca927d9aa0e1ab99466466c163d44 100644 (file)
@@ -275,80 +275,3 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        int3
 SYM_CODE_END(entry_SYSCALL_compat)
-
-/*
- * 32-bit legacy system call entry.
- *
- * 32-bit x86 Linux system calls traditionally used the INT $0x80
- * instruction.  INT $0x80 lands here.
- *
- * This entry point can be used by 32-bit and 64-bit programs to perform
- * 32-bit system calls.  Instances of INT $0x80 can be found inline in
- * various programs and libraries.  It is also used by the vDSO's
- * __kernel_vsyscall fallback for hardware that doesn't support a faster
- * entry method.  Restarted 32-bit system calls also fall back to INT
- * $0x80 regardless of what instruction was originally used to do the
- * system call.
- *
- * This is considered a slow path.  It is not used by most libc
- * implementations on modern hardware except during process startup.
- *
- * Arguments:
- * eax  system call number
- * ebx  arg1
- * ecx  arg2
- * edx  arg3
- * esi  arg4
- * edi  arg5
- * ebp  arg6
- */
-SYM_CODE_START(entry_INT80_compat)
-       UNWIND_HINT_ENTRY
-       ENDBR
-       /*
-        * Interrupts are off on entry.
-        */
-       ASM_CLAC                        /* Do this early to minimize exposure */
-       ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
-
-       /*
-        * User tracing code (ptrace or signal handlers) might assume that
-        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
-        * syscall.  Just in case the high bits are nonzero, zero-extend
-        * the syscall number.  (This could almost certainly be deleted
-        * with no ill effects.)
-        */
-       movl    %eax, %eax
-
-       /* switch to thread stack expects orig_ax and rdi to be pushed */
-       pushq   %rax                    /* pt_regs->orig_ax */
-
-       /* Need to switch before accessing the thread stack. */
-       SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
-
-       /* In the Xen PV case we already run on the thread stack. */
-       ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
-
-       movq    %rsp, %rax
-       movq    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
-
-       pushq   5*8(%rax)               /* regs->ss */
-       pushq   4*8(%rax)               /* regs->rsp */
-       pushq   3*8(%rax)               /* regs->eflags */
-       pushq   2*8(%rax)               /* regs->cs */
-       pushq   1*8(%rax)               /* regs->ip */
-       pushq   0*8(%rax)               /* regs->orig_ax */
-.Lint80_keep_stack:
-
-       PUSH_AND_CLEAR_REGS rax=$-ENOSYS
-       UNWIND_HINT_REGS
-
-       cld
-
-       IBRS_ENTER
-       UNTRAIN_RET
-
-       movq    %rsp, %rdi
-       call    do_int80_syscall_32
-       jmp     swapgs_restore_regs_and_return_to_usermode
-SYM_CODE_END(entry_INT80_compat)
index 5a2ae24b1204f932e15ff3d5f08232e541e54be5..9805629479d968b3c483879bc5ca56a3373d7504 100644 (file)
@@ -75,6 +75,11 @@ static inline bool ia32_enabled(void)
        return __ia32_enabled;
 }
 
+static inline void ia32_disable(void)
+{
+       __ia32_enabled = false;
+}
+
 #else /* !CONFIG_IA32_EMULATION */
 
 static inline bool ia32_enabled(void)
@@ -82,6 +87,8 @@ static inline bool ia32_enabled(void)
        return IS_ENABLED(CONFIG_X86_32);
 }
 
+static inline void ia32_disable(void) {}
+
 #endif
 
 #endif /* _ASM_X86_IA32_H */
index 05fd175cec7d5c9a617fd1f3c1462ade1d44b06e..13639e57e1f8af4c24c0c656a9f0801516bf25f4 100644 (file)
@@ -569,6 +569,10 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_UD,          exc_invalid_op);
 DECLARE_IDTENTRY_RAW(X86_TRAP_BP,              exc_int3);
 DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF,    exc_page_fault);
 
+#if defined(CONFIG_IA32_EMULATION)
+DECLARE_IDTENTRY_RAW(IA32_SYSCALL_VECTOR,      int80_emulation);
+#endif
+
 #ifdef CONFIG_X86_MCE
 #ifdef CONFIG_X86_64
 DECLARE_IDTENTRY_MCE(X86_TRAP_MC,      exc_machine_check);
index 4d84122bd6433b165dff727887667965637ee7e4..484f4f0131a5cceedf373804573074010a9892b0 100644 (file)
@@ -32,10 +32,6 @@ void entry_SYSCALL_compat(void);
 void entry_SYSCALL_compat_safe_stack(void);
 void entry_SYSRETL_compat_unsafe_stack(void);
 void entry_SYSRETL_compat_end(void);
-void entry_INT80_compat(void);
-#ifdef CONFIG_XEN_PV
-void xen_entry_INT80_compat(void);
-#endif
 #else /* !CONFIG_IA32_EMULATION */
 #define entry_SYSCALL_compat NULL
 #define entry_SYSENTER_compat NULL
index a7eab05e5f29776a7585d7d3a53e0aa791e8a5c1..f322ebd053a91ef3a4f91d02ec106881022f00da 100644 (file)
@@ -1320,6 +1320,9 @@ static void zenbleed_check_cpu(void *unused)
 
 void amd_check_microcode(void)
 {
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return;
+
        on_each_cpu(zenbleed_check_cpu, NULL, 1);
 }
 
index 8857abc706e469f73edf7f98953838eebdd05354..660b601f1d6c33e9ad62ec2d12d860e92d4ea420 100644 (file)
@@ -121,7 +121,7 @@ static const __initconst struct idt_data def_idts[] = {
 
 static const struct idt_data ia32_idt[] __initconst = {
 #if defined(CONFIG_IA32_EMULATION)
-       SYSG(IA32_SYSCALL_VECTOR,       entry_INT80_compat),
+       SYSG(IA32_SYSCALL_VECTOR,       asm_int80_emulation),
 #elif defined(CONFIG_X86_32)
        SYSG(IA32_SYSCALL_VECTOR,       entry_INT80_32),
 #endif
index 70472eebe71960ce05d84db4c5aa0fc35ddef66e..c67285824e82676528ab8f33e2919bc021b197d2 100644 (file)
@@ -1234,10 +1234,6 @@ void setup_ghcb(void)
        if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                return;
 
-       /* First make sure the hypervisor talks a supported protocol. */
-       if (!sev_es_negotiate_protocol())
-               sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-
        /*
         * Check whether the runtime #VC exception handler is active. It uses
         * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
@@ -1254,6 +1250,13 @@ void setup_ghcb(void)
                return;
        }
 
+       /*
+        * Make sure the hypervisor talks a supported protocol.
+        * This gets called only in the BSP boot phase.
+        */
+       if (!sev_es_negotiate_protocol())
+               sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+
        /*
         * Clear the boot_ghcb. The first exception comes in before the bss
         * section is cleared.
index ee8c4c3496edd050d651f0f1d1863f2c569b040e..eea6ea7f14af98b76661978eea17ccfea5b2f19d 100644 (file)
@@ -182,6 +182,7 @@ static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations mmu_rmaps_stat_fops = {
+       .owner          = THIS_MODULE,
        .open           = kvm_mmu_rmaps_stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
index 7121463123584c5d382da47ba3a7f34a67eba706..f3bb30b40876caa53ee959602d5e4345f48a10a9 100644 (file)
@@ -1855,15 +1855,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        bool old_paging = is_paging(vcpu);
 
 #ifdef CONFIG_X86_64
-       if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
+       if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer |= EFER_LMA;
-                       svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+                       if (!vcpu->arch.guest_state_protected)
+                               svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }
 
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer &= ~EFER_LMA;
-                       svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+                       if (!vcpu->arch.guest_state_protected)
+                               svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
 #endif
index 2c924075f6f112a594c0a4390bb7dcc2d7e8fabf..1a3aaa7dafae44fb2cba1eefc0e4b41f5532d69f 100644 (file)
@@ -5518,8 +5518,8 @@ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                                         struct kvm_xsave *guest_xsave)
 {
-       return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
-                                            sizeof(guest_xsave->region));
+       kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+                                     sizeof(guest_xsave->region));
 }
 
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@@ -13031,7 +13031,10 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
        if (vcpu->arch.guest_state_protected)
                return true;
 
-       return vcpu->arch.preempted_in_kernel;
+       if (vcpu != kvm_get_running_vcpu())
+               return vcpu->arch.preempted_in_kernel;
+
+       return static_call(kvm_x86_get_cpl)(vcpu) == 0;
 }
 
 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
index a68f2dda0948c211e0ee4f1f2e4a000f513c463a..70b91de2e053abb5f181612e3db295b16def3c02 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/msr.h>
 #include <asm/cmdline.h>
 #include <asm/sev.h>
+#include <asm/ia32.h>
 
 #include "mm_internal.h"
 
@@ -481,6 +482,16 @@ void __init sme_early_init(void)
         */
        if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
                x86_cpuinit.parallel_bringup = false;
+
+       /*
+        * The VMM is capable of injecting interrupt 0x80 and triggering the
+        * compatibility syscall path.
+        *
+        * By default, the 32-bit emulation is disabled in order to ensure
+        * the safety of the VM.
+        */
+       if (sev_status & MSR_AMD64_SEV_ENABLED)
+               ia32_disable();
 }
 
 void __init mem_encrypt_free_decrypted_mem(void)
index 8c10d9abc2394fc43f75f3324dd8d0e42c3f797a..e89e415aa7435311991a945575519a593a2abb44 100644 (file)
@@ -3025,3 +3025,49 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp
 #endif
        WARN(1, "verification of programs using bpf_throw should have failed\n");
 }
+
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+                              struct bpf_prog *new, struct bpf_prog *old)
+{
+       u8 *old_addr, *new_addr, *old_bypass_addr;
+       int ret;
+
+       old_bypass_addr = old ? NULL : poke->bypass_addr;
+       old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+       new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+
+       /*
+        * On program loading or teardown, the program's kallsym entry
+        * might not be in place, so we use __bpf_arch_text_poke to skip
+        * the kallsyms check.
+        */
+       if (new) {
+               ret = __bpf_arch_text_poke(poke->tailcall_target,
+                                          BPF_MOD_JUMP,
+                                          old_addr, new_addr);
+               BUG_ON(ret < 0);
+               if (!old) {
+                       ret = __bpf_arch_text_poke(poke->tailcall_bypass,
+                                                  BPF_MOD_JUMP,
+                                                  poke->bypass_addr,
+                                                  NULL);
+                       BUG_ON(ret < 0);
+               }
+       } else {
+               ret = __bpf_arch_text_poke(poke->tailcall_bypass,
+                                          BPF_MOD_JUMP,
+                                          old_bypass_addr,
+                                          poke->bypass_addr);
+               BUG_ON(ret < 0);
+               /* let other CPUs finish the execution of program
+                * so that it will not possible to expose them
+                * to invalid nop, stack unwind, nop state
+                */
+               if (!ret)
+                       synchronize_rcu();
+               ret = __bpf_arch_text_poke(poke->tailcall_target,
+                                          BPF_MOD_JUMP,
+                                          old_addr, NULL);
+               BUG_ON(ret < 0);
+       }
+}
index bbbfdd495ebd3ac590fd152e0504c6bedae611de..aeb33e0a3f763370b00daf427063b2010f0bcf40 100644 (file)
@@ -704,7 +704,7 @@ static struct trap_array_entry trap_array[] = {
        TRAP_ENTRY(exc_int3,                            false ),
        TRAP_ENTRY(exc_overflow,                        false ),
 #ifdef CONFIG_IA32_EMULATION
-       { entry_INT80_compat,          xen_entry_INT80_compat,          false },
+       TRAP_ENTRY(int80_emulation,                     false ),
 #endif
        TRAP_ENTRY(exc_page_fault,                      false ),
        TRAP_ENTRY(exc_divide_error,                    false ),
index 9e5e680087853a34f8a2fc7a935e60e2c28f12d5..1a9cd18dfbd31208e5d1bcfa53f4a6e90bc81cf6 100644 (file)
@@ -156,7 +156,7 @@ xen_pv_trap asm_xenpv_exc_machine_check
 #endif /* CONFIG_X86_MCE */
 xen_pv_trap asm_exc_simd_coprocessor_error
 #ifdef CONFIG_IA32_EMULATION
-xen_pv_trap entry_INT80_compat
+xen_pv_trap asm_int80_emulation
 #endif
 xen_pv_trap asm_exc_xen_unknown_trap
 xen_pv_trap asm_exc_xen_hypervisor_callback
index 4ccf1994b97adbf8e8f9add7e3ff67d3bb5b3567..d530384f8d60704d474817baa076699af2f24e2d 100644 (file)
 
 #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
 
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
-                          (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
                           (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
 
+#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
+                              (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))
+
 #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
 #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
 
@@ -74,8 +76,12 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
        vdev->wa.clear_runtime_mem = false;
        vdev->wa.d3hot_after_power_off = true;
 
-       if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
+       REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
+       if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
+               /* Writing 1s does not clear the interrupt status register */
                vdev->wa.interrupt_clear_with_0 = true;
+               REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
+       }
 
        IVPU_PRINT_WA(punit_disabled);
        IVPU_PRINT_WA(clear_runtime_mem);
index 28c75242fca9c456afb53ffa7b77758cd088ec6b..62944e35fcee2980446a743c0099c8bfc26cbf6e 100644 (file)
@@ -399,13 +399,13 @@ acpi_evaluate_reference(acpi_handle handle,
                acpi_handle_debug(list->handles[i], "Found in reference list\n");
        }
 
-end:
        if (ACPI_FAILURE(status)) {
                list->count = 0;
                kfree(list->handles);
                list->handles = NULL;
        }
 
+end:
        kfree(buffer.pointer);
 
        return status;
index 94fbc3abe60e6a714c546bc2e8c6dce321742bbe..d3c30a28c410eaea470ea797b1114f57e8f6ea10 100644 (file)
@@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
        struct sk_buff *skb;
        unsigned int len;
 
-       spin_lock(&card->cli_queue_lock);
+       spin_lock_bh(&card->cli_queue_lock);
        skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
-       spin_unlock(&card->cli_queue_lock);
+       spin_unlock_bh(&card->cli_queue_lock);
        if(skb == NULL)
                return sprintf(buf, "No data.\n");
 
@@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
        struct pkt_hdr *header;
 
        /* Remove any yet-to-be-transmitted packets from the pending queue */
-       spin_lock(&card->tx_queue_lock);
+       spin_lock_bh(&card->tx_queue_lock);
        skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
                if (SKB_CB(skb)->vcc == vcc) {
                        skb_unlink(skb, &card->tx_queue[port]);
                        solos_pop(vcc, skb);
                }
        }
-       spin_unlock(&card->tx_queue_lock);
+       spin_unlock_bh(&card->tx_queue_lock);
 
        skb = alloc_skb(sizeof(*header), GFP_KERNEL);
        if (!skb) {
index 9ea22e165acd679b1e6ef72433104bed858cfc22..548491de818ef126f598eba06287bf7899418398 100644 (file)
@@ -144,7 +144,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
 #endif /* CONFIG_HOTPLUG_CPU */
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 #include <linux/kexec.h>
 
 static ssize_t crash_notes_show(struct device *dev,
@@ -189,14 +189,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
 #endif
 
 static const struct attribute_group *common_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        &crash_note_cpu_attr_group,
 #endif
        NULL
 };
 
 static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        &crash_note_cpu_attr_group,
 #endif
        NULL
index 91536ee05f144eb48fcc43a6a3a0d6f13f4561fa..7e2d1f0d903a6e165bb5e07c1e5b7e315c50b7c6 100644 (file)
@@ -362,6 +362,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
        devcd->devcd_dev.class = &devcd_class;
 
        mutex_lock(&devcd->mutex);
+       dev_set_uevent_suppress(&devcd->devcd_dev, true);
        if (device_add(&devcd->devcd_dev))
                goto put_device;
 
@@ -376,6 +377,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
                              "devcoredump"))
                dev_warn(dev, "devcoredump create_link failed\n");
 
+       dev_set_uevent_suppress(&devcd->devcd_dev, false);
+       kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
        INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
        schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
        mutex_unlock(&devcd->mutex);
index f3b9a4d0fa3bb2a9c0c03614bae78f206b0c77b2..8a13babd826ce3c96a7f73bf7a7e723179e047b1 100644 (file)
@@ -180,6 +180,9 @@ static inline unsigned long memblk_nr_poison(struct memory_block *mem)
 }
 #endif
 
+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
 static int memory_block_online(struct memory_block *mem)
 {
        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -204,10 +207,11 @@ static int memory_block_online(struct memory_block *mem)
        if (mem->altmap)
                nr_vmemmap_pages = mem->altmap->free;
 
+       mem_hotplug_begin();
        if (nr_vmemmap_pages) {
                ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        ret = online_pages(start_pfn + nr_vmemmap_pages,
@@ -215,7 +219,7 @@ static int memory_block_online(struct memory_block *mem)
        if (ret) {
                if (nr_vmemmap_pages)
                        mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
-               return ret;
+               goto out;
        }
 
        /*
@@ -227,9 +231,14 @@ static int memory_block_online(struct memory_block *mem)
                                          nr_vmemmap_pages);
 
        mem->zone = zone;
+out:
+       mem_hotplug_done();
        return ret;
 }
 
+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
 static int memory_block_offline(struct memory_block *mem)
 {
        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -247,6 +256,7 @@ static int memory_block_offline(struct memory_block *mem)
        if (mem->altmap)
                nr_vmemmap_pages = mem->altmap->free;
 
+       mem_hotplug_begin();
        if (nr_vmemmap_pages)
                adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
                                          -nr_vmemmap_pages);
@@ -258,13 +268,15 @@ static int memory_block_offline(struct memory_block *mem)
                if (nr_vmemmap_pages)
                        adjust_present_page_count(pfn_to_page(start_pfn),
                                                  mem->group, nr_vmemmap_pages);
-               return ret;
+               goto out;
        }
 
        if (nr_vmemmap_pages)
                mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
 
        mem->zone = NULL;
+out:
+       mem_hotplug_done();
        return ret;
 }
 
index 92592f944a3df2bab401ecfc1f1ecaf80af2747d..ac63a73ccdaaa23f119966c10f71f0bd28fb43a8 100644 (file)
@@ -410,8 +410,7 @@ out:
                        rb_entry(node, struct regmap_range_node, node);
 
                /* If there's nothing in the cache there's nothing to sync */
-               ret = regcache_read(map, this->selector_reg, &i);
-               if (ret != 0)
+               if (regcache_read(map, this->selector_reg, &i) != 0)
                        continue;
 
                ret = _regmap_write(map, this->selector_reg, i);
index ad1acd9b7426b11228004c9a3916c1b119a6b7d0..dbc3950c5960905926d3aaaba205466e70eaf68a 100644 (file)
@@ -767,6 +767,7 @@ config SM_CAMCC_8450
 
 config SM_CAMCC_8550
        tristate "SM8550 Camera Clock Controller"
+       depends on ARM64 || COMPILE_TEST
        select SM_GCC_8550
        help
          Support for the camera clock controller on SM8550 devices.
index aa53797dbfc145b2e921e9d2007e129485df7b29..75071e0cd3216e94c279992c6c90800b5747cbac 100644 (file)
@@ -138,7 +138,7 @@ PNAME(mux_pll_src_5plls_p)  = { "cpll", "gpll", "gpll_div2", "gpll_div3", "usb480
 PNAME(mux_pll_src_4plls_p)     = { "cpll", "gpll", "gpll_div2", "usb480m" };
 PNAME(mux_pll_src_3plls_p)     = { "cpll", "gpll", "gpll_div2" };
 
-PNAME(mux_aclk_peri_src_p)     = { "gpll_peri", "cpll_peri", "gpll_div2_peri", "gpll_div3_peri" };
+PNAME(mux_clk_peri_src_p)      = { "gpll", "cpll", "gpll_div2", "gpll_div3" };
 PNAME(mux_mmc_src_p)           = { "cpll", "gpll", "gpll_div2", "xin24m" };
 PNAME(mux_clk_cif_out_src_p)           = { "clk_cif_src", "xin24m" };
 PNAME(mux_sclk_vop_src_p)      = { "cpll", "gpll", "gpll_div2", "gpll_div3" };
@@ -275,23 +275,17 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(0), 11, GFLAGS),
 
        /* PD_PERI */
-       GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED,
+       COMPOSITE(0, "clk_peri_src", mux_clk_peri_src_p, 0,
+                       RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(2), 0, GFLAGS),
-       GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED,
-                       RK2928_CLKGATE_CON(2), 0, GFLAGS),
-       GATE(0, "gpll_div2_peri", "gpll_div2", CLK_IGNORE_UNUSED,
-                       RK2928_CLKGATE_CON(2), 0, GFLAGS),
-       GATE(0, "gpll_div3_peri", "gpll_div3", CLK_IGNORE_UNUSED,
-                       RK2928_CLKGATE_CON(2), 0, GFLAGS),
-       COMPOSITE_NOGATE(0, "aclk_peri_src", mux_aclk_peri_src_p, 0,
-                       RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS),
-       COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
+
+       COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "clk_peri_src", 0,
                        RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
                        RK2928_CLKGATE_CON(2), 3, GFLAGS),
-       COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
+       COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "clk_peri_src", 0,
                        RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
                        RK2928_CLKGATE_CON(2), 2, GFLAGS),
-       GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
+       GATE(ACLK_PERI, "aclk_peri", "clk_peri_src", 0,
                        RK2928_CLKGATE_CON(2), 1, GFLAGS),
 
        GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
@@ -316,7 +310,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
-       COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+       COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
                        RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
                        RK2928_CLKGATE_CON(2), 11, GFLAGS),
 
@@ -490,7 +484,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
        GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
        GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
-       GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
+       GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
        GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
        GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
        GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
index 16dabe2b9c47f483c772ab3b366464ada70df062..db713e1526cdc3cc12298cdc72adb076acd9379b 100644 (file)
@@ -72,6 +72,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
        RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
        RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
        RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0),
+       RK3036_PLL_RATE(292500000, 1, 195, 4, 4, 1, 0),
        RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0),
        RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
        RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
index 1cc9be85ba4cd1679838714b91953938b3aeddfc..7d97790b893d709b74fef1d66c05a6ca4150401f 100644 (file)
@@ -363,10 +363,9 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
 {
        resource_size_t base = -1;
 
-       down_read(&cxl_dpa_rwsem);
+       lockdep_assert_held(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                base = cxled->dpa_res->start;
-       up_read(&cxl_dpa_rwsem);
 
        return base;
 }
@@ -839,6 +838,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                else
                        cxld->target_type = CXL_DECODER_DEVMEM;
+
+               guard(rwsem_write)(&cxl_region_rwsem);
                if (cxld->id != cxl_num_decoders_committed(port)) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed out of order\n",
index fc5c2b414793bb351ae0077857b78e72e153e902..2f43d368ba07308c27a2aba69a3a3330f7413325 100644 (file)
@@ -227,10 +227,16 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
        if (!port || !is_cxl_endpoint(port))
                return -EINVAL;
 
-       rc = down_read_interruptible(&cxl_dpa_rwsem);
+       rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
 
+       rc = down_read_interruptible(&cxl_dpa_rwsem);
+       if (rc) {
+               up_read(&cxl_region_rwsem);
+               return rc;
+       }
+
        if (cxl_num_decoders_committed(port) == 0) {
                /* No regions mapped to this memdev */
                rc = cxl_get_poison_by_memdev(cxlmd);
@@ -239,6 +245,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
                rc =  cxl_get_poison_by_endpoint(port);
        }
        up_read(&cxl_dpa_rwsem);
+       up_read(&cxl_region_rwsem);
 
        return rc;
 }
@@ -324,10 +331,16 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;
 
-       rc = down_read_interruptible(&cxl_dpa_rwsem);
+       rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
 
+       rc = down_read_interruptible(&cxl_dpa_rwsem);
+       if (rc) {
+               up_read(&cxl_region_rwsem);
+               return rc;
+       }
+
        rc = cxl_validate_poison_dpa(cxlmd, dpa);
        if (rc)
                goto out;
@@ -355,6 +368,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
        trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
 out:
        up_read(&cxl_dpa_rwsem);
+       up_read(&cxl_region_rwsem);
 
        return rc;
 }
@@ -372,10 +386,16 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;
 
-       rc = down_read_interruptible(&cxl_dpa_rwsem);
+       rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
 
+       rc = down_read_interruptible(&cxl_dpa_rwsem);
+       if (rc) {
+               up_read(&cxl_region_rwsem);
+               return rc;
+       }
+
        rc = cxl_validate_poison_dpa(cxlmd, dpa);
        if (rc)
                goto out;
@@ -412,6 +432,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
        trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
 out:
        up_read(&cxl_dpa_rwsem);
+       up_read(&cxl_region_rwsem);
 
        return rc;
 }
index eff20e83d0a64e8ba791a214f8fb4564135baded..37e1652afbc7eac56fffbb0a5692ea2a1cd82411 100644 (file)
@@ -620,7 +620,7 @@ void read_cdat_data(struct cxl_port *port)
        struct pci_dev *pdev = NULL;
        struct cxl_memdev *cxlmd;
        size_t cdat_length;
-       void *cdat_table;
+       void *cdat_table, *cdat_buf;
        int rc;
 
        if (is_cxl_memdev(uport)) {
@@ -651,16 +651,15 @@ void read_cdat_data(struct cxl_port *port)
                return;
        }
 
-       cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
-                                 GFP_KERNEL);
-       if (!cdat_table)
+       cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
+       if (!cdat_buf)
                return;
 
-       rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
+       rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
        if (rc)
                goto err;
 
-       cdat_table = cdat_table + sizeof(__le32);
+       cdat_table = cdat_buf + sizeof(__le32);
        if (cdat_checksum(cdat_table, cdat_length))
                goto err;
 
@@ -670,7 +669,7 @@ void read_cdat_data(struct cxl_port *port)
 
 err:
        /* Don't leave table data allocated on error */
-       devm_kfree(dev, cdat_table);
+       devm_kfree(dev, cdat_buf);
        dev_err(dev, "Failed to read/validate CDAT.\n");
 }
 EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
index 7684c843e5a59c51c6d41556bfb18150c9cbfd26..5d8e06b0ba6e88f383953982cf836c1da8099ec6 100644 (file)
@@ -23,7 +23,7 @@ const struct device_type cxl_pmu_type = {
 
 static void remove_dev(void *dev)
 {
-       device_del(dev);
+       device_unregister(dev);
 }
 
 int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs,
index 38441634e4c68371fa7fc03aee8979e581303fde..b7c93bb18f6e75adfb129e175be5afcba98b10de 100644 (file)
@@ -226,9 +226,9 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
                            char *buf)
 {
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
-       u64 base = cxl_dpa_resource_start(cxled);
 
-       return sysfs_emit(buf, "%#llx\n", base);
+       guard(rwsem_read)(&cxl_dpa_rwsem);
+       return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
 }
 static DEVICE_ATTR_RO(dpa_resource);
 
index 56e575c79bb49187f909aa87d4f6c5d9894c3b75..3e817a6f94c6a4d2ac5113558a6c7633f7120821 100644 (file)
@@ -2467,10 +2467,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
        struct cxl_poison_context ctx;
        int rc = 0;
 
-       rc = down_read_interruptible(&cxl_region_rwsem);
-       if (rc)
-               return rc;
-
        ctx = (struct cxl_poison_context) {
                .port = port
        };
@@ -2480,7 +2476,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
                rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
                                             &ctx);
 
-       up_read(&cxl_region_rwsem);
        return rc;
 }
 
index 6a3abe5b17908dc3fbbfa4812fdc8e6c6ebbb504..b53f46245c377f05520c8275c95bf10c59be34d7 100644 (file)
@@ -828,6 +828,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
        dma_pool_destroy(fsl_chan->tcd_pool);
        fsl_chan->tcd_pool = NULL;
        fsl_chan->is_sw = false;
+       fsl_chan->srcid = 0;
 }
 
 void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
index 4635e16d7705e9036d644d95159bee216415fff6..238a69bd0d6f5d3ba6d8329543c49d3a750dba21 100644 (file)
@@ -396,9 +396,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
                link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
                                             DL_FLAG_PM_RUNTIME |
                                             DL_FLAG_RPM_ACTIVE);
-               if (IS_ERR(link)) {
-                       dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
-                               PTR_ERR(link));
+               if (!link) {
+                       dev_err(dev, "Failed to add device_link to %d\n", i);
                        return -EINVAL;
                }
 
@@ -631,6 +630,8 @@ static int fsl_edma_suspend_late(struct device *dev)
 
        for (i = 0; i < fsl_edma->n_chans; i++) {
                fsl_chan = &fsl_edma->chans[i];
+               if (fsl_edma->chan_masked & BIT(i))
+                       continue;
                spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
                /* Make sure chan is idle or will force disable. */
                if (unlikely(!fsl_chan->idle)) {
@@ -655,13 +656,16 @@ static int fsl_edma_resume_early(struct device *dev)
 
        for (i = 0; i < fsl_edma->n_chans; i++) {
                fsl_chan = &fsl_edma->chans[i];
+               if (fsl_edma->chan_masked & BIT(i))
+                       continue;
                fsl_chan->pm_state = RUNNING;
                edma_write_tcdreg(fsl_chan, 0, csr);
                if (fsl_chan->slave_id != 0)
                        fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
        }
 
-       edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+       if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+               edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
 
        return 0;
 }
index 7b54a3939ea135613f652535433582755220c4cb..315c004f58e4757558dac20cd4536a4fb4a9edd3 100644 (file)
@@ -440,12 +440,14 @@ union wqcfg {
 /*
  * This macro calculates the offset into the GRPCFG register
  * idxd - struct idxd *
- * n - wq id
- * ofs - the index of the 32b dword for the config register
+ * n - group id
+ * ofs - the index of the 64b qword for the config register
  *
- * The WQCFG register block is divided into groups per each wq. The n index
- * allows us to move to the register group that's for that particular wq.
- * Each register is 32bits. The ofs gives us the number of register to access.
+ * The GRPCFG register block is divided into three sub-registers, which
+ * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move
+ * to the register block that contains the three sub-registers.
+ * Each register block is 64bits. And the ofs gives us the offset
+ * within the GRPWQCFG register to access.
  */
 #define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
                                           (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
index c01db23e3333f70316ea1bfbca99930fabfdc1cf..3f922518e3a525f22b49c56ae655a670c63aa10e 100644 (file)
@@ -182,13 +182,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 
        portal = idxd_wq_portal_addr(wq);
 
-       /*
-        * The wmb() flushes writes to coherent DMA data before
-        * possibly triggering a DMA read. The wmb() is necessary
-        * even on UP because the recipient is a device.
-        */
-       wmb();
-
        /*
         * Pending the descriptor to the lockless list for the irq_entry
         * that we designated the descriptor to.
@@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
                llist_add(&desc->llnode, &ie->pending_llist);
        }
 
+       /*
+        * The wmb() flushes writes to coherent DMA data before
+        * possibly triggering a DMA read. The wmb() is necessary
+        * even on UP because the recipient is a device.
+        */
+       wmb();
+
        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
index 72d83cd9ed6bcd41aa8050c00cc41393f01693e5..90857d08a1a7435fc130b168eb155cd2f6e94242 100644 (file)
@@ -1246,8 +1246,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
        enum dma_slave_buswidth max_width;
        struct stm32_dma_desc *desc;
        size_t xfer_count, offset;
-       u32 num_sgs, best_burst, dma_burst, threshold;
-       int i;
+       u32 num_sgs, best_burst, threshold;
+       int dma_burst, i;
 
        num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
        desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
@@ -1266,6 +1266,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
                best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
                                                      threshold, max_width);
                dma_burst = stm32_dma_get_burst(chan, best_burst);
+               if (dma_burst < 0) {
+                       kfree(desc);
+                       return NULL;
+               }
 
                stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
                desc->sg_req[i].chan_reg.dma_scr =
index 2b6fd6e37c610741563ca80d6372bb3d21225a40..1272b1541f61e2a3e8103c37318bf262a61e4ffa 100644 (file)
@@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = {
        PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
        PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
        PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
-       /* PDMA_MAIN0 - SPI0-3 */
+       /* PDMA_MAIN0 - SPI0-2 */
+       PSIL_PDMA_XY_PKT(0x4300),
+       PSIL_PDMA_XY_PKT(0x4301),
        PSIL_PDMA_XY_PKT(0x4302),
        PSIL_PDMA_XY_PKT(0x4303),
        PSIL_PDMA_XY_PKT(0x4304),
@@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = {
        PSIL_PDMA_XY_PKT(0x4309),
        PSIL_PDMA_XY_PKT(0x430a),
        PSIL_PDMA_XY_PKT(0x430b),
-       PSIL_PDMA_XY_PKT(0x430c),
-       PSIL_PDMA_XY_PKT(0x430d),
        /* PDMA_MAIN1 - UART0-6 */
        PSIL_PDMA_XY_PKT(0x4400),
        PSIL_PDMA_XY_PKT(0x4401),
@@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = {
        /* SAUL */
        PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
        PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
-       /* PDMA_MAIN0 - SPI0-3 */
+       /* PDMA_MAIN0 - SPI0-2 */
+       PSIL_PDMA_XY_PKT(0xc300),
+       PSIL_PDMA_XY_PKT(0xc301),
        PSIL_PDMA_XY_PKT(0xc302),
        PSIL_PDMA_XY_PKT(0xc303),
        PSIL_PDMA_XY_PKT(0xc304),
@@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = {
        PSIL_PDMA_XY_PKT(0xc309),
        PSIL_PDMA_XY_PKT(0xc30a),
        PSIL_PDMA_XY_PKT(0xc30b),
-       PSIL_PDMA_XY_PKT(0xc30c),
-       PSIL_PDMA_XY_PKT(0xc30d),
        /* PDMA_MAIN1 - UART0-6 */
        PSIL_PDMA_XY_PKT(0xc400),
        PSIL_PDMA_XY_PKT(0xc401),
index ca9d71f914220a63bed00563d7bfa51df91fbfc8..4cf9123b0e932640f0974aacc6486b8df00fc656 100644 (file)
@@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = {
        PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
        PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
        PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
-       /* PDMA_MAIN0 - SPI0-3 */
+       /* PDMA_MAIN0 - SPI0-2 */
+       PSIL_PDMA_XY_PKT(0x4300),
+       PSIL_PDMA_XY_PKT(0x4301),
        PSIL_PDMA_XY_PKT(0x4302),
        PSIL_PDMA_XY_PKT(0x4303),
        PSIL_PDMA_XY_PKT(0x4304),
@@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = {
        PSIL_PDMA_XY_PKT(0x4309),
        PSIL_PDMA_XY_PKT(0x430a),
        PSIL_PDMA_XY_PKT(0x430b),
-       PSIL_PDMA_XY_PKT(0x430c),
-       PSIL_PDMA_XY_PKT(0x430d),
        /* PDMA_MAIN1 - UART0-6 */
        PSIL_PDMA_XY_PKT(0x4400),
        PSIL_PDMA_XY_PKT(0x4401),
@@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = {
        /* SAUL */
        PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
        PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
-       /* PDMA_MAIN0 - SPI0-3 */
+       /* PDMA_MAIN0 - SPI0-2 */
+       PSIL_PDMA_XY_PKT(0xc300),
+       PSIL_PDMA_XY_PKT(0xc301),
        PSIL_PDMA_XY_PKT(0xc302),
        PSIL_PDMA_XY_PKT(0xc303),
        PSIL_PDMA_XY_PKT(0xc304),
@@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = {
        PSIL_PDMA_XY_PKT(0xc309),
        PSIL_PDMA_XY_PKT(0xc30a),
        PSIL_PDMA_XY_PKT(0xc30b),
-       PSIL_PDMA_XY_PKT(0xc30c),
-       PSIL_PDMA_XY_PKT(0xc30d),
        /* PDMA_MAIN1 - UART0-6 */
        PSIL_PDMA_XY_PKT(0xc400),
        PSIL_PDMA_XY_PKT(0xc401),
index 442a0ebeb953e983d053d4c70d919e3a8aee213e..ce7cf736f0208466a1623520ab80cb5eed62cb8f 100644 (file)
@@ -925,7 +925,6 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
                        struct netlink_ext_ack *extack)
 {
        struct nlattr *tb[DPLL_A_PIN_MAX + 1];
-       enum dpll_pin_state state;
        u32 ppin_idx;
        int ret;
 
@@ -936,10 +935,14 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
                return -EINVAL;
        }
        ppin_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
-       state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
-       ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
-       if (ret)
-               return ret;
+
+       if (tb[DPLL_A_PIN_STATE]) {
+               enum dpll_pin_state state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+
+               ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
index 87e730dfefa08d89cae6487726e264d6dd945451..8625de20fc71752018c261445f250d4abc492f1e 100644 (file)
@@ -966,10 +966,10 @@ static int mc_probe(struct platform_device *pdev)
        edac_mc_id = emif_get_id(pdev->dev.of_node);
 
        regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
-       num_chans = FIELD_PREP(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
+       num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
        num_chans++;
 
-       num_csrows = FIELD_PREP(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
+       num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
        num_csrows *= 2;
        if (!num_csrows)
                num_csrows = 1;
index 07b72c67924704aa41ce19df458dc5ad4c46a48b..6146b2927d5c56af6bc3b9722c1789f29a4498fe 100644 (file)
@@ -99,6 +99,7 @@ struct ffa_drv_info {
        void *tx_buffer;
        bool mem_ops_native;
        bool bitmap_created;
+       bool notif_enabled;
        unsigned int sched_recv_irq;
        unsigned int cpuhp_state;
        struct ffa_pcpu_irq __percpu *irq_pcpu;
@@ -782,7 +783,7 @@ static void ffa_notification_info_get(void)
                        if (ids_processed >= max_ids - 1)
                                break;
 
-                       part_id = packed_id_list[++ids_processed];
+                       part_id = packed_id_list[ids_processed++];
 
                        if (!ids_count[list]) { /* Global Notification */
                                __do_sched_recv_cb(part_id, 0, false);
@@ -794,7 +795,7 @@ static void ffa_notification_info_get(void)
                                if (ids_processed >= max_ids - 1)
                                        break;
 
-                               vcpu_id = packed_id_list[++ids_processed];
+                               vcpu_id = packed_id_list[ids_processed++];
 
                                __do_sched_recv_cb(part_id, vcpu_id, true);
                        }
@@ -889,6 +890,8 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
 
 #define FFA_SECURE_PARTITION_ID_FLAG   BIT(15)
 
+#define ffa_notifications_disabled()   (!drv_info->notif_enabled)
+
 enum notify_type {
        NON_SECURE_VM,
        SECURE_PARTITION,
@@ -908,6 +911,9 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
        struct ffa_dev_part_info *partition;
        bool cb_valid;
 
+       if (ffa_notifications_disabled())
+               return -EOPNOTSUPP;
+
        partition = xa_load(&drv_info->partition_info, part_id);
        write_lock(&partition->rw_lock);
 
@@ -1001,6 +1007,9 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
        int rc;
        enum notify_type type = ffa_notify_type_get(dev->vm_id);
 
+       if (ffa_notifications_disabled())
+               return -EOPNOTSUPP;
+
        if (notify_id >= FFA_MAX_NOTIFICATIONS)
                return -EINVAL;
 
@@ -1027,6 +1036,9 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
        u32 flags = 0;
        enum notify_type type = ffa_notify_type_get(dev->vm_id);
 
+       if (ffa_notifications_disabled())
+               return -EOPNOTSUPP;
+
        if (notify_id >= FFA_MAX_NOTIFICATIONS)
                return -EINVAL;
 
@@ -1057,6 +1069,9 @@ static int ffa_notify_send(struct ffa_device *dev, int notify_id,
 {
        u32 flags = 0;
 
+       if (ffa_notifications_disabled())
+               return -EOPNOTSUPP;
+
        if (is_per_vcpu)
                flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
 
@@ -1233,7 +1248,7 @@ static void ffa_partitions_cleanup(void)
        if (!count)
                return;
 
-       info = kcalloc(count, sizeof(**info), GFP_KERNEL);
+       info = kcalloc(count, sizeof(*info), GFP_KERNEL);
        if (!info)
                return;
 
@@ -1311,8 +1326,10 @@ static int ffa_sched_recv_irq_map(void)
 
 static void ffa_sched_recv_irq_unmap(void)
 {
-       if (drv_info->sched_recv_irq)
+       if (drv_info->sched_recv_irq) {
                irq_dispose_mapping(drv_info->sched_recv_irq);
+               drv_info->sched_recv_irq = 0;
+       }
 }
 
 static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
@@ -1329,17 +1346,23 @@ static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
 
 static void ffa_uninit_pcpu_irq(void)
 {
-       if (drv_info->cpuhp_state)
+       if (drv_info->cpuhp_state) {
                cpuhp_remove_state(drv_info->cpuhp_state);
+               drv_info->cpuhp_state = 0;
+       }
 
-       if (drv_info->notif_pcpu_wq)
+       if (drv_info->notif_pcpu_wq) {
                destroy_workqueue(drv_info->notif_pcpu_wq);
+               drv_info->notif_pcpu_wq = NULL;
+       }
 
        if (drv_info->sched_recv_irq)
                free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
 
-       if (drv_info->irq_pcpu)
+       if (drv_info->irq_pcpu) {
                free_percpu(drv_info->irq_pcpu);
+               drv_info->irq_pcpu = NULL;
+       }
 }
 
 static int ffa_init_pcpu_irq(unsigned int irq)
@@ -1388,22 +1411,23 @@ static void ffa_notifications_cleanup(void)
                ffa_notification_bitmap_destroy();
                drv_info->bitmap_created = false;
        }
+       drv_info->notif_enabled = false;
 }
 
-static int ffa_notifications_setup(void)
+static void ffa_notifications_setup(void)
 {
        int ret, irq;
 
        ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
        if (ret) {
-               pr_err("Notifications not supported, continuing with it ..\n");
-               return 0;
+               pr_info("Notifications not supported, continuing with it ..\n");
+               return;
        }
 
        ret = ffa_notification_bitmap_create();
        if (ret) {
-               pr_err("notification_bitmap_create error %d\n", ret);
-               return ret;
+               pr_info("Notification bitmap create error %d\n", ret);
+               return;
        }
        drv_info->bitmap_created = true;
 
@@ -1422,14 +1446,11 @@ static int ffa_notifications_setup(void)
        hash_init(drv_info->notifier_hash);
        mutex_init(&drv_info->notify_lock);
 
-       /* Register internal scheduling callback */
-       ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
-                                      drv_info, true);
-       if (!ret)
-               return ret;
+       drv_info->notif_enabled = true;
+       return;
 cleanup:
+       pr_info("Notification setup failed %d, not enabled\n", ret);
        ffa_notifications_cleanup();
-       return ret;
 }
 
 static int __init ffa_init(void)
@@ -1483,17 +1504,18 @@ static int __init ffa_init(void)
        mutex_init(&drv_info->rx_lock);
        mutex_init(&drv_info->tx_lock);
 
-       ffa_setup_partitions();
-
        ffa_set_up_mem_ops_native_flag();
 
-       ret = ffa_notifications_setup();
+       ffa_notifications_setup();
+
+       ffa_setup_partitions();
+
+       ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
+                                      drv_info, true);
        if (ret)
-               goto partitions_cleanup;
+               pr_info("Failed to register driver sched callback %d\n", ret);
 
        return 0;
-partitions_cleanup:
-       ffa_partitions_cleanup();
 free_pages:
        if (drv_info->tx_buffer)
                free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
index c2435be0ae1be851b58fafef7242fc54bcf43774..e11555de99ab867c41306f95caad0e55174b9ab4 100644 (file)
@@ -152,7 +152,7 @@ struct perf_dom_info {
        u32 opp_count;
        u32 sustained_freq_khz;
        u32 sustained_perf_level;
-       u32 mult_factor;
+       unsigned long mult_factor;
        struct scmi_perf_domain_info info;
        struct scmi_opp opp[MAX_OPPS];
        struct scmi_fc_info *fc_info;
@@ -268,13 +268,14 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
                dom_info->sustained_perf_level =
                                        le32_to_cpu(attr->sustained_perf_level);
                if (!dom_info->sustained_freq_khz ||
-                   !dom_info->sustained_perf_level)
+                   !dom_info->sustained_perf_level ||
+                   dom_info->level_indexing_mode)
                        /* CPUFreq converts to kHz, hence default 1000 */
                        dom_info->mult_factor = 1000;
                else
                        dom_info->mult_factor =
-                                       (dom_info->sustained_freq_khz * 1000) /
-                                       dom_info->sustained_perf_level;
+                                       (dom_info->sustained_freq_khz * 1000UL) /
+                                       dom_info->sustained_perf_level;
                strscpy(dom_info->info.name, attr->name,
                        SCMI_SHORT_NAME_MAX_SIZE);
        }
@@ -798,7 +799,7 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
                if (!dom->level_indexing_mode)
                        freq = dom->opp[idx].perf * dom->mult_factor;
                else
-                       freq = dom->opp[idx].indicative_freq * 1000;
+                       freq = dom->opp[idx].indicative_freq * dom->mult_factor;
 
                data.level = dom->opp[idx].perf;
                data.freq = freq;
@@ -845,7 +846,8 @@ static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
        } else {
                struct scmi_opp *opp;
 
-               opp = LOOKUP_BY_FREQ(dom->opps_by_freq, freq / 1000);
+               opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
+                                    freq / dom->mult_factor);
                if (!opp)
                        return -EIO;
 
@@ -879,7 +881,7 @@ static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
                if (!opp)
                        return -EIO;
 
-               *freq = opp->indicative_freq * 1000;
+               *freq = opp->indicative_freq * dom->mult_factor;
        }
 
        return ret;
@@ -902,7 +904,7 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
                if (!dom->level_indexing_mode)
                        opp_freq = opp->perf * dom->mult_factor;
                else
-                       opp_freq = opp->indicative_freq * 1000;
+                       opp_freq = opp->indicative_freq * dom->mult_factor;
 
                if (opp_freq < *freq)
                        continue;
index 72c71ae201f0dad34ab0b035c7af8eeb525d66ba..d6ec5d4b8dbe0878e029a349e6f7110d6e22bc19 100644 (file)
@@ -35,9 +35,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        return status;
 }
 
-unsigned long kernel_entry_address(void)
+unsigned long kernel_entry_address(unsigned long kernel_addr)
 {
        unsigned long base = (unsigned long)&kernel_offset - kernel_offset;
 
-       return (unsigned long)&kernel_entry - base + VMLINUX_LOAD_ADDRESS;
+       return (unsigned long)&kernel_entry - base + kernel_addr;
 }
index 807cba2693fc177abd3255073a0b56121182ac86..0e0aa6cda73f7c9fbcce5ab354e05e1623583710 100644 (file)
@@ -37,9 +37,9 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
        return EFI_SUCCESS;
 }
 
-unsigned long __weak kernel_entry_address(void)
+unsigned long __weak kernel_entry_address(unsigned long kernel_addr)
 {
-       return *(unsigned long *)(PHYSADDR(VMLINUX_LOAD_ADDRESS) + 8);
+       return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
 }
 
 efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
@@ -73,7 +73,7 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
        csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
        csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
 
-       real_kernel_entry = (void *)kernel_entry_address();
+       real_kernel_entry = (void *)kernel_entry_address(kernel_addr);
 
        real_kernel_entry(true, (unsigned long)cmdline_ptr,
                          (unsigned long)efi_system_table);
index 1bfdae34df3934ecf7cb5e375718b9a687da6976..da9b7b8d0716df3dfbfb8c15160de9828bea4f92 100644 (file)
@@ -307,17 +307,20 @@ static void setup_unaccepted_memory(void)
                efi_err("Memory acceptance protocol failed\n");
 }
 
+static efi_char16_t *efistub_fw_vendor(void)
+{
+       unsigned long vendor = efi_table_attr(efi_system_table, fw_vendor);
+
+       return (efi_char16_t *)vendor;
+}
+
 static const efi_char16_t apple[] = L"Apple";
 
 static void setup_quirks(struct boot_params *boot_params)
 {
-       efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
-               efi_table_attr(efi_system_table, fw_vendor);
-
-       if (!memcmp(fw_vendor, apple, sizeof(apple))) {
-               if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
-                       retrieve_apple_device_properties(boot_params);
-       }
+       if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
+           !memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
+               retrieve_apple_device_properties(boot_params);
 }
 
 /*
@@ -765,11 +768,25 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
 
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
                u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
+               static const efi_char16_t ami[] = L"American Megatrends";
 
                efi_get_seed(seed, sizeof(seed));
 
                virt_addr += (range * seed[1]) >> 32;
                virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
+
+               /*
+                * Older Dell systems with AMI UEFI firmware v2.0 may hang
+                * while decompressing the kernel if physical address
+                * randomization is enabled.
+                *
+                * https://bugzilla.kernel.org/show_bug.cgi?id=218173
+                */
+               if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
+                   !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
+                       efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
+                       seed[0] = 0;
+               }
        }
 
        status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
index 6f309a3b2d9ade8a30e14e76a3b861c573302705..12d853845bb803bb8b5d35aa72a11e9adb8bb85d 100644 (file)
@@ -474,14 +474,17 @@ static ssize_t export_store(const struct class *class,
                goto done;
 
        status = gpiod_set_transitory(desc, false);
-       if (!status) {
-               status = gpiod_export(desc, true);
-               if (status < 0)
-                       gpiod_free(desc);
-               else
-                       set_bit(FLAG_SYSFS, &desc->flags);
+       if (status) {
+               gpiod_free(desc);
+               goto done;
        }
 
+       status = gpiod_export(desc, true);
+       if (status < 0)
+               gpiod_free(desc);
+       else
+               set_bit(FLAG_SYSFS, &desc->flags);
+
 done:
        if (status)
                pr_debug("%s: status %d\n", __func__, status);
index 5c0817cbc7c2b1b10ab4e22b19f89d8ef120c5f0..8dee52ce26d0cb07d5787218a5ec5b1ea4ba501b 100644 (file)
@@ -3791,10 +3791,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
                adev->gfx.mcbp = true;
        else if (amdgpu_mcbp == 0)
                adev->gfx.mcbp = false;
-       else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&
-                (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&
-                adev->gfx.num_gfx_rings)
-               adev->gfx.mcbp = true;
 
        if (amdgpu_sriov_vf(adev))
                adev->gfx.mcbp = true;
@@ -4520,8 +4516,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
        amdgpu_ras_suspend(adev);
 
-       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-
        amdgpu_device_ip_suspend_phase1(adev);
 
        if (!adev->in_s0ix)
@@ -4531,6 +4525,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
        if (r)
                return r;
 
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
        amdgpu_fence_driver_hw_fini(adev);
 
        amdgpu_device_ip_suspend_phase2(adev);
index 2b488fcf2f95b2021872383c39c1c03b22004531..e51e8918e6671aa27bb7ce239a19594a43638f10 100644 (file)
@@ -46,6 +46,8 @@
 #define MCA_REG__STATUS__ERRORCODEEXT(x)       MCA_REG_FIELD(x, 21, 16)
 #define MCA_REG__STATUS__ERRORCODE(x)          MCA_REG_FIELD(x, 15, 0)
 
+#define MCA_REG__SYND__ERRORINFORMATION(x)     MCA_REG_FIELD(x, 17, 0)
+
 enum amdgpu_mca_ip {
        AMDGPU_MCA_IP_UNKNOW = -1,
        AMDGPU_MCA_IP_PSP = 0,
index d79b4ca1ecfc4a0028bceb48703ea2cf3d989067..5ad03f2afdb45aa5233c5c48b3e12276513be831 100644 (file)
@@ -1343,6 +1343,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 
        abo = ttm_to_amdgpu_bo(bo);
 
+       WARN_ON(abo->vm_bo);
+
        if (abo->kfd_bo)
                amdgpu_amdkfd_release_notify(abo);
 
index a3dc68e989108e52c71a24bd97fff44f6183f8db..63fb4cd85e53b71c106daa9a5e3179da6cc42b1c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/reboot.h>
 #include <linux/syscalls.h>
 #include <linux/pm_runtime.h>
+#include <linux/list_sort.h>
 
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
@@ -3665,6 +3666,21 @@ static struct ras_err_node *amdgpu_ras_error_node_new(void)
        return err_node;
 }
 
+static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
+{
+       struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
+       struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
+       struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
+       struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
+
+       if (unlikely(infoa->socket_id != infob->socket_id))
+               return infoa->socket_id - infob->socket_id;
+       else
+               return infoa->die_id - infob->die_id;
+
+       return 0;
+}
+
 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
                                                      struct amdgpu_smuio_mcm_config_info *mcm_info)
 {
@@ -3682,6 +3698,7 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
 
        err_data->err_list_count++;
        list_add_tail(&err_node->node, &err_data->err_node_list);
+       list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
 
        return &err_node->err_info;
 }
index a2287bb252235e8eb9a906a9216bbf568a68fd33..a160265ddc07c141ecde4c6bc973655c1a812563 100644 (file)
@@ -642,13 +642,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
 
        if (!entry->bo)
                return;
+
+       entry->bo->vm_bo = NULL;
        shadow = amdgpu_bo_shadowed(entry->bo);
        if (shadow) {
                ttm_bo_set_bulk_move(&shadow->tbo, NULL);
                amdgpu_bo_unref(&shadow);
        }
        ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
-       entry->bo->vm_bo = NULL;
 
        spin_lock(&entry->vm->status_lock);
        list_del(&entry->vm_status);
index 49e934975719772ca3195747c24b9c941ca057ab..4db6bb73ead427d1c95a98ed55b42cadb0af0e95 100644 (file)
@@ -129,6 +129,11 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
 {
        int data;
 
+       if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2)) {
+               /* Default enabled */
+               *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+               return;
+       }
        /* AMD_CG_SUPPORT_HDP_LS */
        data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
        if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
index 9df011323d4b975f46053d3aedb269915fb75588..6ede85b28cc8c0bbfd6a7e94c6a3d1a677e958bf 100644 (file)
@@ -155,13 +155,6 @@ static int jpeg_v4_0_5_hw_init(void *handle)
        struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
        int r;
 
-       adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
-                               (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
-
-       WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
-               ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
-               VCN_JPEG_DB_CTRL__EN_MASK);
-
        r = amdgpu_ring_test_helper(ring);
        if (r)
                return r;
@@ -336,6 +329,14 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_jpeg(adev, true);
 
+       /* doorbell programming is done for every playback */
+       adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+                               (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+       WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+               ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+               VCN_JPEG_DB_CTRL__EN_MASK);
+
        /* disable power gating */
        r = jpeg_v4_0_5_disable_static_power_gating(adev);
        if (r)
index 3cf4684d0d3f3c2cef3138faa4bc264d838f6b0c..df1844d0800f2e5d9bcbc9d546ef3e52e060b733 100644 (file)
@@ -60,7 +60,7 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
 #define GFX_CMD_USB_PD_USE_LFB 0x480
 
 /* Retry times for vmbx ready wait */
-#define PSP_VMBX_POLLING_LIMIT 20000
+#define PSP_VMBX_POLLING_LIMIT 3000
 
 /* VBIOS gfl defines */
 #define MBOX_READY_MASK 0x80000000
@@ -161,14 +161,18 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
 static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
-       int retry_loop, ret;
+       int retry_loop, retry_cnt, ret;
 
+       retry_cnt =
+               (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) ?
+                       PSP_VMBX_POLLING_LIMIT :
+                       10;
        /* Wait for bootloader to signify that it is ready having bit 31 of
         * C2PMSG_35 set to 1. All other bits are expected to be cleared.
         * If there is an error in processing command, bits[7:0] will be set.
         * This is applicable for PSP v13.0.6 and newer.
         */
-       for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
+       for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
                ret = psp_wait_for(
                        psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
                        0x80000000, 0xffffffff, false);
@@ -821,7 +825,7 @@ static int psp_v13_0_query_boot_status(struct psp_context *psp)
        if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
                return 0;
 
-       if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007)
+       if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109)
                return 0;
 
        for_each_inst(i, inst_mask) {
index 45377a1752503b6d69b632b42a78036b524f2f7a..8d5d86675a7fea5e4e5e8bc4e49cdbb580ae17a6 100644 (file)
@@ -813,12 +813,12 @@ static int sdma_v2_4_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;
 
+       adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
        r = sdma_v2_4_init_microcode(adev);
        if (r)
                return r;
 
-       adev->sdma.num_instances = SDMA_MAX_INSTANCE;
-
        sdma_v2_4_set_ring_funcs(adev);
        sdma_v2_4_set_buffer_funcs(adev);
        sdma_v2_4_set_vm_pte_funcs(adev);
index 83c240f741b51951f92bbbf11341b3324702ba71..0058f3f7cf6e438b28062722687b52dc8fd67242 100644 (file)
@@ -1643,6 +1643,32 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
                *flags |= AMD_CG_SUPPORT_SDMA_LS;
 }
 
+static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+        * disallow GFXOFF in some cases leading to
+        * hangs in SDMA.  Disallow GFXOFF while SDMA is active.
+        * We can probably just limit this to 5.2.3,
+        * but it shouldn't hurt for other parts since
+        * this GFXOFF will be disallowed anyway when SDMA is
+        * active, this just makes it explicit.
+        */
+       amdgpu_gfx_off_ctrl(adev, false);
+}
+
+static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+        * disallow GFXOFF in some cases leading to
+        * hangs in SDMA.  Allow GFXOFF when SDMA is complete.
+        */
+       amdgpu_gfx_off_ctrl(adev, true);
+}
+
 const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
        .name = "sdma_v5_2",
        .early_init = sdma_v5_2_early_init,
@@ -1690,6 +1716,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
        .test_ib = sdma_v5_2_ring_test_ib,
        .insert_nop = sdma_v5_2_ring_insert_nop,
        .pad_ib = sdma_v5_2_ring_pad_ib,
+       .begin_use = sdma_v5_2_ring_begin_use,
+       .end_use = sdma_v5_2_ring_end_use,
        .emit_wreg = sdma_v5_2_ring_emit_wreg,
        .emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
index c82776e5e9aa2a154fd04ba61616bc3864fad9b1..51342809af03478f5a1767c7eed98007110ca01b 100644 (file)
@@ -1423,11 +1423,14 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       adev->nbio.funcs->get_clockgating_state(adev, flags);
+       if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
+               adev->nbio.funcs->get_clockgating_state(adev, flags);
 
-       adev->hdp.funcs->get_clock_gating_state(adev, flags);
+       if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
+               adev->hdp.funcs->get_clock_gating_state(adev, flags);
 
-       if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) {
+       if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
+           (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))) {
                /* AMD_CG_SUPPORT_DRM_MGCG */
                data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
                if (!(data & 0x01000000))
@@ -1440,9 +1443,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
        }
 
        /* AMD_CG_SUPPORT_ROM_MGCG */
-       adev->smuio.funcs->get_clock_gating_state(adev, flags);
+       if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
+               adev->smuio.funcs->get_clock_gating_state(adev, flags);
 
-       adev->df.funcs->get_clockgating_state(adev, flags);
+       if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
+               adev->df.funcs->get_clockgating_state(adev, flags);
 }
 
 static int soc15_common_set_powergating_state(void *handle,
index b452796fc6d39ea6e203346e0218d19adb6e829d..c8c00c2a5224a75989d1a83a5048c63b41d86683 100644 (file)
@@ -5182,6 +5182,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                return;
 
+       if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+               goto ffu;
+
        num_clips = drm_plane_get_damage_clips_count(new_plane_state);
        clips = drm_plane_get_damage_clips(new_plane_state);
 
index c7a29bb737e24d0394e770529f1d3f43d0333aae..aac98f93545a22eb38464be4ba32ef41e4b77dfd 100644 (file)
@@ -63,6 +63,12 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
                DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
                edid_caps->panel_patch.disable_fams = true;
                break;
+       /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
+       case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
+       case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+               DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
+               edid_caps->panel_patch.remove_sink_ext_caps = true;
+               break;
        default:
                return;
        }
index 7cdb1a8a0ba06b9e471d2faf0935d2b4e8e9d626..6a96810a477e6775f81437b405ac13dc1d892d83 100644 (file)
@@ -2386,7 +2386,13 @@ static enum bp_result get_vram_info_v30(
                return BP_RESULT_BADBIOSTABLE;
 
        info->num_chans = info_v30->channel_num;
-       info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
+       /* As suggested by VBIOS we should always use
+        * dram_channel_width_bytes = 2 when using VRAM
+        * table version 3.0. This is because the channel_width
+        * param in the VRAM info table is changed in 7000 series and
+        * no longer represents the memory channel width.
+        */
+       info->dram_channel_width_bytes = 2;
 
        return result;
 }
index 9649934ea186d6484ce8bc44155541f2f6fbc78e..e2a3aa8812df496e7286e9fcd2601ae27767e487 100644 (file)
@@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
        struct fixed31_32 v_scale_ratio;
        enum dc_rotation_angle rotation;
        bool mirror;
+       struct dc_stream_state *stream;
 };
 
 /* IPP related types */
index 139cf31d2e456f0546be62ade92f5cd06a49ac12..89c3bf0fe0c9916d3d725f4d4a72cce2ef349077 100644 (file)
@@ -1077,8 +1077,16 @@ void hubp2_cursor_set_position(
        if (src_y_offset < 0)
                src_y_offset = 0;
        /* Save necessary cursor info x, y position. w, h is saved in attribute func. */
-       hubp->cur_rect.x = src_x_offset + param->viewport.x;
-       hubp->cur_rect.y = src_y_offset + param->viewport.y;
+       if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+           param->rotation != ROTATION_ANGLE_0) {
+               hubp->cur_rect.x = 0;
+               hubp->cur_rect.y = 0;
+               hubp->cur_rect.w = param->stream->timing.h_addressable;
+               hubp->cur_rect.h = param->stream->timing.v_addressable;
+       } else {
+               hubp->cur_rect.x = src_x_offset + param->viewport.x;
+               hubp->cur_rect.y = src_y_offset + param->viewport.y;
+       }
 }
 
 void hubp2_clk_cntl(struct hubp *hubp, bool enable)
index ea7d60f9a9b45afb9500cda966ca8faf6dfbf985..6042a5a6a44f8c32187b2bea702892572f08ec57 100644 (file)
@@ -61,8 +61,12 @@ endif
 endif
 
 ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+frame_warn_flag := -Wframe-larger-than=3072
+else
 frame_warn_flag := -Wframe-larger-than=2048
 endif
+endif
 
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
index 39cf1ae3a3e16238f59e16db69556a6156cef067..f154a3eb1d1a0ae03252dde817b57627595bc33b 100644 (file)
@@ -124,7 +124,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
                        .phyclk_mhz = 600.0,
                        .phyclk_d18_mhz = 667.0,
                        .dscclk_mhz = 186.0,
-                       .dtbclk_mhz = 625.0,
+                       .dtbclk_mhz = 600.0,
                },
                {
                        .state = 1,
@@ -133,7 +133,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
                        .phyclk_mhz = 810.0,
                        .phyclk_d18_mhz = 667.0,
                        .dscclk_mhz = 209.0,
-                       .dtbclk_mhz = 625.0,
+                       .dtbclk_mhz = 600.0,
                },
                {
                        .state = 2,
@@ -142,7 +142,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
                        .phyclk_mhz = 810.0,
                        .phyclk_d18_mhz = 667.0,
                        .dscclk_mhz = 209.0,
-                       .dtbclk_mhz = 625.0,
+                       .dtbclk_mhz = 600.0,
                },
                {
                        .state = 3,
@@ -151,7 +151,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
                        .phyclk_mhz = 810.0,
                        .phyclk_d18_mhz = 667.0,
                        .dscclk_mhz = 371.0,
-                       .dtbclk_mhz = 625.0,
+                       .dtbclk_mhz = 600.0,
                },
                {
                        .state = 4,
@@ -160,7 +160,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
                        .phyclk_mhz = 810.0,
                        .phyclk_d18_mhz = 667.0,
                        .dscclk_mhz = 417.0,
-                       .dtbclk_mhz = 625.0,
+                       .dtbclk_mhz = 600.0,
                },
        },
        .num_states = 5,
@@ -348,6 +348,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
                                clock_limits[i].socclk_mhz;
                        dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
                                clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+                       dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+                               clock_limits[i].dtbclk_mhz;
                        dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
                                clk_table->num_entries;
                        dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -360,6 +362,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
                                clk_table->num_entries;
                        dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
                                clk_table->num_entries;
+                       dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+                               clk_table->num_entries;
                }
        }
 
index 59718ee33e5137e728921e286034f513505e8816..180f8a98a361a4d07f01c4a7d207ce3ad3196fdf 100644 (file)
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
                                mode_lib->ms.NoOfDPPThisState,
                                mode_lib->ms.dpte_group_bytes,
                                s->HostVMInefficiencyFactor,
-                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
                s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
                                                mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
                                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
                                                mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
                                                mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
                                                mode_lib->ms.MetaRowBytes[j][k],
                                                mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
                CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
                CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
                CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-               CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+               CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
                CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
                CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
                CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
                UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
                UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
-               UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+               UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
                UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
                UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
        CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
        CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
        CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-       CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+       CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
        CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
        CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
        CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                        mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
                        locals->dpte_group_bytes,
                        s->HostVMInefficiencyFactor,
-                       mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+                       mode_lib->ms.soc.hostvm_min_page_size_kbytes,
                        mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
        locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                        CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
                        CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                        CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
-                       CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+                       CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
                        CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
                        CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                        CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                                                mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
                                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
                                                mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
                                                locals->PDEAndMetaPTEBytesFrame[k],
                                                locals->MetaRowByte[k],
                                                locals->PixelPTEBytesPerRow[k],
@@ -9447,12 +9447,12 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
 
                // Output
                CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
-               CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[j];
+               CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[0];
                CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
                CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[]
-               CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[j];
+               CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[0];
                CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported
-               CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[j];
+               CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[0];
 
                CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                        &mode_lib->scratch,
index fa8fe5bf7e57566fdb897dfe891692a9d2db781c..db06a5b749b408d04fad1e7df947a08e5b3e096c 100644 (file)
@@ -423,8 +423,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
                }
 
                for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels; i++) {
-                       p->in_states->state_array[i].dtbclk_mhz =
-                               dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
+                       if (dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz > 0)
+                               p->in_states->state_array[i].dtbclk_mhz =
+                                       dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
                }
 
                for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels; i++) {
index 2b8b8366538e96a9dd99b3b6a51a7d08f22b0453..cdb903116eb7ca54e91746324f47ffe18abe70db 100644 (file)
@@ -3417,7 +3417,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
                .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
                .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
                .rotation = pipe_ctx->plane_state->rotation,
-               .mirror = pipe_ctx->plane_state->horizontal_mirror
+               .mirror = pipe_ctx->plane_state->horizontal_mirror,
+               .stream = pipe_ctx->stream,
        };
        bool pipe_split_on = false;
        bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
index 996e4ee99023a8b283219b7cc08537a256db1025..e5cfaaef70b3f7ad01da7d2b4a2359c601e9d19f 100644 (file)
@@ -287,8 +287,8 @@ bool set_default_brightness_aux(struct dc_link *link)
        if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
                if (!read_default_bl_aux(link, &default_backlight))
                        default_backlight = 150000;
-               // if > 5000, it might be wrong readback
-               if (default_backlight > 5000000)
+               // if < 1 nits or > 5000, it might be wrong readback
+               if (default_backlight < 1000 || default_backlight > 5000000)
                        default_backlight = 150000;
 
                return edp_set_backlight_level_nits(link, true,
index a522a7c0291108cd3ad2f2f57822aa3f758070b6..1675314a3ff20856519666689fb56d36a27c60b8 100644 (file)
@@ -839,6 +839,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
                                ((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
                                (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
                                isPSRSUSupported = false;
+                       else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+                               isPSRSUSupported = false;
                        else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
                                isPSRSUSupported = true;
                }
index 0d1209f2cf313bb2b4244cab48ba808aa9dba027..1c5049e894e30e1640d5da41049c2cca7980d076 100644 (file)
@@ -1085,6 +1085,10 @@ struct gpu_metrics_v3_0 {
        uint16_t                        average_dram_reads;
        /* time filtered DRAM write bandwidth [MB/sec] */
        uint16_t                        average_dram_writes;
+       /* time filtered IPU read bandwidth [MB/sec] */
+       uint16_t                        average_ipu_reads;
+       /* time filtered IPU write bandwidth [MB/sec] */
+       uint16_t                        average_ipu_writes;
 
        /* Driver attached timestamp (in ns) */
        uint64_t                        system_clock_counter;
@@ -1104,6 +1108,8 @@ struct gpu_metrics_v3_0 {
        uint32_t                        average_all_core_power;
        /* calculated core power [mW] */
        uint16_t                        average_core_power[16];
+       /* time filtered total system power [mW] */
+       uint16_t                        average_sys_power;
        /* maximum IRM defined STAPM power limit [mW] */
        uint16_t                        stapm_power_limit;
        /* time filtered STAPM power limit [mW] */
@@ -1116,6 +1122,8 @@ struct gpu_metrics_v3_0 {
        uint16_t                        average_ipuclk_frequency;
        uint16_t                        average_fclk_frequency;
        uint16_t                        average_vclk_frequency;
+       uint16_t                        average_uclk_frequency;
+       uint16_t                        average_mpipu_frequency;
 
        /* Current clocks */
        /* target core frequency [MHz] */
@@ -1125,6 +1133,15 @@ struct gpu_metrics_v3_0 {
        /* GFXCLK frequency limit enforced on GFX [MHz] */
        uint16_t                        current_gfx_maxfreq;
 
+       /* Throttle Residency (ASIC dependent) */
+       uint32_t                        throttle_residency_prochot;
+       uint32_t                        throttle_residency_spl;
+       uint32_t                        throttle_residency_fppt;
+       uint32_t                        throttle_residency_sppt;
+       uint32_t                        throttle_residency_thm_core;
+       uint32_t                        throttle_residency_thm_gfx;
+       uint32_t                        throttle_residency_thm_soc;
+
        /* Metrics table alpha filter time constant [us] */
        uint32_t                        time_filter_alphavalue;
 };
index ca2ece24e1e07bf97fc4a66e3baf25f0ea5acd13..49028dde0f87b13be19c093d13df7e539c91a2b4 100644 (file)
@@ -2198,10 +2198,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
        } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
                if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
                        *states = ATTR_STATE_UNSUPPORTED;
-       } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) {
+       } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
                if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
                        *states = ATTR_STATE_UNSUPPORTED;
-       } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) {
+       } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
                if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
index 23fa71cafb145fba9fba66274e2ddcb080e5be7d..f8b2e6cc2568886a797c5dbecac78486f76df6ed 100644 (file)
@@ -1408,6 +1408,16 @@ typedef enum {
        METRICS_PCIE_WIDTH,
        METRICS_CURR_FANPWM,
        METRICS_CURR_SOCKETPOWER,
+       METRICS_AVERAGE_VPECLK,
+       METRICS_AVERAGE_IPUCLK,
+       METRICS_AVERAGE_MPIPUCLK,
+       METRICS_THROTTLER_RESIDENCY_PROCHOT,
+       METRICS_THROTTLER_RESIDENCY_SPL,
+       METRICS_THROTTLER_RESIDENCY_FPPT,
+       METRICS_THROTTLER_RESIDENCY_SPPT,
+       METRICS_THROTTLER_RESIDENCY_THM_CORE,
+       METRICS_THROTTLER_RESIDENCY_THM_GFX,
+       METRICS_THROTTLER_RESIDENCY_THM_SOC,
 } MetricsMember_t;
 
 enum smu_cmn2asic_mapping_type {
index 22f88842a7fd21e9dd089c0896ff9b03b1876fd4..8f42771e1f0a2836c9cf874cfe3cc3e56658386e 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if
 // any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 6
+#define PMFW_DRIVER_IF_VERSION 7
 
 typedef struct {
   int32_t value;
@@ -150,37 +150,50 @@ typedef struct {
 } DpmClocks_t;
 
 typedef struct {
-  uint16_t CoreFrequency[16];        //Target core frequency [MHz]
-  uint16_t CorePower[16];            //CAC calculated core power [mW]
-  uint16_t CoreTemperature[16];      //TSEN measured core temperature [centi-C]
-  uint16_t GfxTemperature;           //TSEN measured GFX temperature [centi-C]
-  uint16_t SocTemperature;           //TSEN measured SOC temperature [centi-C]
-  uint16_t StapmOpnLimit;            //Maximum IRM defined STAPM power limit [mW]
-  uint16_t StapmCurrentLimit;        //Time filtered STAPM power limit [mW]
-  uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
-  uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
-  uint16_t SkinTemp;                 //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
-  uint16_t GfxclkFrequency;          //Time filtered target GFXCLK frequency [MHz]
-  uint16_t FclkFrequency;            //Time filtered target FCLK frequency [MHz]
-  uint16_t GfxActivity;              //Time filtered GFX busy % [0-100]
-  uint16_t SocclkFrequency;          //Time filtered target SOCCLK frequency [MHz]
-  uint16_t VclkFrequency;            //Time filtered target VCLK frequency [MHz]
-  uint16_t VcnActivity;              //Time filtered VCN busy % [0-100]
-  uint16_t VpeclkFrequency;          //Time filtered target VPECLK frequency [MHz]
-  uint16_t IpuclkFrequency;          //Time filtered target IPUCLK frequency [MHz]
-  uint16_t IpuBusy[8];               //Time filtered IPU per-column busy % [0-100]
-  uint16_t DRAMReads;                //Time filtered DRAM read bandwidth [MB/sec]
-  uint16_t DRAMWrites;               //Time filtered DRAM write bandwidth [MB/sec]
-  uint16_t CoreC0Residency[16];      //Time filtered per-core C0 residency % [0-100]
-  uint16_t IpuPower;                 //Time filtered IPU power [mW]
-  uint32_t ApuPower;                 //Time filtered APU power [mW]
-  uint32_t GfxPower;                 //Time filtered GFX power [mW]
-  uint32_t dGpuPower;                //Time filtered dGPU power [mW]
-  uint32_t SocketPower;              //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
-  uint32_t AllCorePower;             //Time filtered sum of core power across all cores in the socket [mW]
-  uint32_t FilterAlphaValue;         //Metrics table alpha filter time constant [us]
-  uint32_t MetricsCounter;           //Counter that is incremented on every metrics table update [PM_TIMER cycles]
-  uint32_t spare[16];
+  uint16_t CoreFrequency[16];          //Target core frequency [MHz]
+  uint16_t CorePower[16];              //CAC calculated core power [mW]
+  uint16_t CoreTemperature[16];        //TSEN measured core temperature [centi-C]
+  uint16_t GfxTemperature;             //TSEN measured GFX temperature [centi-C]
+  uint16_t SocTemperature;             //TSEN measured SOC temperature [centi-C]
+  uint16_t StapmOpnLimit;              //Maximum IRM defined STAPM power limit [mW]
+  uint16_t StapmCurrentLimit;          //Time filtered STAPM power limit [mW]
+  uint16_t InfrastructureCpuMaxFreq;   //CCLK frequency limit enforced on classic cores [MHz]
+  uint16_t InfrastructureGfxMaxFreq;   //GFXCLK frequency limit enforced on GFX [MHz]
+  uint16_t SkinTemp;                   //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
+  uint16_t GfxclkFrequency;            //Time filtered target GFXCLK frequency [MHz]
+  uint16_t FclkFrequency;              //Time filtered target FCLK frequency [MHz]
+  uint16_t GfxActivity;                //Time filtered GFX busy % [0-100]
+  uint16_t SocclkFrequency;            //Time filtered target SOCCLK frequency [MHz]
+  uint16_t VclkFrequency;              //Time filtered target VCLK frequency [MHz]
+  uint16_t VcnActivity;                //Time filtered VCN busy % [0-100]
+  uint16_t VpeclkFrequency;            //Time filtered target VPECLK frequency [MHz]
+  uint16_t IpuclkFrequency;            //Time filtered target IPUCLK frequency [MHz]
+  uint16_t IpuBusy[8];                 //Time filtered IPU per-column busy % [0-100]
+  uint16_t DRAMReads;                  //Time filtered DRAM read bandwidth [MB/sec]
+  uint16_t DRAMWrites;                 //Time filtered DRAM write bandwidth [MB/sec]
+  uint16_t CoreC0Residency[16];        //Time filtered per-core C0 residency % [0-100]
+  uint16_t IpuPower;                   //Time filtered IPU power [mW]
+  uint32_t ApuPower;                   //Time filtered APU power [mW]
+  uint32_t GfxPower;                   //Time filtered GFX power [mW]
+  uint32_t dGpuPower;                  //Time filtered dGPU power [mW]
+  uint32_t SocketPower;                //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
+  uint32_t AllCorePower;               //Time filtered sum of core power across all cores in the socket [mW]
+  uint32_t FilterAlphaValue;           //Metrics table alpha filter time constant [us]
+  uint32_t MetricsCounter;             //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+  uint16_t MemclkFrequency;            //Time filtered target MEMCLK frequency [MHz]
+  uint16_t MpipuclkFrequency;          //Time filtered target MPIPUCLK frequency [MHz]
+  uint16_t IpuReads;                   //Time filtered IPU read bandwidth [MB/sec]
+  uint16_t IpuWrites;                  //Time filtered IPU write bandwidth [MB/sec]
+  uint32_t ThrottleResidency_PROCHOT;  //Counter that is incremented on every metrics table update when PROCHOT was engaged [PM_TIMER cycles]
+  uint32_t ThrottleResidency_SPL;      //Counter that is incremented on every metrics table update when SPL was engaged [PM_TIMER cycles]
+  uint32_t ThrottleResidency_FPPT;     //Counter that is incremented on every metrics table update when fast PPT was engaged [PM_TIMER cycles]
+  uint32_t ThrottleResidency_SPPT;     //Counter that is incremented on every metrics table update when slow PPT was engaged [PM_TIMER cycles]
+  uint32_t ThrottleResidency_THM_CORE; //Counter that is incremented on every metrics table update when CORE thermal throttling was engaged [PM_TIMER cycles]
+  uint32_t ThrottleResidency_THM_GFX;  //Counter that is incremented on every metrics table update when GFX thermal throttling was engaged [PM_TIMER cycles]
+  uint32_t ThrottleResidency_THM_SOC;  //Counter that is incremented on every metrics table update when SOC thermal throttling was engaged [PM_TIMER cycles]
+  uint16_t Psys;                       //Time filtered Psys power [mW]
+  uint16_t spare1;
+  uint32_t spare[6];
 } SmuMetrics_t;
 
 //ISP tile definitions
index 0e5a77c3c2e216362b2e5363f1ec25f62940ede3..900a2d9e6d85481abd728cbdd11c9fe66e826a09 100644 (file)
@@ -2593,13 +2593,20 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct
 static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
                                  enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
 {
+       struct smu_context *smu = adev->powerplay.pp_handle;
        uint32_t errcode, instlo;
 
        instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
        if (instlo != 0x03b30400)
                return false;
 
-       errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
+       if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+               errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
+               errcode &= 0xff;
+       } else {
+               errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
+       }
+
        return mca_smu_check_error_code(adev, mca_ras, errcode);
 }
 
index 03b38c3a9968431914912131c8a82794dca4c283..94ccdbfd709092ba24ceea84c8ea125e861e8544 100644 (file)
@@ -246,11 +246,20 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
                *value = 0;
                break;
        case METRICS_AVERAGE_UCLK:
-               *value = 0;
+               *value = metrics->MemclkFrequency;
                break;
        case METRICS_AVERAGE_FCLK:
                *value = metrics->FclkFrequency;
                break;
+       case METRICS_AVERAGE_VPECLK:
+               *value = metrics->VpeclkFrequency;
+               break;
+       case METRICS_AVERAGE_IPUCLK:
+               *value = metrics->IpuclkFrequency;
+               break;
+       case METRICS_AVERAGE_MPIPUCLK:
+               *value = metrics->MpipuclkFrequency;
+               break;
        case METRICS_AVERAGE_GFXACTIVITY:
                *value = metrics->GfxActivity / 100;
                break;
@@ -270,8 +279,26 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
                *value = metrics->SocTemperature / 100 *
                SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
-       case METRICS_THROTTLER_STATUS:
-               *value = 0;
+       case METRICS_THROTTLER_RESIDENCY_PROCHOT:
+               *value = metrics->ThrottleResidency_PROCHOT;
+               break;
+       case METRICS_THROTTLER_RESIDENCY_SPL:
+               *value = metrics->ThrottleResidency_SPL;
+               break;
+       case METRICS_THROTTLER_RESIDENCY_FPPT:
+               *value = metrics->ThrottleResidency_FPPT;
+               break;
+       case METRICS_THROTTLER_RESIDENCY_SPPT:
+               *value = metrics->ThrottleResidency_SPPT;
+               break;
+       case METRICS_THROTTLER_RESIDENCY_THM_CORE:
+               *value = metrics->ThrottleResidency_THM_CORE;
+               break;
+       case METRICS_THROTTLER_RESIDENCY_THM_GFX:
+               *value = metrics->ThrottleResidency_THM_GFX;
+               break;
+       case METRICS_THROTTLER_RESIDENCY_THM_SOC:
+               *value = metrics->ThrottleResidency_THM_SOC;
                break;
        case METRICS_VOLTAGE_VDDGFX:
                *value = 0;
@@ -498,6 +525,8 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
                sizeof(uint16_t) * 16);
        gpu_metrics->average_dram_reads = metrics.DRAMReads;
        gpu_metrics->average_dram_writes = metrics.DRAMWrites;
+       gpu_metrics->average_ipu_reads = metrics.IpuReads;
+       gpu_metrics->average_ipu_writes = metrics.IpuWrites;
 
        gpu_metrics->average_socket_power = metrics.SocketPower;
        gpu_metrics->average_ipu_power = metrics.IpuPower;
@@ -505,6 +534,7 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
        gpu_metrics->average_gfx_power = metrics.GfxPower;
        gpu_metrics->average_dgpu_power = metrics.dGpuPower;
        gpu_metrics->average_all_core_power = metrics.AllCorePower;
+       gpu_metrics->average_sys_power = metrics.Psys;
        memcpy(&gpu_metrics->average_core_power[0],
                &metrics.CorePower[0],
                sizeof(uint16_t) * 16);
@@ -515,6 +545,8 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
        gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
        gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
        gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
+       gpu_metrics->average_mpipu_frequency = metrics.MpipuclkFrequency;
 
        memcpy(&gpu_metrics->current_coreclk[0],
                &metrics.CoreFrequency[0],
@@ -522,6 +554,14 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
        gpu_metrics->current_core_maxfreq = metrics.InfrastructureCpuMaxFreq;
        gpu_metrics->current_gfx_maxfreq = metrics.InfrastructureGfxMaxFreq;
 
+       gpu_metrics->throttle_residency_prochot = metrics.ThrottleResidency_PROCHOT;
+       gpu_metrics->throttle_residency_spl = metrics.ThrottleResidency_SPL;
+       gpu_metrics->throttle_residency_fppt = metrics.ThrottleResidency_FPPT;
+       gpu_metrics->throttle_residency_sppt = metrics.ThrottleResidency_SPPT;
+       gpu_metrics->throttle_residency_thm_core = metrics.ThrottleResidency_THM_CORE;
+       gpu_metrics->throttle_residency_thm_gfx = metrics.ThrottleResidency_THM_GFX;
+       gpu_metrics->throttle_residency_thm_soc = metrics.ThrottleResidency_THM_SOC;
+
        gpu_metrics->time_filter_alphavalue = metrics.FilterAlphaValue;
        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
index ba82a1142adf730e9ce8ab31b45bf0eb62405504..3e6a4e2044c0eb8bf6e099dcc3a22e0fc5810c42 100644 (file)
@@ -313,6 +313,7 @@ config DRM_TOSHIBA_TC358768
        select REGMAP_I2C
        select DRM_PANEL
        select DRM_MIPI_DSI
+       select VIDEOMODE_HELPERS
        help
          Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
 
index 2444fc33dd7c77e2e512f80f086d422218b953e7..68ffcc0b00dca11f64e7839fdfc8933888acc053 100644 (file)
@@ -2012,7 +2012,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
                        return ret;
 
                drm_atomic_helper_async_commit(dev, state);
-               drm_atomic_helper_cleanup_planes(dev, state);
+               drm_atomic_helper_unprepare_planes(dev, state);
 
                return 0;
        }
@@ -2072,7 +2072,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
        return 0;
 
 err:
-       drm_atomic_helper_cleanup_planes(dev, state);
+       drm_atomic_helper_unprepare_planes(dev, state);
        return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit);
@@ -2650,6 +2650,39 @@ fail_prepare_fb:
 }
 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 
+/**
+ * drm_atomic_helper_unprepare_planes - release plane resources on aborts
+ * @dev: DRM device
+ * @state: atomic state object with old state structures
+ *
+ * This function cleans up plane state, specifically framebuffers, from the
+ * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
+ * when aborting an atomic commit. For cleaning up after a successful commit
+ * use drm_atomic_helper_cleanup_planes().
+ */
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+                                       struct drm_atomic_state *state)
+{
+       struct drm_plane *plane;
+       struct drm_plane_state *new_plane_state;
+       int i;
+
+       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+               const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+               if (funcs->end_fb_access)
+                       funcs->end_fb_access(plane, new_plane_state);
+       }
+
+       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+               const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+               if (funcs->cleanup_fb)
+                       funcs->cleanup_fb(plane, new_plane_state);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
+
 static bool plane_crtc_active(const struct drm_plane_state *state)
 {
        return state->crtc && state->crtc->state->active;
@@ -2784,6 +2817,17 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 
                funcs->atomic_flush(crtc, old_state);
        }
+
+       /*
+        * Signal end of framebuffer access here before hw_done. After hw_done,
+        * a later commit might have already released the plane state.
+        */
+       for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+               const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+               if (funcs->end_fb_access)
+                       funcs->end_fb_access(plane, old_plane_state);
+       }
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
 
@@ -2911,40 +2955,22 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
  * configuration. Hence the old configuration must be perserved in @old_state to
  * be able to call this function.
  *
- * This function must also be called on the new state when the atomic update
- * fails at any point after calling drm_atomic_helper_prepare_planes().
+ * This function may not be called on the new state when the atomic update
+ * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
+ * drm_atomic_helper_unprepare_planes() in this case.
  */
 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
                                      struct drm_atomic_state *old_state)
 {
        struct drm_plane *plane;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
+       struct drm_plane_state *old_plane_state;
        int i;
 
-       for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+       for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
                const struct drm_plane_helper_funcs *funcs = plane->helper_private;
 
-               if (funcs->end_fb_access)
-                       funcs->end_fb_access(plane, new_plane_state);
-       }
-
-       for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
-               const struct drm_plane_helper_funcs *funcs;
-               struct drm_plane_state *plane_state;
-
-               /*
-                * This might be called before swapping when commit is aborted,
-                * in which case we have to cleanup the new state.
-                */
-               if (old_plane_state == plane->state)
-                       plane_state = new_plane_state;
-               else
-                       plane_state = old_plane_state;
-
-               funcs = plane->helper_private;
-
                if (funcs->cleanup_fb)
-                       funcs->cleanup_fb(plane, plane_state);
+                       funcs->cleanup_fb(plane, old_plane_state);
        }
 }
 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
index 2ed2585ded3784882dd90260e070e27017a6d1f2..6899b3dc1f12a553c52043c2b73c985e72f627b9 100644 (file)
@@ -236,7 +236,7 @@ static int
 drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
 {
        if (file_priv->was_master &&
-           rcu_access_pointer(file_priv->pid) == task_pid(current))
+           rcu_access_pointer(file_priv->pid) == task_tgid(current))
                return 0;
 
        if (!capable(CAP_SYS_ADMIN))
index df9bf3c9206e717bdb7409ee0c99574df947ed0b..cb90e70d85e862a495f2e8691813161a93b7a030 100644 (file)
@@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        struct drm_mode_set set;
        uint32_t __user *set_connectors_ptr;
        struct drm_modeset_acquire_ctx ctx;
-       int ret;
-       int i;
+       int ret, i, num_connectors = 0;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EOPNOTSUPP;
@@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
                                        connector->name);
 
                        connector_set[i] = connector;
+                       num_connectors++;
                }
        }
 
@@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        set.y = crtc_req->y;
        set.mode = mode;
        set.connectors = connector_set;
-       set.num_connectors = crtc_req->count_connectors;
+       set.num_connectors = num_connectors;
        set.fb = fb;
 
        if (drm_drv_uses_atomic_modeset(dev))
@@ -892,7 +892,7 @@ out:
                drm_framebuffer_put(fb);
 
        if (connector_set) {
-               for (i = 0; i < crtc_req->count_connectors; i++) {
+               for (i = 0; i < num_connectors; i++) {
                        if (connector_set[i])
                                drm_connector_put(connector_set[i]);
                }
index 39db08f803eac2c08167bb14d2403957ffb0ca0f..3b40650998728cbf70d8ec2aadfc671d31e71612 100644 (file)
@@ -2309,7 +2309,8 @@ int drm_edid_override_connector_update(struct drm_connector *connector)
 
        override = drm_edid_override_get(connector);
        if (override) {
-               num_modes = drm_edid_connector_update(connector, override);
+               if (drm_edid_connector_update(connector, override) == 0)
+                       num_modes = drm_edid_connector_add_modes(connector);
 
                drm_edid_free(override);
 
index a971590b81323021cd697b8a71bc4686e2c40ca4..e2c7373f20c6b791f9706c1dad0758f363c63073 100644 (file)
@@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
                return 0;
 
        if (!priv->mapping) {
-               void *mapping;
+               void *mapping = NULL;
 
                if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
                        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
                else if (IS_ENABLED(CONFIG_IOMMU_DMA))
                        mapping = iommu_get_domain_for_dev(priv->dma_dev);
-               else
-                       mapping = ERR_PTR(-ENODEV);
 
-               if (IS_ERR(mapping))
-                       return PTR_ERR(mapping);
+               if (!mapping)
+                       return -ENODEV;
                priv->mapping = mapping;
        }
 
index f3aaa4ea3e68208b1f6bfccaf49a2d646e43ded1..dd9903eab563eee8537dfb731dca112f1dd53dea 100644 (file)
@@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
                return ret;
 
        crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+       if (IS_ERR(crtc))
+               return PTR_ERR(crtc);
        crtc->pipe_clk = &hdata->phy_clk;
 
        ret = hdmi_create_connector(encoder);
index c4585e445198d3a3f51c6398b3b14dbe8ba9adea..67143a0f518930d755385e18df6abb362680a6b5 100644 (file)
@@ -1440,6 +1440,13 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
 static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
                                                 struct drm_display_mode *mode)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
+       enum drm_mode_status status;
+
+       status = intel_cpu_transcoder_mode_valid(i915, mode);
+       if (status != MODE_OK)
+               return status;
+
        /* FIXME: DSC? */
        return intel_dsi_mode_valid(connector, mode);
 }
index 913e5d230a4df9ae519cb626e2e515969d49cca3..6f6b348b8a40544f8cd0ccd301ce0fd0ba00b3f2 100644 (file)
@@ -348,8 +348,13 @@ intel_crt_mode_valid(struct drm_connector *connector,
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int max_dotclk = dev_priv->max_dotclk_freq;
+       enum drm_mode_status status;
        int max_clock;
 
+       status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+       if (status != MODE_OK)
+               return status;
+
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
 
index a2a806262c9e1f4e35c955ca40998fe70a56eb6e..63ba4d54a715290ce1128c67788ff3b19777b935 100644 (file)
@@ -906,12 +906,18 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
                            const struct intel_crtc_state *new_crtc_state)
 {
+       if (!new_crtc_state->hw.active)
+               return false;
+
        return is_enabling(active_planes, old_crtc_state, new_crtc_state);
 }
 
 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
                             const struct intel_crtc_state *new_crtc_state)
 {
+       if (!old_crtc_state->hw.active)
+               return false;
+
        return is_disabling(active_planes, old_crtc_state, new_crtc_state);
 }
 
@@ -928,6 +934,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
 static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
                         const struct intel_crtc_state *new_crtc_state)
 {
+       if (!new_crtc_state->hw.active)
+               return false;
+
        return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
                (new_crtc_state->vrr.enable &&
                 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
@@ -937,6 +946,9 @@ static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
 static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
                          const struct intel_crtc_state *new_crtc_state)
 {
+       if (!old_crtc_state->hw.active)
+               return false;
+
        return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
                (old_crtc_state->vrr.enable &&
                 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
@@ -7476,7 +7488,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        intel_color_cleanup_commit(new_crtc_state);
 
-               drm_atomic_helper_cleanup_planes(dev, &state->base);
+               drm_atomic_helper_unprepare_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
@@ -7857,6 +7869,16 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
            mode->vtotal > vtotal_max)
                return MODE_V_ILLEGAL;
 
+       return MODE_OK;
+}
+
+enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
+                                                    const struct drm_display_mode *mode)
+{
+       /*
+        * Additional transcoder timing limits,
+        * excluding BXT/GLK DSI transcoders.
+        */
        if (DISPLAY_VER(dev_priv) >= 5) {
                if (mode->hdisplay < 64 ||
                    mode->htotal - mode->hdisplay < 32)
index 0e5dffe8f0189ec5a57b74bd4fc4e23b5ede65df..a05c7e2b782eac9beedfddea4050a0b0c70e3ec4 100644 (file)
@@ -403,6 +403,9 @@ enum drm_mode_status
 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
                                const struct drm_display_mode *mode,
                                bool bigjoiner);
+enum drm_mode_status
+intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
+                               const struct drm_display_mode *mode);
 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
 bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
 bool is_trans_port_sync_master(const struct intel_crtc_state *state);
index 2852958dd4e75808523485633eb86d3e759d65df..b21bcd40f11100de9027a4509f618a3fa9a1d720 100644 (file)
@@ -1172,6 +1172,10 @@ intel_dp_mode_valid(struct drm_connector *_connector,
        enum drm_mode_status status;
        bool dsc = false, bigjoiner = false;
 
+       status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+       if (status != MODE_OK)
+               return status;
+
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;
 
index dbc1b66c8ee4859edd37450b945959a228832459..1abfafbbfa7571e936f2c839ce0f4377bfda6677 100644 (file)
@@ -650,19 +650,30 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *crtc_state,
                            u8 link_bw, u8 rate_select)
 {
-       u8 link_config[2];
+       u8 lane_count = crtc_state->lane_count;
 
-       /* Write the link configuration data */
-       link_config[0] = link_bw;
-       link_config[1] = crtc_state->lane_count;
        if (crtc_state->enhanced_framing)
-               link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-       drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+               lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+       if (link_bw) {
+               /* DP and eDP v1.3 and earlier link bw set method. */
+               u8 link_config[] = { link_bw, lane_count };
 
-       /* eDP 1.4 rate select method. */
-       if (!link_bw)
-               drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
-                                 &rate_select, 1);
+               drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
+                                 ARRAY_SIZE(link_config));
+       } else {
+               /*
+                * eDP v1.4 and later link rate set method.
+                *
+                * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
+                * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
+                *
+                * eDP v1.5 sinks allow choosing either, and the last choice
+                * shall be active.
+                */
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
+       }
 }
 
 /*
index 851b312bd84494cca9e074bb5f3fb2dcf49472bf..aa10612626136d5f51d4eda2a7b2f002dd9983f6 100644 (file)
@@ -959,6 +959,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
                return 0;
        }
 
+       *status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+       if (*status != MODE_OK)
+               return 0;
+
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
                *status = MODE_NO_DBLESCAN;
                return 0;
@@ -993,6 +997,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
        if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
                bigjoiner = true;
                max_dotclk *= 2;
+
+               /* TODO: add support for bigjoiner */
+               *status = MODE_CLOCK_HIGH;
+               return 0;
        }
 
        if (DISPLAY_VER(dev_priv) >= 10 &&
@@ -1027,11 +1035,15 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
         * Big joiner configuration needs DSC for TGL which is not true for
         * XE_LPD where uncompressed joiner is supported.
         */
-       if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
-               return MODE_CLOCK_HIGH;
+       if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
+               *status = MODE_CLOCK_HIGH;
+               return 0;
+       }
 
-       if (mode_rate > max_rate && !dsc)
-               return MODE_CLOCK_HIGH;
+       if (mode_rate > max_rate && !dsc) {
+               *status = MODE_CLOCK_HIGH;
+               return 0;
+       }
 
        *status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
        return 0;
index 78b6fe24dcd8bbdf1c70095320337852c3d096bb..7fd6280c54a79afa9d81517bb9c30283f7e0e987 100644 (file)
@@ -340,7 +340,7 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
 }
 
 static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
-                             unsigned int dewake_scanline)
+                             int dewake_scanline)
 {
        struct intel_crtc *crtc = dsb->crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
index 55d6743374bdd64da887e8d58630abbffcea8ada..9111e9d46486d8920077eb2cbd3b01c09f911a16 100644 (file)
@@ -217,11 +217,17 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
                     struct drm_display_mode *mode)
 {
        struct intel_connector *connector = to_intel_connector(_connector);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
        const struct drm_display_mode *fixed_mode =
                intel_panel_fixed_mode(connector, mode);
        int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
        int target_clock = mode->clock;
+       enum drm_mode_status status;
+
+       status = intel_cpu_transcoder_mode_valid(i915, mode);
+       if (status != MODE_OK)
+               return status;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
index 19b35ece31f1b7b245681ea7f58d75a4417b5668..646f367a13f5a064949cfbc91d176c572b9bfc99 100644 (file)
@@ -1374,7 +1374,8 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
        struct drm_i915_private *i915 = to_i915(fb->base.dev);
        unsigned int stride_tiles;
 
-       if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
+       if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+           src_stride_tiles < dst_stride_tiles)
                stride_tiles = src_stride_tiles;
        else
                stride_tiles = dst_stride_tiles;
@@ -1501,8 +1502,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
 
                        size += remap_info->size;
                } else {
-                       unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
-                                                                             remap_info->width);
+                       unsigned int dst_stride;
+
+                       /*
+                        * The hardware automagically calculates the CCS AUX surface
+                        * stride from the main surface stride so can't really remap a
+                        * smaller subset (unless we'd remap in whole AUX page units).
+                        */
+                       if (intel_fb_needs_pot_stride_remap(fb) &&
+                           intel_fb_is_ccs_modifier(fb->base.modifier))
+                               dst_stride = remap_info->src_stride;
+                       else
+                               dst_stride = remap_info->width;
+
+                       dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
 
                        assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
                        color_plane_info->mapping_stride = dst_stride *
index ac315f8e782017e04540e9e46e372066217f5e38..bfa456fa7d25c90144dda94733c1683940934b85 100644 (file)
@@ -1983,6 +1983,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        bool ycbcr_420_only;
        enum intel_output_format sink_format;
 
+       status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+       if (status != MODE_OK)
+               return status;
+
        if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
                clock *= 2;
 
index 2a4ca7e65775e5723077add76bab7bad1507752a..bcbdd1984fd9089577f208d4f17ddfc858a25b13 100644 (file)
@@ -389,11 +389,16 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
                      struct drm_display_mode *mode)
 {
        struct intel_connector *connector = to_intel_connector(_connector);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        const struct drm_display_mode *fixed_mode =
                intel_panel_fixed_mode(connector, mode);
        int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
        enum drm_mode_status status;
 
+       status = intel_cpu_transcoder_mode_valid(i915, mode);
+       if (status != MODE_OK)
+               return status;
+
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
 
index a636f42ceae555bd2c34ec8746128273f3e2c00d..a9ac7d45d1f3324c2b327c9c217c262cac7f5e4d 100644 (file)
@@ -1921,13 +1921,19 @@ static enum drm_mode_status
 intel_sdvo_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
        struct intel_sdvo_connector *intel_sdvo_connector =
                to_intel_sdvo_connector(connector);
-       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
        bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
+       int max_dotclk = i915->max_dotclk_freq;
+       enum drm_mode_status status;
        int clock = mode->clock;
 
+       status = intel_cpu_transcoder_mode_valid(i915, mode);
+       if (status != MODE_OK)
+               return status;
+
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
 
index 31a79fdfc8128485141b39accb07804f669fc7b6..2ee4f0d9585136e889732b59e7bd91e3498f7714 100644 (file)
@@ -958,8 +958,14 @@ static enum drm_mode_status
 intel_tv_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
-       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+       int max_dotclk = i915->max_dotclk_freq;
+       enum drm_mode_status status;
+
+       status = intel_cpu_transcoder_mode_valid(i915, mode);
+       if (status != MODE_OK)
+               return status;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
index 1e7c97243fcf558611b338ae50d81a33bc552954..8a934bada6245defe0e808ba968d5d24fffdea7c 100644 (file)
@@ -504,7 +504,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 {
        struct drm_plane *plane = NULL;
        struct intel_plane *intel_plane;
-       struct intel_plane_state *plane_state = NULL;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct drm_atomic_state *drm_state = crtc_state->uapi.state;
@@ -536,6 +535,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 
        /* walkthrough scaler_users bits and start assigning scalers */
        for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+               struct intel_plane_state *plane_state = NULL;
                int *scaler_id;
                const char *name;
                int idx, ret;
index 55da627a8b8d222911cd49b1ca18437db2ee4bf4..f488394d3108e417cfb59ddbe6a12935e927639b 100644 (file)
@@ -1541,9 +1541,25 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
        .destroy = intel_dsi_encoder_destroy,
 };
 
+static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
+                                              struct drm_display_mode *mode)
+{
+       struct drm_i915_private *i915 = to_i915(connector->dev);
+
+       if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+               enum drm_mode_status status;
+
+               status = intel_cpu_transcoder_mode_valid(i915, mode);
+               if (status != MODE_OK)
+                       return status;
+       }
+
+       return intel_dsi_mode_valid(connector, mode);
+}
+
 static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
        .get_modes = intel_dsi_get_modes,
-       .mode_valid = intel_dsi_mode_valid,
+       .mode_valid = vlv_dsi_mode_valid,
        .atomic_check = intel_digital_connector_atomic_check,
 };
 
index d5ed904f355d5addffa56446f4be412c7b1a385d..6801f8b95c53d1ed989f6bb7c72205b2d7cd236a 100644 (file)
@@ -1293,7 +1293,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
        if (msg)
                drm_notice(&engine->i915->drm,
                           "Resetting %s for %s\n", engine->name, msg);
-       atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
+       i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
 
        ret = intel_gt_reset_engine(engine);
        if (ret) {
index d37698bd6b91aeb1b39ffe729a1cbd76463d361e..17df71117cc70de27e057af8d1b0ac392db3aa5c 100644 (file)
@@ -5001,7 +5001,8 @@ static void capture_error_state(struct intel_guc *guc,
                        if (match) {
                                intel_engine_set_hung_context(e, ce);
                                engine_mask |= e->mask;
-                               atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
+                               i915_increase_reset_engine_count(&i915->gpu_error,
+                                                                e);
                        }
                }
 
@@ -5013,7 +5014,7 @@ static void capture_error_state(struct intel_guc *guc,
        } else {
                intel_engine_set_hung_context(ce->engine, ce);
                engine_mask = ce->engine->mask;
-               atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
+               i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
        }
 
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
index 9f5971f5e980145d940bb5c59701471071d393cd..48f6c00402c47a255d2cd0fc56fc5b5be3e80a38 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "display/intel_display_device.h"
 #include "gt/intel_engine.h"
+#include "gt/intel_engine_types.h"
 #include "gt/intel_gt_types.h"
 #include "gt/uc/intel_uc_fw.h"
 
@@ -232,7 +233,7 @@ struct i915_gpu_error {
        atomic_t reset_count;
 
        /** Number of times an engine has been reset */
-       atomic_t reset_engine_count[I915_NUM_ENGINES];
+       atomic_t reset_engine_count[MAX_ENGINE_CLASS];
 };
 
 struct drm_i915_error_state_buf {
@@ -255,7 +256,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
                                          const struct intel_engine_cs *engine)
 {
-       return atomic_read(&error->reset_engine_count[engine->uabi_class]);
+       return atomic_read(&error->reset_engine_count[engine->class]);
+}
+
+static inline void
+i915_increase_reset_engine_count(struct i915_gpu_error *error,
+                                const struct intel_engine_cs *engine)
+{
+       atomic_inc(&error->reset_engine_count[engine->class]);
 }
 
 #define CORE_DUMP_FLAG_NONE           0x0
index 4ddc6d902752af31341e03867e74053c2befbbcd..7d41874a49c589a9b14140559e28d8fde58c013e 100644 (file)
@@ -37,8 +37,9 @@ int igt_live_test_begin(struct igt_live_test *t,
                }
 
                for_each_engine(engine, gt, id)
-                       t->reset_engine[id] =
-                       i915_reset_engine_count(&i915->gpu_error, engine);
+                       t->reset_engine[i][id] =
+                               i915_reset_engine_count(&i915->gpu_error,
+                                                       engine);
        }
 
        t->reset_global = i915_reset_count(&i915->gpu_error);
@@ -66,14 +67,14 @@ int igt_live_test_end(struct igt_live_test *t)
 
        for_each_gt(gt, i915, i) {
                for_each_engine(engine, gt, id) {
-                       if (t->reset_engine[id] ==
+                       if (t->reset_engine[i][id] ==
                            i915_reset_engine_count(&i915->gpu_error, engine))
                                continue;
 
                        gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
                               t->func, t->name, engine->name,
                               i915_reset_engine_count(&i915->gpu_error, engine) -
-                              t->reset_engine[id]);
+                              t->reset_engine[i][id]);
                        return -EIO;
                }
        }
index 36ed42736c52169e1a4f407ce3de97fbebbde3c5..83e3ad430922fe798ec5098ce15dda4ff3ed4a94 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef IGT_LIVE_TEST_H
 #define IGT_LIVE_TEST_H
 
+#include "gt/intel_gt_defines.h" /* for I915_MAX_GT */
 #include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
 
 struct drm_i915_private;
@@ -17,7 +18,7 @@ struct igt_live_test {
        const char *name;
 
        unsigned int reset_global;
-       unsigned int reset_engine[I915_NUM_ENGINES];
+       unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES];
 };
 
 /*
index f81dc34c9c3ef47c42f82ce4704f144fff7b9296..c1bc8b00d9380ce8f320cb106ca17f461257036b 100644 (file)
@@ -203,7 +203,7 @@ void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
        /* Disable RELAY mode to pass the processed image */
        cfg_val &= ~GAMMA_RELAY_MODE;
 
-       cfg_val = readl(gamma->regs + DISP_GAMMA_CFG);
+       writel(cfg_val, gamma->regs + DISP_GAMMA_CFG);
 }
 
 void mtk_gamma_config(struct device *dev, unsigned int w,
index c277b9fae950206d818edcbaab963a83b77615d5..db43f9dff912e40a9dc411d8d7154f0c1261baca 100644 (file)
@@ -788,6 +788,7 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
                                                                          crtc);
        struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+       unsigned long flags;
 
        if (mtk_crtc->event && mtk_crtc_state->base.event)
                DRM_ERROR("new event while there is still a pending event\n");
@@ -795,7 +796,11 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
        if (mtk_crtc_state->base.event) {
                mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+               spin_lock_irqsave(&crtc->dev->event_lock, flags);
                mtk_crtc->event = mtk_crtc_state->base.event;
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
                mtk_crtc_state->base.event = NULL;
        }
 }
@@ -921,7 +926,14 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
 
 struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
 {
-       struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+       struct mtk_drm_crtc *mtk_crtc = NULL;
+
+       if (!crtc)
+               return NULL;
+
+       mtk_crtc = to_mtk_crtc(crtc);
+       if (!mtk_crtc)
+               return NULL;
 
        return mtk_crtc->dma_dev;
 }
index 2dfaa613276a6d7ef836b2bcbfc2ecac730466f1..2b0c35cacbc6da24b1ff20820e3afaa48256788f 100644 (file)
@@ -443,6 +443,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        struct mtk_drm_private *private = drm->dev_private;
        struct mtk_drm_private *priv_n;
        struct device *dma_dev = NULL;
+       struct drm_crtc *crtc;
        int ret, i, j;
 
        if (drm_firmware_drivers_only())
@@ -519,7 +520,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        }
 
        /* Use OVL device for all DMA memory allocations */
-       dma_dev = mtk_drm_crtc_dma_dev_get(drm_crtc_from_index(drm, 0));
+       crtc = drm_crtc_from_index(drm, 0);
+       if (crtc)
+               dma_dev = mtk_drm_crtc_dma_dev_get(crtc);
        if (!dma_dev) {
                ret = -ENODEV;
                dev_err(drm->dev, "Need at least one OVL device\n");
index 7840b6428afbe468b2ad51ac2f59c3fc2c77a3e1..118807e38422b6fb69babfb410de64aa09cfb41c 100644 (file)
@@ -2474,7 +2474,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
 
 err_cleanup:
        if (ret)
-               drm_atomic_helper_cleanup_planes(dev, state);
+               drm_atomic_helper_unprepare_planes(dev, state);
 done:
        pm_runtime_put_autosuspend(dev->dev);
        return ret;
index 5a2f273d95c8cf0247980a3e8076b0aa2ba6495e..0e32e71e123f3726dfc005d78b139c010f476a82 100644 (file)
  * DEALINGS IN THE SOFTWARE.
  */
 
+/**
+ * msgqTxHeader -- TX queue data structure
+ * @version: the version of this structure, must be 0
+ * @size: the size of the entire queue, including this header
+ * @msgSize: the padded size of queue element, 16 is minimum
+ * @msgCount: the number of elements in this queue
+ * @writePtr: head index of this queue
+ * @flags: 1 = swap the RX pointers
+ * @rxHdrOff: offset of readPtr in this structure
+ * @entryOff: offset of beginning of queue (msgqRxHeader), relative to
+ *          beginning of this structure
+ *
+ * The command queue is a queue of RPCs that are sent from the driver to the
+ * GSP.  The status queue is a queue of messages/responses from GSP-RM to the
+ * driver.  Although the driver allocates memory for both queues, the command
+ * queue is owned by the driver and the status queue is owned by GSP-RM.  In
+ * addition, the headers of the two queues must not share the same 4K page.
+ *
+ * Each queue is prefixed with this data structure.  The idea is that a queue
+ * and its header are written to only by their owner.  That is, only the
+ * driver writes to the command queue and command queue header, and only the
+ * GSP writes to the status (receive) queue and its header.
+ *
+ * This is enforced by the concept of "swapping" the RX pointers.  This is
+ * why the 'flags' field must be set to 1.  'rxHdrOff' is how the GSP knows
+ * where the tail pointer of its status queue is.
+ *
+ * When the driver writes a new RPC to the command queue, it updates writePtr.
+ * When it reads a new message from the status queue, it updates readPtr.  In
+ * this way, the GSP knows when a new command is in the queue (it polls
+ * writePtr) and it knows how much free space is in the status queue (it
+ * checks readPtr).  The driver never cares about how much free space is in
+ * the status queue.
+ *
+ * As usual, producers write to the head pointer, and consumers read from the
+ * tail pointer.  When head == tail, the queue is empty.
+ *
+ * So to summarize:
+ * command.writePtr = head of command queue
+ * command.readPtr = tail of status queue
+ * status.writePtr = head of status queue
+ * status.readPtr = tail of command queue
+ */
 typedef struct
 {
     NvU32 version;   // queue version
@@ -38,6 +81,14 @@ typedef struct
     NvU32 entryOff;  // Offset of entries from start of backing store.
 } msgqTxHeader;
 
+/**
+ * msgqRxHeader - RX queue data structure
+ * @readPtr: tail index of the other queue
+ *
+ * Although this is a separate struct, it could easily be merged into
+ * msgqTxHeader.  msgqTxHeader.rxHdrOff is simply the offset of readPtr
+ * from the beginning of msgqTxHeader.
+ */
 typedef struct
 {
     NvU32 readPtr; // message id of last message read
index e4279f1772a1b7c8ba53a31a0df941081f4849fc..377d0e0cef8481f33a1a066a87e687f8352ca6f7 100644 (file)
@@ -385,7 +385,7 @@ nvkm_uoutp_mthd_inherit(struct nvkm_outp *outp, void *argv, u32 argc)
 
        /* Ensure an ior is hooked up to this outp already */
        ior = outp->func->inherit(outp);
-       if (!ior)
+       if (!ior || !ior->arm.head)
                return -ENODEV;
 
        /* With iors, there will be a separate output path for each type of connector - and all of
index f6725a5f5bfb8ade0295872731cbfea19d6c8857..44fb86841c058a1ef9961936fd1f4637059dce89 100644 (file)
@@ -1377,6 +1377,13 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
        return 0;
 }
 
+/**
+ * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
+ *
+ * The GSP sequencer is a list of I/O commands that the GSP can send to
+ * the driver to perform for various purposes.  The most common usage is to
+ * perform a special mid-initialization reset.
+ */
 static int
 r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
 {
@@ -1716,6 +1723,23 @@ r535_gsp_libos_id8(const char *name)
        return id;
 }
 
+/**
+ * create_pte_array() - creates a PTE array of a physically contiguous buffer
+ * @ptes: pointer to the array
+ * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
+ * @size: size of the buffer
+ *
+ * GSP-RM sometimes expects physically-contiguous buffers to have an array of
+ * "PTEs" for each page in that buffer.  Although in theory that allows for
+ * the buffer to be physically discontiguous, GSP-RM does not currently
+ * support that.
+ *
+ * In this case, the PTEs are DMA addresses of each page of the buffer.  Since
+ * the buffer is physically contiguous, calculating all the PTEs is simple
+ * math.
+ *
+ * See memdescGetPhysAddrsForGpu()
+ */
 static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
 {
        unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
@@ -1725,6 +1749,35 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
                ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
 }
 
+/**
+ * r535_gsp_libos_init() -- create the libos arguments structure
+ *
+ * The logging buffers are byte queues that contain encoded printf-like
+ * messages from GSP-RM.  They need to be decoded by a special application
+ * that can parse the buffers.
+ *
+ * The 'loginit' buffer contains logs from early GSP-RM init and
+ * exception dumps.  The 'logrm' buffer contains the subsequent logs. Both are
+ * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
+ *
+ * The physical address map for the log buffer is stored in the buffer
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer.
+ *
+ * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
+ * configured for a larger page size (e.g. 64K pages), we need to give
+ * the GSP an array of 4K pages. Fortunately, since the buffer is
+ * physically contiguous, it's simple math to calculate the addresses.
+ *
+ * The buffers must be a multiple of GSP_PAGE_SIZE.  GSP-RM also currently
+ * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
+ * buffers to be physically contiguous anyway.
+ *
+ * The memory allocated for the arguments must remain until the GSP sends the
+ * init_done RPC.
+ *
+ * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
+ * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
+ */
 static int
 r535_gsp_libos_init(struct nvkm_gsp *gsp)
 {
@@ -1835,6 +1888,35 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
                nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
 }
 
+/**
+ * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
+ *
+ * The GSP uses a three-level page table, called radix3, to map the firmware.
+ * Each 64-bit "pointer" in the table is either the bus address of an entry in
+ * the next table (for levels 0 and 1) or the bus address of the next page in
+ * the GSP firmware image itself.
+ *
+ * Level 0 contains a single entry in one page that points to the first page
+ * of level 1.
+ *
+ * Level 1, since it's also only one page in size, contains up to 512 entries,
+ * one for each page in Level 2.
+ *
+ * Level 2 can be up to 512 pages in size, and each of those entries points to
+ * the next page of the firmware image.  Since there can be up to 512*512
+ * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
+ *
+ * Internally, the GSP has its window into system memory, but the base
+ * physical address of the aperture is not 0.  In fact, it varies depending on
+ * the GPU architecture.  Since the GPU is a PCI device, this window is
+ * accessed via DMA and is therefore bound by IOMMU translation.  The end
+ * result is that GSP-RM must translate the bus addresses in the table to GSP
+ * physical addresses.  All this should happen transparently.
+ *
+ * Returns 0 on success, or negative error code
+ *
+ * See kgspCreateRadix3_IMPL
+ */
 static int
 nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
                   struct nvkm_gsp_radix3 *rx3)
index 1b811d6972a16df8c4335552b2655578510d38f8..201022ae9214a0d508b8240f9b77b8ddb60a126c 100644 (file)
 #include <subdev/mmu.h>
 
 struct gk20a_instobj {
-       struct nvkm_memory memory;
+       struct nvkm_instobj base;
        struct nvkm_mm_node *mn;
        struct gk20a_instmem *imem;
 
        /* CPU mapping */
        u32 *vaddr;
 };
-#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)
 
 /*
  * Used for objects allocated using the DMA API
@@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
        list_del(&obj->vaddr_node);
        vunmap(obj->base.vaddr);
        obj->base.vaddr = NULL;
-       imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
+       imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);
        nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
                   imem->vaddr_max);
 }
@@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 {
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct nvkm_vmm_map map = {
-               .memory = &node->memory,
+               .memory = &node->base.memory,
                .offset = offset,
                .mem = node->mn,
        };
@@ -391,8 +391,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
                return -ENOMEM;
        *_node = &node->base;
 
-       nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
-       node->base.memory.ptrs = &gk20a_instobj_ptrs;
+       nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory);
+       node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
 
        node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
                                           &node->handle, GFP_KERNEL,
@@ -438,8 +438,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
        *_node = &node->base;
        node->dma_addrs = (void *)(node->pages + npages);
 
-       nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
-       node->base.memory.ptrs = &gk20a_instobj_ptrs;
+       nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory);
+       node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
 
        /* Allocate backing memory */
        for (i = 0; i < npages; i++) {
@@ -533,7 +533,7 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
        else
                ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
                                             align, &node);
-       *pmemory = node ? &node->memory : NULL;
+       *pmemory = node ? &node->base.memory : NULL;
        if (ret)
                return ret;
 
index e34bc60764010f8307a01ce4fe21808bb99e5e43..8379e72d77ab2b81dc16c11400b8e35641a6e906 100644 (file)
@@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 
        type |= 0x00000001; /* PAGE_ALL */
        if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
-               type |= 0x00000004; /* HUB_ONLY */
+               type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
 
        mutex_lock(&vmm->mmu->mutex);
 
index 6e3670508e3a5a48fea062f74c8d3d4abbe413c9..30919c872ac8d50fc638c468cd968e52bc61855e 100644 (file)
@@ -326,7 +326,7 @@ static const struct drm_display_mode ltk050h3148w_mode = {
 static const struct ltk050h3146w_desc ltk050h3148w_data = {
        .mode = &ltk050h3148w_mode,
        .init = ltk050h3148w_init_sequence,
-       .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+       .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
 };
 
 static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
index f59c82ea887013c6b4a333a01fc5c3e1444bbc04..2d30da38c2c3e44b833a9af53dd0cfd1b770d57a 100644 (file)
@@ -29,14 +29,20 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
 static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
                                   u32 flags)
 {
+       struct panfrost_device *ptdev = dev_get_drvdata(dev);
        struct dev_pm_opp *opp;
+       int err;
 
        opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp))
                return PTR_ERR(opp);
        dev_pm_opp_put(opp);
 
-       return dev_pm_opp_set_rate(dev, *freq);
+       err =  dev_pm_opp_set_rate(dev, *freq);
+       if (!err)
+               ptdev->pfdevfreq.current_frequency = *freq;
+
+       return err;
 }
 
 static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
@@ -58,7 +64,6 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
        spin_lock_irqsave(&pfdevfreq->lock, irqflags);
 
        panfrost_devfreq_update_utilization(pfdevfreq);
-       pfdevfreq->current_frequency = status->current_frequency;
 
        status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
                                                   pfdevfreq->idle_time));
@@ -164,6 +169,14 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 
        panfrost_devfreq_profile.initial_freq = cur_freq;
 
+       /*
+        * We could wait until panfrost_devfreq_target() to set this value, but
+        * since the simple_ondemand governor works asynchronously, there's a
+        * chance by the time someone opens the device's fdinfo file, current
+        * frequency hasn't been updated yet, so let's just do an early set.
+        */
+       pfdevfreq->current_frequency = cur_freq;
+
        /*
         * Set the recommend OPP this will enable and configure the regulator
         * if any and will avoid a switch off by regulator_late_cleanup()
index 0cf64456e29a4f74cc21e8fa3e57772e4eb506ee..d47b40b82b0bc4d189a2fa6772aaf3a91ee54c18 100644 (file)
@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        enum drm_gem_object_status res = 0;
 
-       if (bo->base.pages)
+       if (bo->base.base.import_attach || bo->base.pages)
                res |= DRM_GEM_OBJECT_RESIDENT;
 
        if (bo->base.madv == PANFROST_MADV_DONTNEED)
index 033d31dbf3b8cf7f093103439dc672faa1f548e0..ab81ceceb3371d5d4684b7539bdae0d48511a3a6 100644 (file)
@@ -20,6 +20,7 @@ if GREYBUS
 config GREYBUS_BEAGLEPLAY
        tristate "Greybus BeaglePlay driver"
        depends on SERIAL_DEV_BUS
+       select CRC_CCITT
        help
          Select this option if you have a BeaglePlay where CC1352
          co-processor acts as Greybus SVC.
index d9e9829b22001aa468c1fd837fc692ce807d313f..b9c7c0ed7bcc37e426f460d8292659fc3246b126 100644 (file)
@@ -347,6 +347,8 @@ static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
        { "Hailuck" },
        { "Jamesdonkey" },
        { "A3R" },
+       { "hfd.cn" },
+       { "WKB603" },
 };
 
 static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
index c6e4e0d1f2147e6221c10e607859354d2c1a32be..72046039d1be755784de32d22b12cdca76f0c9bf 100644 (file)
 
 #define USB_VENDOR_ID_LABTEC           0x1020
 #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
+#define USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE   0x8888
 
 #define USB_VENDOR_ID_LAVIEW           0x22D4
 #define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503
index 7c1b33be9d1343e90a75651daa2b9afc163c5656..149a3c74346b4f28b0f97fba19b99427e85cd89a 100644 (file)
@@ -692,7 +692,8 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
                 * so set middlebutton_state to 3
                 * to never apply workaround anymore
                 */
-               if (cptkbd_data->middlebutton_state == 1 &&
+               if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD &&
+                               cptkbd_data->middlebutton_state == 1 &&
                                usage->type == EV_REL &&
                                (usage->code == REL_X || usage->code == REL_Y)) {
                        cptkbd_data->middlebutton_state = 3;
index 138f154fecef343eceff5cf3ede94812615668d7..997c3a1adacab265bbb9b08ae98964f36ca60113 100644 (file)
@@ -325,28 +325,28 @@ struct joycon_imu_cal {
  * All the controller's button values are stored in a u32.
  * They can be accessed with bitwise ANDs.
  */
-static const u32 JC_BTN_Y      = BIT(0);
-static const u32 JC_BTN_X      = BIT(1);
-static const u32 JC_BTN_B      = BIT(2);
-static const u32 JC_BTN_A      = BIT(3);
-static const u32 JC_BTN_SR_R   = BIT(4);
-static const u32 JC_BTN_SL_R   = BIT(5);
-static const u32 JC_BTN_R      = BIT(6);
-static const u32 JC_BTN_ZR     = BIT(7);
-static const u32 JC_BTN_MINUS  = BIT(8);
-static const u32 JC_BTN_PLUS   = BIT(9);
-static const u32 JC_BTN_RSTICK = BIT(10);
-static const u32 JC_BTN_LSTICK = BIT(11);
-static const u32 JC_BTN_HOME   = BIT(12);
-static const u32 JC_BTN_CAP    = BIT(13); /* capture button */
-static const u32 JC_BTN_DOWN   = BIT(16);
-static const u32 JC_BTN_UP     = BIT(17);
-static const u32 JC_BTN_RIGHT  = BIT(18);
-static const u32 JC_BTN_LEFT   = BIT(19);
-static const u32 JC_BTN_SR_L   = BIT(20);
-static const u32 JC_BTN_SL_L   = BIT(21);
-static const u32 JC_BTN_L      = BIT(22);
-static const u32 JC_BTN_ZL     = BIT(23);
+#define JC_BTN_Y        BIT(0)
+#define JC_BTN_X        BIT(1)
+#define JC_BTN_B        BIT(2)
+#define JC_BTN_A        BIT(3)
+#define JC_BTN_SR_R     BIT(4)
+#define JC_BTN_SL_R     BIT(5)
+#define JC_BTN_R        BIT(6)
+#define JC_BTN_ZR       BIT(7)
+#define JC_BTN_MINUS    BIT(8)
+#define JC_BTN_PLUS     BIT(9)
+#define JC_BTN_RSTICK   BIT(10)
+#define JC_BTN_LSTICK   BIT(11)
+#define JC_BTN_HOME     BIT(12)
+#define JC_BTN_CAP      BIT(13) /* capture button */
+#define JC_BTN_DOWN     BIT(16)
+#define JC_BTN_UP       BIT(17)
+#define JC_BTN_RIGHT    BIT(18)
+#define JC_BTN_LEFT     BIT(19)
+#define JC_BTN_SR_L     BIT(20)
+#define JC_BTN_SL_L     BIT(21)
+#define JC_BTN_L        BIT(22)
+#define JC_BTN_ZL       BIT(23)
 
 enum joycon_msg_type {
        JOYCON_MSG_TYPE_NONE,
@@ -927,14 +927,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
  */
 static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
 {
-       int i;
+       int i, divz = 0;
 
        for (i = 0; i < 3; i++) {
                ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
                                                ctlr->accel_cal.offset[i];
                ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
                                                ctlr->gyro_cal.offset[i];
+
+               if (ctlr->imu_cal_accel_divisor[i] == 0) {
+                       ctlr->imu_cal_accel_divisor[i] = 1;
+                       divz++;
+               }
+
+               if (ctlr->imu_cal_gyro_divisor[i] == 0) {
+                       ctlr->imu_cal_gyro_divisor[i] = 1;
+                       divz++;
+               }
        }
+
+       if (divz)
+               hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz);
 }
 
 static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
@@ -1163,16 +1176,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
                    JC_IMU_SAMPLES_PER_DELTA_AVG) {
                        ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
                                                 ctlr->imu_delta_samples_count;
-                       /* don't ever want divide by zero shenanigans */
-                       if (ctlr->imu_avg_delta_ms == 0) {
-                               ctlr->imu_avg_delta_ms = 1;
-                               hid_warn(ctlr->hdev,
-                                        "calculated avg imu delta of 0\n");
-                       }
                        ctlr->imu_delta_samples_count = 0;
                        ctlr->imu_delta_samples_sum = 0;
                }
 
+               /* don't ever want divide by zero shenanigans */
+               if (ctlr->imu_avg_delta_ms == 0) {
+                       ctlr->imu_avg_delta_ms = 1;
+                       hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n");
+               }
+
                /* useful for debugging IMU sample rate */
                hid_dbg(ctlr->hdev,
                        "imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",
index ea472923fab07841ba6ee136da95a4a060690c4d..e0bbf0c6345d68ff1b348dde11421ac292fdae78 100644 (file)
@@ -121,6 +121,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_T609A), HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
index ac918a9ea8d34440df8ad9c6a009d60a4ffdbbf0..1b49243adb16a5606038d239be963c35c5b205a3 100644 (file)
@@ -40,6 +40,11 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
         * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
         */
        { "CHPN0001" },
+       /*
+        * The IDEA5002 ACPI device causes high interrupt usage and spurious
+        * wakeups from suspend.
+        */
+       { "IDEA5002" },
        { }
 };
 
index 8db740214ffda93090e832e7e6f2cbaa36d79ef1..703666b95bf49b1b6cc3ec349e067ee82bcf178d 100644 (file)
@@ -31,6 +31,7 @@
 #define POWER_METER_CAN_NOTIFY (1 << 3)
 #define POWER_METER_IS_BATTERY (1 << 8)
 #define UNKNOWN_HYSTERESIS     0xFFFFFFFF
+#define UNKNOWN_POWER          0xFFFFFFFF
 
 #define METER_NOTIFY_CONFIG    0x80
 #define METER_NOTIFY_TRIP      0x81
@@ -348,6 +349,9 @@ static ssize_t show_power(struct device *dev,
        update_meter(resource);
        mutex_unlock(&resource->lock);
 
+       if (resource->power == UNKNOWN_POWER)
+               return -ENODATA;
+
        return sprintf(buf, "%llu\n", resource->power * 1000);
 }
 
index 904890598c116b9de1bac58e8d7a68e87aa1a8fc..2c7c92272fe3998a138b864077a8117d43a9470d 100644 (file)
@@ -899,7 +899,23 @@ static struct hid_driver corsairpsu_driver = {
        .reset_resume   = corsairpsu_resume,
 #endif
 };
-module_hid_driver(corsairpsu_driver);
+
+static int __init corsair_init(void)
+{
+       return hid_register_driver(&corsairpsu_driver);
+}
+
+static void __exit corsair_exit(void)
+{
+       hid_unregister_driver(&corsairpsu_driver);
+}
+
+/*
+ * With module_init() the driver would load before the HID bus when
+ * built-in, so use late_initcall() instead.
+ */
+late_initcall(corsair_init);
+module_exit(corsair_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Wilken Gottwalt <wilken.gottwalt@posteo.net>");
index bd63c61129a94aa85eb46d83b60e89309b9ebe62..fc53fdcb2b6caad55b78b87e7289210aa8e46d47 100644 (file)
@@ -373,7 +373,7 @@ static int ltc2991_init(struct ltc2991_state *st)
                           LTC2991_REPEAT_ACQ_EN);
        if (ret)
                return dev_err_probe(st->dev, ret,
-                                    "Error: Failed to set contiuous mode.\n");
+                                    "Error: Failed to set continuous mode.\n");
 
        /* Enable all channels and trigger conversions */
        return regmap_write(st->regmap, LTC2991_CH_EN_TRIGGER,
index fd1fed1a797cd4ed2ae14b98dc09088cde292e4b..a1ce6514566912424c42a87af8e255da7fff29d1 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/i2c.h>
 #include <linux/mutex.h>
 #include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
 
 #define MAX31827_T_REG                 0x0
 #define MAX31827_CONFIGURATION_REG     0x2
index 428c77b5fce5a2a01967c40b47d01287994a554b..7caf387eb1449fd2148256d3711475a2b858c951 100644 (file)
@@ -161,13 +161,13 @@ static int kraken2_probe(struct hid_device *hdev,
        ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
        if (ret) {
                hid_err(hdev, "hid hw start failed with %d\n", ret);
-               goto fail_and_stop;
+               return ret;
        }
 
        ret = hid_hw_open(hdev);
        if (ret) {
                hid_err(hdev, "hid hw open failed with %d\n", ret);
-               goto fail_and_close;
+               goto fail_and_stop;
        }
 
        priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "kraken2",
index 5ca6278baff4fa70ff6856458dce44beeecc6339..89e8ed214ea4967620c20ad1c1a1e39f9d068400 100644 (file)
@@ -493,7 +493,7 @@ static void etm_event_start(struct perf_event *event, int flags)
                goto fail_end_stop;
 
        /* Finally enable the tracer */
-       if (coresight_enable_source(csdev, CS_MODE_PERF, event))
+       if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
                goto fail_disable_path;
 
        /*
@@ -587,7 +587,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
                return;
 
        /* stop tracer */
-       coresight_disable_source(csdev, event);
+       source_ops(csdev)->disable(csdev, event);
 
        /* tell the core */
        event->hw.state = PERF_HES_STOPPED;
index 77b0271ce6eb98adc74de3e4abbf65f398ace0a5..34aee59dd14739504f2a82588d049befc0d06970 100644 (file)
@@ -2224,7 +2224,7 @@ static void clear_etmdrvdata(void *info)
        per_cpu(delayed_probe, cpu) = NULL;
 }
 
-static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
 {
        bool had_delayed_probe;
        /*
@@ -2253,7 +2253,7 @@ static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
        }
 }
 
-static void __exit etm4_remove_amba(struct amba_device *adev)
+static void etm4_remove_amba(struct amba_device *adev)
 {
        struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
@@ -2261,7 +2261,7 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
                etm4_remove_dev(drvdata);
 }
 
-static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
+static int etm4_remove_platform_dev(struct platform_device *pdev)
 {
        struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
 
index e9a32a97fbee6912aa0a2a4852f634f7579b56c4..6e32d31a95fe0865f96d09c1c83fd2e272e2ac31 100644 (file)
@@ -99,7 +99,7 @@ static int smb_open(struct inode *inode, struct file *file)
                                        struct smb_drv_data, miscdev);
        int ret = 0;
 
-       mutex_lock(&drvdata->mutex);
+       spin_lock(&drvdata->spinlock);
 
        if (drvdata->reading) {
                ret = -EBUSY;
@@ -115,7 +115,7 @@ static int smb_open(struct inode *inode, struct file *file)
 
        drvdata->reading = true;
 out:
-       mutex_unlock(&drvdata->mutex);
+       spin_unlock(&drvdata->spinlock);
 
        return ret;
 }
@@ -132,10 +132,8 @@ static ssize_t smb_read(struct file *file, char __user *data, size_t len,
        if (!len)
                return 0;
 
-       mutex_lock(&drvdata->mutex);
-
        if (!sdb->data_size)
-               goto out;
+               return 0;
 
        to_copy = min(sdb->data_size, len);
 
@@ -145,20 +143,15 @@ static ssize_t smb_read(struct file *file, char __user *data, size_t len,
 
        if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
                dev_dbg(dev, "Failed to copy data to user\n");
-               to_copy = -EFAULT;
-               goto out;
+               return -EFAULT;
        }
 
        *ppos += to_copy;
-
        smb_update_read_ptr(drvdata, to_copy);
-
-       dev_dbg(dev, "%zu bytes copied\n", to_copy);
-out:
        if (!sdb->data_size)
                smb_reset_buffer(drvdata);
-       mutex_unlock(&drvdata->mutex);
 
+       dev_dbg(dev, "%zu bytes copied\n", to_copy);
        return to_copy;
 }
 
@@ -167,9 +160,9 @@ static int smb_release(struct inode *inode, struct file *file)
        struct smb_drv_data *drvdata = container_of(file->private_data,
                                        struct smb_drv_data, miscdev);
 
-       mutex_lock(&drvdata->mutex);
+       spin_lock(&drvdata->spinlock);
        drvdata->reading = false;
-       mutex_unlock(&drvdata->mutex);
+       spin_unlock(&drvdata->spinlock);
 
        return 0;
 }
@@ -262,7 +255,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
        struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret = 0;
 
-       mutex_lock(&drvdata->mutex);
+       spin_lock(&drvdata->spinlock);
 
        /* Do nothing, the trace data is reading by other interface now */
        if (drvdata->reading) {
@@ -294,7 +287,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
 
        dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
 out:
-       mutex_unlock(&drvdata->mutex);
+       spin_unlock(&drvdata->spinlock);
 
        return ret;
 }
@@ -304,7 +297,7 @@ static int smb_disable(struct coresight_device *csdev)
        struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret = 0;
 
-       mutex_lock(&drvdata->mutex);
+       spin_lock(&drvdata->spinlock);
 
        if (drvdata->reading) {
                ret = -EBUSY;
@@ -327,7 +320,7 @@ static int smb_disable(struct coresight_device *csdev)
 
        dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
 out:
-       mutex_unlock(&drvdata->mutex);
+       spin_unlock(&drvdata->spinlock);
 
        return ret;
 }
@@ -408,7 +401,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
        if (!buf)
                return 0;
 
-       mutex_lock(&drvdata->mutex);
+       spin_lock(&drvdata->spinlock);
 
        /* Don't do anything if another tracer is using this sink. */
        if (atomic_read(&csdev->refcnt) != 1)
@@ -432,7 +425,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
        if (!buf->snapshot && lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
 out:
-       mutex_unlock(&drvdata->mutex);
+       spin_unlock(&drvdata->spinlock);
 
        return data_size;
 }
@@ -484,7 +477,6 @@ static int smb_init_data_buffer(struct platform_device *pdev,
 static void smb_init_hw(struct smb_drv_data *drvdata)
 {
        smb_disable_hw(drvdata);
-       smb_reset_buffer(drvdata);
 
        writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
        writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
@@ -590,37 +582,33 @@ static int smb_probe(struct platform_device *pdev)
                return ret;
        }
 
-       mutex_init(&drvdata->mutex);
+       ret = smb_config_inport(dev, true);
+       if (ret)
+               return ret;
+
+       smb_reset_buffer(drvdata);
+       platform_set_drvdata(pdev, drvdata);
+       spin_lock_init(&drvdata->spinlock);
        drvdata->pid = -1;
 
        ret = smb_register_sink(pdev, drvdata);
        if (ret) {
+               smb_config_inport(&pdev->dev, false);
                dev_err(dev, "Failed to register SMB sink\n");
                return ret;
        }
 
-       ret = smb_config_inport(dev, true);
-       if (ret) {
-               smb_unregister_sink(drvdata);
-               return ret;
-       }
-
-       platform_set_drvdata(pdev, drvdata);
-
        return 0;
 }
 
 static int smb_remove(struct platform_device *pdev)
 {
        struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = smb_config_inport(&pdev->dev, false);
-       if (ret)
-               return ret;
 
        smb_unregister_sink(drvdata);
 
+       smb_config_inport(&pdev->dev, false);
+
        return 0;
 }
 
index d2e14e8d2c8a8c5004f106792fbd0de8241dde39..82a44c14a8829cd029c78979454e20efc8264b8f 100644 (file)
@@ -8,7 +8,7 @@
 #define _ULTRASOC_SMB_H
 
 #include <linux/miscdevice.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 /* Offset of SMB global registers */
 #define SMB_GLB_CFG_REG                0x00
@@ -105,7 +105,7 @@ struct smb_data_buffer {
  * @csdev:     Component vitals needed by the framework.
  * @sdb:       Data buffer for SMB.
  * @miscdev:   Specifics to handle "/dev/xyz.smb" entry.
- * @mutex:     Control data access to one at a time.
+ * @spinlock:  Control data access to one at a time.
  * @reading:   Synchronise user space access to SMB buffer.
  * @pid:       Process ID of the process being monitored by the
  *             session that is using this component.
@@ -116,7 +116,7 @@ struct smb_drv_data {
        struct coresight_device *csdev;
        struct smb_data_buffer sdb;
        struct miscdevice miscdev;
-       struct mutex mutex;
+       spinlock_t spinlock;
        bool reading;
        pid_t pid;
        enum cs_mode mode;
index 49ea1b0f7489035211c51b753b9de3f0166c70fd..a991ecb7515a34d70d966d6d0fdbac86ebf34888 100644 (file)
@@ -342,9 +342,9 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
                return ret;
 
        hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
-       ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
-                                       NULL, hisi_ptt_isr, 0,
-                                       DRV_NAME, hisi_ptt);
+       ret = devm_request_irq(&pdev->dev, hisi_ptt->trace_irq, hisi_ptt_isr,
+                               IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
+                               hisi_ptt);
        if (ret) {
                pci_err(pdev, "failed to request irq %d, ret = %d\n",
                        hisi_ptt->trace_irq, ret);
@@ -1000,6 +1000,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
                return -EOPNOTSUPP;
        }
 
+       if (event->attach_state & PERF_ATTACH_TASK)
+               return -EOPNOTSUPP;
+
        if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
                return -ENOENT;
 
@@ -1178,6 +1181,10 @@ static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
        hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
 }
 
+static void hisi_ptt_pmu_read(struct perf_event *event)
+{
+}
+
 static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
 {
        cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
@@ -1221,6 +1228,7 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
                .stop           = hisi_ptt_pmu_stop,
                .add            = hisi_ptt_pmu_add,
                .del            = hisi_ptt_pmu_del,
+               .read           = hisi_ptt_pmu_read,
        };
 
        reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
index f9ab671c8eda556f6ac9aa72ae787392beb72edd..07c571c7b69992e21a6ff863b6ce7bfc2fc2355b 100644 (file)
@@ -96,12 +96,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                return page_size;
        }
 
-       /* rdma_for_each_block() has a bug if the page size is smaller than the
-        * page size used to build the umem. For now prevent smaller page sizes
-        * from being returned.
-        */
-       pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
-
        /* The best result is the smallest page size that results in the minimum
         * number of required pages. Compute the largest page size that could
         * work based on VA address bits that don't change.
index 8a6da87f464b0d70e0c0e1adf45ed9e1a7c85a78..94a7f3b0c71cc2778b08af5c0afa06f7efad815b 100644 (file)
@@ -1971,7 +1971,7 @@ int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
        int rc;
        u32 netdev_speed;
        struct net_device *netdev;
-       struct ethtool_link_ksettings lksettings;
+       struct ethtool_link_ksettings lksettings = {};
 
        if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
                return -EINVAL;
index f79369c8360a5f615402287d2f8440469a24f3f6..a99c68247af0cc7d9e2274e20f40580bde90ed61 100644 (file)
@@ -71,7 +71,7 @@ static char version[] =
                BNXT_RE_DESC "\n";
 
 MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
-MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
+MODULE_DESCRIPTION(BNXT_RE_DESC);
 MODULE_LICENSE("Dual BSD/GPL");
 
 /* globals */
index 0cd2612a49870f1e831f5a6b088aff3260ce2cb6..2bca9560f32ddd02628eb655c1be261489c726c9 100644 (file)
@@ -4760,10 +4760,15 @@ static int check_cong_type(struct ib_qp *ibqp,
                cong_alg->wnd_mode_sel = WND_LIMIT;
                break;
        default:
-               ibdev_err(&hr_dev->ib_dev,
-                         "error type(%u) for congestion selection.\n",
-                         hr_dev->caps.cong_type);
-               return -EINVAL;
+               ibdev_warn(&hr_dev->ib_dev,
+                          "invalid type(%u) for congestion selection.\n",
+                          hr_dev->caps.cong_type);
+               hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+               cong_alg->alg_sel = CONG_DCQCN;
+               cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+               cong_alg->dip_vld = DIP_INVALID;
+               cong_alg->wnd_mode_sel = WND_LIMIT;
+               break;
        }
 
        return 0;
index 8fa7e4a18e737ae55243f0e160af0b2cbad2277c..bd4b2b89644442341226e6c5716f5ddb221ea1a1 100644 (file)
@@ -321,7 +321,11 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
                        break;
                case IRDMA_AE_QP_SUSPEND_COMPLETE:
                        if (iwqp->iwdev->vsi.tc_change_pending) {
-                               atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
+                               if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
+                                       wake_up(&iwqp->iwdev->suspend_wq);
+                       }
+                       if (iwqp->suspend_pending) {
+                               iwqp->suspend_pending = false;
                                wake_up(&iwqp->iwdev->suspend_wq);
                        }
                        break;
@@ -581,9 +585,6 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf)
        struct irdma_cqp *cqp = &rf->cqp;
        int status = 0;
 
-       if (rf->cqp_cmpl_wq)
-               destroy_workqueue(rf->cqp_cmpl_wq);
-
        status = irdma_sc_cqp_destroy(dev->cqp);
        if (status)
                ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
@@ -748,6 +749,9 @@ static void irdma_destroy_ccq(struct irdma_pci_f *rf)
        struct irdma_ccq *ccq = &rf->ccq;
        int status = 0;
 
+       if (rf->cqp_cmpl_wq)
+               destroy_workqueue(rf->cqp_cmpl_wq);
+
        if (!rf->reset)
                status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
        if (status)
@@ -1180,7 +1184,6 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
        int status;
        struct irdma_ceq_init_info info = {};
        struct irdma_sc_dev *dev = &rf->sc_dev;
-       u64 scratch;
        u32 ceq_size;
 
        info.ceq_id = ceq_id;
@@ -1201,14 +1204,13 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
        iwceq->sc_ceq.ceq_id = ceq_id;
        info.dev = dev;
        info.vsi = vsi;
-       scratch = (uintptr_t)&rf->cqp.sc_cqp;
        status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
        if (!status) {
                if (dev->ceq_valid)
                        status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
                                                   IRDMA_OP_CEQ_CREATE);
                else
-                       status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
+                       status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
        }
 
        if (status) {
index 9ac48b4dab413d4f4eef6a51ebe7b3fd13e97a6c..3f13200ff71bc03a3f5e7817af12390eded1bc3b 100644 (file)
@@ -48,7 +48,7 @@ static void irdma_prep_tc_change(struct irdma_device *iwdev)
        /* Wait for all qp's to suspend */
        wait_event_timeout(iwdev->suspend_wq,
                           !atomic_read(&iwdev->vsi.qp_suspend_reqs),
-                          IRDMA_EVENT_TIMEOUT);
+                          msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
        irdma_ws_reset(&iwdev->vsi);
 }
 
index d66d87bb8bc4d8935de8a2f7a498555b506e3ce0..b65bc2ea542f56b2e72bae1db491dd5969c620e2 100644 (file)
@@ -78,7 +78,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
 
 #define MAX_DPC_ITERATIONS     128
 
-#define IRDMA_EVENT_TIMEOUT            50000
+#define IRDMA_EVENT_TIMEOUT_MS         5000
 #define IRDMA_VCHNL_EVENT_TIMEOUT      100000
 #define IRDMA_RST_TIMEOUT_HZ           4
 
index 2138f0a2ff859ec20b55a46b87675d1d4fc30116..b5eb8d421988c1abd73cf4eb3a93adc6f2944089 100644 (file)
@@ -1157,6 +1157,21 @@ exit:
        return prio;
 }
 
+static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
+{
+       if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
+                               !iwqp->suspend_pending,
+                               msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
+               iwqp->suspend_pending = false;
+               ibdev_warn(&iwqp->iwdev->ibdev,
+                          "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
+                          iwqp->ibqp.qp_num, iwqp->last_aeq);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * irdma_modify_qp_roce - modify qp request
  * @ibqp: qp's pointer for modify
@@ -1420,17 +1435,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
                        info.next_iwarp_state = IRDMA_QP_STATE_SQD;
                        issue_modify_qp = 1;
+                       iwqp->suspend_pending = true;
                        break;
                case IB_QPS_SQE:
                case IB_QPS_ERR:
                case IB_QPS_RESET:
-                       if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
-                               spin_unlock_irqrestore(&iwqp->lock, flags);
-                               info.next_iwarp_state = IRDMA_QP_STATE_SQD;
-                               irdma_hw_modify_qp(iwdev, iwqp, &info, true);
-                               spin_lock_irqsave(&iwqp->lock, flags);
-                       }
-
                        if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
                                spin_unlock_irqrestore(&iwqp->lock, flags);
                                if (udata && udata->inlen) {
@@ -1467,6 +1476,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                        ctx_info->rem_endpoint_idx = udp_info->arp_idx;
                        if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
                                return -EINVAL;
+                       if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
+                               ret = irdma_wait_for_suspend(iwqp);
+                               if (ret)
+                                       return ret;
+                       }
                        spin_lock_irqsave(&iwqp->lock, flags);
                        if (iwqp->iwarp_state == info.curr_iwarp_state) {
                                iwqp->iwarp_state = info.next_iwarp_state;
@@ -2900,7 +2914,7 @@ static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
        iwmr->type = reg_type;
 
        pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
-               iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
+               iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
 
        iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
        if (unlikely(!iwmr->page_size)) {
@@ -2932,6 +2946,11 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
        int err;
        u8 lvl;
 
+       /* iWarp: Catch page not starting on OS page boundary */
+       if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
+           ib_umem_offset(iwmr->region))
+               return -EINVAL;
+
        total = req.sq_pages + req.rq_pages + 1;
        if (total > iwmr->page_cnt)
                return -EINVAL;
index c42ac22de00e9372f7db62fdc5b8b691fac99c45..cfa140b36395ae9f49a9b928baa4d5a1a0aaf336 100644 (file)
@@ -198,6 +198,7 @@ struct irdma_qp {
        u8 flush_issued : 1;
        u8 sig_all : 1;
        u8 pau_mode : 1;
+       u8 suspend_pending : 1;
        u8 rsvd : 1;
        u8 iwarp_state;
        u16 term_sq_flush_code;
index 07261523c554735fd1ef3cf950a0e5a399b9cbaa..7f3167ce2972246447f7009110279313e8ca9194 100644 (file)
@@ -384,7 +384,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
        struct rtrs_clt_path *clt_path;
        int err;
 
-       if (WARN_ON(!req->in_use))
+       if (!req->in_use)
                return;
        if (WARN_ON(!req->con))
                return;
@@ -1699,7 +1699,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
                clt_path->s.dev_ref++;
                max_send_wr = min_t(int, wr_limit,
                              /* QD * (REQ + RSP + FR REGS or INVS) + drain */
-                             clt_path->queue_depth * 3 + 1);
+                             clt_path->queue_depth * 4 + 1);
                max_recv_wr = min_t(int, wr_limit,
                              clt_path->queue_depth * 3 + 1);
                max_send_sge = 2;
@@ -2350,8 +2350,6 @@ static int init_conns(struct rtrs_clt_path *clt_path)
        if (err)
                goto destroy;
 
-       rtrs_start_hb(&clt_path->s);
-
        return 0;
 
 destroy:
@@ -2625,6 +2623,7 @@ static int init_path(struct rtrs_clt_path *clt_path)
                goto out;
        }
        rtrs_clt_path_up(clt_path);
+       rtrs_start_hb(&clt_path->s);
 out:
        mutex_unlock(&clt_path->init_mutex);
 
index 75e56604e4628622915340087665bc86e7db83eb..1d33efb8fb03be74be953c280a9df61fdbba6ac4 100644 (file)
@@ -65,8 +65,9 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
 {
        enum rtrs_srv_state old_state;
        bool changed = false;
+       unsigned long flags;
 
-       spin_lock_irq(&srv_path->state_lock);
+       spin_lock_irqsave(&srv_path->state_lock, flags);
        old_state = srv_path->state;
        switch (new_state) {
        case RTRS_SRV_CONNECTED:
@@ -87,7 +88,7 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
        }
        if (changed)
                srv_path->state = new_state;
-       spin_unlock_irq(&srv_path->state_lock);
+       spin_unlock_irqrestore(&srv_path->state_lock, flags);
 
        return changed;
 }
@@ -550,7 +551,10 @@ static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
                struct rtrs_srv_mr *srv_mr;
 
                srv_mr = &srv_path->mrs[i];
-               rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
+
+               if (always_invalidate)
+                       rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
+
                ib_dereg_mr(srv_mr->mr);
                ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
                                srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
@@ -709,20 +713,23 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
        WARN_ON(wc->opcode != IB_WC_SEND);
 }
 
-static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
+static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
 {
        struct rtrs_srv_sess *srv = srv_path->srv;
        struct rtrs_srv_ctx *ctx = srv->ctx;
-       int up;
+       int up, ret = 0;
 
        mutex_lock(&srv->paths_ev_mutex);
        up = ++srv->paths_up;
        if (up == 1)
-               ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+               ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
        mutex_unlock(&srv->paths_ev_mutex);
 
        /* Mark session as established */
-       srv_path->established = true;
+       if (!ret)
+               srv_path->established = true;
+
+       return ret;
 }
 
 static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
@@ -851,7 +858,12 @@ static int process_info_req(struct rtrs_srv_con *con,
                goto iu_free;
        kobject_get(&srv_path->kobj);
        get_device(&srv_path->srv->dev);
-       rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+       err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+       if (!err) {
+               rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
+               goto iu_free;
+       }
+
        rtrs_srv_start_hb(srv_path);
 
        /*
@@ -860,7 +872,11 @@ static int process_info_req(struct rtrs_srv_con *con,
         * all connections are successfully established.  Thus, simply notify
         * listener with a proper event if we are the first path.
         */
-       rtrs_srv_path_up(srv_path);
+       err = rtrs_srv_path_up(srv_path);
+       if (err) {
+               rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
+               goto iu_free;
+       }
 
        ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
                                      tx_iu->dma_addr,
@@ -1516,7 +1532,6 @@ static void rtrs_srv_close_work(struct work_struct *work)
 
        srv_path = container_of(work, typeof(*srv_path), close_work);
 
-       rtrs_srv_destroy_path_files(srv_path);
        rtrs_srv_stop_hb(srv_path);
 
        for (i = 0; i < srv_path->s.con_num; i++) {
@@ -1536,6 +1551,8 @@ static void rtrs_srv_close_work(struct work_struct *work)
        /* Wait for all completion */
        wait_for_completion(&srv_path->complete_done);
 
+       rtrs_srv_destroy_path_files(srv_path);
+
        /* Notify upper layer if we are the last path */
        rtrs_srv_path_down(srv_path);
 
index 59d3a07300d934484e11cf3a15f68543e11e168a..873630c111c1fc70a5297f101a67a83090f7f9ff 100644 (file)
@@ -571,7 +571,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
                        continue;
                destroy_hwpt = (*do_attach)(idev, hwpt);
                if (IS_ERR(destroy_hwpt)) {
-                       iommufd_put_object(&hwpt->obj);
+                       iommufd_put_object(idev->ictx, &hwpt->obj);
                        /*
                         * -EINVAL means the domain is incompatible with the
                         * device. Other error codes should propagate to
@@ -583,7 +583,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
                        goto out_unlock;
                }
                *pt_id = hwpt->obj.id;
-               iommufd_put_object(&hwpt->obj);
+               iommufd_put_object(idev->ictx, &hwpt->obj);
                goto out_unlock;
        }
 
@@ -652,7 +652,7 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
                destroy_hwpt = ERR_PTR(-EINVAL);
                goto out_put_pt_obj;
        }
-       iommufd_put_object(pt_obj);
+       iommufd_put_object(idev->ictx, pt_obj);
 
        /* This destruction has to be after we unlock everything */
        if (destroy_hwpt)
@@ -660,7 +660,7 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
        return 0;
 
 out_put_pt_obj:
-       iommufd_put_object(pt_obj);
+       iommufd_put_object(idev->ictx, pt_obj);
        return PTR_ERR(destroy_hwpt);
 }
 
@@ -792,7 +792,7 @@ static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
        if (IS_ERR(ioas))
                return PTR_ERR(ioas);
        rc = iommufd_access_change_ioas(access, ioas);
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(access->ictx, &ioas->obj);
        return rc;
 }
 
@@ -941,7 +941,7 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
 
                access->ops->unmap(access->data, iova, length);
 
-               iommufd_put_object(&access->obj);
+               iommufd_put_object(access->ictx, &access->obj);
                xa_lock(&ioas->iopt.access_list);
        }
        xa_unlock(&ioas->iopt.access_list);
@@ -1243,6 +1243,6 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
 out_free:
        kfree(data);
 out_put:
-       iommufd_put_object(&idev->obj);
+       iommufd_put_object(ucmd->ictx, &idev->obj);
        return rc;
 }
index 2abbeafdbd22d86019f665662f30fb367c40bd2e..cbb5df0a6c32f835b50535a84bde3f44bfb4d6db 100644 (file)
@@ -318,9 +318,9 @@ out_unlock:
        if (ioas)
                mutex_unlock(&ioas->mutex);
 out_put_pt:
-       iommufd_put_object(pt_obj);
+       iommufd_put_object(ucmd->ictx, pt_obj);
 out_put_idev:
-       iommufd_put_object(&idev->obj);
+       iommufd_put_object(ucmd->ictx, &idev->obj);
        return rc;
 }
 
@@ -345,7 +345,7 @@ int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
        rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
                                     enable);
 
-       iommufd_put_object(&hwpt_paging->common.obj);
+       iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
        return rc;
 }
 
@@ -368,6 +368,6 @@ int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
        rc = iopt_read_and_clear_dirty_data(
                &ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);
 
-       iommufd_put_object(&hwpt_paging->common.obj);
+       iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
        return rc;
 }
index d5624577f79f1b69940a4006b6bf092c04ea97a5..74224827654815fbe16ea0da18128ca470c15ffd 100644 (file)
@@ -105,7 +105,7 @@ int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
                rc = -EMSGSIZE;
 out_put:
        up_read(&ioas->iopt.iova_rwsem);
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ucmd->ictx, &ioas->obj);
        return rc;
 }
 
@@ -175,7 +175,7 @@ out_free:
                interval_tree_remove(node, &allowed_iova);
                kfree(container_of(node, struct iopt_allowed, node));
        }
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ucmd->ictx, &ioas->obj);
        return rc;
 }
 
@@ -228,7 +228,7 @@ int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
        cmd->iova = iova;
        rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 out_put:
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ucmd->ictx, &ioas->obj);
        return rc;
 }
 
@@ -258,7 +258,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
                return PTR_ERR(src_ioas);
        rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
                            &pages_list);
-       iommufd_put_object(&src_ioas->obj);
+       iommufd_put_object(ucmd->ictx, &src_ioas->obj);
        if (rc)
                return rc;
 
@@ -279,7 +279,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
        cmd->dst_iova = iova;
        rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 out_put_dst:
-       iommufd_put_object(&dst_ioas->obj);
+       iommufd_put_object(ucmd->ictx, &dst_ioas->obj);
 out_pages:
        iopt_free_pages_list(&pages_list);
        return rc;
@@ -315,7 +315,7 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
        rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 
 out_put:
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ucmd->ictx, &ioas->obj);
        return rc;
 }
 
@@ -393,6 +393,6 @@ int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
                rc = -EOPNOTSUPP;
        }
 
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ucmd->ictx, &ioas->obj);
        return rc;
 }
index a74cfefffbc6c5045c7b22063f66978f1f275e59..abae041e256f7ed1a0a6fcc68a48087098effc6b 100644 (file)
@@ -21,6 +21,7 @@ struct iommufd_ctx {
        struct file *file;
        struct xarray objects;
        struct xarray groups;
+       wait_queue_head_t destroy_wait;
 
        u8 account_mode;
        /* Compatibility with VFIO no iommu */
@@ -135,7 +136,7 @@ enum iommufd_object_type {
 
 /* Base struct for all objects with a userspace ID handle. */
 struct iommufd_object {
-       struct rw_semaphore destroy_rwsem;
+       refcount_t shortterm_users;
        refcount_t users;
        enum iommufd_object_type type;
        unsigned int id;
@@ -143,10 +144,15 @@ struct iommufd_object {
 
 static inline bool iommufd_lock_obj(struct iommufd_object *obj)
 {
-       if (!down_read_trylock(&obj->destroy_rwsem))
+       if (!refcount_inc_not_zero(&obj->users))
                return false;
-       if (!refcount_inc_not_zero(&obj->users)) {
-               up_read(&obj->destroy_rwsem);
+       if (!refcount_inc_not_zero(&obj->shortterm_users)) {
+               /*
+                * If the caller doesn't already have a ref on obj this must be
+                * called under the xa_lock. Otherwise the caller is holding a
+                * ref on users. Thus it cannot be one before this decrement.
+                */
+               refcount_dec(&obj->users);
                return false;
        }
        return true;
@@ -154,10 +160,16 @@ static inline bool iommufd_lock_obj(struct iommufd_object *obj)
 
 struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
                                          enum iommufd_object_type type);
-static inline void iommufd_put_object(struct iommufd_object *obj)
+static inline void iommufd_put_object(struct iommufd_ctx *ictx,
+                                     struct iommufd_object *obj)
 {
+       /*
+        * Users first, then shortterm so that REMOVE_WAIT_SHORTTERM never sees
+        * a spurious !0 users with a 0 shortterm_users.
+        */
        refcount_dec(&obj->users);
-       up_read(&obj->destroy_rwsem);
+       if (refcount_dec_and_test(&obj->shortterm_users))
+               wake_up_interruptible_all(&ictx->destroy_wait);
 }
 
 void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
@@ -165,17 +177,49 @@ void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
                                      struct iommufd_object *obj);
 void iommufd_object_finalize(struct iommufd_ctx *ictx,
                             struct iommufd_object *obj);
-void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
-                                  struct iommufd_object *obj, bool allow_fail);
+
+enum {
+       REMOVE_WAIT_SHORTTERM = 1,
+};
+int iommufd_object_remove(struct iommufd_ctx *ictx,
+                         struct iommufd_object *to_destroy, u32 id,
+                         unsigned int flags);
+
+/*
+ * The caller holds a users refcount and wants to destroy the object. At this
+ * point the caller has no shortterm_users reference and at least the xarray
+ * will be holding one.
+ */
 static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
                                               struct iommufd_object *obj)
 {
-       __iommufd_object_destroy_user(ictx, obj, false);
+       int ret;
+
+       ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT_SHORTTERM);
+
+       /*
+        * If there is a bug and we couldn't destroy the object then we did put
+        * back the caller's users refcount and will eventually try to free it
+        * again during close.
+        */
+       WARN_ON(ret);
 }
-static inline void iommufd_object_deref_user(struct iommufd_ctx *ictx,
-                                            struct iommufd_object *obj)
+
+/*
+ * The HWPT allocated by autodomains is used in possibly many devices and
+ * is automatically destroyed when its refcount reaches zero.
+ *
+ * If userspace uses the HWPT manually, even for a short term, then it will
+ * disrupt this refcounting and the auto-free in the kernel will not work.
+ * Userspace that tries to use the automatically allocated HWPT must be careful
+ * to ensure that it is consistently destroyed, eg by not racing accesses
+ * and by not attaching an automatic HWPT to a device manually.
+ */
+static inline void
+iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
+                                  struct iommufd_object *obj)
 {
-       __iommufd_object_destroy_user(ictx, obj, true);
+       iommufd_object_remove(ictx, obj, obj->id, 0);
 }
 
 struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
@@ -311,7 +355,7 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
                lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
 
                if (hwpt_paging->auto_domain) {
-                       iommufd_object_deref_user(ictx, &hwpt->obj);
+                       iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
                        return;
                }
        }
index 45b9d40773b13a4255c3d6114240fd7e2a469eef..c9091e46d208abeea14aea1c649a016c39a077ba 100644 (file)
@@ -33,7 +33,6 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
                                             size_t size,
                                             enum iommufd_object_type type)
 {
-       static struct lock_class_key obj_keys[IOMMUFD_OBJ_MAX];
        struct iommufd_object *obj;
        int rc;
 
@@ -41,15 +40,8 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
        if (!obj)
                return ERR_PTR(-ENOMEM);
        obj->type = type;
-       /*
-        * In most cases the destroy_rwsem is obtained with try so it doesn't
-        * interact with lockdep, however on destroy we have to sleep. This
-        * means if we have to destroy an object while holding a get on another
-        * object it triggers lockdep. Using one locking class per object type
-        * is a simple and reasonable way to avoid this.
-        */
-       __init_rwsem(&obj->destroy_rwsem, "iommufd_object::destroy_rwsem",
-                    &obj_keys[type]);
+       /* Starts out bias'd by 1 until it is removed from the xarray */
+       refcount_set(&obj->shortterm_users, 1);
        refcount_set(&obj->users, 1);
 
        /*
@@ -129,92 +121,113 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
        return obj;
 }
 
+static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
+                                            struct iommufd_object *to_destroy)
+{
+       if (refcount_dec_and_test(&to_destroy->shortterm_users))
+               return 0;
+
+       if (wait_event_timeout(ictx->destroy_wait,
+                               refcount_read(&to_destroy->shortterm_users) ==
+                                       0,
+                               msecs_to_jiffies(10000)))
+               return 0;
+
+       pr_crit("Time out waiting for iommufd object to become free\n");
+       refcount_inc(&to_destroy->shortterm_users);
+       return -EBUSY;
+}
+
 /*
  * Remove the given object id from the xarray if the only reference to the
- * object is held by the xarray. The caller must call ops destroy().
+ * object is held by the xarray.
  */
-static struct iommufd_object *iommufd_object_remove(struct iommufd_ctx *ictx,
-                                                   u32 id, bool extra_put)
+int iommufd_object_remove(struct iommufd_ctx *ictx,
+                         struct iommufd_object *to_destroy, u32 id,
+                         unsigned int flags)
 {
        struct iommufd_object *obj;
        XA_STATE(xas, &ictx->objects, id);
-
-       xa_lock(&ictx->objects);
-       obj = xas_load(&xas);
-       if (xa_is_zero(obj) || !obj) {
-               obj = ERR_PTR(-ENOENT);
-               goto out_xa;
-       }
+       bool zerod_shortterm = false;
+       int ret;
 
        /*
-        * If the caller is holding a ref on obj we put it here under the
-        * spinlock.
+        * The purpose of the shortterm_users is to ensure deterministic
+        * destruction of objects used by external drivers and destroyed by this
+        * function. Any temporary increment of the refcount must increment
+        * shortterm_users, such as during ioctl execution.
         */
-       if (extra_put)
+       if (flags & REMOVE_WAIT_SHORTTERM) {
+               ret = iommufd_object_dec_wait_shortterm(ictx, to_destroy);
+               if (ret) {
+                       /*
+                        * We have a bug. Put back the callers reference and
+                        * defer cleaning this object until close.
+                        */
+                       refcount_dec(&to_destroy->users);
+                       return ret;
+               }
+               zerod_shortterm = true;
+       }
+
+       xa_lock(&ictx->objects);
+       obj = xas_load(&xas);
+       if (to_destroy) {
+               /*
+                * If the caller is holding a ref on obj we put it here under
+                * the spinlock.
+                */
                refcount_dec(&obj->users);
 
+               if (WARN_ON(obj != to_destroy)) {
+                       ret = -ENOENT;
+                       goto err_xa;
+               }
+       } else if (xa_is_zero(obj) || !obj) {
+               ret = -ENOENT;
+               goto err_xa;
+       }
+
        if (!refcount_dec_if_one(&obj->users)) {
-               obj = ERR_PTR(-EBUSY);
-               goto out_xa;
+               ret = -EBUSY;
+               goto err_xa;
        }
 
        xas_store(&xas, NULL);
        if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
                ictx->vfio_ioas = NULL;
-
-out_xa:
        xa_unlock(&ictx->objects);
 
-       /* The returned object reference count is zero */
-       return obj;
-}
-
-/*
- * The caller holds a users refcount and wants to destroy the object. Returns
- * true if the object was destroyed. In all cases the caller no longer has a
- * reference on obj.
- */
-void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
-                                  struct iommufd_object *obj, bool allow_fail)
-{
-       struct iommufd_object *ret;
-
        /*
-        * The purpose of the destroy_rwsem is to ensure deterministic
-        * destruction of objects used by external drivers and destroyed by this
-        * function. Any temporary increment of the refcount must hold the read
-        * side of this, such as during ioctl execution.
-        */
-       down_write(&obj->destroy_rwsem);
-       ret = iommufd_object_remove(ictx, obj->id, true);
-       up_write(&obj->destroy_rwsem);
-
-       if (allow_fail && IS_ERR(ret))
-               return;
-
-       /*
-        * If there is a bug and we couldn't destroy the object then we did put
-        * back the caller's refcount and will eventually try to free it again
-        * during close.
+        * Since users is zero any positive users_shortterm must be racing
+        * iommufd_put_object(), or we have a bug.
         */
-       if (WARN_ON(IS_ERR(ret)))
-               return;
+       if (!zerod_shortterm) {
+               ret = iommufd_object_dec_wait_shortterm(ictx, obj);
+               if (WARN_ON(ret))
+                       return ret;
+       }
 
        iommufd_object_ops[obj->type].destroy(obj);
        kfree(obj);
+       return 0;
+
+err_xa:
+       if (zerod_shortterm) {
+               /* Restore the xarray owned reference */
+               refcount_set(&obj->shortterm_users, 1);
+       }
+       xa_unlock(&ictx->objects);
+
+       /* The returned object reference count is zero */
+       return ret;
 }
 
 static int iommufd_destroy(struct iommufd_ucmd *ucmd)
 {
        struct iommu_destroy *cmd = ucmd->cmd;
-       struct iommufd_object *obj;
 
-       obj = iommufd_object_remove(ucmd->ictx, cmd->id, false);
-       if (IS_ERR(obj))
-               return PTR_ERR(obj);
-       iommufd_object_ops[obj->type].destroy(obj);
-       kfree(obj);
-       return 0;
+       return iommufd_object_remove(ucmd->ictx, NULL, cmd->id, 0);
 }
 
 static int iommufd_fops_open(struct inode *inode, struct file *filp)
@@ -238,6 +251,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
        xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
        xa_init(&ictx->groups);
        ictx->file = filp;
+       init_waitqueue_head(&ictx->destroy_wait);
        filp->private_data = ictx;
        return 0;
 }
index 5d93434003d8ad666af55e212372e37b81c06895..022ef8f55088a6b1e7d452ad4260510cca5bb303 100644 (file)
@@ -86,7 +86,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
        if (IS_ERR(ioas))
                return;
        *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ucmd->ictx, &ioas->obj);
 }
 
 struct mock_iommu_domain {
@@ -500,7 +500,7 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
                return hwpt;
        if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
            hwpt->domain->ops != mock_ops.default_domain_ops) {
-               iommufd_put_object(&hwpt->obj);
+               iommufd_put_object(ucmd->ictx, &hwpt->obj);
                return ERR_PTR(-EINVAL);
        }
        *mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
@@ -518,7 +518,7 @@ get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
                return hwpt;
        if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
            hwpt->domain->ops != &domain_nested_ops) {
-               iommufd_put_object(&hwpt->obj);
+               iommufd_put_object(ucmd->ictx, &hwpt->obj);
                return ERR_PTR(-EINVAL);
        }
        *mock_nested = container_of(hwpt->domain,
@@ -681,7 +681,7 @@ static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
        rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 
 out_dev_obj:
-       iommufd_put_object(dev_obj);
+       iommufd_put_object(ucmd->ictx, dev_obj);
        return rc;
 }
 
@@ -699,7 +699,7 @@ static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
        down_write(&ioas->iopt.iova_rwsem);
        rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
        up_write(&ioas->iopt.iova_rwsem);
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ucmd->ictx, &ioas->obj);
        return rc;
 }
 
@@ -754,7 +754,7 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
        rc = 0;
 
 out_put:
-       iommufd_put_object(&hwpt->obj);
+       iommufd_put_object(ucmd->ictx, &hwpt->obj);
        return rc;
 }
 
@@ -1233,7 +1233,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
 out_free:
        kvfree(tmp);
 out_put:
-       iommufd_put_object(&hwpt->obj);
+       iommufd_put_object(ucmd->ictx, &hwpt->obj);
        return rc;
 }
 
index 538fbf76354d13d5b7f6478a82dd40e2daf67add..a3ad5f0b6c59dddc8fdd17723eb01ff8342c86ab 100644 (file)
@@ -41,7 +41,7 @@ int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
        if (IS_ERR(ioas))
                return PTR_ERR(ioas);
        *out_ioas_id = ioas->obj.id;
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ictx, &ioas->obj);
        return 0;
 }
 EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_get_id, IOMMUFD_VFIO);
@@ -98,7 +98,7 @@ int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
 
        if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj)) {
                ret = 0;
-               iommufd_put_object(&ictx->vfio_ioas->obj);
+               iommufd_put_object(ictx, &ictx->vfio_ioas->obj);
                goto out_abort;
        }
        ictx->vfio_ioas = ioas;
@@ -133,7 +133,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
                if (IS_ERR(ioas))
                        return PTR_ERR(ioas);
                cmd->ioas_id = ioas->obj.id;
-               iommufd_put_object(&ioas->obj);
+               iommufd_put_object(ucmd->ictx, &ioas->obj);
                return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 
        case IOMMU_VFIO_IOAS_SET:
@@ -143,7 +143,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
                xa_lock(&ucmd->ictx->objects);
                ucmd->ictx->vfio_ioas = ioas;
                xa_unlock(&ucmd->ictx->objects);
-               iommufd_put_object(&ioas->obj);
+               iommufd_put_object(ucmd->ictx, &ioas->obj);
                return 0;
 
        case IOMMU_VFIO_IOAS_CLEAR:
@@ -190,7 +190,7 @@ static int iommufd_vfio_map_dma(struct iommufd_ctx *ictx, unsigned int cmd,
        iova = map.iova;
        rc = iopt_map_user_pages(ictx, &ioas->iopt, &iova, u64_to_user_ptr(map.vaddr),
                                 map.size, iommu_prot, 0);
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ictx, &ioas->obj);
        return rc;
 }
 
@@ -249,7 +249,7 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd,
                rc = -EFAULT;
 
 err_put:
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ictx, &ioas->obj);
        return rc;
 }
 
@@ -272,7 +272,7 @@ static int iommufd_vfio_cc_iommu(struct iommufd_ctx *ictx)
        }
        mutex_unlock(&ioas->mutex);
 
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ictx, &ioas->obj);
        return rc;
 }
 
@@ -349,7 +349,7 @@ static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type)
         */
        if (type == VFIO_TYPE1_IOMMU)
                rc = iopt_disable_large_pages(&ioas->iopt);
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ictx, &ioas->obj);
        return rc;
 }
 
@@ -511,7 +511,7 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
 
 out_put:
        up_read(&ioas->iopt.iova_rwsem);
-       iommufd_put_object(&ioas->obj);
+       iommufd_put_object(ictx, &ioas->obj);
        return rc;
 }
 
index e358e77e4b38f9c6885fd01da7daff0dc7c46b5f..d76214fa9ad8645441e9c53814719ba0109f30ac 100644 (file)
@@ -226,6 +226,11 @@ static int set_device_name(struct led_netdev_data *trigger_data,
 
        cancel_delayed_work_sync(&trigger_data->work);
 
+       /*
+        * Take RTNL lock before trigger_data lock to prevent potential
+        * deadlock with netdev notifier registration.
+        */
+       rtnl_lock();
        mutex_lock(&trigger_data->lock);
 
        if (trigger_data->net_dev) {
@@ -245,16 +250,14 @@ static int set_device_name(struct led_netdev_data *trigger_data,
        trigger_data->carrier_link_up = false;
        trigger_data->link_speed = SPEED_UNKNOWN;
        trigger_data->duplex = DUPLEX_UNKNOWN;
-       if (trigger_data->net_dev != NULL) {
-               rtnl_lock();
+       if (trigger_data->net_dev)
                get_device_state(trigger_data);
-               rtnl_unlock();
-       }
 
        trigger_data->last_activity = 0;
 
        set_baseline_state(trigger_data);
        mutex_unlock(&trigger_data->lock);
+       rtnl_unlock();
 
        return 0;
 }
index c94373d64f2cd4cbbdb8d49591d23b8f129aa46c..b066abbffd10e08f6057156e55487cb172be8437 100644 (file)
@@ -490,7 +490,7 @@ int mddev_suspend(struct mddev *mddev, bool interruptible)
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
-void mddev_resume(struct mddev *mddev)
+static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
 {
        lockdep_assert_not_held(&mddev->reconfig_mutex);
 
@@ -507,12 +507,18 @@ void mddev_resume(struct mddev *mddev)
        percpu_ref_resurrect(&mddev->active_io);
        wake_up(&mddev->sb_wait);
 
-       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       if (recovery_needed)
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
        mutex_unlock(&mddev->suspend_mutex);
 }
+
+void mddev_resume(struct mddev *mddev)
+{
+       return __mddev_resume(mddev, true);
+}
 EXPORT_SYMBOL_GPL(mddev_resume);
 
 /*
@@ -4840,25 +4846,29 @@ action_show(struct mddev *mddev, char *page)
        return sprintf(page, "%s\n", type);
 }
 
-static void stop_sync_thread(struct mddev *mddev)
+/**
+ * stop_sync_thread() - wait for sync_thread to stop if it's running.
+ * @mddev:     the array.
+ * @locked:    if set, reconfig_mutex will still be held after this function
+ *             return; if not set, reconfig_mutex will be released after this
+ *             function return.
+ * @check_seq: if set, only wait for curent running sync_thread to stop, noted
+ *             that new sync_thread can still start.
+ */
+static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
 {
-       if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-               return;
+       int sync_seq;
 
-       if (mddev_lock(mddev))
-               return;
+       if (check_seq)
+               sync_seq = atomic_read(&mddev->sync_seq);
 
-       /*
-        * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
-        * held.
-        */
        if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-               mddev_unlock(mddev);
+               if (!locked)
+                       mddev_unlock(mddev);
                return;
        }
 
-       if (work_pending(&mddev->del_work))
-               flush_workqueue(md_misc_wq);
+       mddev_unlock(mddev);
 
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        /*
@@ -4866,21 +4876,28 @@ static void stop_sync_thread(struct mddev *mddev)
         * never happen
         */
        md_wakeup_thread_directly(mddev->sync_thread);
+       if (work_pending(&mddev->sync_work))
+               flush_work(&mddev->sync_work);
 
-       mddev_unlock(mddev);
+       wait_event(resync_wait,
+                  !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+                  (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+
+       if (locked)
+               mddev_lock_nointr(mddev);
 }
 
 static void idle_sync_thread(struct mddev *mddev)
 {
-       int sync_seq = atomic_read(&mddev->sync_seq);
-
        mutex_lock(&mddev->sync_mutex);
        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-       stop_sync_thread(mddev);
 
-       wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
-                       !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+       if (mddev_lock(mddev)) {
+               mutex_unlock(&mddev->sync_mutex);
+               return;
+       }
 
+       stop_sync_thread(mddev, false, true);
        mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -4888,11 +4905,13 @@ static void frozen_sync_thread(struct mddev *mddev)
 {
        mutex_lock(&mddev->sync_mutex);
        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-       stop_sync_thread(mddev);
 
-       wait_event(resync_wait, mddev->sync_thread == NULL &&
-                       !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+       if (mddev_lock(mddev)) {
+               mutex_unlock(&mddev->sync_mutex);
+               return;
+       }
 
+       stop_sync_thread(mddev, false, false);
        mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -6264,14 +6283,7 @@ static void md_clean(struct mddev *mddev)
 
 static void __md_stop_writes(struct mddev *mddev)
 {
-       set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-       if (work_pending(&mddev->del_work))
-               flush_workqueue(md_misc_wq);
-       if (mddev->sync_thread) {
-               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-               md_reap_sync_thread(mddev);
-       }
-
+       stop_sync_thread(mddev, true, false);
        del_timer_sync(&mddev->safemode_timer);
 
        if (mddev->pers && mddev->pers->quiesce) {
@@ -6355,25 +6367,16 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
        int err = 0;
        int did_freeze = 0;
 
+       if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+               return -EBUSY;
+
        if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
                did_freeze = 1;
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
-       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 
-       /*
-        * Thread might be blocked waiting for metadata update which will now
-        * never happen
-        */
-       md_wakeup_thread_directly(mddev->sync_thread);
-
-       if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
-               return -EBUSY;
-       mddev_unlock(mddev);
-       wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
-                                         &mddev->recovery));
+       stop_sync_thread(mddev, false, false);
        wait_event(mddev->sb_wait,
                   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
        mddev_lock_nointr(mddev);
@@ -6383,29 +6386,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
            mddev->sync_thread ||
            test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
                pr_warn("md: %s still in use.\n",mdname(mddev));
-               if (did_freeze) {
-                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-                       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                       md_wakeup_thread(mddev->thread);
-               }
                err = -EBUSY;
                goto out;
        }
+
        if (mddev->pers) {
                __md_stop_writes(mddev);
 
-               err  = -ENXIO;
-               if (mddev->ro == MD_RDONLY)
+               if (mddev->ro == MD_RDONLY) {
+                       err  = -ENXIO;
                        goto out;
+               }
+
                mddev->ro = MD_RDONLY;
                set_disk_ro(mddev->gendisk, 1);
+       }
+
+out:
+       if ((mddev->pers && !err) || did_freeze) {
                clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                sysfs_notify_dirent_safe(mddev->sysfs_state);
-               err = 0;
        }
-out:
+
        mutex_unlock(&mddev->open_mutex);
        return err;
 }
@@ -6426,20 +6430,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
-       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-
-       /*
-        * Thread might be blocked waiting for metadata update which will now
-        * never happen
-        */
-       md_wakeup_thread_directly(mddev->sync_thread);
 
-       mddev_unlock(mddev);
-       wait_event(resync_wait, (mddev->sync_thread == NULL &&
-                                !test_bit(MD_RECOVERY_RUNNING,
-                                          &mddev->recovery)));
-       mddev_lock_nointr(mddev);
+       stop_sync_thread(mddev, true, false);
 
        mutex_lock(&mddev->open_mutex);
        if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
@@ -9403,7 +9395,15 @@ static void md_start_sync(struct work_struct *ws)
                goto not_running;
        }
 
-       suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+       mddev_unlock(mddev);
+       /*
+        * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+        * not set it again. Otherwise, we may cause issue like this one:
+        *     https://bugzilla.kernel.org/show_bug.cgi?id=218200
+        * Therefore, use __mddev_resume(mddev, false).
+        */
+       if (suspend)
+               __mddev_resume(mddev, false);
        md_wakeup_thread(mddev->sync_thread);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
        md_new_event();
@@ -9415,7 +9415,15 @@ not_running:
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-       suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+       mddev_unlock(mddev);
+       /*
+        * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+        * not set it again. Otherwise, we may cause issue like this one:
+        *     https://bugzilla.kernel.org/show_bug.cgi?id=218200
+        * Therefore, use __mddev_resume(mddev, false).
+        */
+       if (suspend)
+               __mddev_resume(mddev, false);
 
        wake_up(&resync_wait);
        if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
index dc031d42f53bc678e775741318985db351d17c01..26e1e8a5e94191c8c01a4e37fa48e7d28fac99e1 100644 (file)
@@ -5892,11 +5892,11 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
        int dd_idx;
 
        for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
-               if (dd_idx == sh->pd_idx)
+               if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
                        continue;
 
                min_sector = min(min_sector, sh->dev[dd_idx].sector);
-               max_sector = min(max_sector, sh->dev[dd_idx].sector);
+               max_sector = max(max_sector, sh->dev[dd_idx].sector);
        }
 
        spin_lock_irq(&conf->device_lock);
index 9c8fc87938a7458fb5121c525304da9181fc0a7f..9d090fa07516f06a567c0308c995393e57239010 100644 (file)
@@ -2011,7 +2011,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
 
        mei_hdr = mei_msg_hdr_init(cb);
        if (IS_ERR(mei_hdr)) {
-               rets = -PTR_ERR(mei_hdr);
+               rets = PTR_ERR(mei_hdr);
                mei_hdr = NULL;
                goto err;
        }
@@ -2032,7 +2032,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
 
        hbuf_slots = mei_hbuf_empty_slots(dev);
        if (hbuf_slots < 0) {
-               rets = -EOVERFLOW;
+               buf_len = -EOVERFLOW;
                goto out;
        }
 
index f77d78fa50549e69f0a0873b87bbc4711899b356..787c6a27a4be60f9322c6aad9e1d7143da24e47e 100644 (file)
@@ -84,9 +84,10 @@ mei_pxp_send_message(struct device *dev, const void *message, size_t size, unsig
                                byte = ret;
                        break;
                }
+               return byte;
        }
 
-       return byte;
+       return 0;
 }
 
 /**
index 19e996a829c9db8008e51becadc9acbc5b6e0832..b54275389f8acf1cd11341288f3692ca9e1c9f5d 100644 (file)
@@ -186,6 +186,8 @@ do {                                                                        \
 #define ARC_IS_5MBIT    1   /* card default speed is 5MBit */
 #define ARC_CAN_10MBIT  2   /* card uses COM20022, supporting 10MBit,
                                 but default is 2.5MBit. */
+#define ARC_HAS_LED     4   /* card has software controlled LEDs */
+#define ARC_HAS_ROTARY  8   /* card has rotary encoder */
 
 /* information needed to define an encapsulation driver */
 struct ArcProto {
index c580acb8b1d34e75a87c079a6a17e3f395b62878..7b5c8bb02f11941f6210200c23ee2f74272d49a3 100644 (file)
@@ -213,12 +213,13 @@ static int com20020pci_probe(struct pci_dev *pdev,
                if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
                        lp->backplane = 1;
 
-               /* Get the dev_id from the PLX rotary coder */
-               if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
-                       dev_id_mask = 0x3;
-               dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
-
-               snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+               if (ci->flags & ARC_HAS_ROTARY) {
+                       /* Get the dev_id from the PLX rotary coder */
+                       if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+                               dev_id_mask = 0x3;
+                       dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
+                       snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+               }
 
                if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
                        pr_err("IO address %Xh is empty!\n", ioaddr);
@@ -230,6 +231,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
                        goto err_free_arcdev;
                }
 
+               ret = com20020_found(dev, IRQF_SHARED);
+               if (ret)
+                       goto err_free_arcdev;
+
                card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
                                    GFP_KERNEL);
                if (!card) {
@@ -239,41 +244,39 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
                card->index = i;
                card->pci_priv = priv;
-               card->tx_led.brightness_set = led_tx_set;
-               card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
-                                               GFP_KERNEL, "arc%d-%d-tx",
-                                               dev->dev_id, i);
-               card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
-                                               "pci:green:tx:%d-%d",
-                                               dev->dev_id, i);
-
-               card->tx_led.dev = &dev->dev;
-               card->recon_led.brightness_set = led_recon_set;
-               card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
-                                               GFP_KERNEL, "arc%d-%d-recon",
-                                               dev->dev_id, i);
-               card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
-                                               "pci:red:recon:%d-%d",
-                                               dev->dev_id, i);
-               card->recon_led.dev = &dev->dev;
-               card->dev = dev;
-
-               ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
-               if (ret)
-                       goto err_free_arcdev;
 
-               ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
-               if (ret)
-                       goto err_free_arcdev;
-
-               dev_set_drvdata(&dev->dev, card);
-
-               ret = com20020_found(dev, IRQF_SHARED);
-               if (ret)
-                       goto err_free_arcdev;
-
-               devm_arcnet_led_init(dev, dev->dev_id, i);
+               if (ci->flags & ARC_HAS_LED) {
+                       card->tx_led.brightness_set = led_tx_set;
+                       card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+                                                       GFP_KERNEL, "arc%d-%d-tx",
+                                                       dev->dev_id, i);
+                       card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+                                                       "pci:green:tx:%d-%d",
+                                                       dev->dev_id, i);
+
+                       card->tx_led.dev = &dev->dev;
+                       card->recon_led.brightness_set = led_recon_set;
+                       card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+                                                       GFP_KERNEL, "arc%d-%d-recon",
+                                                       dev->dev_id, i);
+                       card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+                                                       "pci:red:recon:%d-%d",
+                                                       dev->dev_id, i);
+                       card->recon_led.dev = &dev->dev;
+
+                       ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+                       if (ret)
+                               goto err_free_arcdev;
+
+                       ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+                       if (ret)
+                               goto err_free_arcdev;
+
+                       dev_set_drvdata(&dev->dev, card);
+                       devm_arcnet_led_init(dev, dev->dev_id, i);
+               }
 
+               card->dev = dev;
                list_add(&card->list, &priv->list_dev);
                continue;
 
@@ -329,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = {
 };
 
 static struct com20020_pci_card_info card_info_sohard = {
-       .name = "PLX-PCI",
+       .name = "SOHARD SH ARC-PCI",
        .devcount = 1,
        /* SOHARD needs PCI base addr 4 */
        .chan_map_tbl = {
@@ -364,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
                },
        },
        .rotary = 0x0,
-       .flags = ARC_CAN_10MBIT,
+       .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
 };
 
 static struct com20020_pci_card_info card_info_eae_ma1 = {
@@ -396,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
                },
        },
        .rotary = 0x0,
-       .flags = ARC_CAN_10MBIT,
+       .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
 };
 
 static struct com20020_pci_card_info card_info_eae_fb2 = {
@@ -421,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = {
                },
        },
        .rotary = 0x0,
-       .flags = ARC_CAN_10MBIT,
+       .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
 };
 
 static const struct pci_device_id com20020pci_id_table[] = {
index 3fed406fb46ae6361c5d1819470b750083e60ef6..ff4b39601c937b78df3090fc5742f87a194e15be 100644 (file)
@@ -2713,10 +2713,18 @@ static int ksz_connect_tag_protocol(struct dsa_switch *ds,
 {
        struct ksz_tagger_data *tagger_data;
 
-       tagger_data = ksz_tagger_data(ds);
-       tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
-
-       return 0;
+       switch (proto) {
+       case DSA_TAG_PROTO_KSZ8795:
+               return 0;
+       case DSA_TAG_PROTO_KSZ9893:
+       case DSA_TAG_PROTO_KSZ9477:
+       case DSA_TAG_PROTO_LAN937X:
+               tagger_data = ksz_tagger_data(ds);
+               tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
+               return 0;
+       default:
+               return -EPROTONOSUPPORT;
+       }
 }
 
 static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
index 9a8429f5d09cbf90bd4341614a273072068414ec..d758a6c1b226380f7b9335a73ce90fdc2e65ddce 100644 (file)
@@ -465,6 +465,7 @@ mv88e639x_pcs_select(struct mv88e6xxx_chip *chip, int port,
        case PHY_INTERFACE_MODE_10GBASER:
        case PHY_INTERFACE_MODE_XAUI:
        case PHY_INTERFACE_MODE_RXAUI:
+       case PHY_INTERFACE_MODE_USXGMII:
                return &mpcs->xg_pcs;
 
        default:
@@ -873,7 +874,8 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
        struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
        int err;
 
-       if (interface == PHY_INTERFACE_MODE_10GBASER) {
+       if (interface == PHY_INTERFACE_MODE_10GBASER ||
+           interface == PHY_INTERFACE_MODE_USXGMII) {
                err = mv88e6393x_erratum_5_2(mpcs);
                if (err)
                        return err;
@@ -886,12 +888,37 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
        return mv88e639x_xg_pcs_enable(mpcs);
 }
 
+static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+                                       struct phylink_link_state *state)
+{
+       struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+       u16 status, lp_status;
+       int err;
+
+       if (state->interface != PHY_INTERFACE_MODE_USXGMII)
+               return mv88e639x_xg_pcs_get_state(pcs, state);
+
+       state->link = false;
+
+       err = mv88e639x_read(mpcs, MV88E6390_USXGMII_PHY_STATUS, &status);
+       err = err ? : mv88e639x_read(mpcs, MV88E6390_USXGMII_LP_STATUS, &lp_status);
+       if (err) {
+               dev_err(mpcs->mdio.dev.parent,
+                       "can't read USXGMII status: %pe\n", ERR_PTR(err));
+               return;
+       }
+
+       state->link = !!(status & MDIO_USXGMII_LINK);
+       state->an_complete = state->link;
+       phylink_decode_usxgmii_word(state, lp_status);
+}
+
 static const struct phylink_pcs_ops mv88e6393x_xg_pcs_ops = {
        .pcs_enable = mv88e6393x_xg_pcs_enable,
        .pcs_disable = mv88e6393x_xg_pcs_disable,
        .pcs_pre_config = mv88e6393x_xg_pcs_pre_config,
        .pcs_post_config = mv88e6393x_xg_pcs_post_config,
-       .pcs_get_state = mv88e639x_xg_pcs_get_state,
+       .pcs_get_state = mv88e6393x_xg_pcs_get_state,
        .pcs_config = mv88e639x_xg_pcs_config,
 };
 
index 3d6f0a466a9ed48c32915899359ac103b254c1ab..f9f886289b970ab2b351fec3b4c589437e5c101e 100644 (file)
@@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
         * compare it to the stored version, just create the meta
         */
        if (io_sq->disable_meta_caching) {
-               if (unlikely(!ena_tx_ctx->meta_valid))
-                       return -EINVAL;
-
                *have_meta = true;
                return ena_com_create_meta(io_sq, ena_meta);
        }
index b5bca48148309993402f7ef0b215e94ba66676ed..c44c44e26ddfe74a93b7f1fb3c3ca90f978909e2 100644 (file)
@@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
                              struct ena_tx_buffer *tx_info);
 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
                                            int first_index, int count);
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+                                                 int first_index, int count);
 
 /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
 static void ena_increase_stat(u64 *statp, u64 cnt,
@@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
 
 static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
 {
+       u32 xdp_first_ring = adapter->xdp_first_ring;
+       u32 xdp_num_queues = adapter->xdp_num_queues;
        int rc = 0;
 
-       rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
-                                            adapter->xdp_num_queues);
+       rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
        if (rc)
                goto setup_err;
 
-       rc = ena_create_io_tx_queues_in_range(adapter,
-                                             adapter->xdp_first_ring,
-                                             adapter->xdp_num_queues);
+       rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
        if (rc)
                goto create_err;
 
        return 0;
 
 create_err:
-       ena_free_all_io_tx_resources(adapter);
+       ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
 setup_err:
        return rc;
 }
@@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
                if (unlikely(!skb))
                        return NULL;
 
-               /* sync this buffer for CPU use */
-               dma_sync_single_for_cpu(rx_ring->dev,
-                                       dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
-                                       len,
-                                       DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
                dma_sync_single_for_device(rx_ring->dev,
                                           dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
@@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
        buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
 
-       pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
-
        /* If XDP isn't loaded try to reuse part of the RX buffer */
        reuse_rx_buf_page = !is_xdp_loaded &&
                            ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
 
-       dma_sync_single_for_cpu(rx_ring->dev,
-                               pre_reuse_paddr + pkt_offset,
-                               len,
-                               DMA_FROM_DEVICE);
-
        if (!reuse_rx_buf_page)
                ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
 
@@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
        }
 }
 
-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
 {
        struct ena_rx_buffer *rx_info;
        int ret;
 
+       /* XDP multi-buffer packets not supported */
+       if (unlikely(num_descs > 1)) {
+               netdev_err_once(rx_ring->adapter->netdev,
+                               "xdp: dropped unsupported multi-buffer packets\n");
+               ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
+               return ENA_XDP_DROP;
+       }
+
        rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
        xdp_prepare_buff(xdp, page_address(rx_info->page),
                         rx_info->buf_offset,
                         rx_ring->ena_bufs[0].len, false);
-       /* If for some reason we received a bigger packet than
-        * we expect, then we simply drop it
-        */
-       if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
-               return ENA_XDP_DROP;
 
        ret = ena_xdp_execute(rx_ring, xdp);
 
@@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
        int xdp_flags = 0;
        int total_len = 0;
        int xdp_verdict;
+       u8 pkt_offset;
        int rc = 0;
        int i;
 
@@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 
                /* First descriptor might have an offset set by the device */
                rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-               rx_info->buf_offset += ena_rx_ctx.pkt_offset;
+               pkt_offset = ena_rx_ctx.pkt_offset;
+               rx_info->buf_offset += pkt_offset;
 
                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
                          rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
                          ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
 
+               dma_sync_single_for_cpu(rx_ring->dev,
+                                       dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+                                       rx_ring->ena_bufs[0].len,
+                                       DMA_FROM_DEVICE);
+
                if (ena_xdp_present_ring(rx_ring))
-                       xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+                       xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
 
                /* allocate skb and fill it */
                if (xdp_verdict == ENA_XDP_PASS)
@@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                                if (xdp_verdict & ENA_XDP_FORWARDED) {
                                        ena_unmap_rx_buff_attrs(rx_ring,
                                                                &rx_ring->rx_buffer_info[req_id],
-                                                               0);
+                                                               DMA_ATTR_SKIP_CPU_SYNC);
                                        rx_ring->rx_buffer_info[req_id].page = NULL;
                                }
                        }
index 80b44043e6c53f07a1c54b5100f19f7d0d8bee08..28c9b6f1a54f148d056c8106de3427c0fd479cce 100644 (file)
@@ -553,17 +553,17 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
 
 /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
  * @adapter: pointer to adapter struct
- * @skb: particular skb to send timestamp with
+ * @shhwtstamps: particular skb_shared_hwtstamps to save timestamp
  *
  * if the timestamp is valid, we convert it into the timecounter ns
  * value, then store that result into the hwtstamps structure which
  * is passed up the network stack
  */
-static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
+static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
                               u64 timestamp)
 {
        timestamp -= atomic_read(&aq_ptp->offset_ingress);
-       aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
+       aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
 }
 
 void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
@@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
               &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
 }
 
-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
                      unsigned int len)
 {
        struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
@@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
                                                   p, len, &timestamp);
 
        if (ret > 0)
-               aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
+               aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
 
        return ret;
 }
index 28ccb7ca2df9e7d5b71b92c8dce9bea3b21d3a5e..210b723f22072cdbe09ca89111916346a7316803 100644 (file)
@@ -67,7 +67,7 @@ int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
 /* Return either ring is belong to PTP or not*/
 bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
 
-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
                      unsigned int len);
 
 struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
@@ -143,7 +143,7 @@ static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
 }
 
 static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
-                                   struct sk_buff *skb, u8 *p,
+                                   struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
                                    unsigned int len)
 {
        return 0;
index 4de22eed099a8443fb8ac9fb88199baeae36bbb7..e1885c1eb100a1fa67c0588832a991718728562c 100644 (file)
@@ -647,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
                }
                if (is_ptp_ring)
                        buff->len -=
-                               aq_ptp_extract_ts(self->aq_nic, skb,
+                               aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
                                                  aq_buf_vaddr(&buff->rxdata),
                                                  buff->len);
 
@@ -742,6 +742,8 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
                struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
                bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
                struct aq_ring_buff_s *buff_ = NULL;
+               u16 ptp_hwtstamp_len = 0;
+               struct skb_shared_hwtstamps shhwtstamps;
                struct sk_buff *skb = NULL;
                unsigned int next_ = 0U;
                struct xdp_buff xdp;
@@ -810,11 +812,12 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
                hard_start = page_address(buff->rxdata.page) +
                             buff->rxdata.pg_off - rx_ring->page_offset;
 
-               if (is_ptp_ring)
-                       buff->len -=
-                               aq_ptp_extract_ts(rx_ring->aq_nic, skb,
-                                                 aq_buf_vaddr(&buff->rxdata),
-                                                 buff->len);
+               if (is_ptp_ring) {
+                       ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
+                                                            aq_buf_vaddr(&buff->rxdata),
+                                                            buff->len);
+                       buff->len -= ptp_hwtstamp_len;
+               }
 
                xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
                xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
@@ -834,6 +837,9 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
                if (IS_ERR(skb) || !skb)
                        continue;
 
+               if (ptp_hwtstamp_len > 0)
+                       *skb_hwtstamps(skb) = shhwtstamps;
+
                if (buff->is_vlan)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               buff->vlan_rx_tag);
@@ -932,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self)
                return;
 
        kfree(self->buff_ring);
+       self->buff_ring = NULL;
 
-       if (self->dx_ring)
+       if (self->dx_ring) {
                dma_free_coherent(aq_nic_get_dev(self->aq_nic),
                                  self->size * self->dx_size, self->dx_ring,
                                  self->dx_ring_pa);
+               self->dx_ring = NULL;
+       }
 }
 
 unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
index d0359b569afeb17e3ef1340bac5b3645636af237..579eebb6fc566364dc94c4b7c890d8f547e200d5 100644 (file)
@@ -1748,16 +1748,32 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
                             struct sk_buff *skb)
 {
+       skb_mark_for_recycle(skb);
+
        if (skb->dev != bp->dev) {
                /* this packet belongs to a vf-rep */
                bnxt_vf_rep_rx(bp, skb);
                return;
        }
        skb_record_rx_queue(skb, bnapi->index);
-       skb_mark_for_recycle(skb);
        napi_gro_receive(&bnapi->napi, skb);
 }
 
+static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
+                            struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
+{
+       u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+
+       if (BNXT_PTP_RX_TS_VALID(flags))
+               goto ts_valid;
+       if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
+               return false;
+
+ts_valid:
+       *cmpl_ts = ts;
+       return true;
+}
+
 /* returns the following:
  * 1       - 1 packet successfully received
  * 0       - successful TPA_START, packet not completed yet
@@ -1783,6 +1799,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        struct sk_buff *skb;
        struct xdp_buff xdp;
        u32 flags, misc;
+       u32 cmpl_ts;
        void *data;
        int rc = 0;
 
@@ -2005,10 +2022,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                }
        }
 
-       if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
-                    RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
+       if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
                if (bp->flags & BNXT_FLAG_CHIP_P5) {
-                       u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
                        u64 ns, ts;
 
                        if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
@@ -10731,10 +10746,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
        bnxt_free_mem(bp, irq_re_init);
 }
 
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 {
-       int rc = 0;
-
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                /* If we get here, it means firmware reset is in progress
                 * while we are trying to close.  We can safely proceed with
@@ -10749,15 +10762,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 
 #ifdef CONFIG_BNXT_SRIOV
        if (bp->sriov_cfg) {
+               int rc;
+
                rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
                                                      !bp->sriov_cfg,
                                                      BNXT_SRIOV_CFG_WAIT_TMO);
-               if (rc)
-                       netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+               if (!rc)
+                       netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
+               else if (rc < 0)
+                       netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
        }
 #endif
        __bnxt_close_nic(bp, irq_re_init, link_re_init);
-       return rc;
 }
 
 static int bnxt_close(struct net_device *dev)
@@ -13940,6 +13956,8 @@ static int bnxt_resume(struct device *device)
        if (rc)
                goto resume_exit;
 
+       bnxt_clear_reservations(bp, true);
+
        if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
                rc = -ENODEV;
                goto resume_exit;
index e702dbc3e6b131d1bf622be3ae781201a9f8e5f3..a7d7b09ea16202579794baad15cde8e4764f89e9 100644 (file)
@@ -161,7 +161,7 @@ struct rx_cmp {
        #define RX_CMP_FLAGS_ERROR                              (1 << 6)
        #define RX_CMP_FLAGS_PLACEMENT                          (7 << 7)
        #define RX_CMP_FLAGS_RSS_VALID                          (1 << 10)
-       #define RX_CMP_FLAGS_UNUSED                             (1 << 11)
+       #define RX_CMP_FLAGS_PKT_METADATA_PRESENT               (1 << 11)
         #define RX_CMP_FLAGS_ITYPES_SHIFT                       12
         #define RX_CMP_FLAGS_ITYPES_MASK                        0xf000
         #define RX_CMP_FLAGS_ITYPE_UNKNOWN                      (0 << 12)
@@ -188,6 +188,12 @@ struct rx_cmp {
        __le32 rx_cmp_rss_hash;
 };
 
+#define BNXT_PTP_RX_TS_VALID(flags)                            \
+       (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS)
+
+#define BNXT_ALL_RX_TS_VALID(flags)                            \
+       !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT)
+
 #define RX_CMP_HASH_VALID(rxcmp)                               \
        ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
 
@@ -2375,7 +2381,7 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_half_open_nic(struct bnxt *bp);
 void bnxt_half_close_nic(struct bnxt *bp);
 void bnxt_reenable_sriov(struct bnxt *bp);
-int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_close_nic(struct bnxt *, bool, bool);
 void bnxt_get_ring_err_stats(struct bnxt *bp,
                             struct bnxt_total_ring_err_stats *stats);
 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
index f302dac565996d11f7d6f1f35d8100fd6aad9896..89809f1b129c0e6c4a19394599003cabdb25caf5 100644 (file)
@@ -449,15 +449,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
                        return -ENODEV;
                }
                bnxt_ulp_stop(bp);
-               if (netif_running(bp->dev)) {
-                       rc = bnxt_close_nic(bp, true, true);
-                       if (rc) {
-                               NL_SET_ERR_MSG_MOD(extack, "Failed to close");
-                               dev_close(bp->dev);
-                               rtnl_unlock();
-                               break;
-                       }
-               }
+               if (netif_running(bp->dev))
+                       bnxt_close_nic(bp, true, true);
                bnxt_vf_reps_free(bp);
                rc = bnxt_hwrm_func_drv_unrgtr(bp);
                if (rc) {
index f3f384773ac038ada07b65a357bc411df2ffdfe8..5f67a7f94e7d1f3a72d037b94743c5531fc105d3 100644 (file)
@@ -165,9 +165,8 @@ static int bnxt_set_coalesce(struct net_device *dev,
 reset_coalesce:
        if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
                if (update_stats) {
-                       rc = bnxt_close_nic(bp, true, false);
-                       if (!rc)
-                               rc = bnxt_open_nic(bp, true, false);
+                       bnxt_close_nic(bp, true, false);
+                       rc = bnxt_open_nic(bp, true, false);
                } else {
                        rc = bnxt_hwrm_set_coal(bp);
                }
@@ -972,12 +971,7 @@ static int bnxt_set_channels(struct net_device *dev,
                         * before PF unload
                         */
                }
-               rc = bnxt_close_nic(bp, true, false);
-               if (rc) {
-                       netdev_err(bp->dev, "Set channel failure rc :%x\n",
-                                  rc);
-                       return rc;
-               }
+               bnxt_close_nic(bp, true, false);
        }
 
        if (sh) {
@@ -4042,12 +4036,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
                bnxt_run_fw_tests(bp, test_mask, &test_results);
        } else {
                bnxt_ulp_stop(bp);
-               rc = bnxt_close_nic(bp, true, false);
-               if (rc) {
-                       etest->flags |= ETH_TEST_FL_FAILED;
-                       bnxt_ulp_start(bp, rc);
-                       return;
-               }
+               bnxt_close_nic(bp, true, false);
                bnxt_run_fw_tests(bp, test_mask, &test_results);
 
                buf[BNXT_MACLPBK_TEST_IDX] = 1;
index f3886710e77873a83e8b8aad3eb2f3e2ba7d465e..6e3da3362bd6177765dd5c9ebd888c2c387d7338 100644 (file)
@@ -521,9 +521,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 
        if (netif_running(bp->dev)) {
                if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
-                       rc = bnxt_close_nic(bp, false, false);
-                       if (!rc)
-                               rc = bnxt_open_nic(bp, false, false);
+                       bnxt_close_nic(bp, false, false);
+                       rc = bnxt_open_nic(bp, false, false);
                } else {
                        bnxt_ptp_cfg_tstamp_filters(bp);
                }
index 38d89d80b4a9c7ed2470b6ce6ddb8a3557e9069e..273c9ba48f09a179ee175fbab10fde84f0738836 100644 (file)
@@ -2075,6 +2075,7 @@ destroy_flow_table:
        rhashtable_destroy(&tc_info->flow_table);
 free_tc_info:
        kfree(tc_info);
+       bp->tc_info = NULL;
        return rc;
 }
 
index 48b6191efa56c70cc82ba0a83bb5ee64e6bb3fe2..f52830dfb26a1e52f517e61e9108f6f9f80e13a4 100644 (file)
@@ -6474,6 +6474,14 @@ static void tg3_dump_state(struct tg3 *tp)
        int i;
        u32 *regs;
 
+       /* If it is a PCI error, all registers will be 0xffff,
+        * we don't dump them out, just report the error and return
+        */
+       if (tp->pdev->error_state != pci_channel_io_normal) {
+               netdev_err(tp->dev, "PCI channel ERROR!\n");
+               return;
+       }
+
        regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
        if (!regs)
                return;
@@ -11259,7 +11267,8 @@ static void tg3_reset_task(struct work_struct *work)
        rtnl_lock();
        tg3_full_lock(tp, 0);
 
-       if (tp->pcierr_recovery || !netif_running(tp->dev)) {
+       if (tp->pcierr_recovery || !netif_running(tp->dev) ||
+           tp->pdev->error_state != pci_channel_io_normal) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
                rtnl_unlock();
index 4798fb7fe35d14070acf82e25ae5d63a8d1bb3e5..b6a534a3e0b123007070ce4511d300bb20186951 100644 (file)
@@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
        err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                 filter_block->acl_id, acl_entry_cfg);
 
-       dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
+       dma_unmap_single(dev, acl_entry_cfg->key_iova,
+                        DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
                         DMA_TO_DEVICE);
        if (err) {
                dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
@@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
        err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                    block->acl_id, acl_entry_cfg);
 
-       dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
-                        DMA_TO_DEVICE);
+       dma_unmap_single(dev, acl_entry_cfg->key_iova,
+                        DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
        if (err) {
                dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
                kfree(cmd_buff);
index 97d3151076d534d51cde377c98348f30acf459f9..e01a246124ac69955476f20bd42a1e6ab9032871 100644 (file)
@@ -1998,9 +1998,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
        return notifier_from_errno(err);
 }
 
-static struct notifier_block dpaa2_switch_port_switchdev_nb;
-static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
-
 static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
                                         struct net_device *upper_dev,
                                         struct netlink_ext_ack *extack)
@@ -2043,9 +2040,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
                goto err_egress_flood;
 
        err = switchdev_bridge_port_offload(netdev, netdev, NULL,
-                                           &dpaa2_switch_port_switchdev_nb,
-                                           &dpaa2_switch_port_switchdev_blocking_nb,
-                                           false, extack);
+                                           NULL, NULL, false, extack);
        if (err)
                goto err_switchdev_offload;
 
@@ -2079,9 +2074,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
 
 static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
 {
-       switchdev_bridge_port_unoffload(netdev, NULL,
-                                       &dpaa2_switch_port_switchdev_nb,
-                                       &dpaa2_switch_port_switchdev_blocking_nb);
+       switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
 }
 
 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
index c3b7694a74851c1a9bb2e84ca7621b6bb72b40ba..e08c7b572497d386a8cc90a09ef4cf2caad18810 100644 (file)
@@ -3731,31 +3731,26 @@ static int fec_set_features(struct net_device *netdev,
        return 0;
 }
 
-static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
-{
-       struct vlan_ethhdr *vhdr;
-       unsigned short vlan_TCI = 0;
-
-       if (skb->protocol == htons(ETH_P_ALL)) {
-               vhdr = (struct vlan_ethhdr *)(skb->data);
-               vlan_TCI = ntohs(vhdr->h_vlan_TCI);
-       }
-
-       return vlan_TCI;
-}
-
 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
                                 struct net_device *sb_dev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       u16 vlan_tag;
+       u16 vlan_tag = 0;
 
        if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
                return netdev_pick_tx(ndev, skb, NULL);
 
-       vlan_tag = fec_enet_get_raw_vlan_tci(skb);
-       if (!vlan_tag)
+       /* VLAN is present in the payload.*/
+       if (eth_type_vlan(skb->protocol)) {
+               struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+
+               vlan_tag = ntohs(vhdr->h_vlan_TCI);
+       /*  VLAN is present in the skb but not yet pushed in the payload.*/
+       } else if (skb_vlan_tag_present(skb)) {
+               vlan_tag = skb->vlan_tci;
+       } else {
                return vlan_tag;
+       }
 
        return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
 }
index 928d934cb21a5af1a0fbdc144a12dbee5e4e1960..f75668c479351913e20f54c103125bbd345b8a47 100644 (file)
@@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
        }
 }
 
+static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
+{
+#define HNS_MAC_LINK_WAIT_TIME 5
+#define HNS_MAC_LINK_WAIT_CNT 40
+
+       u32 link_status = 0;
+       int i;
+
+       if (!mac_ctrl_drv->get_link_status)
+               return link_status;
+
+       for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
+               msleep(HNS_MAC_LINK_WAIT_TIME);
+               mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
+               if (!link_status)
+                       break;
+       }
+
+       return link_status;
+}
+
 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
 {
        struct mac_driver *mac_ctrl_drv;
@@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
                                                               &sfp_prsnt);
                if (!ret)
                        *link_status = *link_status && sfp_prsnt;
+
+               /* for FIBER port, it may have a fake link up.
+                * when the link status changes from down to up, we need to do
+                * anti-shake. the anti-shake time is base on tests.
+                * only FIBER port need to do this.
+                */
+               if (*link_status && !mac_cb->link)
+                       *link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
        }
 
        mac_cb->link = *link_status;
index 0900abf5c5086b9836c28f4a9f11d550b6ccaaa5..8a713eed446582f87916c71586a19542dec79252 100644 (file)
@@ -142,7 +142,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
 
 static void fill_desc(struct hnae_ring *ring, void *priv,
                      int size, dma_addr_t dma, int frag_end,
-                     int buf_num, enum hns_desc_type type, int mtu)
+                     int buf_num, enum hns_desc_type type, int mtu,
+                     bool is_gso)
 {
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -275,6 +276,15 @@ static int hns_nic_maybe_stop_tso(
        return 0;
 }
 
+static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
+                                   struct hnae_ring *ring)
+{
+       if (skb_is_gso(*out_skb))
+               return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
+       else
+               return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
+}
+
 static void fill_tso_desc(struct hnae_ring *ring, void *priv,
                          int size, dma_addr_t dma, int frag_end,
                          int buf_num, enum hns_desc_type type, int mtu)
@@ -300,6 +310,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
                                mtu);
 }
 
+static void fill_desc_v2(struct hnae_ring *ring, void *priv,
+                        int size, dma_addr_t dma, int frag_end,
+                        int buf_num, enum hns_desc_type type, int mtu,
+                        bool is_gso)
+{
+       if (is_gso)
+               fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
+                             mtu);
+       else
+               fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
+                            mtu);
+}
+
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
                                struct sk_buff *skb,
                                struct hns_nic_ring_data *ring_data)
@@ -313,6 +336,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
        int seg_num;
        dma_addr_t dma;
        int size, next_to_use;
+       bool is_gso;
        int i;
 
        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
@@ -339,8 +363,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
                ring->stats.sw_err_cnt++;
                goto out_err_tx_ok;
        }
+       is_gso = skb_is_gso(skb);
        priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
-                           buf_num, DESC_TYPE_SKB, ndev->mtu);
+                           buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);
 
        /* fill the fragments */
        for (i = 1; i < seg_num; i++) {
@@ -354,7 +379,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
                }
                priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
                                    seg_num - 1 == i ? 1 : 0, buf_num,
-                                   DESC_TYPE_PAGE, ndev->mtu);
+                                   DESC_TYPE_PAGE, ndev->mtu, is_gso);
        }
 
        /*complete translate all packets*/
@@ -1776,15 +1801,6 @@ static int hns_nic_set_features(struct net_device *netdev,
                        netdev_info(netdev, "enet v1 do not support tso!\n");
                break;
        default:
-               if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-                       priv->ops.fill_desc = fill_tso_desc;
-                       priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
-                       /* The chip only support 7*4096 */
-                       netif_set_tso_max_size(netdev, 7 * 4096);
-               } else {
-                       priv->ops.fill_desc = fill_v2_desc;
-                       priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
-               }
                break;
        }
        netdev->features = features;
@@ -2159,16 +2175,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
                priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
        } else {
                priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
-               if ((netdev->features & NETIF_F_TSO) ||
-                   (netdev->features & NETIF_F_TSO6)) {
-                       priv->ops.fill_desc = fill_tso_desc;
-                       priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
-                       /* This chip only support 7*4096 */
-                       netif_set_tso_max_size(netdev, 7 * 4096);
-               } else {
-                       priv->ops.fill_desc = fill_v2_desc;
-                       priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
-               }
+               priv->ops.fill_desc = fill_desc_v2;
+               priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
+               netif_set_tso_max_size(netdev, 7 * 4096);
                /* enable tso when init
                 * control tso on/off through TSE bit in bd
                 */
index ffa9d6573f54bcfebf50cf851ee6134289cd293b..3f3ee032f631c4ca371b27c3ddc766d66b5c3788 100644 (file)
@@ -44,7 +44,8 @@ struct hns_nic_ring_data {
 struct hns_nic_ops {
        void (*fill_desc)(struct hnae_ring *ring, void *priv,
                          int size, dma_addr_t dma, int frag_end,
-                         int buf_num, enum hns_desc_type type, int mtu);
+                         int buf_num, enum hns_desc_type type, int mtu,
+                         bool is_gso);
        int (*maybe_stop_tx)(struct sk_buff **out_skb,
                             int *bnum, struct hnae_ring *ring);
        void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
index f7a332e51524d1a28b2895cf16ce2c2e22639109..1ab8dbe2d8800d0ca3e158870dc8580a604c444c 100644 (file)
@@ -16224,7 +16224,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
               I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
        if (val < MAX_FRAME_SIZE_DEFAULT)
                dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
-                        i, val);
+                        pf->hw.port, val);
 
        /* Add a filter to drop all Flow control frames from any VSI from being
         * transmitted. By doing so we stop a malicious VF from sending out
index e7ab89dc883aa63927e15e1e1383ae271e38348f..63b45c61cc4aa3631a3e5715af64d60c9b48d326 100644 (file)
@@ -292,6 +292,7 @@ struct iavf_adapter {
 #define IAVF_FLAG_QUEUES_DISABLED              BIT(17)
 #define IAVF_FLAG_SETUP_NETDEV_FEATURES                BIT(18)
 #define IAVF_FLAG_REINIT_MSIX_NEEDED           BIT(20)
+#define IAVF_FLAG_FDIR_ENABLED                 BIT(21)
 /* duplicates for common code */
 #define IAVF_FLAG_DCB_ENABLED                  0
        /* flags for admin queue service task */
index 6f236d1a6444e83cd86abed8c51c1d744ed642f4..dc499fe7734ec9afee6c5794f568e7e6d189f721 100644 (file)
@@ -827,18 +827,10 @@ static int __iavf_set_coalesce(struct net_device *netdev,
        struct iavf_adapter *adapter = netdev_priv(netdev);
        int i;
 
-       if (ec->rx_coalesce_usecs == 0) {
-               if (ec->use_adaptive_rx_coalesce)
-                       netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
-       } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
-                  (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
+       if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
                netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
                return -EINVAL;
-       } else if (ec->tx_coalesce_usecs == 0) {
-               if (ec->use_adaptive_tx_coalesce)
-                       netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
-       } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
-                  (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
+       } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
                netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
                return -EINVAL;
        }
@@ -1069,7 +1061,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
        struct iavf_fdir_fltr *rule = NULL;
        int ret = 0;
 
-       if (!FDIR_FLTR_SUPPORT(adapter))
+       if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
                return -EOPNOTSUPP;
 
        spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1211,7 +1203,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
        unsigned int cnt = 0;
        int val = 0;
 
-       if (!FDIR_FLTR_SUPPORT(adapter))
+       if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
                return -EOPNOTSUPP;
 
        cmd->data = IAVF_MAX_FDIR_FILTERS;
@@ -1403,7 +1395,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
        int count = 50;
        int err;
 
-       if (!FDIR_FLTR_SUPPORT(adapter))
+       if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
                return -EOPNOTSUPP;
 
        if (fsp->flow_type & FLOW_MAC_EXT)
@@ -1444,12 +1436,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
        spin_lock_bh(&adapter->fdir_fltr_lock);
        iavf_fdir_list_add_fltr(adapter, fltr);
        adapter->fdir_active_fltr++;
-       fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
-       adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+       if (adapter->link_up) {
+               fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+               adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+       } else {
+               fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+       }
        spin_unlock_bh(&adapter->fdir_fltr_lock);
 
-       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
-
+       if (adapter->link_up)
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 ret:
        if (err && fltr)
                kfree(fltr);
@@ -1471,7 +1467,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
        struct iavf_fdir_fltr *fltr = NULL;
        int err = 0;
 
-       if (!FDIR_FLTR_SUPPORT(adapter))
+       if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
                return -EOPNOTSUPP;
 
        spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1480,6 +1476,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
                if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
                        fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
                        adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+               } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+                       list_del(&fltr->list);
+                       kfree(fltr);
+                       adapter->fdir_active_fltr--;
+                       fltr = NULL;
                } else {
                        err = -EBUSY;
                }
@@ -1788,7 +1789,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                ret = 0;
                break;
        case ETHTOOL_GRXCLSRLCNT:
-               if (!FDIR_FLTR_SUPPORT(adapter))
+               if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
                        break;
                spin_lock_bh(&adapter->fdir_fltr_lock);
                cmd->rule_cnt = adapter->fdir_active_fltr;
index 9eb9f73f6adf3a52b42d0751032adff5fad9bcc0..d31bd923ba8cbf2d86272f6955847cc92d2db5e6 100644 (file)
@@ -6,12 +6,25 @@
 
 struct iavf_adapter;
 
-/* State of Flow Director filter */
+/* State of Flow Director filter
+ *
+ * *_REQUEST states are used to mark filter to be sent to PF driver to perform
+ * an action (either add or delete filter). *_PENDING states are an indication
+ * that request was sent to PF and the driver is waiting for response.
+ *
+ * Both DELETE and DISABLE states are being used to delete a filter in PF.
+ * The difference is that after a successful response filter in DEL_PENDING
+ * state is being deleted from VF driver as well and filter in DIS_PENDING state
+ * is being changed to INACTIVE state.
+ */
 enum iavf_fdir_fltr_state_t {
        IAVF_FDIR_FLTR_ADD_REQUEST,     /* User requests to add filter */
        IAVF_FDIR_FLTR_ADD_PENDING,     /* Filter pending add by the PF */
        IAVF_FDIR_FLTR_DEL_REQUEST,     /* User requests to delete filter */
        IAVF_FDIR_FLTR_DEL_PENDING,     /* Filter pending delete by the PF */
+       IAVF_FDIR_FLTR_DIS_REQUEST,     /* Filter scheduled to be disabled */
+       IAVF_FDIR_FLTR_DIS_PENDING,     /* Filter pending disable by the PF */
+       IAVF_FDIR_FLTR_INACTIVE,        /* Filter inactive on link down */
        IAVF_FDIR_FLTR_ACTIVE,          /* Filter is active */
 };
 
index c862ebcd2e392ef0a086c4033fadfae9fc8ceb63..e8d5b889addcb4489eb00cd077c7165618a245b7 100644 (file)
@@ -276,27 +276,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
        kfree(mem->va);
 }
 
-/**
- * iavf_lock_timeout - try to lock mutex but give up after timeout
- * @lock: mutex that should be locked
- * @msecs: timeout in msecs
- *
- * Returns 0 on success, negative on failure
- **/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
-{
-       unsigned int wait, delay = 10;
-
-       for (wait = 0; wait < msecs; wait += delay) {
-               if (mutex_trylock(lock))
-                       return 0;
-
-               msleep(delay);
-       }
-
-       return -1;
-}
-
 /**
  * iavf_schedule_reset - Set the flags and schedule a reset event
  * @adapter: board private structure
@@ -1353,18 +1332,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
  **/
 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
 {
-       struct iavf_fdir_fltr *fdir, *fdirtmp;
+       struct iavf_fdir_fltr *fdir;
 
        /* remove all Flow Director filters */
        spin_lock_bh(&adapter->fdir_fltr_lock);
-       list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
-                                list) {
+       list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
                if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
-                       list_del(&fdir->list);
-                       kfree(fdir);
-                       adapter->fdir_active_fltr--;
-               } else {
-                       fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+                       /* Cancel a request, keep filter as inactive */
+                       fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+               } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+                        fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+                       /* Disable filters which are active or have a pending
+                        * request to PF to be added
+                        */
+                       fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
                }
        }
        spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -4112,6 +4093,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
        }
 }
 
+/**
+ * iavf_restore_fdir_filters
+ * @adapter: board private structure
+ *
+ * Restore existing FDIR filters when VF netdev comes back up.
+ **/
+static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
+{
+       struct iavf_fdir_fltr *f;
+
+       spin_lock_bh(&adapter->fdir_fltr_lock);
+       list_for_each_entry(f, &adapter->fdir_list_head, list) {
+               if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+                       /* Cancel a request, keep filter as active */
+                       f->state = IAVF_FDIR_FLTR_ACTIVE;
+               } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
+                          f->state == IAVF_FDIR_FLTR_INACTIVE) {
+                       /* Add filters which are inactive or have a pending
+                        * request to PF to be deleted
+                        */
+                       f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+                       adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+               }
+       }
+       spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
 /**
  * iavf_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -4179,8 +4187,9 @@ static int iavf_open(struct net_device *netdev)
 
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
-       /* Restore VLAN filters that were removed with IFF_DOWN */
+       /* Restore filters that were removed with IFF_DOWN */
        iavf_restore_filters(adapter);
+       iavf_restore_fdir_filters(adapter);
 
        iavf_configure(adapter);
 
@@ -4311,6 +4320,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
        return ret;
 }
 
+/**
+ * iavf_disable_fdir - disable Flow Director and clear existing filters
+ * @adapter: board private structure
+ **/
+static void iavf_disable_fdir(struct iavf_adapter *adapter)
+{
+       struct iavf_fdir_fltr *fdir, *fdirtmp;
+       bool del_filters = false;
+
+       adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
+
+       /* remove all Flow Director filters */
+       spin_lock_bh(&adapter->fdir_fltr_lock);
+       list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+                                list) {
+               if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+                   fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
+                       /* Delete filters not registered in PF */
+                       list_del(&fdir->list);
+                       kfree(fdir);
+                       adapter->fdir_active_fltr--;
+               } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+                          fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+                          fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+                       /* Filters registered in PF, schedule their deletion */
+                       fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+                       del_filters = true;
+               } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+                       /* Request to delete filter already sent to PF, change
+                        * state to DEL_PENDING to delete filter after PF's
+                        * response, not set as INACTIVE
+                        */
+                       fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+               }
+       }
+       spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+       if (del_filters) {
+               adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+       }
+}
+
 #define NETIF_VLAN_OFFLOAD_FEATURES    (NETIF_F_HW_VLAN_CTAG_RX | \
                                         NETIF_F_HW_VLAN_CTAG_TX | \
                                         NETIF_F_HW_VLAN_STAG_RX | \
@@ -4336,6 +4388,13 @@ static int iavf_set_features(struct net_device *netdev,
            ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
                iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
 
+       if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
+               if (features & NETIF_F_NTUPLE)
+                       adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+               else
+                       iavf_disable_fdir(adapter);
+       }
+
        return 0;
 }
 
@@ -4685,6 +4744,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
 
        features = iavf_fix_netdev_vlan_features(adapter, features);
 
+       if (!FDIR_FLTR_SUPPORT(adapter))
+               features &= ~NETIF_F_NTUPLE;
+
        return iavf_fix_strip_features(adapter, features);
 }
 
@@ -4802,6 +4864,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
                netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
+       if (FDIR_FLTR_SUPPORT(adapter)) {
+               netdev->hw_features |= NETIF_F_NTUPLE;
+               netdev->features |= NETIF_F_NTUPLE;
+               adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+       }
+
        netdev->priv_flags |= IFF_UNICAST_FLT;
 
        /* Do not turn on offloads when they are requested to be turned off.
@@ -4825,34 +4893,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
        return 0;
 }
 
-/**
- * iavf_shutdown - Shutdown the device in preparation for a reboot
- * @pdev: pci device structure
- **/
-static void iavf_shutdown(struct pci_dev *pdev)
-{
-       struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
-       struct net_device *netdev = adapter->netdev;
-
-       netif_device_detach(netdev);
-
-       if (netif_running(netdev))
-               iavf_close(netdev);
-
-       if (iavf_lock_timeout(&adapter->crit_lock, 5000))
-               dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
-       /* Prevent the watchdog from running. */
-       iavf_change_state(adapter, __IAVF_REMOVE);
-       adapter->aq_required = 0;
-       mutex_unlock(&adapter->crit_lock);
-
-#ifdef CONFIG_PM
-       pci_save_state(pdev);
-
-#endif
-       pci_disable_device(pdev);
-}
-
 /**
  * iavf_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -5063,16 +5103,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
  **/
 static void iavf_remove(struct pci_dev *pdev)
 {
-       struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
        struct iavf_fdir_fltr *fdir, *fdirtmp;
        struct iavf_vlan_filter *vlf, *vlftmp;
        struct iavf_cloud_filter *cf, *cftmp;
        struct iavf_adv_rss *rss, *rsstmp;
        struct iavf_mac_filter *f, *ftmp;
+       struct iavf_adapter *adapter;
        struct net_device *netdev;
        struct iavf_hw *hw;
 
-       netdev = adapter->netdev;
+       /* Don't proceed with remove if netdev is already freed */
+       netdev = pci_get_drvdata(pdev);
+       if (!netdev)
+               return;
+
+       adapter = iavf_pdev_to_adapter(pdev);
        hw = &adapter->hw;
 
        if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
@@ -5184,11 +5229,25 @@ static void iavf_remove(struct pci_dev *pdev)
 
        destroy_workqueue(adapter->wq);
 
+       pci_set_drvdata(pdev, NULL);
+
        free_netdev(netdev);
 
        pci_disable_device(pdev);
 }
 
+/**
+ * iavf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void iavf_shutdown(struct pci_dev *pdev)
+{
+       iavf_remove(pdev);
+
+       if (system_state == SYSTEM_POWER_OFF)
+               pci_set_power_state(pdev, PCI_D3hot);
+}
+
 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
 
 static struct pci_driver iavf_driver = {
index 7e6ee32d19b696dcd04cbbbc7e72526ad9adf1f0..10ba36602c0c14c72f61d4a27776e3f98469065f 100644 (file)
@@ -15,7 +15,6 @@
  */
 #define IAVF_ITR_DYNAMIC       0x8000  /* use top bit as a flag */
 #define IAVF_ITR_MASK          0x1FFE  /* mask for ITR register value */
-#define IAVF_MIN_ITR                2  /* reg uses 2 usec resolution */
 #define IAVF_ITR_100K              10  /* all values below must be even */
 #define IAVF_ITR_50K               20
 #define IAVF_ITR_20K               50
index 64c4443dbef9caca468d919c4cf5b1a4c6dfa7ca..2d9366be0ec506c9b69c3b25d69044a4e47ab03b 100644 (file)
@@ -1735,8 +1735,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
  **/
 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
 {
+       struct virtchnl_fdir_del f = {};
        struct iavf_fdir_fltr *fdir;
-       struct virtchnl_fdir_del f;
        bool process_fltr = false;
        int len;
 
@@ -1753,11 +1753,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
        list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
                if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
                        process_fltr = true;
-                       memset(&f, 0, len);
                        f.vsi_id = fdir->vc_add_msg.vsi_id;
                        f.flow_id = fdir->flow_id;
                        fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
                        break;
+               } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+                       process_fltr = true;
+                       f.vsi_id = fdir->vc_add_msg.vsi_id;
+                       f.flow_id = fdir->flow_id;
+                       fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
+                       break;
                }
        }
        spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1901,6 +1906,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
                netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 }
 
+/**
+ * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
+ * @adapter: private adapter structure
+ *
+ * Called after a reset to re-add all FDIR filters and delete some of them
+ * if they were pending to be deleted.
+ */
+static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
+{
+       struct iavf_fdir_fltr *f, *ftmp;
+       bool add_filters = false;
+
+       spin_lock_bh(&adapter->fdir_fltr_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
+               if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+                   f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+                   f->state == IAVF_FDIR_FLTR_ACTIVE) {
+                       /* All filters and requests have been removed in PF,
+                        * restore them
+                        */
+                       f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+                       add_filters = true;
+               } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+                          f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+                       /* Link down state, leave filters as inactive */
+                       f->state = IAVF_FDIR_FLTR_INACTIVE;
+               } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
+                          f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+                       /* Delete filters that were pending to be deleted, the
+                        * list on PF is already cleared after a reset
+                        */
+                       list_del(&f->list);
+                       kfree(f);
+                       adapter->fdir_active_fltr--;
+               }
+       }
+       spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+       if (add_filters)
+               adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+}
+
 /**
  * iavf_virtchnl_completion
  * @adapter: adapter structure
@@ -2078,7 +2125,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                        spin_lock_bh(&adapter->fdir_fltr_lock);
                        list_for_each_entry(fdir, &adapter->fdir_list_head,
                                            list) {
-                               if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+                               if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
+                                   fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
                                        fdir->state = IAVF_FDIR_FLTR_ACTIVE;
                                        dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
                                                 iavf_stat_str(&adapter->hw,
@@ -2214,6 +2262,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
+               iavf_activate_fdir_filters(adapter);
+
                iavf_parse_vf_resource_msg(adapter);
 
                /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
@@ -2390,7 +2440,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
                                         list) {
                        if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
-                               if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+                               if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+                                   del_fltr->status ==
+                                   VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
                                        dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
                                                 fdir->loc);
                                        list_del(&fdir->list);
@@ -2402,6 +2454,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                                                 del_fltr->status);
                                        iavf_print_fdir_fltr(adapter, fdir);
                                }
+                       } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+                               if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+                                   del_fltr->status ==
+                                   VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+                                       fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+                               } else {
+                                       fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+                                       dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
+                                                del_fltr->status);
+                                       iavf_print_fdir_fltr(adapter, fdir);
+                               }
                        }
                }
                spin_unlock_bh(&adapter->fdir_fltr_lock);
index 2a5e6616cc0a794afd8d46dd63fb5d98a85f52b7..e1494f24f661d3c2c791846b5bdc2a1f2af8ff03 100644 (file)
@@ -374,16 +374,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
  */
 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
 {
-       struct ice_pf *pf;
-
        if (!vf || !q_vector)
                return -EINVAL;
 
-       pf = vf->pf;
-
        /* always add one to account for the OICR being the first MSIX */
-       return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
-               q_vector->v_idx + 1;
+       return vf->first_vector_idx + q_vector->v_idx + 1;
 }
 
 /**
index d7b10dc67f0352a2caca63eb79742925bbea13e7..80dc4bcdd3a41cd0baa0e3e29f0fddb29053341f 100644 (file)
@@ -32,7 +32,6 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
                /* setup outer VLAN ops */
                vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
                vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
-               vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
 
                /* setup inner VLAN ops */
                vlan_ops = &vsi->inner_vlan_ops;
@@ -47,8 +46,13 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
 
                vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
                vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
-               vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
        }
+
+       /* all Rx traffic should be in the domain of the assigned port VLAN,
+        * so prevent disabling Rx VLAN filtering
+        */
+       vlan_ops->dis_rx_filtering = noop_vlan;
+
        vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
 }
 
@@ -77,6 +81,8 @@ static void ice_port_vlan_off(struct ice_vsi *vsi)
                vlan_ops->del_vlan = ice_vsi_del_vlan;
        }
 
+       vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+
        if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
                vlan_ops->ena_rx_filtering = noop_vlan;
        else
@@ -141,7 +147,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
                &vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
 
        vlan_ops->add_vlan = ice_vsi_add_vlan;
-       vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
        vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
        vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
 }
index de11b3186bd7ea5731c3b63850841ffa39279ac8..1c7b4ded948b63205a72217cda869218e3cf10bf 100644 (file)
@@ -1523,7 +1523,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
        u16 num_q_vectors_mapped, vsi_id, vector_id;
        struct virtchnl_irq_map_info *irqmap_info;
        struct virtchnl_vector_map *map;
-       struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
        int i;
 
@@ -1535,7 +1534,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
         * there is actually at least a single VF queue vector mapped
         */
        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
-           pf->vfs.num_msix_per < num_q_vectors_mapped ||
+           vf->num_msix < num_q_vectors_mapped ||
            !num_q_vectors_mapped) {
                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                goto error_param;
@@ -1557,7 +1556,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
                /* vector_id is always 0-based for each VF, and can never be
                 * larger than or equal to the max allowed interrupts per VF
                 */
-               if (!(vector_id < pf->vfs.num_msix_per) ||
+               if (!(vector_id < vf->num_msix) ||
                    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
                    (!vector_id && (map->rxq_map || map->txq_map))) {
                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
index 552970c7dec09715ece049a737ee32e2712e6b41..a9bdf3283a852f638d72396c8bf84dc5fe76f0b1 100644 (file)
@@ -1193,6 +1193,13 @@ int octep_device_setup(struct octep_device *oct)
        if (ret)
                return ret;
 
+       INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
+       INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
+       INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
+       oct->poll_non_ioq_intr = true;
+       queue_delayed_work(octep_wq, &oct->intr_poll_task,
+                          msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
        atomic_set(&oct->hb_miss_cnt, 0);
        INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
 
@@ -1258,7 +1265,8 @@ static bool get_fw_ready_status(struct pci_dev *pdev)
 
                pci_read_config_byte(pdev, (pos + 8), &status);
                dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
-               return status;
+#define FW_STATUS_READY 1ULL
+               return status == FW_STATUS_READY;
        }
        return false;
 }
@@ -1325,21 +1333,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_octep_config;
        }
 
-       octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
-                               &octep_dev->conf->fw_info);
+       err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+                                     &octep_dev->conf->fw_info);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to get firmware info\n");
+               goto register_dev_err;
+       }
        dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
                 octep_dev->conf->fw_info.hb_interval,
                 octep_dev->conf->fw_info.hb_miss_count);
        queue_delayed_work(octep_wq, &octep_dev->hb_task,
                           msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));
 
-       INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
-       INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
-       INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
-       octep_dev->poll_non_ioq_intr = true;
-       queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
-                          msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
-
        netdev->netdev_ops = &octep_netdev_ops;
        octep_set_ethtool_ops(netdev);
        netif_carrier_off(netdev);
index 6845556581c3fa3f01c74e8a138d1c6a7a945723..5df42634ceb84c2dca063559a7e83685d12563aa 100644 (file)
@@ -1945,7 +1945,7 @@ struct mcs_hw_info {
        u8 tcam_entries;        /* RX/TX Tcam entries per mcs block */
        u8 secy_entries;        /* RX/TX SECY entries per mcs block */
        u8 sc_entries;          /* RX/TX SC CAM entries per mcs block */
-       u8 sa_entries;          /* PN table entries = SA entries */
+       u16 sa_entries;         /* PN table entries = SA entries */
        u64 rsvd[16];
 };
 
index c43f19dfbd74403817b8f65a336e57104bbeb61a..c1775bd01c2b4879f6ca2edb57310c04e4c52588 100644 (file)
@@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id
        reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
        stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
 
-       reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
+       reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
        stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
 
        reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
@@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
                reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
                stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
 
-               reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
+               reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
                stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
 
                if (mcs->hw->mcs_blks > 1) {
@@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id)
        return NULL;
 }
 
+bool is_mcs_bypass(int mcs_id)
+{
+       struct mcs *mcs_dev;
+
+       list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+               if (mcs_dev->mcs_id == mcs_id)
+                       return mcs_dev->bypass;
+       }
+       return true;
+}
+
 void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
 {
        u64 val = 0;
@@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs)
        return err;
 }
 
-static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
 {
        u64 val;
 
@@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
        else
                val &= ~BIT_ULL(6);
        mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+       mcs->bypass = bypass;
 }
 
 static void mcs_global_cfg(struct mcs *mcs)
index 0f89dcb764654b604cee5967296ca58101d02c35..f927cc61dfd21f996a4b85f43d695a71eb44358d 100644 (file)
@@ -149,6 +149,7 @@ struct mcs {
        u16                     num_vec;
        void                    *rvu;
        u16                     *tx_sa_active;
+       bool                      bypass;
 };
 
 struct mcs_ops {
@@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *
 int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
 int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
 int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+bool is_mcs_bypass(int mcs_id);
 
 /* CN10K-B APIs */
 void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
index f3ab01fc363c8deb6261e185097ac6c04f7e84aa..f4c6de89002c1d92e203a7455e86ceefa8d8f418 100644 (file)
                offset = 0x9d8ull;                      \
        offset; })
 
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \
+       u64 offset;                                     \
+                                                       \
+       offset = 0xee80ull;                             \
+       if (mcs->hw->mcs_blks > 1)                      \
+               offset = 0xe818ull;                     \
+       offset += (a) * 0x8ull;                         \
+       offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({        \
+       u64 offset;                                     \
+                                                       \
+       offset = 0xa680ull;                             \
+       if (mcs->hw->mcs_blks > 1)                      \
+               offset = 0xd018ull;                     \
+       offset += (a) * 0x8ull;                         \
+       offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a)        ({      \
+       u64 offset;                                             \
+                                                               \
+       offset = 0xf680ull;                                     \
+       if (mcs->hw->mcs_blks > 1)                              \
+               offset = 0xe018ull;                             \
+       offset += (a) * 0x8ull;                                 \
+       offset; })
+
 #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a)  (0xe680ull + (a) * 0x8ull)
 #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a)   (0xde80ull + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a)    (0xa680ull + (a) * 0x8ull)
 #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a)      (0xd218 + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a)   (0xd018ull + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a)        (0xee80ull + (a) * 0x8ull)
 #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a)                (0xb680ull + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
 #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a)      (0x12680ull + (a) * 0x8ull)
 #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
 #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a)     (0x13680ull + (a) * 0x8ull)
index af21e2030cff28f258b45a9d88701ef0f5f6b1dd..4728ba34b0e34cc3f5741bfc62b2ba49e00c2b8f 100644 (file)
@@ -373,6 +373,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
        cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
        rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
 
+       /* Disable forward pause to driver */
+       cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+       cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
+       rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
        /* Enable channel mask for all LMACS */
        if (is_dev_rpm2(rpm))
                rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
@@ -616,12 +621,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
 
        if (rx_pause) {
                cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
-                               RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
-                               RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+                        RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
        } else {
                cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
-                               RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
-                               RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+                       RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
        }
 
        if (tx_pause) {
index 22c395c7d040b494886060257b13bed6e0b01bb8..731bb82b577c20b753c6ad396527997ed4ca2362 100644 (file)
@@ -2631,6 +2631,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
        rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
        rvu_mac_reset(rvu, pcifunc);
 
+       if (rvu->mcs_blk_cnt)
+               rvu_mcs_flr_handler(rvu, pcifunc);
+
        mutex_unlock(&rvu->flr_lock);
 }
 
index c4d999ef5ab4b2cf9cb3f388e3b452e87fd3fad7..cce2806aaa50cc0712562664f72aa5b6fc67d2d7 100644 (file)
@@ -345,6 +345,7 @@ struct nix_hw {
        struct nix_txvlan txvlan;
        struct nix_ipolicer *ipolicer;
        u64    *tx_credits;
+       u8      cc_mcs_cnt;
 };
 
 /* RVU block's capabilities or functionality,
index c70932625d0da0a396c732167b023f822a63271d..21b5d71c1e37582fac1569fc6f79693f8f2ff0d2 100644 (file)
@@ -538,7 +538,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
 
        rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
        if (!rvu_dl->devlink_wq)
-               goto err;
+               return -ENOMEM;
 
        INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
        INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
@@ -546,9 +546,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
        INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
 
        return 0;
-err:
-       rvu_nix_health_reporters_destroy(rvu_dl);
-       return -ENOMEM;
 }
 
 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
@@ -1087,7 +1084,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
 
        rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
        if (!rvu_dl->devlink_wq)
-               goto err;
+               return -ENOMEM;
 
        INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
        INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
@@ -1095,9 +1092,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
        INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
 
        return 0;
-err:
-       rvu_npa_health_reporters_destroy(rvu_dl);
-       return -ENOMEM;
 }
 
 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
index c112c71ff576f8a693412ff30b435eb0ee3a193f..4227ebb4a758db2bd86aa714ddc0e8ca5c2f8322 100644 (file)
@@ -12,6 +12,7 @@
 #include "rvu_reg.h"
 #include "rvu.h"
 #include "npc.h"
+#include "mcs.h"
 #include "cgx.h"
 #include "lmac_common.h"
 #include "rvu_npc_hash.h"
@@ -4389,6 +4390,12 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
                            SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
        }
 
+       /* Get MCS external bypass status for CN10K-B */
+       if (mcs_get_blkcnt() == 1) {
+               /* Adjust for 2 credits when external bypass is disabled */
+               nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
+       }
+
        /* Set credits for Tx links assuming max packet length allowed.
         * This will be reconfigured based on MTU set for PF/VF.
         */
@@ -4412,6 +4419,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
                        tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
                        /* Enable credits and set credit pkt count to max allowed */
                        cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+                       cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
 
                        link = iter + slink;
                        nix_hw->tx_credits[link] = tx_credits;
index 16cfc802e348d9d5bc7bc6f2ef4e52eae3c0000b..0bcf3e559280650110d46d86be7f092e52ed2ba5 100644 (file)
@@ -389,7 +389,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
        int bank, nixlf, index;
 
        /* get ucast entry rule entry index */
-       nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
+       if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
+               dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
+                       __func__, pf_func);
+               /* Action 0 is drop */
+               return 0;
+       }
+
        index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
                                         NIXLF_UCAST_ENTRY);
        bank = npc_get_bank(mcam, index);
@@ -665,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
        int blkaddr, ucast_idx, index;
        struct nix_rx_action action = { 0 };
        u64 relaxed_mask;
+       u8 flow_key_alg;
 
        if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
                return;
@@ -695,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
                action.op = NIX_RX_ACTIONOP_UCAST;
        }
 
+       flow_key_alg = action.flow_key_alg;
+
        /* RX_ACTION set to MCAST for CGX PF's */
        if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
            is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
@@ -734,7 +743,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
        req.vf = pcifunc;
        req.index = action.index;
        req.match_id = action.match_id;
-       req.flow_key_alg = action.flow_key_alg;
+       req.flow_key_alg = flow_key_alg;
 
        rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
 }
@@ -848,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
        u8 mac_addr[ETH_ALEN] = { 0 };
        struct nix_rx_action action = { 0 };
        struct rvu_pfvf *pfvf;
+       u8 flow_key_alg;
        u16 vf_func;
 
        /* Only CGX PF/VF can add allmulticast entry */
@@ -882,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
                *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
                                                        blkaddr, ucast_idx);
 
+       flow_key_alg = action.flow_key_alg;
        if (action.op != NIX_RX_ACTIONOP_RSS) {
                *(u64 *)&action = 0;
                action.op = NIX_RX_ACTIONOP_UCAST;
@@ -918,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
        req.vf = pcifunc | vf_func;
        req.index = action.index;
        req.match_id = action.match_id;
-       req.flow_key_alg = action.flow_key_alg;
+       req.flow_key_alg = flow_key_alg;
 
        rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
 }
@@ -984,11 +995,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
        mutex_unlock(&mcam->lock);
 }
 
+static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
+                                             struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
+                                             int alg_idx)
+
+{
+       struct npc_mcam *mcam = &rvu->hw->mcam;
+       struct rvu_hwinfo *hw = rvu->hw;
+       int bank, op_rss;
+
+       if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
+               return;
+
+       op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
+
+       bank = npc_get_bank(mcam, mcam_index);
+       mcam_index &= (mcam->banksize - 1);
+
+       /* If Rx action is MCAST update only RSS algorithm index */
+       if (!op_rss) {
+               *(u64 *)&action = rvu_read64(rvu, blkaddr,
+                               NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
+
+               action.flow_key_alg = alg_idx;
+       }
+       rvu_write64(rvu, blkaddr,
+                   NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
+}
+
 void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
                                    int group, int alg_idx, int mcam_index)
 {
        struct npc_mcam *mcam = &rvu->hw->mcam;
-       struct rvu_hwinfo *hw = rvu->hw;
        struct nix_rx_action action;
        int blkaddr, index, bank;
        struct rvu_pfvf *pfvf;
@@ -1044,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
        /* If PF's promiscuous entry is enabled,
         * Set RSS action for that entry as well
         */
-       if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
-           is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
-               bank = npc_get_bank(mcam, index);
-               index &= (mcam->banksize - 1);
+       npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+                                         alg_idx);
 
-               rvu_write64(rvu, blkaddr,
-                           NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
-                           *(u64 *)&action);
-       }
+       index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+                                        nixlf, NIXLF_ALLMULTI_ENTRY);
+       /* If PF's allmulti  entry is enabled,
+        * Set RSS action for that entry as well
+        */
+       npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+                                         alg_idx);
 }
 
 void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
index b3150f05329196fc4bedd965a2387de602c5a3f0..d46ac29adb966dec7dea89e50fbcdbbd0f5e61a4 100644 (file)
@@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
        {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
                              {0x1200, 0x12E0} } },
        {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
-                             {0x1610, 0x1618}, {0x1700, 0x17B0} } },
-       {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
+                             {0x1610, 0x1618}, {0x1700, 0x17C8} } },
+       {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
        {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
 };
 
index b42e631e52d0fd686144881bb255f1529a80879e..18c1c9f361cc623c4b0247bc184142635458cdf2 100644 (file)
 
 #define NIX_AF_LINKX_BASE_MASK         GENMASK_ULL(11, 0)
 #define NIX_AF_LINKX_RANGE_MASK                GENMASK_ULL(19, 16)
+#define NIX_AF_LINKX_MCS_CNT_MASK      GENMASK_ULL(33, 32)
 
 /* SSO */
 #define SSO_AF_CONST                   (0x1000)
index 9efcec549834e800ff82465838ad59653f30115a..53f6258a973c287883a4fa63393115dcbfa63bb5 100644 (file)
@@ -334,9 +334,12 @@ static void otx2_get_pauseparam(struct net_device *netdev,
        if (is_otx2_lbkvf(pfvf->pdev))
                return;
 
+       mutex_lock(&pfvf->mbox.lock);
        req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
-       if (!req)
+       if (!req) {
+               mutex_unlock(&pfvf->mbox.lock);
                return;
+       }
 
        if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
                rsp = (struct cgx_pause_frm_cfg *)
@@ -344,6 +347,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
                pause->rx_pause = rsp->rx_pause;
                pause->tx_pause = rsp->tx_pause;
        }
+       mutex_unlock(&pfvf->mbox.lock);
 }
 
 static int otx2_set_pauseparam(struct net_device *netdev,
index 532e324bdcc8e6cbd975017474d06a6af303ae85..a57455aebff6fc58e24c4a4da2d60d78e59f439f 100644 (file)
@@ -1650,6 +1650,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
        mutex_unlock(&mbox->lock);
 }
 
+static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
+{
+       int vf;
+
+       /* The AF driver will determine whether to allow the VF netdev or not */
+       if (is_otx2_vf(pfvf->pcifunc))
+               return true;
+
+       /* check if there are any trusted VFs associated with the PF netdev */
+       for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
+               if (pfvf->vf_configs[vf].trusted)
+                       return true;
+       return false;
+}
+
 static void otx2_do_set_rx_mode(struct otx2_nic *pf)
 {
        struct net_device *netdev = pf->netdev;
@@ -1682,12 +1697,21 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
        if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
                req->mode |= NIX_RX_MODE_ALLMULTI;
 
-       req->mode |= NIX_RX_MODE_USE_MCE;
+       if (otx2_promisc_use_mce_list(pf))
+               req->mode |= NIX_RX_MODE_USE_MCE;
 
        otx2_sync_mbox_msg(&pf->mbox);
        mutex_unlock(&pf->mbox.lock);
 }
 
+static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
+{
+       int cint;
+
+       for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
+               otx2_config_irq_coalescing(pfvf, cint);
+}
+
 static void otx2_dim_work(struct work_struct *w)
 {
        struct dim_cq_moder cur_moder;
@@ -1703,6 +1727,7 @@ static void otx2_dim_work(struct work_struct *w)
                CQ_TIMER_THRESH_MAX : cur_moder.usec;
        pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
                NAPI_POLL_WEIGHT : cur_moder.pkts;
+       otx2_set_irq_coalesce(pfvf);
        dim->state = DIM_START_MEASURE;
 }
 
@@ -2682,11 +2707,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
        pf->vf_configs[vf].trusted = enable;
        rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
 
-       if (rc)
+       if (rc) {
                pf->vf_configs[vf].trusted = !enable;
-       else
+       } else {
                netdev_info(pf->netdev, "VF %d is %strusted\n",
                            vf, enable ? "" : "not ");
+               otx2_set_rx_mode(netdev);
+       }
+
        return rc;
 }
 
index 6ee15f3c25ede947ce0103a7d8b00cd7291c177e..4d519ea833b2c7c4fa439ee56fdd07962221030c 100644 (file)
@@ -512,11 +512,18 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
 {
        struct dim_sample dim_sample;
        u64 rx_frames, rx_bytes;
+       u64 tx_frames, tx_bytes;
 
        rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
                OTX2_GET_RX_STATS(RX_UCAST);
        rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
-       dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
+       tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
+       tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
+
+       dim_update_sample(pfvf->napi_events,
+                         rx_frames + tx_frames,
+                         rx_bytes + tx_bytes,
+                         &dim_sample);
        net_dim(&cq_poll->dim, dim_sample);
 }
 
@@ -558,16 +565,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
                if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
                        return workdone;
 
-               /* Check for adaptive interrupt coalesce */
-               if (workdone != 0 &&
-                   ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
-                    OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
-                       /* Adjust irq coalese using net_dim */
+               /* Adjust irq coalese using net_dim */
+               if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
                        otx2_adjust_adaptive_coalese(pfvf, cq_poll);
-                       /* Update irq coalescing */
-                       for (i = 0; i < pfvf->hw.cint_cnt; i++)
-                               otx2_config_irq_coalescing(pfvf, i);
-               }
 
                if (unlikely(!filled_cnt)) {
                        struct refill_work *work;
index b2a5da9739d2b463e9565ffe943cb7ec1916b70a..729a11b5fb2524e23e91e599a9b9c74729d9da11 100644 (file)
@@ -826,6 +826,7 @@ enum {
        MLX5E_STATE_DESTROYING,
        MLX5E_STATE_XDP_TX_ENABLED,
        MLX5E_STATE_XDP_ACTIVE,
+       MLX5E_STATE_CHANNELS_ACTIVE,
 };
 
 struct mlx5e_modify_sq_param {
index 4e923a2874aefe089faa454dc4c8f04e4e776b3f..86bf007fd05b7327a79918b5de9beea9353b70e1 100644 (file)
@@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
        struct mlx5_flow_spec *spec;
        int err;
 
+       if (IS_ERR(post_act))
+               return PTR_ERR(post_act);
+
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;
@@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
        struct mlx5e_post_act_handle *handle;
        int err;
 
+       if (IS_ERR(post_act))
+               return ERR_CAST(post_act);
+
        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
index 655496598c688496bb30e14c0a0d6e52e2d93040..161c5190c236a0d8d048bd6a253a36cdeb12b9bc 100644 (file)
@@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
        if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
                esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
 
-       sa_entry->esn_state.esn = esn;
+       if (sa_entry->esn_state.esn_msb)
+               sa_entry->esn_state.esn = esn;
+       else
+               /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+                * the first packet sent using a given SA will contain a sequence
+                * number of 1.
+                */
+               sa_entry->esn_state.esn = max_t(u32, esn, 1);
        sa_entry->esn_state.esn_msb = esn_msb;
 
        if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
@@ -335,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                attrs->replay_esn.esn = sa_entry->esn_state.esn;
                attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
                attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+               switch (x->replay_esn->replay_window) {
+               case 32:
+                       attrs->replay_esn.replay_window =
+                               MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
+                       break;
+               case 64:
+                       attrs->replay_esn.replay_window =
+                               MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
+                       break;
+               case 128:
+                       attrs->replay_esn.replay_window =
+                               MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
+                       break;
+               case 256:
+                       attrs->replay_esn.replay_window =
+                               MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
+                       break;
+               default:
+                       WARN_ON(true);
+                       return;
+               }
        }
 
        attrs->dir = x->xso.dir;
@@ -907,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
                return;
 
        mlx5e_accel_ipsec_fs_cleanup(ipsec);
-       if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+       if (ipsec->netevent_nb.notifier_call) {
                unregister_netevent_notifier(&ipsec->netevent_nb);
-       if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+               ipsec->netevent_nb.notifier_call = NULL;
+       }
+       if (ipsec->aso)
                mlx5e_ipsec_aso_cleanup(ipsec);
        destroy_workqueue(ipsec->wq);
        kfree(ipsec);
@@ -1018,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
                }
        }
 
+       if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
+           !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+               NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
+               return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -1113,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
        .xdo_dev_state_free     = mlx5e_xfrm_free_state,
        .xdo_dev_offload_ok     = mlx5e_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
-};
-
-static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
-       .xdo_dev_state_add      = mlx5e_xfrm_add_state,
-       .xdo_dev_state_delete   = mlx5e_xfrm_del_state,
-       .xdo_dev_state_free     = mlx5e_xfrm_free_state,
-       .xdo_dev_offload_ok     = mlx5e_ipsec_offload_ok,
-       .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 
        .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
        .xdo_dev_policy_add = mlx5e_xfrm_add_policy,
@@ -1138,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 
        mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
 
-       if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
-               netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
-       else
-               netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
-
+       netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
        netdev->features |= NETIF_F_HW_ESP;
        netdev->hw_enc_features |= NETIF_F_HW_ESP;
 
index 8f4a37bceaf455c7bea24f4a21349b543e33818c..adaea3493193ef05dcf148dfda376b520acc94b7 100644 (file)
@@ -189,11 +189,19 @@ struct mlx5e_ipsec_ft {
        u32 refcnt;
 };
 
+struct mlx5e_ipsec_drop {
+       struct mlx5_flow_handle *rule;
+       struct mlx5_fc *fc;
+};
+
 struct mlx5e_ipsec_rule {
        struct mlx5_flow_handle *rule;
        struct mlx5_modify_hdr *modify_hdr;
        struct mlx5_pkt_reformat *pkt_reformat;
        struct mlx5_fc *fc;
+       struct mlx5e_ipsec_drop replay;
+       struct mlx5e_ipsec_drop auth;
+       struct mlx5e_ipsec_drop trailer;
 };
 
 struct mlx5e_ipsec_miss {
@@ -201,19 +209,6 @@ struct mlx5e_ipsec_miss {
        struct mlx5_flow_handle *rule;
 };
 
-struct mlx5e_ipsec_rx {
-       struct mlx5e_ipsec_ft ft;
-       struct mlx5e_ipsec_miss pol;
-       struct mlx5e_ipsec_miss sa;
-       struct mlx5e_ipsec_rule status;
-       struct mlx5e_ipsec_miss status_drop;
-       struct mlx5_fc *status_drop_cnt;
-       struct mlx5e_ipsec_fc *fc;
-       struct mlx5_fs_chains *chains;
-       u8 allow_tunnel_mode : 1;
-       struct xarray ipsec_obj_id_map;
-};
-
 struct mlx5e_ipsec_tx_create_attr {
        int prio;
        int pol_level;
@@ -248,6 +243,7 @@ struct mlx5e_ipsec {
        struct mlx5_ipsec_fs *roce;
        u8 is_uplink_rep: 1;
        struct mlx5e_ipsec_mpv_work mpv_work;
+       struct xarray ipsec_obj_id_map;
 };
 
 struct mlx5e_ipsec_esn_state {
index f41c976dc33f931eb3ac19431684fbd206a9e413..c1e89dc77db965e24f54e85d431e8e3e6b7cc183 100644 (file)
@@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx {
        u8 allow_tunnel_mode : 1;
 };
 
+struct mlx5e_ipsec_status_checks {
+       struct mlx5_flow_group *drop_all_group;
+       struct mlx5e_ipsec_drop all;
+};
+
+struct mlx5e_ipsec_rx {
+       struct mlx5e_ipsec_ft ft;
+       struct mlx5e_ipsec_miss pol;
+       struct mlx5e_ipsec_miss sa;
+       struct mlx5e_ipsec_rule status;
+       struct mlx5e_ipsec_status_checks status_drops;
+       struct mlx5e_ipsec_fc *fc;
+       struct mlx5_fs_chains *chains;
+       u8 allow_tunnel_mode : 1;
+};
+
 /* IPsec RX flow steering */
 static enum mlx5_traffic_types family2tt(u32 family)
 {
@@ -128,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
        return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
 }
 
-static int ipsec_status_rule(struct mlx5_core_dev *mdev,
-                            struct mlx5e_ipsec_rx *rx,
-                            struct mlx5_flow_destination *dest)
+static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+                                        struct mlx5e_ipsec_rx *rx)
+{
+       mlx5_del_flow_rules(rx->status_drops.all.rule);
+       mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
+       mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+}
+
+static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+                                        struct mlx5e_ipsec_rx *rx)
 {
-       u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+       mlx5_del_flow_rules(rx->status.rule);
+
+       if (rx != ipsec->rx_esw)
+               return;
+
+#ifdef CONFIG_MLX5_ESWITCH
+       mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+#endif
+}
+
+static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                        struct mlx5e_ipsec_rx *rx)
+{
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+       struct mlx5_flow_table *ft = rx->ft.status;
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
-       struct mlx5_modify_hdr *modify_hdr;
-       struct mlx5_flow_handle *fte;
+       struct mlx5_flow_handle *rule;
+       struct mlx5_fc *flow_counter;
        struct mlx5_flow_spec *spec;
        int err;
 
@@ -143,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,
        if (!spec)
                return -ENOMEM;
 
-       /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
-       MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
-       MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
-       MLX5_SET(copy_action_in, action, src_offset, 0);
-       MLX5_SET(copy_action_in, action, length, 7);
-       MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-       MLX5_SET(copy_action_in, action, dst_offset, 24);
+       flow_counter = mlx5_fc_create(mdev, true);
+       if (IS_ERR(flow_counter)) {
+               err = PTR_ERR(flow_counter);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+               goto err_cnt;
+       }
+       sa_entry->ipsec_rule.auth.fc = flow_counter;
 
-       modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
-                                             1, action);
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+       flow_act.flags = FLOW_ACT_NO_APPEND;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+       dest.counter_id = mlx5_fc_id(flow_counter);
+       if (rx == ipsec->rx_esw)
+               spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
 
-       if (IS_ERR(modify_hdr)) {
-               err = PTR_ERR(modify_hdr);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
+       MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+       MLX5_SET(fte_match_param, spec->match_value,
+                misc_parameters_2.metadata_reg_c_2,
+                sa_entry->ipsec_obj_id | BIT(31));
+       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
                mlx5_core_err(mdev,
-                             "fail to alloc ipsec copy modify_header_id err=%d\n", err);
-               goto out_spec;
+                             "Failed to add ipsec rx status drop rule, err=%d\n", err);
+               goto err_rule;
        }
+       sa_entry->ipsec_rule.auth.rule = rule;
 
-       /* create fte */
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
-                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+       flow_counter = mlx5_fc_create(mdev, true);
+       if (IS_ERR(flow_counter)) {
+               err = PTR_ERR(flow_counter);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+               goto err_cnt_2;
+       }
+       sa_entry->ipsec_rule.trailer.fc = flow_counter;
+
+       dest.counter_id = mlx5_fc_id(flow_counter);
+       MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop rule, err=%d\n", err);
+               goto err_rule_2;
+       }
+       sa_entry->ipsec_rule.trailer.rule = rule;
+
+       kvfree(spec);
+       return 0;
+
+err_rule_2:
+       mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
+err_cnt_2:
+       mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
+err_rule:
+       mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
+err_cnt:
+       kvfree(spec);
+       return err;
+}
+
+static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
+{
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+       struct mlx5_flow_table *ft = rx->ft.status;
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5_flow_destination dest = {};
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *rule;
+       struct mlx5_fc *flow_counter;
+       struct mlx5_flow_spec *spec;
+       int err;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+
+       flow_counter = mlx5_fc_create(mdev, true);
+       if (IS_ERR(flow_counter)) {
+               err = PTR_ERR(flow_counter);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+               goto err_cnt;
+       }
+
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+       flow_act.flags = FLOW_ACT_NO_APPEND;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+       dest.counter_id = mlx5_fc_id(flow_counter);
+       if (rx == ipsec->rx_esw)
+               spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+       MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+       MLX5_SET(fte_match_param, spec->match_value,  misc_parameters_2.metadata_reg_c_2,
+                sa_entry->ipsec_obj_id | BIT(31));
+       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop rule, err=%d\n", err);
+               goto err_rule;
+       }
+
+       sa_entry->ipsec_rule.replay.rule = rule;
+       sa_entry->ipsec_rule.replay.fc = flow_counter;
+
+       kvfree(spec);
+       return 0;
+
+err_rule:
+       mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+       kvfree(spec);
+       return err;
+}
+
+static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
+                                          struct mlx5e_ipsec_rx *rx)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_flow_table *ft = rx->ft.status;
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5_flow_destination dest = {};
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *rule;
+       struct mlx5_fc *flow_counter;
+       struct mlx5_flow_spec *spec;
+       struct mlx5_flow_group *g;
+       u32 *flow_group_in;
+       int err = 0;
+
+       flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!flow_group_in || !spec) {
+               err = -ENOMEM;
+               goto err_out;
+       }
+
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+       g = mlx5_create_flow_group(ft, flow_group_in);
+       if (IS_ERR(g)) {
+               err = PTR_ERR(g);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+               goto err_out;
+       }
+
+       flow_counter = mlx5_fc_create(mdev, false);
+       if (IS_ERR(flow_counter)) {
+               err = PTR_ERR(flow_counter);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+               goto err_cnt;
+       }
+
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+       dest.counter_id = mlx5_fc_id(flow_counter);
+       if (rx == ipsec->rx_esw)
+               spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               mlx5_core_err(mdev,
+                             "Failed to add ipsec rx status drop rule, err=%d\n", err);
+               goto err_rule;
+       }
+
+       rx->status_drops.drop_all_group = g;
+       rx->status_drops.all.rule = rule;
+       rx->status_drops.all.fc = flow_counter;
+
+       kvfree(flow_group_in);
+       kvfree(spec);
+       return 0;
+
+err_rule:
+       mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+       mlx5_destroy_flow_group(g);
+err_out:
+       kvfree(flow_group_in);
+       kvfree(spec);
+       return err;
+}
+
+static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+                                      struct mlx5e_ipsec_rx *rx,
+                                      struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_spec *spec;
+       int err;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        misc_parameters_2.ipsec_syndrome);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        misc_parameters_2.metadata_reg_c_4);
+       MLX5_SET(fte_match_param, spec->match_value,
+                misc_parameters_2.ipsec_syndrome, 0);
+       MLX5_SET(fte_match_param, spec->match_value,
+                misc_parameters_2.metadata_reg_c_4, 0);
+       if (rx == ipsec->rx_esw)
+               spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+       flow_act.flags = FLOW_ACT_NO_APPEND;
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
-       flow_act.modify_hdr = modify_hdr;
-       fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
-       if (IS_ERR(fte)) {
-               err = PTR_ERR(fte);
-               mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
-               goto out;
+       rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               mlx5_core_warn(ipsec->mdev,
+                              "Failed to add ipsec rx status pass rule, err=%d\n", err);
+               goto err_rule;
        }
 
+       rx->status.rule = rule;
        kvfree(spec);
-       rx->status.rule = fte;
-       rx->status.modify_hdr = modify_hdr;
        return 0;
 
-out:
-       mlx5_modify_header_dealloc(mdev, modify_hdr);
-out_spec:
+err_rule:
        kvfree(spec);
        return err;
 }
 
+static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+                                        struct mlx5e_ipsec_rx *rx)
+{
+       ipsec_rx_status_pass_destroy(ipsec, rx);
+       ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+                                      struct mlx5e_ipsec_rx *rx,
+                                      struct mlx5_flow_destination *dest)
+{
+       int err;
+
+       err = ipsec_rx_status_drop_all_create(ipsec, rx);
+       if (err)
+               return err;
+
+       err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+       if (err)
+               goto err_pass_create;
+
+       return 0;
+
+err_pass_create:
+       ipsec_rx_status_drop_destroy(ipsec, rx);
+       return err;
+}
+
 static int ipsec_miss_create(struct mlx5_core_dev *mdev,
                             struct mlx5_flow_table *ft,
                             struct mlx5e_ipsec_miss *miss,
@@ -333,12 +597,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
        mlx5_destroy_flow_table(rx->ft.sa);
        if (rx->allow_tunnel_mode)
                mlx5_eswitch_unblock_encap(mdev);
-       if (rx == ipsec->rx_esw) {
-               mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
-       } else {
-               mlx5_del_flow_rules(rx->status.rule);
-               mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
-       }
+       mlx5_ipsec_rx_status_destroy(ipsec, rx);
        mlx5_destroy_flow_table(rx->ft.status);
 
        mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -419,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
        if (err)
                return err;
 
-       ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
+       ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_fs_ft_status;
@@ -428,10 +687,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
-       if (rx == ipsec->rx_esw)
-               err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
-       else
-               err = ipsec_status_rule(mdev, rx, dest);
+       err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
        if (err)
                goto err_add;
 
@@ -956,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
 }
 
-static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
 {
        /* SPI number */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
 
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
-       MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+       if (encap) {
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                misc_parameters.inner_esp_spi);
+               MLX5_SET(fte_match_param, spec->match_value,
+                        misc_parameters.inner_esp_spi, spi);
+       } else {
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                misc_parameters.outer_esp_spi);
+               MLX5_SET(fte_match_param, spec->match_value,
+                        misc_parameters.outer_esp_spi, spi);
+       }
 }
 
 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
@@ -1052,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
                               struct mlx5_flow_act *flow_act)
 {
        enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
-       u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+       u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5_modify_hdr *modify_hdr;
+       u8 num_of_actions = 1;
 
-       MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+       MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
        switch (dir) {
        case XFRM_DEV_OFFLOAD_IN:
-               MLX5_SET(set_action_in, action, field,
+               MLX5_SET(set_action_in, action[0], field,
                         MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+
+               num_of_actions++;
+               MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
+               MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
+               MLX5_SET(set_action_in, action[1], data, val);
+               MLX5_SET(set_action_in, action[1], offset, 0);
+               MLX5_SET(set_action_in, action[1], length, 32);
+
+               if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
+                       num_of_actions++;
+                       MLX5_SET(set_action_in, action[2], action_type,
+                                MLX5_ACTION_TYPE_SET);
+                       MLX5_SET(set_action_in, action[2], field,
+                                MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
+                       MLX5_SET(set_action_in, action[2], data, 0);
+                       MLX5_SET(set_action_in, action[2], offset, 0);
+                       MLX5_SET(set_action_in, action[2], length, 32);
+               }
                break;
        case XFRM_DEV_OFFLOAD_OUT:
-               MLX5_SET(set_action_in, action, field,
+               MLX5_SET(set_action_in, action[0], field,
                         MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
                break;
        default:
                return -EINVAL;
        }
 
-       MLX5_SET(set_action_in, action, data, val);
-       MLX5_SET(set_action_in, action, offset, 0);
-       MLX5_SET(set_action_in, action, length, 32);
+       MLX5_SET(set_action_in, action[0], data, val);
+       MLX5_SET(set_action_in, action[0], offset, 0);
+       MLX5_SET(set_action_in, action[0], length, 32);
 
-       modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+       modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
        if (IS_ERR(modify_hdr)) {
                mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
                              PTR_ERR(modify_hdr));
@@ -1321,8 +1605,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
 
-       setup_fte_spi(spec, attrs->spi);
-       setup_fte_esp(spec);
+       setup_fte_spi(spec, attrs->spi, attrs->encap);
+       if (!attrs->encap)
+               setup_fte_esp(spec);
        setup_fte_no_frags(spec);
        setup_fte_upper_proto_match(spec, &attrs->upspec);
 
@@ -1372,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
                mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
                goto err_add_flow;
        }
+       if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+               err = rx_add_rule_drop_replay(sa_entry, rx);
+       if (err)
+               goto err_add_replay;
+
+       err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
+       if (err)
+               goto err_drop_reason;
+
        kvfree(spec);
 
        sa_entry->ipsec_rule.rule = rule;
@@ -1380,6 +1674,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
        sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
        return 0;
 
+err_drop_reason:
+       if (sa_entry->ipsec_rule.replay.rule) {
+               mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
+               mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
+       }
+err_add_replay:
+       mlx5_del_flow_rules(rule);
 err_add_flow:
        mlx5_fc_destroy(mdev, counter);
 err_add_cnt:
@@ -1428,7 +1729,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 
        switch (attrs->type) {
        case XFRM_DEV_OFFLOAD_CRYPTO:
-               setup_fte_spi(spec, attrs->spi);
+               setup_fte_spi(spec, attrs->spi, false);
                setup_fte_esp(spec);
                setup_fte_reg_a(spec);
                break;
@@ -1809,8 +2110,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int err = 0;
 
-       if (esw)
-               down_write(&esw->mode_lock);
+       if (esw) {
+               err = mlx5_esw_lock(esw);
+               if (err)
+                       return err;
+       }
 
        if (mdev->num_block_ipsec) {
                err = -EBUSY;
@@ -1821,7 +2125,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
 
 unlock:
        if (esw)
-               up_write(&esw->mode_lock);
+               mlx5_esw_unlock(esw);
 
        return err;
 }
@@ -1887,6 +2191,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 
        if (ipsec_rule->modify_hdr)
                mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+       mlx5_del_flow_rules(ipsec_rule->trailer.rule);
+       mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
+
+       mlx5_del_flow_rules(ipsec_rule->auth.rule);
+       mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+
+       if (ipsec_rule->replay.rule) {
+               mlx5_del_flow_rules(ipsec_rule->replay.rule);
+               mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
+       }
        mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
        rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
 }
@@ -1957,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
        kfree(ipsec->rx_ipv6);
 
        if (ipsec->is_uplink_rep) {
-               xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+               xa_destroy(&ipsec->ipsec_obj_id_map);
 
                mutex_destroy(&ipsec->tx_esw->ft.mutex);
                WARN_ON(ipsec->tx_esw->ft.refcnt);
@@ -2020,7 +2335,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
                mutex_init(&ipsec->tx_esw->ft.mutex);
                mutex_init(&ipsec->rx_esw->ft.mutex);
                ipsec->tx_esw->ns = ns_esw;
-               xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+               xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
        } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
                ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
        } else {
index a91f772dc981a15f3834409bb1f4f5973a9c2236..6e00afe4671b78ae48eb3eaaddf8456eb09ab0e4 100644 (file)
@@ -6,6 +6,8 @@
 #include "ipsec.h"
 #include "lib/crypto.h"
 #include "lib/ipsec_fs_roce.h"
+#include "fs_core.h"
+#include "eswitch.h"
 
 enum {
        MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -38,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
            MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
                caps |= MLX5_IPSEC_CAP_CRYPTO;
 
-       if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
+       if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+           (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
+            (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
+            is_mdev_legacy_mode(mdev)))) {
                if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
                                              reformat_add_esp_trasport) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@@ -95,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
 
                if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
                        MLX5_SET(ipsec_aso, aso_ctx, window_sz,
-                                attrs->replay_esn.replay_window / 64);
+                                attrs->replay_esn.replay_window);
                        MLX5_SET(ipsec_aso, aso_ctx, mode,
                                 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
                }
@@ -559,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
        dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
                         DMA_BIDIRECTIONAL);
        kfree(aso);
+       ipsec->aso = NULL;
 }
 
 static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
index ea58c691743302bfc0ccc4420f15ff4ff06e3cae..0c87ddb8a7a2188cef8a586bb5df04da5241f969 100644 (file)
@@ -2731,6 +2731,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
 {
        int i;
 
+       ASSERT_RTNL();
        if (chs->ptp) {
                mlx5e_ptp_close(chs->ptp);
                chs->ptp = NULL;
@@ -3012,17 +3013,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        if (mlx5e_is_vport_rep(priv))
                mlx5e_rep_activate_channels(priv);
 
+       set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+
        mlx5e_wait_channels_min_rx_wqes(&priv->channels);
 
        if (priv->rx_res)
                mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
 }
 
+static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
+{
+       WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
+       if (current_work() != &priv->tx_timeout_work)
+               cancel_work_sync(&priv->tx_timeout_work);
+}
+
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
        if (priv->rx_res)
                mlx5e_rx_res_channels_deactivate(priv->rx_res);
 
+       clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+       mlx5e_cancel_tx_timeout_work(priv);
+
        if (mlx5e_is_vport_rep(priv))
                mlx5e_rep_deactivate_channels(priv);
 
@@ -4801,8 +4814,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
        struct net_device *netdev = priv->netdev;
        int i;
 
-       rtnl_lock();
-       mutex_lock(&priv->state_lock);
+       /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
+        * through this flow. However, channel closing flows have to wait for
+        * this work to finish while holding rtnl lock too. So either get the
+        * lock or find that channels are being closed for other reason and
+        * this work is not relevant anymore.
+        */
+       while (!rtnl_trylock()) {
+               if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
+                       return;
+               msleep(20);
+       }
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto unlock;
@@ -4821,7 +4843,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
        }
 
 unlock:
-       mutex_unlock(&priv->state_lock);
        rtnl_unlock();
 }
 
index 3ab682bbcf86780fa16daa27bc99baf9219bd88e..1bf7540a65ad95e61dbf9e918f475599500d1252 100644 (file)
@@ -1497,7 +1497,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 
        dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
                                                 rpriv->rep->vport);
-       if (dl_port) {
+       if (!IS_ERR(dl_port)) {
                SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
                mlx5e_rep_vnic_reporter_create(priv, dl_port);
        }
index 7ca9e5b86778e3b0353530c4ce021a1fa8e5fd21..4809a66f3491a6234f17675b3e550bb45bf3afaf 100644 (file)
@@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
        struct mlx5e_flow_meter_handle *meter;
        enum mlx5e_post_meter_type type;
 
+       if (IS_ERR(post_act))
+               return PTR_ERR(post_act);
+
        meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
        if (IS_ERR(meter)) {
                mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
@@ -3738,6 +3741,20 @@ out_free:
        return err;
 }
 
+static int
+set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
+{
+       struct mlx5e_post_act *post_act = get_post_action(priv);
+
+       if (IS_ERR(post_act))
+               return PTR_ERR(post_act);
+
+       attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
+
+       return 0;
+}
+
 static int
 alloc_branch_attr(struct mlx5e_tc_flow *flow,
                  struct mlx5e_tc_act_branch_ctrl *cond,
@@ -3761,8 +3778,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
                break;
        case FLOW_ACTION_ACCEPT:
        case FLOW_ACTION_PIPE:
-               attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-               attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+               if (set_branch_dest_ft(flow->priv, attr))
+                       goto out_err;
                break;
        case FLOW_ACTION_JUMP:
                if (*jump_count) {
@@ -3771,8 +3788,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
                        goto out_err;
                }
                *jump_count = cond->extval;
-               attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-               attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+               if (set_branch_dest_ft(flow->priv, attr))
+                       goto out_err;
                break;
        default:
                err = -EOPNOTSUPP;
index 095f31f380fa3aa8bc9c2a122ac2dc365dc5332d..190f10aba17028211fc6c34abaa7b35d44310ba2 100644 (file)
@@ -21,158 +21,6 @@ enum {
        MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
 };
 
-static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
-                                            struct mlx5e_ipsec_rx *rx)
-{
-       mlx5_del_flow_rules(rx->status_drop.rule);
-       mlx5_destroy_flow_group(rx->status_drop.group);
-       mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
-}
-
-static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
-                                            struct mlx5e_ipsec_rx *rx)
-{
-       mlx5_del_flow_rules(rx->status.rule);
-       mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-}
-
-static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
-                                          struct mlx5e_ipsec_rx *rx)
-{
-       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       struct mlx5_flow_table *ft = rx->ft.status;
-       struct mlx5_core_dev *mdev = ipsec->mdev;
-       struct mlx5_flow_destination dest = {};
-       struct mlx5_flow_act flow_act = {};
-       struct mlx5_flow_handle *rule;
-       struct mlx5_fc *flow_counter;
-       struct mlx5_flow_spec *spec;
-       struct mlx5_flow_group *g;
-       u32 *flow_group_in;
-       int err = 0;
-
-       flow_group_in = kvzalloc(inlen, GFP_KERNEL);
-       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-       if (!flow_group_in || !spec) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
-       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
-       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
-       g = mlx5_create_flow_group(ft, flow_group_in);
-       if (IS_ERR(g)) {
-               err = PTR_ERR(g);
-               mlx5_core_err(mdev,
-                             "Failed to add ipsec rx status drop flow group, err=%d\n", err);
-               goto err_out;
-       }
-
-       flow_counter = mlx5_fc_create(mdev, false);
-       if (IS_ERR(flow_counter)) {
-               err = PTR_ERR(flow_counter);
-               mlx5_core_err(mdev,
-                             "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
-               goto err_cnt;
-       }
-
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
-       dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-       dest.counter_id = mlx5_fc_id(flow_counter);
-       spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
-       rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
-       if (IS_ERR(rule)) {
-               err = PTR_ERR(rule);
-               mlx5_core_err(mdev,
-                             "Failed to add ipsec rx status drop rule, err=%d\n", err);
-               goto err_rule;
-       }
-
-       rx->status_drop.group = g;
-       rx->status_drop.rule = rule;
-       rx->status_drop_cnt = flow_counter;
-
-       kvfree(flow_group_in);
-       kvfree(spec);
-       return 0;
-
-err_rule:
-       mlx5_fc_destroy(mdev, flow_counter);
-err_cnt:
-       mlx5_destroy_flow_group(g);
-err_out:
-       kvfree(flow_group_in);
-       kvfree(spec);
-       return err;
-}
-
-static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
-                                          struct mlx5e_ipsec_rx *rx,
-                                          struct mlx5_flow_destination *dest)
-{
-       struct mlx5_flow_act flow_act = {};
-       struct mlx5_flow_handle *rule;
-       struct mlx5_flow_spec *spec;
-       int err;
-
-       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-       if (!spec)
-               return -ENOMEM;
-
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-                        misc_parameters_2.ipsec_syndrome);
-       MLX5_SET(fte_match_param, spec->match_value,
-                misc_parameters_2.ipsec_syndrome, 0);
-       spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
-       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
-       flow_act.flags = FLOW_ACT_NO_APPEND;
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
-       rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
-       if (IS_ERR(rule)) {
-               err = PTR_ERR(rule);
-               mlx5_core_warn(ipsec->mdev,
-                              "Failed to add ipsec rx status pass rule, err=%d\n", err);
-               goto err_rule;
-       }
-
-       rx->status.rule = rule;
-       kvfree(spec);
-       return 0;
-
-err_rule:
-       kvfree(spec);
-       return err;
-}
-
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
-                                     struct mlx5e_ipsec_rx *rx)
-{
-       esw_ipsec_rx_status_pass_destroy(ipsec, rx);
-       esw_ipsec_rx_status_drop_destroy(ipsec, rx);
-}
-
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
-                                   struct mlx5e_ipsec_rx *rx,
-                                   struct mlx5_flow_destination *dest)
-{
-       int err;
-
-       err = esw_ipsec_rx_status_drop_create(ipsec, rx);
-       if (err)
-               return err;
-
-       err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
-       if (err)
-               goto err_pass_create;
-
-       return 0;
-
-err_pass_create:
-       esw_ipsec_rx_status_drop_destroy(ipsec, rx);
-       return err;
-}
-
 void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
                                       struct mlx5e_ipsec_rx_create_attr *attr)
 {
@@ -202,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
        u32 mapped_id;
        int err;
 
-       err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+       err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
                          xa_mk_value(sa_entry->ipsec_obj_id),
                          XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
        if (err)
@@ -233,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
        return 0;
 
 err_header_alloc:
-       xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+       xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
        return err;
 }
 
@@ -242,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
 
        if (sa_entry->rx_mapped_id)
-               xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+               xa_erase_bh(&ipsec->ipsec_obj_id_map,
                            sa_entry->rx_mapped_id);
 }
 
@@ -252,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
        struct mlx5e_ipsec *ipsec = priv->ipsec;
        void *val;
 
-       val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+       val = xa_load(&ipsec->ipsec_obj_id_map, id);
        if (!val)
                return -ENOENT;
 
@@ -304,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
 
        xa_for_each(&esw->offloads.vport_reps, i, rep) {
                rpriv = rep->rep_data[REP_ETH].priv;
-               if (!rpriv || !rpriv->netdev)
+               if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
                        continue;
 
                rhashtable_walk_enter(&rpriv->tc_ht, &iter);
index 0c90f7a8b0d32c7e4268fc1becfee6ffb2229ea1..ac9c65b89166e6fda902d51310900a7c50894928 100644 (file)
@@ -8,11 +8,6 @@ struct mlx5e_ipsec;
 struct mlx5e_ipsec_sa_entry;
 
 #ifdef CONFIG_MLX5_ESWITCH
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
-                                     struct mlx5e_ipsec_rx *rx);
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
-                                   struct mlx5e_ipsec_rx *rx,
-                                   struct mlx5_flow_destination *dest);
 void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
                                       struct mlx5e_ipsec_rx_create_attr *attr);
 int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
@@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
                                       struct mlx5e_ipsec_tx_create_attr *attr);
 void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
 #else
-static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
-                                                   struct mlx5e_ipsec_rx *rx) {}
-
-static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
-                                                 struct mlx5e_ipsec_rx *rx,
-                                                 struct mlx5_flow_destination *dest)
-{
-       return  -EINVAL;
-}
-
 static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
                                                     struct mlx5e_ipsec_rx_create_attr *attr) {}
 
index 8d0b915a31214ea83366bfdcbabcc8ec0c232eaf..3047d7015c5256726338904432ce56845c59c39c 100644 (file)
@@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
 {
        int err;
 
-       lockdep_assert_held(&esw->mode_lock);
+       devl_assert_locked(priv_to_devlink(esw->dev));
 
        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
@@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
        if (toggle_lag)
                mlx5_lag_disable_change(esw->dev);
 
-       down_write(&esw->mode_lock);
        if (!mlx5_esw_is_fdb_created(esw)) {
                ret = mlx5_eswitch_enable_locked(esw, num_vfs);
        } else {
@@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
                }
        }
 
-       up_write(&esw->mode_lock);
-
        if (toggle_lag)
                mlx5_lag_enable_change(esw->dev);
 
@@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
                return;
 
        devl_assert_locked(priv_to_devlink(esw->dev));
-       down_write(&esw->mode_lock);
        /* If driver is unloaded, this function is called twice by remove_one()
         * and mlx5_unload(). Prevent the second call.
         */
        if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
-               goto unlock;
+               return;
 
        esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
                 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
                esw->esw_funcs.num_vfs = 0;
        else
                esw->esw_funcs.num_ec_vfs = 0;
-
-unlock:
-       up_write(&esw->mode_lock);
 }
 
 /* Free resources for corresponding eswitch mode. It is called by devlink
@@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 
        devl_assert_locked(priv_to_devlink(esw->dev));
        mlx5_lag_disable_change(esw->dev);
-       down_write(&esw->mode_lock);
        mlx5_eswitch_disable_locked(esw);
        esw->mode = MLX5_ESWITCH_LEGACY;
-       up_write(&esw->mode_lock);
        mlx5_lag_enable_change(esw->dev);
 }
 
@@ -2254,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
        if (!mlx5_esw_allowed(esw))
                return true;
 
-       if (down_read_trylock(&esw->mode_lock) != 0)
+       if (down_read_trylock(&esw->mode_lock) != 0) {
+               if (esw->eswitch_operation_in_progress) {
+                       up_read(&esw->mode_lock);
+                       return false;
+               }
                return true;
+       }
 
        return false;
 }
@@ -2312,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
        if (down_write_trylock(&esw->mode_lock) == 0)
                return -EINVAL;
 
-       if (atomic64_read(&esw->user_count) > 0) {
+       if (esw->eswitch_operation_in_progress ||
+           atomic64_read(&esw->user_count) > 0) {
                up_write(&esw->mode_lock);
                return -EBUSY;
        }
@@ -2320,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
        return esw->mode;
 }
 
+int mlx5_esw_lock(struct mlx5_eswitch *esw)
+{
+       down_write(&esw->mode_lock);
+
+       if (esw->eswitch_operation_in_progress) {
+               up_write(&esw->mode_lock);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * mlx5_esw_unlock() - Release write lock on esw mode lock
  * @esw: eswitch device.
index 37ab66e7b403f1d86d3242e3fdabe7ce5bdcc49a..b674b57d05aad3ed340e8d19bc59ca569153395a 100644 (file)
@@ -383,6 +383,7 @@ struct mlx5_eswitch {
        struct xarray paired;
        struct mlx5_devcom_comp_dev *devcom;
        u16 enabled_ipsec_vf_count;
+       bool eswitch_operation_in_progress;
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -827,6 +828,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev);
 void mlx5_esw_get(struct mlx5_core_dev *dev);
 void mlx5_esw_put(struct mlx5_core_dev *dev);
 int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
+int mlx5_esw_lock(struct mlx5_eswitch *esw);
 void mlx5_esw_unlock(struct mlx5_eswitch *esw);
 
 void esw_vport_change_handle_locked(struct mlx5_vport *vport);
index 88236e75fd9013058dd855a77073ed21cb1e8afe..bb8bcb448ae903c6bdb66e67722075c8073773bb 100644 (file)
@@ -3653,14 +3653,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
 
 static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
 {
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct net *devl_net, *netdev_net;
-       struct mlx5_eswitch *esw;
-
-       esw = mlx5_devlink_eswitch_nocheck_get(devlink);
-       netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
-       devl_net = devlink_net(devlink);
+       bool ret = false;
 
-       return net_eq(devl_net, netdev_net);
+       mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+       if (dev->mlx5e_res.uplink_netdev) {
+               netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
+               devl_net = devlink_net(devlink);
+               ret = net_eq(devl_net, netdev_net);
+       }
+       mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+       return ret;
 }
 
 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
@@ -3733,13 +3737,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                goto unlock;
        }
 
+       esw->eswitch_operation_in_progress = true;
+       up_write(&esw->mode_lock);
+
        mlx5_eswitch_disable_locked(esw);
        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
                if (mlx5_devlink_trap_get_num_active(esw->dev)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Can't change mode while devlink traps are active");
                        err = -EOPNOTSUPP;
-                       goto unlock;
+                       goto skip;
                }
                err = esw_offloads_start(esw, extack);
        } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
@@ -3749,6 +3756,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                err = -EINVAL;
        }
 
+skip:
+       down_write(&esw->mode_lock);
+       esw->eswitch_operation_in_progress = false;
 unlock:
        mlx5_esw_unlock(esw);
 enable_lag:
@@ -3759,16 +3769,12 @@ enable_lag:
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
        struct mlx5_eswitch *esw;
-       int err;
 
        esw = mlx5_devlink_eswitch_get(devlink);
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-       down_read(&esw->mode_lock);
-       err = esw_mode_to_devlink(esw->mode, mode);
-       up_read(&esw->mode_lock);
-       return err;
+       return esw_mode_to_devlink(esw->mode, mode);
 }
 
 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
@@ -3862,11 +3868,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
        if (err)
                goto out;
 
+       esw->eswitch_operation_in_progress = true;
+       up_write(&esw->mode_lock);
+
        err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
-       if (err)
-               goto out;
+       if (!err)
+               esw->offloads.inline_mode = mlx5_mode;
 
-       esw->offloads.inline_mode = mlx5_mode;
+       down_write(&esw->mode_lock);
+       esw->eswitch_operation_in_progress = false;
        up_write(&esw->mode_lock);
        return 0;
 
@@ -3878,16 +3888,12 @@ out:
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 {
        struct mlx5_eswitch *esw;
-       int err;
 
        esw = mlx5_devlink_eswitch_get(devlink);
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-       down_read(&esw->mode_lock);
-       err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-       up_read(&esw->mode_lock);
-       return err;
+       return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
@@ -3969,6 +3975,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
                goto unlock;
        }
 
+       esw->eswitch_operation_in_progress = true;
+       up_write(&esw->mode_lock);
+
        esw_destroy_offloads_fdb_tables(esw);
 
        esw->offloads.encap = encap;
@@ -3982,6 +3991,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
                (void)esw_create_offloads_fdb_tables(esw);
        }
 
+       down_write(&esw->mode_lock);
+       esw->eswitch_operation_in_progress = false;
+
 unlock:
        up_write(&esw->mode_lock);
        return err;
@@ -3996,9 +4008,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-       down_read(&esw->mode_lock);
        *encap = esw->offloads.encap;
-       up_read(&esw->mode_lock);
        return 0;
 }
 
index b568988e92e3e95985683f79b8c5fdf601d13d5a..c4e19d627da2148bd1fab3b6f388358320ecb9d0 100644 (file)
@@ -325,6 +325,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
                mlx5_core_err(dev, "Failed to reload FW tracer\n");
 }
 
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+{
+       struct pci_dev *bridge = dev->pdev->bus->self;
+       u16 reg16;
+       int err;
+
+       if (!bridge)
+               return -EOPNOTSUPP;
+
+       err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
+       if (err)
+               return err;
+
+       if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) {
+               mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+#endif
+
 static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
 {
        struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -357,6 +380,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
                return false;
        }
 
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+       err = mlx5_check_hotplug_interrupt(dev);
+       if (err)
+               return false;
+#endif
+
        err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
        if (err)
                return false;
index 060a77f2265d9a12e0c675a1a43e9f06ba95a1b2..e522845c7c211619a252bb995dec65160d7a1ae5 100644 (file)
@@ -160,6 +160,18 @@ struct nfp_tun_mac_addr_offload {
        u8 addr[ETH_ALEN];
 };
 
+/**
+ * struct nfp_neigh_update_work - update neighbour information to nfp
+ * @work:      Work queue for writing neigh to the nfp
+ * @n:         neighbour entry
+ * @app:       Back pointer to app
+ */
+struct nfp_neigh_update_work {
+       struct work_struct work;
+       struct neighbour *n;
+       struct nfp_app *app;
+};
+
 enum nfp_flower_mac_offload_cmd {
        NFP_TUNNEL_MAC_OFFLOAD_ADD =            0,
        NFP_TUNNEL_MAC_OFFLOAD_DEL =            1,
@@ -607,38 +619,30 @@ err:
        nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
 }
 
-static int
-nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
-                           void *ptr)
+static void
+nfp_tun_release_neigh_update_work(struct nfp_neigh_update_work *update_work)
 {
-       struct nfp_flower_priv *app_priv;
-       struct netevent_redirect *redir;
-       struct neighbour *n;
+       neigh_release(update_work->n);
+       kfree(update_work);
+}
+
+static void nfp_tun_neigh_update(struct work_struct *work)
+{
+       struct nfp_neigh_update_work *update_work;
        struct nfp_app *app;
+       struct neighbour *n;
        bool neigh_invalid;
        int err;
 
-       switch (event) {
-       case NETEVENT_REDIRECT:
-               redir = (struct netevent_redirect *)ptr;
-               n = redir->neigh;
-               break;
-       case NETEVENT_NEIGH_UPDATE:
-               n = (struct neighbour *)ptr;
-               break;
-       default:
-               return NOTIFY_DONE;
-       }
-
-       neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
-
-       app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
-       app = app_priv->app;
+       update_work = container_of(work, struct nfp_neigh_update_work, work);
+       app = update_work->app;
+       n = update_work->n;
 
        if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
-               return NOTIFY_DONE;
+               goto out;
 
 #if IS_ENABLED(CONFIG_INET)
+       neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
        if (n->tbl->family == AF_INET6) {
 #if IS_ENABLED(CONFIG_IPV6)
                struct flowi6 flow6 = {};
@@ -655,13 +659,11 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                        dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
                                                  &flow6, NULL);
                        if (IS_ERR(dst))
-                               return NOTIFY_DONE;
+                               goto out;
 
                        dst_release(dst);
                }
                nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
-#else
-               return NOTIFY_DONE;
 #endif /* CONFIG_IPV6 */
        } else {
                struct flowi4 flow4 = {};
@@ -678,17 +680,71 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                        rt = ip_route_output_key(dev_net(n->dev), &flow4);
                        err = PTR_ERR_OR_ZERO(rt);
                        if (err)
-                               return NOTIFY_DONE;
+                               goto out;
 
                        ip_rt_put(rt);
                }
                nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
        }
-#else
-       return NOTIFY_DONE;
 #endif /* CONFIG_INET */
+out:
+       nfp_tun_release_neigh_update_work(update_work);
+}
 
-       return NOTIFY_OK;
+static struct nfp_neigh_update_work *
+nfp_tun_alloc_neigh_update_work(struct nfp_app *app, struct neighbour *n)
+{
+       struct nfp_neigh_update_work *update_work;
+
+       update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC);
+       if (!update_work)
+               return NULL;
+
+       INIT_WORK(&update_work->work, nfp_tun_neigh_update);
+       neigh_hold(n);
+       update_work->n = n;
+       update_work->app = app;
+
+       return update_work;
+}
+
+static int
+nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+                           void *ptr)
+{
+       struct nfp_neigh_update_work *update_work;
+       struct nfp_flower_priv *app_priv;
+       struct netevent_redirect *redir;
+       struct neighbour *n;
+       struct nfp_app *app;
+
+       switch (event) {
+       case NETEVENT_REDIRECT:
+               redir = (struct netevent_redirect *)ptr;
+               n = redir->neigh;
+               break;
+       case NETEVENT_NEIGH_UPDATE:
+               n = (struct neighbour *)ptr;
+               break;
+       default:
+               return NOTIFY_DONE;
+       }
+#if IS_ENABLED(CONFIG_IPV6)
+       if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+#else
+       if (n->tbl != &arp_tbl)
+#endif
+               return NOTIFY_DONE;
+
+       app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
+       app = app_priv->app;
+       update_work = nfp_tun_alloc_neigh_update_work(app, n);
+       if (!update_work)
+               return NOTIFY_DONE;
+
+       queue_work(system_highpri_wq, &update_work->work);
+
+       return NOTIFY_DONE;
 }
 
 void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
@@ -706,6 +762,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
        netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto fail_rcu_unlock;
+       dev_hold(netdev);
 
        flow.daddr = payload->ipv4_addr;
        flow.flowi4_proto = IPPROTO_UDP;
@@ -725,13 +782,16 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
        ip_rt_put(rt);
        if (!n)
                goto fail_rcu_unlock;
+       rcu_read_unlock();
+
        nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
        neigh_release(n);
-       rcu_read_unlock();
+       dev_put(netdev);
        return;
 
 fail_rcu_unlock:
        rcu_read_unlock();
+       dev_put(netdev);
        nfp_flower_cmsg_warn(app, "Requested route not found.\n");
 }
 
@@ -749,6 +809,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
        netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto fail_rcu_unlock;
+       dev_hold(netdev);
 
        flow.daddr = payload->ipv6_addr;
        flow.flowi6_proto = IPPROTO_UDP;
@@ -766,14 +827,16 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
        dst_release(dst);
        if (!n)
                goto fail_rcu_unlock;
+       rcu_read_unlock();
 
        nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
        neigh_release(n);
-       rcu_read_unlock();
+       dev_put(netdev);
        return;
 
 fail_rcu_unlock:
        rcu_read_unlock();
+       dev_put(netdev);
        nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
 }
 
index 1dbc3cb50b1d905469eb31b24857bf15ca3aa029..9b54630400752976471746828142dd494b2cd7f2 100644 (file)
@@ -223,7 +223,7 @@ struct ionic_desc_info {
        void *cb_arg;
 };
 
-#define IONIC_QUEUE_NAME_MAX_SZ                32
+#define IONIC_QUEUE_NAME_MAX_SZ                16
 
 struct ionic_queue {
        struct device *dev;
index edc14730ce88b5f7db438d37b4ad2e93da3cb813..bad919343180e9fa8e2c40d145ac527a9534b98c 100644 (file)
@@ -49,24 +49,24 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif);
 static void ionic_dim_work(struct work_struct *work)
 {
        struct dim *dim = container_of(work, struct dim, work);
+       struct ionic_intr_info *intr;
        struct dim_cq_moder cur_moder;
        struct ionic_qcq *qcq;
+       struct ionic_lif *lif;
        u32 new_coal;
 
        cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
        qcq = container_of(dim, struct ionic_qcq, dim);
-       new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
+       lif = qcq->q.lif;
+       new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
        new_coal = new_coal ? new_coal : 1;
 
-       if (qcq->intr.dim_coal_hw != new_coal) {
-               unsigned int qi = qcq->cq.bound_q->index;
-               struct ionic_lif *lif = qcq->q.lif;
-
-               qcq->intr.dim_coal_hw = new_coal;
+       intr = &qcq->intr;
+       if (intr->dim_coal_hw != new_coal) {
+               intr->dim_coal_hw = new_coal;
 
                ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
-                                    lif->rxqcqs[qi]->intr.index,
-                                    qcq->intr.dim_coal_hw);
+                                    intr->index, intr->dim_coal_hw);
        }
 
        dim->state = DIM_START_MEASURE;
index 65e20693c549e14753cbfb8509e1037723e6b7c1..33f4f58ee51c687d45a437b09275ead900336800 100644 (file)
@@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
                p_dma->virt_addr = NULL;
        }
        kfree(p_mngr->ilt_shadow);
+       p_mngr->ilt_shadow = NULL;
 }
 
 static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
index 6f2fa2a42770aa2743f12cae7bae30408f731088..1822f2ad8f0ddf788b5b5a52568a74e719c2eac8 100644 (file)
@@ -30,6 +30,8 @@
 
 #define QCASPI_MAX_REGS 0x20
 
+#define QCASPI_RX_MAX_FRAMES 4
+
 static const u16 qcaspi_spi_regs[] = {
        SPI_REG_BFR_SIZE,
        SPI_REG_WRBUF_SPC_AVA,
@@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
 {
        struct qcaspi *qca = netdev_priv(dev);
 
-       ring->rx_max_pending = 4;
+       ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
        ring->tx_max_pending = TX_RING_MAX_LEN;
-       ring->rx_pending = 4;
+       ring->rx_pending = QCASPI_RX_MAX_FRAMES;
        ring->tx_pending = qca->txr.count;
 }
 
@@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
                     struct kernel_ethtool_ringparam *kernel_ring,
                     struct netlink_ext_ack *extack)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
        struct qcaspi *qca = netdev_priv(dev);
 
-       if ((ring->rx_pending) ||
+       if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
            (ring->rx_mini_pending) ||
            (ring->rx_jumbo_pending))
                return -EINVAL;
 
-       if (netif_running(dev))
-               ops->ndo_stop(dev);
+       if (qca->spi_thread)
+               kthread_park(qca->spi_thread);
 
        qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
        qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
 
-       if (netif_running(dev))
-               ops->ndo_open(dev);
+       if (qca->spi_thread)
+               kthread_unpark(qca->spi_thread);
 
        return 0;
 }
index bec723028e96c9c7f9ec686e497d2eea91a7d8c6..5f3c11fb3fa27905521157f8ee582edc39b943e4 100644 (file)
@@ -580,6 +580,18 @@ qcaspi_spi_thread(void *data)
        netdev_info(qca->net_dev, "SPI thread created\n");
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
+               if (kthread_should_park()) {
+                       netif_tx_disable(qca->net_dev);
+                       netif_carrier_off(qca->net_dev);
+                       qcaspi_flush_tx_ring(qca);
+                       kthread_parkme();
+                       if (qca->sync == QCASPI_SYNC_READY) {
+                               netif_carrier_on(qca->net_dev);
+                               netif_wake_queue(qca->net_dev);
+                       }
+                       continue;
+               }
+
                if ((qca->intr_req == qca->intr_svc) &&
                    !qca->txr.skb[qca->txr.head])
                        schedule();
@@ -608,11 +620,17 @@ qcaspi_spi_thread(void *data)
                        if (intr_cause & SPI_INT_CPU_ON) {
                                qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
 
+                               /* Frame decoding in progress */
+                               if (qca->frm_handle.state != qca->frm_handle.init)
+                                       qca->net_dev->stats.rx_dropped++;
+
+                               qcafrm_fsm_init_spi(&qca->frm_handle);
+                               qca->stats.device_reset++;
+
                                /* not synced. */
                                if (qca->sync != QCASPI_SYNC_READY)
                                        continue;
 
-                               qca->stats.device_reset++;
                                netif_wake_queue(qca->net_dev);
                                netif_carrier_on(qca->net_dev);
                        }
index 62cabeeb842a135684ead5de19bab9872a422ba3..bb787a52bc754412cba47ce6f13381361e6fc44a 100644 (file)
@@ -196,6 +196,7 @@ enum rtl_registers {
                                        /* No threshold before first PCI xfer */
 #define        RX_FIFO_THRESH                  (7 << RXCFG_FIFO_SHIFT)
 #define        RX_EARLY_OFF                    (1 << 11)
+#define        RX_PAUSE_SLOT_ON                (1 << 11)       /* 8125b and later */
 #define        RXCFG_DMA_SHIFT                 8
                                        /* Unlimited maximum PCI burst. */
 #define        RX_DMA_BURST                    (7 << RXCFG_DMA_SHIFT)
@@ -2306,9 +2307,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
                RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
                break;
-       case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+       case RTL_GIGA_MAC_VER_61:
                RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
                break;
+       case RTL_GIGA_MAC_VER_63:
+               RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
+                       RX_PAUSE_SLOT_ON);
+               break;
        default:
                RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
                break;
index 2cd6fce5c9934c7a9fe2cc7b1396bd3b252d0a0d..9e40c28d453ab190d2c3150df27baa52d745a98d 100644 (file)
@@ -59,26 +59,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
                return -ENODEV;
        }
 
-       if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
-               pr_info("dwmac_loongson_pci: Incompatible OF node\n");
-               return -ENODEV;
-       }
-
        plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
        if (!plat)
                return -ENOMEM;
 
+       plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+                                          sizeof(*plat->mdio_bus_data),
+                                          GFP_KERNEL);
+       if (!plat->mdio_bus_data)
+               return -ENOMEM;
+
        plat->mdio_node = of_get_child_by_name(np, "mdio");
        if (plat->mdio_node) {
                dev_info(&pdev->dev, "Found MDIO subnode\n");
-
-               plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
-                                                  sizeof(*plat->mdio_bus_data),
-                                                  GFP_KERNEL);
-               if (!plat->mdio_bus_data) {
-                       ret = -ENOMEM;
-                       goto err_put_node;
-               }
                plat->mdio_bus_data->needs_reset = true;
        }
 
index d3bf42d0fceb69bd084b9d05db0f7dacdb8cff18..31631e3f89d0a499f04b26ed79b159e99cea4879 100644 (file)
@@ -34,6 +34,7 @@
 #define RGMII_CONFIG_LOOPBACK_EN               BIT(2)
 #define RGMII_CONFIG_PROG_SWAP                 BIT(1)
 #define RGMII_CONFIG_DDR_MODE                  BIT(0)
+#define RGMII_CONFIG_SGMII_CLK_DVDR            GENMASK(18, 10)
 
 /* SDCC_HC_REG_DLL_CONFIG fields */
 #define SDCC_DLL_CONFIG_DLL_RST                        BIT(30)
@@ -78,6 +79,8 @@
 #define ETHQOS_MAC_CTRL_SPEED_MODE             BIT(14)
 #define ETHQOS_MAC_CTRL_PORT_SEL               BIT(15)
 
+#define SGMII_10M_RX_CLK_DVDR                  0x31
+
 struct ethqos_emac_por {
        unsigned int offset;
        unsigned int value;
@@ -598,6 +601,9 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
        return 0;
 }
 
+/* On interface toggle MAC registers gets reset.
+ * Configure MAC block for SGMII on ethernet phy link up
+ */
 static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
 {
        int val;
@@ -617,6 +623,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
        case SPEED_10:
                val |= ETHQOS_MAC_CTRL_PORT_SEL;
                val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
+               rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
+                             FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
+                                        SGMII_10M_RX_CLK_DVDR),
+                             RGMII_IO_MACRO_CONFIG);
                break;
        }
 
index e95d35f1e5a0c8f7932601905bdf2b9fb7600e1e..8fd167501fa0ea10a5f4489bc1d06fef6591b4d9 100644 (file)
@@ -710,28 +710,22 @@ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
        }
 }
 
-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+                         u32 num_txq, u32 num_rxq,
                          bool enable)
 {
        u32 value;
 
-       if (!enable) {
-               value = readl(ioaddr + MAC_FPE_CTRL_STS);
-
-               value &= ~EFPE;
-
-               writel(value, ioaddr + MAC_FPE_CTRL_STS);
-               return;
+       if (enable) {
+               cfg->fpe_csr = EFPE;
+               value = readl(ioaddr + GMAC_RXQ_CTRL1);
+               value &= ~GMAC_RXQCTRL_FPRQ;
+               value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+               writel(value, ioaddr + GMAC_RXQ_CTRL1);
+       } else {
+               cfg->fpe_csr = 0;
        }
-
-       value = readl(ioaddr + GMAC_RXQ_CTRL1);
-       value &= ~GMAC_RXQCTRL_FPRQ;
-       value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
-       writel(value, ioaddr + GMAC_RXQ_CTRL1);
-
-       value = readl(ioaddr + MAC_FPE_CTRL_STS);
-       value |= EFPE;
-       writel(value, ioaddr + MAC_FPE_CTRL_STS);
+       writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
 }
 
 int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
@@ -741,6 +735,9 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
 
        status = FPE_EVENT_UNKNOWN;
 
+       /* Reads from the MAC_FPE_CTRL_STS register should only be performed
+        * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
+        */
        value = readl(ioaddr + MAC_FPE_CTRL_STS);
 
        if (value & TRSP) {
@@ -766,19 +763,15 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
        return status;
 }
 
-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+                            enum stmmac_mpacket_type type)
 {
-       u32 value;
+       u32 value = cfg->fpe_csr;
 
-       value = readl(ioaddr + MAC_FPE_CTRL_STS);
-
-       if (type == MPACKET_VERIFY) {
-               value &= ~SRSP;
+       if (type == MPACKET_VERIFY)
                value |= SVER;
-       } else {
-               value &= ~SVER;
+       else if (type == MPACKET_RESPONSE)
                value |= SRSP;
-       }
 
        writel(value, ioaddr + MAC_FPE_CTRL_STS);
 }
index 53c138d0ff4808d3ca4b4f3fcd456afbe85ae493..34e620790eb37160383e431a431c493545412395 100644 (file)
@@ -153,9 +153,11 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
                         unsigned int ptp_rate);
 void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
                           struct stmmac_extra_stats *x, u32 txqcnt);
-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+                         u32 num_txq, u32 num_rxq,
                          bool enable);
 void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
+                            struct stmmac_fpe_cfg *cfg,
                             enum stmmac_mpacket_type type);
 int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
 
index 453e88b75be08a71d67472fc5bd31b680fd1fd51..a74e71db79f949227525ceedb2d915eb88430aa9 100644 (file)
@@ -1484,7 +1484,8 @@ static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
        return 0;
 }
 
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+                                  u32 num_txq,
                                   u32 num_rxq, bool enable)
 {
        u32 value;
index b95d3e1378136eb485ea88370a6da42a1a75edd7..68aa2d5ca6e56774b03701098abf41c30eb4ac50 100644 (file)
@@ -412,9 +412,11 @@ struct stmmac_ops {
                             unsigned int ptp_rate);
        void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
                               struct stmmac_extra_stats *x, u32 txqcnt);
-       void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+       void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+                             u32 num_txq, u32 num_rxq,
                              bool enable);
        void (*fpe_send_mpacket)(void __iomem *ioaddr,
+                                struct stmmac_fpe_cfg *cfg,
                                 enum stmmac_mpacket_type type);
        int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
 };
index 2afb2bd25977a2265d998fb2203bbe3a70a9d3f5..37e64283f9107c4c06689d457a061ef9d38b582f 100644 (file)
@@ -964,7 +964,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
        bool *hs_enable = &fpe_cfg->hs_enable;
 
        if (is_up && *hs_enable) {
-               stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
+               stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+                                       MPACKET_VERIFY);
        } else {
                *lo_state = FPE_STATE_OFF;
                *lp_state = FPE_STATE_OFF;
@@ -5839,6 +5840,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
                /* If user has requested FPE enable, quickly response */
                if (*hs_enable)
                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+                                               fpe_cfg,
                                                MPACKET_RESPONSE);
        }
 
@@ -7263,6 +7265,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
                if (*lo_state == FPE_STATE_ENTERING_ON &&
                    *lp_state == FPE_STATE_ENTERING_ON) {
                        stmmac_fpe_configure(priv, priv->ioaddr,
+                                            fpe_cfg,
                                             priv->plat->tx_queues_to_use,
                                             priv->plat->rx_queues_to_use,
                                             *enable);
@@ -7281,6 +7284,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
                        netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
                                    *lo_state, *lp_state);
                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+                                               fpe_cfg,
                                                MPACKET_VERIFY);
                }
                /* Sleep then retry */
@@ -7295,6 +7299,7 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
        if (priv->plat->fpe_cfg->hs_enable != enable) {
                if (enable) {
                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+                                               priv->plat->fpe_cfg,
                                                MPACKET_VERIFY);
                } else {
                        priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
@@ -7755,6 +7760,7 @@ int stmmac_suspend(struct device *dev)
        if (priv->dma_cap.fpesel) {
                /* Disable FPE */
                stmmac_fpe_configure(priv, priv->ioaddr,
+                                    priv->plat->fpe_cfg,
                                     priv->plat->tx_queues_to_use,
                                     priv->plat->rx_queues_to_use, false);
 
index fa9e7e7040b9457c6f2786de5555554b36f12529..0542cfd1817e62a50adb9a739625ac9c04d40b33 100644 (file)
@@ -591,7 +591,11 @@ int stmmac_mdio_register(struct net_device *ndev)
        new_bus->parent = priv->device;
 
        err = of_mdiobus_register(new_bus, mdio_node);
-       if (err != 0) {
+       if (err == -ENODEV) {
+               err = 0;
+               dev_info(dev, "MDIO bus is disabled\n");
+               goto bus_register_fail;
+       } else if (err) {
                dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
                goto bus_register_fail;
        }
index ac41ef4cbd2f0243e984d81171ef15ea0173edff..6ad3e0a119366672d0cab3e2c3a1d4b1409abf92 100644 (file)
@@ -1079,6 +1079,7 @@ disable:
 
        priv->plat->fpe_cfg->enable = false;
        stmmac_fpe_configure(priv, priv->ioaddr,
+                            priv->plat->fpe_cfg,
                             priv->plat->tx_queues_to_use,
                             priv->plat->rx_queues_to_use,
                             false);
index ca7bf7f897d36b08e16402cb6be10c664dfc6478..c8cbd85adcf99527f947f4b5a63dc616b533987e 100644 (file)
@@ -3,5 +3,6 @@ config HYPERV_NET
        tristate "Microsoft Hyper-V virtual network driver"
        depends on HYPERV
        select UCS2_STRING
+       select NLS
        help
          Select this option to enable the Hyper-V virtual network driver.
index 508d9a392ab182c4809019e73fd2b658fd1d66de..f575f225d4178950abdc73584e6266c2421a92f6 100644 (file)
@@ -281,8 +281,10 @@ static int __team_options_register(struct team *team,
        return 0;
 
 inst_rollback:
-       for (i--; i >= 0; i--)
+       for (i--; i >= 0; i--) {
                __team_option_inst_del_option(team, dst_opts[i]);
+               list_del(&dst_opts[i]->list);
+       }
 
        i = option_count;
 alloc_rollback:
index 2c5c1e91ded613b5e9a23d5c5b93f70bac57c862..9bf2140fd0a1f4dc97a0553854c83cca184d574b 100644 (file)
@@ -3000,6 +3000,8 @@ static void rtl8152_nic_reset(struct r8152 *tp)
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
 
                for (i = 0; i < 1000; i++) {
+                       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                               break;
                        if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
                                break;
                        usleep_range(100, 400);
@@ -3329,6 +3331,8 @@ static void rtl_disable(struct r8152 *tp)
        rxdy_gated_en(tp, true);
 
        for (i = 0; i < 1000; i++) {
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                       break;
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
                if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY)
                        break;
@@ -3336,6 +3340,8 @@ static void rtl_disable(struct r8152 *tp)
        }
 
        for (i = 0; i < 1000; i++) {
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                       break;
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY)
                        break;
                usleep_range(1000, 2000);
@@ -5499,6 +5505,8 @@ static void wait_oob_link_list_ready(struct r8152 *tp)
        int i;
 
        for (i = 0; i < 1000; i++) {
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                       break;
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
                if (ocp_data & LINK_LIST_READY)
                        break;
@@ -5513,6 +5521,8 @@ static void r8156b_wait_loading_flash(struct r8152 *tp)
                int i;
 
                for (i = 0; i < 100; i++) {
+                       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                               break;
                        if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE)
                                break;
                        usleep_range(1000, 2000);
@@ -5635,6 +5645,8 @@ static int r8153_pre_firmware_1(struct r8152 *tp)
        for (i = 0; i < 104; i++) {
                u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
 
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                       return -ENODEV;
                if (!(ocp_data & WTD1_EN))
                        break;
                usleep_range(1000, 2000);
@@ -5791,6 +5803,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable)
                data &= ~EN_ALDPS;
                ocp_reg_write(tp, OCP_POWER_CFG, data);
                for (i = 0; i < 20; i++) {
+                       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                               return;
                        usleep_range(1000, 2000);
                        if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100)
                                break;
@@ -8397,6 +8411,8 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
        struct net_device *netdev;
 
+       rtnl_lock();
+
        if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
                return 0;
 
@@ -8428,20 +8444,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
        struct sockaddr sa;
 
        if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
-               return 0;
+               goto exit;
 
        rtl_set_accessible(tp);
 
        /* reset the MAC address in case of policy change */
-       if (determine_ethernet_addr(tp, &sa) >= 0) {
-               rtnl_lock();
+       if (determine_ethernet_addr(tp, &sa) >= 0)
                dev_set_mac_address (tp->netdev, &sa, NULL);
-               rtnl_unlock();
-       }
 
        netdev = tp->netdev;
        if (!netif_running(netdev))
-               return 0;
+               goto exit;
 
        set_bit(WORK_ENABLE, &tp->flags);
        if (netif_carrier_ok(netdev)) {
@@ -8460,6 +8473,8 @@ static int rtl8152_post_reset(struct usb_interface *intf)
        if (!list_empty(&tp->rx_done))
                napi_schedule(&tp->napi);
 
+exit:
+       rtnl_unlock();
        return 0;
 }
 
@@ -10034,6 +10049,7 @@ static const struct usb_device_id rtl8152_table[] = {
        { USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff) },
        { USB_DEVICE(VENDOR_ID_TPLINK,  0x0601) },
        { USB_DEVICE(VENDOR_ID_DLINK,   0xb301) },
+       { USB_DEVICE(VENDOR_ID_ASUS,    0x1976) },
        {}
 };
 
index 57efb3454c57aca0c5bf4e790226f2c7176c8468..977861c46b1fe16a27872b5df76e067409ae2da2 100644 (file)
@@ -790,7 +790,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 
                        skb_add_rx_frag(nskb, i, page, page_offset, size,
                                        truesize);
-                       if (skb_copy_bits(skb, off, page_address(page),
+                       if (skb_copy_bits(skb, off,
+                                         page_address(page) + page_offset,
                                          size)) {
                                consume_skb(nskb);
                                goto drop;
index 8fe2dd619e80eb4b3f45b85e940397e1f2ae6cca..b309c8be720f47c8d8a38e62d29f565e1e90358d 100644 (file)
@@ -107,11 +107,12 @@ config NVME_TCP_TLS
          If unsure, say N.
 
 config NVME_HOST_AUTH
-       bool "NVM Express over Fabrics In-Band Authentication"
+       bool "NVMe over Fabrics In-Band Authentication in host side"
        depends on NVME_CORE
        select NVME_AUTH
        help
-         This provides support for NVMe over Fabrics In-Band Authentication.
+         This provides support for NVMe over Fabrics In-Band Authentication in
+         host side.
 
          If unsure, say N.
 
index 1be1ce5228965a85c02b44855b1a1d52b520705e..8ebdfd623e0f7855155008325945b18f66efc373 100644 (file)
@@ -131,7 +131,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
        /*
         * Only new queue scan work when admin and IO queues are both alive
         */
-       if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
+       if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
                queue_work(nvme_wq, &ctrl->scan_work);
 }
 
@@ -143,7 +143,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
  */
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
 {
-       if (ctrl->state != NVME_CTRL_RESETTING)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
                return -EBUSY;
        if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
                return -EBUSY;
@@ -156,7 +156,7 @@ static void nvme_failfast_work(struct work_struct *work)
        struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvme_ctrl, failfast_work);
 
-       if (ctrl->state != NVME_CTRL_CONNECTING)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
                return;
 
        set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
@@ -200,7 +200,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
        ret = nvme_reset_ctrl(ctrl);
        if (!ret) {
                flush_work(&ctrl->reset_work);
-               if (ctrl->state != NVME_CTRL_LIVE)
+               if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
                        ret = -ENETRESET;
        }
 
@@ -499,7 +499,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 
        spin_lock_irqsave(&ctrl->lock, flags);
 
-       old_state = ctrl->state;
+       old_state = nvme_ctrl_state(ctrl);
        switch (new_state) {
        case NVME_CTRL_LIVE:
                switch (old_state) {
@@ -567,7 +567,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
        }
 
        if (changed) {
-               ctrl->state = new_state;
+               WRITE_ONCE(ctrl->state, new_state);
                wake_up_all(&ctrl->state_wq);
        }
 
@@ -575,11 +575,11 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
        if (!changed)
                return false;
 
-       if (ctrl->state == NVME_CTRL_LIVE) {
+       if (new_state == NVME_CTRL_LIVE) {
                if (old_state == NVME_CTRL_CONNECTING)
                        nvme_stop_failfast_work(ctrl);
                nvme_kick_requeue_lists(ctrl);
-       } else if (ctrl->state == NVME_CTRL_CONNECTING &&
+       } else if (new_state == NVME_CTRL_CONNECTING &&
                old_state == NVME_CTRL_RESETTING) {
                nvme_start_failfast_work(ctrl);
        }
@@ -592,7 +592,7 @@ EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
  */
 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
 {
-       switch (ctrl->state) {
+       switch (nvme_ctrl_state(ctrl)) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_LIVE:
        case NVME_CTRL_RESETTING:
@@ -617,7 +617,7 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl)
        wait_event(ctrl->state_wq,
                   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
                   nvme_state_terminal(ctrl));
-       return ctrl->state == NVME_CTRL_RESETTING;
+       return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
 }
 EXPORT_SYMBOL_GPL(nvme_wait_reset);
 
@@ -704,9 +704,11 @@ EXPORT_SYMBOL_GPL(nvme_init_request);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                struct request *rq)
 {
-       if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
-           ctrl->state != NVME_CTRL_DELETING &&
-           ctrl->state != NVME_CTRL_DEAD &&
+       enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+       if (state != NVME_CTRL_DELETING_NOIO &&
+           state != NVME_CTRL_DELETING &&
+           state != NVME_CTRL_DEAD &&
            !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
            !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
                return BLK_STS_RESOURCE;
@@ -736,7 +738,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                 * command, which is require to set the queue live in the
                 * appropinquate states.
                 */
-               switch (ctrl->state) {
+               switch (nvme_ctrl_state(ctrl)) {
                case NVME_CTRL_CONNECTING:
                        if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
                            (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -2550,7 +2552,7 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
 
        if (ctrl->ps_max_latency_us != latency) {
                ctrl->ps_max_latency_us = latency;
-               if (ctrl->state == NVME_CTRL_LIVE)
+               if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
                        nvme_configure_apst(ctrl);
        }
 }
@@ -3238,7 +3240,7 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
        struct nvme_ctrl *ctrl =
                container_of(inode->i_cdev, struct nvme_ctrl, cdev);
 
-       switch (ctrl->state) {
+       switch (nvme_ctrl_state(ctrl)) {
        case NVME_CTRL_LIVE:
                break;
        default:
@@ -3660,6 +3662,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
                goto out_unlink_ns;
 
        down_write(&ctrl->namespaces_rwsem);
+       /*
+        * Ensure that no namespaces are added to the ctrl list after the queues
+        * are frozen, thereby avoiding a deadlock between scan and reset.
+        */
+       if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
+               up_write(&ctrl->namespaces_rwsem);
+               goto out_unlink_ns;
+       }
        nvme_ns_add_to_ctrl_list(ns);
        up_write(&ctrl->namespaces_rwsem);
        nvme_get_ctrl(ctrl);
@@ -3924,7 +3934,7 @@ static void nvme_scan_work(struct work_struct *work)
        int ret;
 
        /* No tagset on a live ctrl means IO queues could not created */
-       if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
                return;
 
        /*
@@ -3994,7 +4004,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
         * removing the namespaces' disks; fail all the queues now to avoid
         * potentially having to clean up the failed sync later.
         */
-       if (ctrl->state == NVME_CTRL_DEAD)
+       if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
                nvme_mark_namespaces_dead(ctrl);
 
        /* this is a no-op when called from the controller reset handler */
@@ -4076,7 +4086,7 @@ static void nvme_async_event_work(struct work_struct *work)
         * flushing ctrl async_event_work after changing the controller state
         * from LIVE and before freeing the admin queue.
        */
-       if (ctrl->state == NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
                ctrl->ops->submit_async_event(ctrl);
 }
 
@@ -4471,7 +4481,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 {
        int ret;
 
-       ctrl->state = NVME_CTRL_NEW;
+       WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
        clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
        spin_lock_init(&ctrl->lock);
        mutex_init(&ctrl->scan_lock);
@@ -4581,6 +4591,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
        list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_mq_unfreeze_queue(ns->queue);
        up_read(&ctrl->namespaces_rwsem);
+       clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
 }
 EXPORT_SYMBOL_GPL(nvme_unfreeze);
 
@@ -4614,6 +4625,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
+       set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_freeze_queue_start(ns->queue);
index 9f9a3b35dc64d3ea03c6fea85599279e2b46b3da..fb22976a36a89c06deb1c0624e0ed511ae1a59b1 100644 (file)
@@ -557,7 +557,7 @@ nvme_fc_rport_get(struct nvme_fc_rport *rport)
 static void
 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
 {
-       switch (ctrl->ctrl.state) {
+       switch (nvme_ctrl_state(&ctrl->ctrl)) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_CONNECTING:
                /*
@@ -793,7 +793,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
                "NVME-FC{%d}: controller connectivity lost. Awaiting "
                "Reconnect", ctrl->cnum);
 
-       switch (ctrl->ctrl.state) {
+       switch (nvme_ctrl_state(&ctrl->ctrl)) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_LIVE:
                /*
@@ -3319,7 +3319,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
        unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
        bool recon = true;
 
-       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
+       if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
                return;
 
        if (portptr->port_state == FC_OBJSTATE_ONLINE) {
index 529b9954d2b8c0429e6bd7316791fef884121a29..4939ed35638f16a8efdd61e394100ba53c6864c4 100644 (file)
@@ -18,15 +18,12 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
 {
        u32 effects;
 
-       if (capable(CAP_SYS_ADMIN))
-               return true;
-
        /*
         * Do not allow unprivileged passthrough on partitions, as that allows an
         * escape from the containment of the partition.
         */
        if (flags & NVME_IOCTL_PARTITION)
-               return false;
+               goto admin;
 
        /*
         * Do not allow unprivileged processes to send vendor specific or fabrics
@@ -34,7 +31,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
         */
        if (c->common.opcode >= nvme_cmd_vendor_start ||
            c->common.opcode == nvme_fabrics_command)
-               return false;
+               goto admin;
 
        /*
         * Do not allow unprivileged passthrough of admin commands except
@@ -53,7 +50,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
                                return true;
                        }
                }
-               return false;
+               goto admin;
        }
 
        /*
@@ -63,7 +60,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
         */
        effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
        if (!(effects & NVME_CMD_EFFECTS_CSUPP))
-               return false;
+               goto admin;
 
        /*
         * Don't allow passthrough for command that have intrusive (or unknown)
@@ -72,16 +69,20 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
        if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
                        NVME_CMD_EFFECTS_UUID_SEL |
                        NVME_CMD_EFFECTS_SCOPE_MASK))
-               return false;
+               goto admin;
 
        /*
         * Only allow I/O commands that transfer data to the controller or that
         * change the logical block contents if the file descriptor is open for
         * writing.
         */
-       if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
-               return open_for_write;
+       if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
+           !open_for_write)
+               goto admin;
+
        return true;
+admin:
+       return capable(CAP_SYS_ADMIN);
 }
 
 /*
index 39a90b7cb1254e2cbad1318359ff79b186308972..e7411dac00f725cbb97ae4619b43d4371dec5f3a 100644 (file)
@@ -156,6 +156,11 @@ enum nvme_quirks {
         * No temperature thresholds for channels other than 0 (Composite).
         */
        NVME_QUIRK_NO_SECONDARY_TEMP_THRESH     = (1 << 19),
+
+       /*
+        * Disables simple suspend/resume path.
+        */
+       NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND      = (1 << 20),
 };
 
 /*
@@ -251,6 +256,7 @@ enum nvme_ctrl_flags {
        NVME_CTRL_STOPPED               = 3,
        NVME_CTRL_SKIP_ID_CNS_CS        = 4,
        NVME_CTRL_DIRTY_CAPABILITY      = 5,
+       NVME_CTRL_FROZEN                = 6,
 };
 
 struct nvme_ctrl {
@@ -387,6 +393,11 @@ struct nvme_ctrl {
        enum nvme_dctype dctype;
 };
 
+static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
+{
+       return READ_ONCE(ctrl->state);
+}
+
 enum nvme_iopolicy {
        NVME_IOPOLICY_NUMA,
        NVME_IOPOLICY_RR,
index 507bc149046dc8daa1f283458861f3edfcc4aca5..61af7ff1a9d6ba96f56f67ab6cdb3c5b5bf9be3b 100644 (file)
@@ -1233,7 +1233,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
        bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
 
        /* If there is a reset/reinit ongoing, we shouldn't reset again. */
-       switch (dev->ctrl.state) {
+       switch (nvme_ctrl_state(&dev->ctrl)) {
        case NVME_CTRL_RESETTING:
        case NVME_CTRL_CONNECTING:
                return false;
@@ -1321,7 +1321,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
         * cancellation error. All outstanding requests are completed on
         * shutdown, so we return BLK_EH_DONE.
         */
-       switch (dev->ctrl.state) {
+       switch (nvme_ctrl_state(&dev->ctrl)) {
        case NVME_CTRL_CONNECTING:
                nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
                fallthrough;
@@ -1593,7 +1593,7 @@ static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
        /*
         * Controller is in wrong state, fail early.
         */
-       if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+       if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
                mutex_unlock(&dev->shutdown_lock);
                return -ENODEV;
        }
@@ -2573,13 +2573,13 @@ static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        bool dead;
 
        mutex_lock(&dev->shutdown_lock);
        dead = nvme_pci_ctrl_is_dead(dev);
-       if (dev->ctrl.state == NVME_CTRL_LIVE ||
-           dev->ctrl.state == NVME_CTRL_RESETTING) {
+       if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
                if (pci_is_enabled(pdev))
                        nvme_start_freeze(&dev->ctrl);
                /*
@@ -2690,7 +2690,7 @@ static void nvme_reset_work(struct work_struct *work)
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result;
 
-       if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+       if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
                dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
                         dev->ctrl.state);
                result = -ENODEV;
@@ -2902,6 +2902,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
                if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
                     dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
                        return NVME_QUIRK_SIMPLE_SUSPEND;
+       } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
+                  pdev->device == 0x500f)) {
+               /*
+                * Exclude some Kingston NV1 and A2000 devices from
+                * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
+                * lot fo energy with s2idle sleep on some TUXEDO platforms.
+                */
+               if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
+                   dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
+                   dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
+                   dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
+                       return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
        }
 
        return 0;
@@ -2932,7 +2944,9 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
        dev->dev = get_device(&pdev->dev);
 
        quirks |= check_vendor_combination_bug(pdev);
-       if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+       if (!noacpi &&
+           !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
+           acpi_storage_d3(&pdev->dev)) {
                /*
                 * Some systems use a bios work around to ask for D3 on
                 * platforms that support kernel managed suspend.
@@ -3192,7 +3206,7 @@ static int nvme_suspend(struct device *dev)
        nvme_wait_freeze(ctrl);
        nvme_sync_queues(ctrl);
 
-       if (ctrl->state != NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
                goto unfreeze;
 
        /*
index 6d178d5559204dc522bd5513663032134da2a410..81e2621169e5d3597f849074f2952ae1b2eb7f26 100644 (file)
@@ -984,10 +984,11 @@ free_ctrl:
 
 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
        /* If we are resetting/deleting then do nothing */
-       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
-               WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
-                       ctrl->ctrl.state == NVME_CTRL_LIVE);
+       if (state != NVME_CTRL_CONNECTING) {
+               WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
                return;
        }
 
@@ -1059,8 +1060,10 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
                 * unless we're during creation of a new controller to
                 * avoid races with teardown flow.
                 */
-               WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
-                            ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                WARN_ON_ONCE(new);
                ret = -EINVAL;
                goto destroy_io;
@@ -1129,8 +1132,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
-               WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
-                            ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                return;
        }
 
@@ -1162,7 +1167,7 @@ static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
        struct nvme_rdma_queue *queue = wc->qp->qp_context;
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
                dev_info(ctrl->ctrl.device,
                             "%s for CQE 0x%p failed with status %s (%d)\n",
                             op, wc->wr_cqe,
@@ -1945,7 +1950,7 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
        dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
                 rq->tag, nvme_rdma_queue_idx(queue));
 
-       if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+       if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
                /*
                 * If we are resetting, connecting or deleting we should
                 * complete immediately because we may block controller
index d79811cfa0ce88e4d50f57f4e6938e0436eeaa86..08805f0278106483c10b2b9c787aa35c36e4dcbe 100644 (file)
@@ -2152,10 +2152,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 
 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
        /* If we are resetting/deleting then do nothing */
-       if (ctrl->state != NVME_CTRL_CONNECTING) {
-               WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
-                       ctrl->state == NVME_CTRL_LIVE);
+       if (state != NVME_CTRL_CONNECTING) {
+               WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
                return;
        }
 
@@ -2215,8 +2216,10 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
                 * unless we're during creation of a new controller to
                 * avoid races with teardown flow.
                 */
-               WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
-                            ctrl->state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                WARN_ON_ONCE(new);
                ret = -EINVAL;
                goto destroy_io;
@@ -2280,8 +2283,10 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
-               WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
-                            ctrl->state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                return;
        }
 
@@ -2311,8 +2316,10 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
-               WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
-                            ctrl->state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                return;
        }
 
@@ -2430,7 +2437,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
                nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
                opc, nvme_opcode_str(qid, opc, fctype));
 
-       if (ctrl->state != NVME_CTRL_LIVE) {
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
                /*
                 * If we are resetting, connecting or deleting we should
                 * complete immediately because we may block controller
index e1ebc73f3e5e0ed95b9d3de3567aae543a835e55..872dd1a0acd8044fe2b9816d5a27316a57594cb3 100644 (file)
@@ -99,10 +99,11 @@ config NVME_TARGET_TCP_TLS
          If unsure, say N.
 
 config NVME_TARGET_AUTH
-       bool "NVMe over Fabrics In-band Authentication support"
+       bool "NVMe over Fabrics In-band Authentication in target side"
        depends on NVME_TARGET
        select NVME_AUTH
        help
-         This enables support for NVMe over Fabrics In-band Authentication
+         This enables support for NVMe over Fabrics In-band Authentication in
+         target side.
 
          If unsure, say N.
index e307a044b1a1bc1fee0b7ca4a87c3be84a828d15..d937fe05129e4db5604afbe62a40aaaaf9fe6245 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/nvme-keyring.h>
 #include <crypto/hash.h>
 #include <crypto/kpp.h>
+#include <linux/nospec.h>
 
 #include "nvmet.h"
 
@@ -621,6 +622,7 @@ static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
 
        down_write(&nvmet_ana_sem);
        oldgrpid = ns->anagrpid;
+       newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
        nvmet_ana_group_enabled[newgrpid]++;
        ns->anagrpid = newgrpid;
        nvmet_ana_group_enabled[oldgrpid]--;
@@ -1812,6 +1814,7 @@ static struct config_group *nvmet_ana_groups_make_group(
        grp->grpid = grpid;
 
        down_write(&nvmet_ana_sem);
+       grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
        nvmet_ana_group_enabled[grpid]++;
        up_write(&nvmet_ana_sem);
 
index bf42b7e826dbd202298e6db869dc072242167f8f..608b352a7d91fd6b4122517aa747a0fd6f5a30d5 100644 (file)
@@ -796,6 +796,12 @@ static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
        if (!layout_np)
                return NULL;
 
+       /* Fixed layouts don't have a matching driver */
+       if (of_device_is_compatible(layout_np, "fixed-layout")) {
+               of_node_put(layout_np);
+               return NULL;
+       }
+
        /*
         * In case the nvmem device was built-in while the layout was built as a
         * module, we shall manually request the layout driver loading otherwise
index f63250c650cafdae49c7b558d47528a1af632689..3bf27052832f302ac72d366b986629e03db4e900 100644 (file)
@@ -98,8 +98,9 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
  *
  * Returns the new state of a device based on the notifier used.
  *
- * Return: 0 on device going from enabled to disabled, 1 on device
- * going from disabled to enabled and -1 on no change.
+ * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to
+ * disabled, OF_RECONFIG_CHANGE_ADD on device going from disabled to
+ * enabled and OF_RECONFIG_NO_CHANGE on no change.
  */
 int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
 {
index 1f236aaf7867a7165c168735118623aa6639df3a..f33b5d1ddfc16f5f72dfbf32d022e5f29fcebb93 100644 (file)
@@ -2658,6 +2658,8 @@ enum parport_pc_pci_cards {
        asix_ax99100,
        quatech_sppxp100,
        wch_ch382l,
+       brainboxes_uc146,
+       brainboxes_px203,
 };
 
 
@@ -2737,6 +2739,8 @@ static struct parport_pc_pci {
        /* asix_ax99100 */              { 1, { { 0, 1 }, } },
        /* quatech_sppxp100 */          { 1, { { 0, 1 }, } },
        /* wch_ch382l */                { 1, { { 2, -1 }, } },
+       /* brainboxes_uc146 */  { 1, { { 3, -1 }, } },
+       /* brainboxes_px203 */  { 1, { { 0, -1 }, } },
 };
 
 static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2833,6 +2837,23 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
        /* WCH CH382L PCI-E single parallel port card */
        { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
+       /* Brainboxes IX-500/550 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x402a,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+       /* Brainboxes UC-146/UC-157 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x0be1,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+       { PCI_VENDOR_ID_INTASHIELD, 0x0be2,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+       /* Brainboxes PX-146/PX-257 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x401c,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+       /* Brainboxes PX-203 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x4007,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
+       /* Brainboxes PX-475 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x401f,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
        { 0, } /* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
index 6902e97719d1366d754c20f3d28a0482db48500e..11c80555d97543990fdd17cf9326cf58826ab457 100644 (file)
@@ -968,9 +968,12 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
 
 static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
 {
-       /* Downstream devices need to be in D0 state before enabling PCI PM substates */
+       /*
+        * Downstream devices need to be in D0 state before enabling PCI PM
+        * substates.
+        */
        pci_set_power_state(pdev, PCI_D0);
-       pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
+       pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
 
        return 0;
 }
index d45e7b8dc530d6162730ee677371992c36881501..8b34ccff073a99ef5f5f574eec4467d986b6548d 100644 (file)
@@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
                        DEV_LS7A_LPC, system_bus_quirk);
 
+/*
+ * Some Loongson PCIe ports have hardware limitations on their Maximum Read
+ * Request Size. They can't handle anything larger than this.  Sane
+ * firmware will set proper MRRS at boot, so we only need no_inc_mrrs for
+ * bridges. However, some MIPS Loongson firmware doesn't set MRRS properly,
+ * so we have to enforce maximum safe MRRS, which is 256 bytes.
+ */
+#ifdef CONFIG_MIPS
+static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
+{
+       struct pci_bus *bus = pdev->bus;
+       struct pci_dev *bridge;
+       static const struct pci_device_id bridge_devids[] = {
+               { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
+               { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
+               { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
+               { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
+               { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
+               { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
+               { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
+               { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
+               { 0, },
+       };
+
+       /* look for the matching bridge */
+       while (!pci_is_root_bus(bus)) {
+               bridge = bus->self;
+               bus = bus->parent;
+
+               if (pci_match_id(bridge_devids, bridge)) {
+                       if (pcie_get_readrq(pdev) > 256) {
+                               pci_info(pdev, "limiting MRRS to 256\n");
+                               pcie_set_readrq(pdev, 256);
+                       }
+                       break;
+               }
+       }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
+#endif
+
 static void loongson_mrrs_quirk(struct pci_dev *pdev)
 {
-       /*
-        * Some Loongson PCIe ports have h/w limitations of maximum read
-        * request size. They can't handle anything larger than this. So
-        * force this limit on any devices attached under these ports.
-        */
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
 
        bridge->no_inc_mrrs = 1;
index 94ba61fe1c44110b95c8b70543c0eda0ce553031..0452cbc362eef7b8707337a2d6efde15a01e9dcc 100644 (file)
@@ -751,7 +751,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
        if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
                return 0;
 
-       pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
+       pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
 
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
        if (!pos)
index 601129772b2d5021afde6eeddc479506852f4902..5b1f271c6034be045aa446f7e5aacb2d6e3f2e2a 100644 (file)
@@ -512,15 +512,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
                                if (pass && dev->subordinate) {
                                        check_hotplug_bridge(slot, dev);
                                        pcibios_resource_survey_bus(dev->subordinate);
-                                       if (pci_is_root_bus(bus))
-                                               __pci_bus_size_bridges(dev->subordinate, &add_list);
+                                       __pci_bus_size_bridges(dev->subordinate,
+                                                              &add_list);
                                }
                        }
                }
-               if (pci_is_root_bus(bus))
-                       __pci_bus_assign_resources(bus, &add_list, NULL);
-               else
-                       pci_assign_unassigned_bridge_resources(bus->self);
+               __pci_bus_assign_resources(bus, &add_list, NULL);
        }
 
        acpiphp_sanitize_bus(bus);
index 50b04ae5c394e3691d3b100eeed3918cd7be9f4c..5dab531c8654d68eaaf1e0db404e543f0c64497e 100644 (file)
@@ -1041,7 +1041,7 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
        return bridge->link_state;
 }
 
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
 {
        struct pcie_link_state *link = pcie_aspm_get_link(pdev);
 
@@ -1060,7 +1060,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
                return -EPERM;
        }
 
-       if (sem)
+       if (!locked)
                down_read(&pci_bus_sem);
        mutex_lock(&aspm_lock);
        if (state & PCIE_LINK_STATE_L0S)
@@ -1082,7 +1082,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
                link->clkpm_disable = 1;
        pcie_set_clkpm(link, policy_to_clkpm_state(link));
        mutex_unlock(&aspm_lock);
-       if (sem)
+       if (!locked)
                up_read(&pci_bus_sem);
 
        return 0;
@@ -1090,7 +1090,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
 
 int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
 {
-       return __pci_disable_link_state(pdev, state, false);
+       lockdep_assert_held_read(&pci_bus_sem);
+
+       return __pci_disable_link_state(pdev, state, true);
 }
 EXPORT_SYMBOL(pci_disable_link_state_locked);
 
@@ -1105,21 +1107,11 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
  */
 int pci_disable_link_state(struct pci_dev *pdev, int state)
 {
-       return __pci_disable_link_state(pdev, state, true);
+       return __pci_disable_link_state(pdev, state, false);
 }
 EXPORT_SYMBOL(pci_disable_link_state);
 
-/**
- * pci_enable_link_state - Clear and set the default device link state so that
- * the link may be allowed to enter the specified states. Note that if the
- * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
- * touch the LNKCTL register. Also note that this does not enable states
- * disabled by pci_disable_link_state(). Return 0 or a negative errno.
- *
- * @pdev: PCI device
- * @state: Mask of ASPM link states to enable
- */
-int pci_enable_link_state(struct pci_dev *pdev, int state)
+static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
 {
        struct pcie_link_state *link = pcie_aspm_get_link(pdev);
 
@@ -1136,7 +1128,8 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
                return -EPERM;
        }
 
-       down_read(&pci_bus_sem);
+       if (!locked)
+               down_read(&pci_bus_sem);
        mutex_lock(&aspm_lock);
        link->aspm_default = 0;
        if (state & PCIE_LINK_STATE_L0S)
@@ -1157,12 +1150,48 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
        link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
        pcie_set_clkpm(link, policy_to_clkpm_state(link));
        mutex_unlock(&aspm_lock);
-       up_read(&pci_bus_sem);
+       if (!locked)
+               up_read(&pci_bus_sem);
 
        return 0;
 }
+
+/**
+ * pci_enable_link_state - Clear and set the default device link state so that
+ * the link may be allowed to enter the specified states. Note that if the
+ * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
+ * touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ */
+int pci_enable_link_state(struct pci_dev *pdev, int state)
+{
+       return __pci_enable_link_state(pdev, state, false);
+}
 EXPORT_SYMBOL(pci_enable_link_state);
 
+/**
+ * pci_enable_link_state_locked - Clear and set the default device link state
+ * so that the link may be allowed to enter the specified states. Note that if
+ * the BIOS didn't grant ASPM control to the OS, this does nothing because we
+ * can't touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ *
+ * Context: Caller holds pci_bus_sem read lock.
+ */
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{
+       lockdep_assert_held_read(&pci_bus_sem);
+
+       return __pci_enable_link_state(pdev, state, true);
+}
+EXPORT_SYMBOL(pci_enable_link_state_locked);
+
 static int pcie_aspm_set_policy(const char *val,
                                const struct kernel_param *kp)
 {
index 014010d035882375e9c0381ebce08a80af48ddd5..847b0dc41293d2fa47689bf634b34aef60f6b1ab 100644 (file)
@@ -1816,7 +1816,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
                        idx = 0;
                        while (cmn->dtc[j].counters[idx])
                                if (++idx == CMN_DT_NUM_COUNTERS)
-                                       goto free_dtms;
+                                       return -ENOSPC;
                }
                hw->dtc_idx[j] = idx;
        }
index f021ec5a70e5c31082dfd2ce965b474cd6cb5fcb..553725e1269c9d7ad88f89367dfa1cf367aaf1ce 100644 (file)
@@ -100,7 +100,7 @@ static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
 static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
                                       unsigned long *prate)
 {
-       return clamp_val(rate, 50000000, 1600000000);
+       return clamp_val(rate, 125000000, 1600000000);
 }
 
 static const struct clk_ops mtk_mipi_tx_pll_ops = {
index 0efe74ac9c6afc254dd48d9be37e3fc611d8e20f..637a5fbae6d9a9b2538147072cb159fc0a0960d9 100644 (file)
@@ -275,7 +275,7 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
 
        phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops);
        if (IS_ERR(phy)) {
-               ret = -PTR_ERR(phy);
+               ret = PTR_ERR(phy);
                return ret;
        }
 
index 555b323f45da1ee6dc69ef606860786ac5cc16ae..bc847d3879f79c0684693a475a3c4664f2792794 100644 (file)
@@ -64,6 +64,7 @@ struct phy_gmii_sel_priv {
        u32 num_ports;
        u32 reg_offset;
        u32 qsgmii_main_ports;
+       bool no_offset;
 };
 
 static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -402,7 +403,8 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
                priv->num_ports = size / sizeof(u32);
                if (!priv->num_ports)
                        return -EINVAL;
-               priv->reg_offset = __be32_to_cpu(*offset);
+               if (!priv->no_offset)
+                       priv->reg_offset = __be32_to_cpu(*offset);
        }
 
        if_phys = devm_kcalloc(dev, priv->num_ports,
@@ -471,6 +473,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
                        dev_err(dev, "Failed to get syscon %d\n", ret);
                        return ret;
                }
+               priv->no_offset = true;
        }
 
        ret = phy_gmii_sel_init_ports(priv);
index 1ac7dab22c637e9d6bffab03c75217c03b7e51de..c1aef3a8fb2de28cd78bb0a86e979eb8ea2d580b 100644 (file)
@@ -20,6 +20,7 @@
 
 #define MLXBF_BOOTCTL_SB_SECURE_MASK           0x03
 #define MLXBF_BOOTCTL_SB_TEST_MASK             0x0c
+#define MLXBF_BOOTCTL_SB_DEV_MASK              BIT(4)
 
 #define MLXBF_SB_KEY_NUM                       4
 
@@ -40,11 +41,18 @@ static struct mlxbf_bootctl_name boot_names[] = {
        { MLXBF_BOOTCTL_NONE, "none" },
 };
 
+enum {
+       MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION = 0,
+       MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE = 1,
+       MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE = 2,
+       MLXBF_BOOTCTL_SB_LIFECYCLE_RMA = 3
+};
+
 static const char * const mlxbf_bootctl_lifecycle_states[] = {
-       [0] = "Production",
-       [1] = "GA Secured",
-       [2] = "GA Non-Secured",
-       [3] = "RMA",
+       [MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION] = "Production",
+       [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE] = "GA Secured",
+       [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE] = "GA Non-Secured",
+       [MLXBF_BOOTCTL_SB_LIFECYCLE_RMA] = "RMA",
 };
 
 /* Log header format. */
@@ -247,25 +255,30 @@ static ssize_t second_reset_action_store(struct device *dev,
 static ssize_t lifecycle_state_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
+       int status_bits;
+       int use_dev_key;
+       int test_state;
        int lc_state;
 
-       lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
-                                    MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
-       if (lc_state < 0)
-               return lc_state;
+       status_bits = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+                                       MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+       if (status_bits < 0)
+               return status_bits;
 
-       lc_state &=
-               MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
+       use_dev_key = status_bits & MLXBF_BOOTCTL_SB_DEV_MASK;
+       test_state = status_bits & MLXBF_BOOTCTL_SB_TEST_MASK;
+       lc_state = status_bits & MLXBF_BOOTCTL_SB_SECURE_MASK;
 
        /*
         * If the test bits are set, we specify that the current state may be
         * due to using the test bits.
         */
-       if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
-               lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
-
+       if (test_state) {
                return sprintf(buf, "%s(test)\n",
                               mlxbf_bootctl_lifecycle_states[lc_state]);
+       } else if (use_dev_key &&
+                  (lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
+               return sprintf(buf, "Secured (development)\n");
        }
 
        return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
index 0b427fc24a96e76986dc15f35370469305e385c9..1dd84c7a79de97f44b25c48fd241db068492b4f9 100644 (file)
@@ -1771,6 +1771,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
        attr->dev_attr.show = mlxbf_pmc_event_list_show;
        attr->nr = blk_num;
        attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
+       if (!attr->dev_attr.attr.name)
+               return -ENOMEM;
        pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
        attr = NULL;
 
@@ -1784,6 +1786,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          "enable");
+               if (!attr->dev_attr.attr.name)
+                       return -ENOMEM;
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
                attr = NULL;
        }
@@ -1810,6 +1814,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          "counter%d", j);
+               if (!attr->dev_attr.attr.name)
+                       return -ENOMEM;
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
                attr = NULL;
 
@@ -1821,6 +1827,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          "event%d", j);
+               if (!attr->dev_attr.attr.name)
+                       return -ENOMEM;
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
                attr = NULL;
        }
@@ -1853,6 +1861,8 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          events[j].evt_name);
+               if (!attr->dev_attr.attr.name)
+                       return -ENOMEM;
                pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
                attr = NULL;
                i++;
@@ -1882,6 +1892,8 @@ static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
        pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
        pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
                dev, GFP_KERNEL, pmc->block_name[blk_num]);
+       if (!pmc->block[blk_num].block_attr_grp.name)
+               return -ENOMEM;
        pmc->groups[pmc->group_num] = &pmc->block[blk_num].block_attr_grp;
        pmc->group_num++;
 
@@ -2063,6 +2075,8 @@ static int mlxbf_pmc_probe(struct platform_device *pdev)
 
        pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
                dev, "bfperf", pmc, pmc->groups);
+       if (IS_ERR(pmc->hwmon_dev))
+               return PTR_ERR(pmc->hwmon_dev);
        platform_set_drvdata(pdev, pmc);
 
        return 0;
index 1a6373dea109cc2b63ad887bb0adb08cadd4a7e5..6152be38398c48feac4b48a5084faea7448365c1 100644 (file)
@@ -231,9 +231,12 @@ static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
                            size_t n)
 {
        struct ssam_controller *ctrl;
+       int ret;
 
        ctrl = serdev_device_get_drvdata(dev);
-       return ssam_controller_receive_buf(ctrl, buf, n);
+       ret = ssam_controller_receive_buf(ctrl, buf, n);
+
+       return ret < 0 ? 0 : ret;
 }
 
 static void ssam_write_wakeup(struct serdev_device *dev)
index 7e69fdaccdd5357cebe8ad44aa7cb5f17c714690..c94f31a5c6a3364707f82b4ec64b193940f2d196 100644 (file)
@@ -263,6 +263,7 @@ config ASUS_WMI
        depends on RFKILL || RFKILL = n
        depends on HOTPLUG_PCI
        depends on ACPI_VIDEO || ACPI_VIDEO = n
+       depends on SERIO_I8042 || SERIO_I8042 = n
        select INPUT_SPARSEKMAP
        select LEDS_CLASS
        select NEW_LEDS
@@ -279,7 +280,6 @@ config ASUS_WMI
 config ASUS_NB_WMI
        tristate "Asus Notebook WMI Driver"
        depends on ASUS_WMI
-       depends on SERIO_I8042 || SERIO_I8042 = n
        help
          This is a driver for newer Asus notebooks. It adds extra features
          like wireless radio and bluetooth control, leds, hotkeys, backlight...
index 9aa1226e74e6c56d3f19634a836027d4ed508577..fceffe2082ec582d6c432d372de5f05cc298e9cc 100644 (file)
@@ -48,25 +48,43 @@ module_param(tablet_mode_sw, uint, 0444);
 MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip 3:lid-flip-rog");
 
 static struct quirk_entry *quirks;
+static bool atkbd_reports_vol_keys;
 
-static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
-                             struct serio *port)
+static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port)
 {
-       static bool extended;
-       bool ret = false;
+       static bool extended_e0;
+       static bool extended_e1;
 
        if (str & I8042_STR_AUXDATA)
                return false;
 
-       if (unlikely(data == 0xe1)) {
-               extended = true;
-               ret = true;
-       } else if (unlikely(extended)) {
-               extended = false;
-               ret = true;
+       if (quirks->filter_i8042_e1_extended_codes) {
+               if (data == 0xe1) {
+                       extended_e1 = true;
+                       return true;
+               }
+
+               if (extended_e1) {
+                       extended_e1 = false;
+                       return true;
+               }
        }
 
-       return ret;
+       if (data == 0xe0) {
+               extended_e0 = true;
+       } else if (extended_e0) {
+               extended_e0 = false;
+
+               switch (data & 0x7f) {
+               case 0x20: /* e0 20 / e0 a0, Volume Mute press / release */
+               case 0x2e: /* e0 2e / e0 ae, Volume Down press / release */
+               case 0x30: /* e0 30 / e0 b0, Volume Up press / release */
+                       atkbd_reports_vol_keys = true;
+                       break;
+               }
+       }
+
+       return false;
 }
 
 static struct quirk_entry quirk_asus_unknown = {
@@ -75,7 +93,7 @@ static struct quirk_entry quirk_asus_unknown = {
 };
 
 static struct quirk_entry quirk_asus_q500a = {
-       .i8042_filter = asus_q500a_i8042_filter,
+       .filter_i8042_e1_extended_codes = true,
        .wmi_backlight_set_devstate = true,
 };
 
@@ -503,8 +521,6 @@ static const struct dmi_system_id asus_quirks[] = {
 
 static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
 {
-       int ret;
-
        quirks = &quirk_asus_unknown;
        dmi_check_system(asus_quirks);
 
@@ -519,15 +535,6 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
 
        if (tablet_mode_sw != -1)
                quirks->tablet_switch_mode = tablet_mode_sw;
-
-       if (quirks->i8042_filter) {
-               ret = i8042_install_filter(quirks->i8042_filter);
-               if (ret) {
-                       pr_warn("Unable to install key filter\n");
-                       return;
-               }
-               pr_info("Using i8042 filter function for receiving events\n");
-       }
 }
 
 static const struct key_entry asus_nb_wmi_keymap[] = {
@@ -617,6 +624,13 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
                if (acpi_video_handles_brightness_key_presses())
                        *code = ASUS_WMI_KEY_IGNORE;
 
+               break;
+       case 0x30: /* Volume Up */
+       case 0x31: /* Volume Down */
+       case 0x32: /* Volume Mute */
+               if (atkbd_reports_vol_keys)
+                       *code = ASUS_WMI_KEY_IGNORE;
+
                break;
        }
 }
@@ -630,6 +644,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
        .input_phys = ASUS_NB_WMI_FILE "/input0",
        .detect_quirks = asus_nb_wmi_quirks,
        .key_filter = asus_nb_wmi_key_filter,
+       .i8042_filter = asus_i8042_filter,
 };
 
 
index 6a79f16233abf737d6242bd938af791f007793ac..9f7e23c5c6b4da2c5d58e092c40d4ac716ee3317 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/acpi.h>
 #include <linux/backlight.h>
 #include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/fb.h>
 #include <linux/hwmon.h>
@@ -132,6 +133,11 @@ module_param(fnlock_default, bool, 0444);
 #define ASUS_SCREENPAD_BRIGHT_MAX 255
 #define ASUS_SCREENPAD_BRIGHT_DEFAULT 60
 
+/* Controls the power state of the USB0 hub on ROG Ally which input is on */
+#define ASUS_USB0_PWR_EC0_CSEE "\\_SB.PCI0.SBRG.EC0.CSEE"
+/* 300ms so far seems to produce a reliable result on AC and battery */
+#define ASUS_USB0_PWR_EC0_CSEE_WAIT 300
+
 static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
 
 static int throttle_thermal_policy_write(struct asus_wmi *);
@@ -300,6 +306,9 @@ struct asus_wmi {
 
        bool fnlock_locked;
 
+       /* The ROG Ally device requires the MCU USB device be disconnected before suspend */
+       bool ally_mcu_usb_switch;
+
        struct asus_wmi_debug debug;
 
        struct asus_wmi_driver *driver;
@@ -4488,6 +4497,8 @@ static int asus_wmi_add(struct platform_device *pdev)
        asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
        asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
        asus->mini_led_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
+       asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
+                                               && dmi_match(DMI_BOARD_NAME, "RC71L");
 
        err = fan_boost_mode_check_present(asus);
        if (err)
@@ -4567,6 +4578,12 @@ static int asus_wmi_add(struct platform_device *pdev)
                goto fail_wmi_handler;
        }
 
+       if (asus->driver->i8042_filter) {
+               err = i8042_install_filter(asus->driver->i8042_filter);
+               if (err)
+                       pr_warn("Unable to install key filter - %d\n", err);
+       }
+
        asus_wmi_battery_init(asus);
 
        asus_wmi_debugfs_init(asus);
@@ -4603,6 +4620,8 @@ static int asus_wmi_remove(struct platform_device *device)
        struct asus_wmi *asus;
 
        asus = platform_get_drvdata(device);
+       if (asus->driver->i8042_filter)
+               i8042_remove_filter(asus->driver->i8042_filter);
        wmi_remove_notify_handler(asus->driver->event_guid);
        asus_wmi_backlight_exit(asus);
        asus_screenpad_exit(asus);
@@ -4654,6 +4673,43 @@ static int asus_hotk_resume(struct device *device)
                asus_wmi_fnlock_update(asus);
 
        asus_wmi_tablet_mode_get_state(asus);
+
+       return 0;
+}
+
+static int asus_hotk_resume_early(struct device *device)
+{
+       struct asus_wmi *asus = dev_get_drvdata(device);
+
+       if (asus->ally_mcu_usb_switch) {
+               if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB8)))
+                       dev_err(device, "ROG Ally MCU failed to connect USB dev\n");
+               else
+                       msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
+       }
+       return 0;
+}
+
+static int asus_hotk_prepare(struct device *device)
+{
+       struct asus_wmi *asus = dev_get_drvdata(device);
+       int result, err;
+
+       if (asus->ally_mcu_usb_switch) {
+               /* When powersave is enabled it causes many issues with resume of USB hub */
+               result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MCU_POWERSAVE);
+               if (result == 1) {
+                       dev_warn(device, "MCU powersave enabled, disabling to prevent resume issues");
+                       err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MCU_POWERSAVE, 0, &result);
+                       if (err || result != 1)
+                               dev_err(device, "Failed to set MCU powersave mode: %d\n", err);
+               }
+               /* sleep required to ensure USB0 is disabled before sleep continues */
+               if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB7)))
+                       dev_err(device, "ROG Ally MCU failed to disconnect USB dev\n");
+               else
+                       msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
+       }
        return 0;
 }
 
@@ -4701,6 +4757,8 @@ static const struct dev_pm_ops asus_pm_ops = {
        .thaw = asus_hotk_thaw,
        .restore = asus_hotk_restore,
        .resume = asus_hotk_resume,
+       .resume_early = asus_hotk_resume_early,
+       .prepare = asus_hotk_prepare,
 };
 
 /* Registration ***************************************************************/
index adb67c92572487856bd0834b6345513dbbdfe6ae..cc30f185384723f65f383b78b0075128406fc6ae 100644 (file)
@@ -39,6 +39,7 @@ struct quirk_entry {
        bool wmi_backlight_set_devstate;
        bool wmi_force_als_set;
        bool wmi_ignore_fan;
+       bool filter_i8042_e1_extended_codes;
        enum asus_wmi_tablet_switch_mode tablet_switch_mode;
        int wapf;
        /*
@@ -49,9 +50,6 @@ struct quirk_entry {
         */
        int no_display_toggle;
        u32 xusb2pr;
-
-       bool (*i8042_filter)(unsigned char data, unsigned char str,
-                            struct serio *serio);
 };
 
 struct asus_wmi_driver {
@@ -73,6 +71,9 @@ struct asus_wmi_driver {
         * Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. */
        void (*key_filter) (struct asus_wmi_driver *driver, int *code,
                            unsigned int *value, bool *autorelease);
+       /* Optional standard i8042 filter */
+       bool (*i8042_filter)(unsigned char data, unsigned char str,
+                            struct serio *serio);
 
        int (*probe) (struct platform_device *device);
        void (*detect_quirks) (struct asus_wmi_driver *driver);
index 6fa1735ad7a49ad64ecc80033d2b1ec564b1114f..210b0a81b7ecbe3ec28499c3c8dbd52cbbf1c3fb 100644 (file)
@@ -73,10 +73,10 @@ struct intel_vbtn_priv {
        bool wakeup_mode;
 };
 
-static void detect_tablet_mode(struct platform_device *device)
+static void detect_tablet_mode(struct device *dev)
 {
-       struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
-       acpi_handle handle = ACPI_HANDLE(&device->dev);
+       struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+       acpi_handle handle = ACPI_HANDLE(dev);
        unsigned long long vgbs;
        acpi_status status;
        int m;
@@ -89,6 +89,8 @@ static void detect_tablet_mode(struct platform_device *device)
        input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
        m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
        input_report_switch(priv->switches_dev, SW_DOCK, m);
+
+       input_sync(priv->switches_dev);
 }
 
 /*
@@ -134,7 +136,7 @@ static int intel_vbtn_input_setup(struct platform_device *device)
        priv->switches_dev->id.bustype = BUS_HOST;
 
        if (priv->has_switches) {
-               detect_tablet_mode(device);
+               detect_tablet_mode(&device->dev);
 
                ret = input_register_device(priv->switches_dev);
                if (ret)
@@ -198,6 +200,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
        autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
 
        sparse_keymap_report_event(input_dev, event, val, autorelease);
+
+       /* Some devices need this to report further events */
+       acpi_evaluate_object(handle, "VBDL", NULL, NULL);
 }
 
 /*
@@ -352,7 +357,13 @@ static void intel_vbtn_pm_complete(struct device *dev)
 
 static int intel_vbtn_pm_resume(struct device *dev)
 {
+       struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
        intel_vbtn_pm_complete(dev);
+
+       if (priv->has_switches)
+               detect_tablet_mode(dev);
+
        return 0;
 }
 
index 4dfdbfca684172cbd757f9e26829e16e56d58f6d..c66808601fdd0801ec01573a34f1f4ca544a658c 100644 (file)
@@ -590,6 +590,8 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
  * @ips: IPS driver struct
  *
  * Check whether the MCP is over its thermal or power budget.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
  */
 static bool mcp_exceeded(struct ips_driver *ips)
 {
@@ -619,6 +621,8 @@ static bool mcp_exceeded(struct ips_driver *ips)
  * @cpu: CPU number to check
  *
  * Check a given CPU's average temp or power is over its limit.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
  */
 static bool cpu_exceeded(struct ips_driver *ips, int cpu)
 {
@@ -645,6 +649,8 @@ static bool cpu_exceeded(struct ips_driver *ips, int cpu)
  * @ips: IPS driver struct
  *
  * Check the MCH temp & power against their maximums.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
  */
 static bool mch_exceeded(struct ips_driver *ips)
 {
@@ -742,12 +748,13 @@ static void update_turbo_limits(struct ips_driver *ips)
  *   - down (at TDP limit)
  *     - adjust both CPU and GPU down if possible
  *
              cpu+ gpu+       cpu+gpu-        cpu-gpu+        cpu-gpu-
-cpu < gpu <    cpu+gpu+        cpu+            gpu+            nothing
-cpu < gpu >=   cpu+gpu-(mcp<)  cpu+gpu-(mcp<)  gpu-            gpu-
-cpu >= gpu <   cpu-gpu+(mcp<)  cpu-            cpu-gpu+(mcp<)  cpu-
-cpu >= gpu >=  cpu-gpu-        cpu-gpu-        cpu-gpu-        cpu-gpu-
*              |cpu+ gpu+      cpu+gpu-        cpu-gpu+        cpu-gpu-
+ * cpu < gpu <  |cpu+gpu+       cpu+            gpu+            nothing
+ * cpu < gpu >= |cpu+gpu-(mcp<) cpu+gpu-(mcp<)  gpu-            gpu-
+ * cpu >= gpu < |cpu-gpu+(mcp<) cpu-            cpu-gpu+(mcp<)  cpu-
+ * cpu >= gpu >=|cpu-gpu-       cpu-gpu-        cpu-gpu-        cpu-gpu-
  *
+ * Returns: %0
  */
 static int ips_adjust(void *data)
 {
@@ -935,11 +942,13 @@ static void monitor_timeout(struct timer_list *t)
  * @data: ips driver structure
  *
  * This is the main function for the IPS driver.  It monitors power and
- * tempurature in the MCP and adjusts CPU and GPU power clams accordingly.
+ * temperature in the MCP and adjusts CPU and GPU power clamps accordingly.
  *
- * We keep a 5s moving average of power consumption and tempurature.  Using
+ * We keep a 5s moving average of power consumption and temperature.  Using
  * that data, along with CPU vs GPU preference, we adjust the power clamps
  * up or down.
+ *
+ * Returns: %0 on success or -errno on error
  */
 static int ips_monitor(void *data)
 {
@@ -1146,6 +1155,8 @@ static void dump_thermal_info(struct ips_driver *ips)
  * Handle temperature limit trigger events, generally by lowering the clamps.
  * If we're at a critical limit, we clamp back to the lowest possible value
  * to prevent emergency shutdown.
+ *
+ * Returns: IRQ_NONE or IRQ_HANDLED
  */
 static irqreturn_t ips_irq_handler(int irq, void *arg)
 {
@@ -1293,9 +1304,12 @@ static void ips_debugfs_init(struct ips_driver *ips)
 
 /**
  * ips_detect_cpu - detect whether CPU supports IPS
+ * @ips: IPS driver struct
  *
  * Walk our list and see if we're on a supported CPU.  If we find one,
  * return the limits for it.
+ *
+ * Returns: the &ips_mcp_limits struct that matches the boot CPU or %NULL
  */
 static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
 {
@@ -1352,6 +1366,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
  * monitor and control graphics turbo mode.  If we can find them, we can
  * enable graphics turbo, otherwise we must disable it to avoid exceeding
  * thermal and power limits in the MCP.
+ *
+ * Returns: %true if the required symbols are found, else %false
  */
 static bool ips_get_i915_syms(struct ips_driver *ips)
 {
index d0b5fd4137bcdc76826832744cb0737a1cd6b672..3392ae99ac3f476343a479b62516f20133ddfbe3 100644 (file)
@@ -512,10 +512,10 @@ struct tpacpi_quirk {
  * Iterates over a quirks list until one is found that matches the
  * ThinkPad's vendor, BIOS and EC model.
  *
- * Returns 0 if nothing matches, otherwise returns the quirks field of
+ * Returns: %0 if nothing matches, otherwise returns the quirks field of
  * the matching &struct tpacpi_quirk entry.
  *
- * The match criteria is: vendor, ec and bios much match.
+ * The match criteria is: vendor, ec and bios must match.
  */
 static unsigned long __init tpacpi_check_quirks(
                        const struct tpacpi_quirk *qlist,
@@ -9303,7 +9303,7 @@ static struct tpacpi_battery_driver_data battery_info;
 
 /* ACPI helpers/functions/probes */
 
-/**
+/*
  * This evaluates a ACPI method call specific to the battery
  * ACPI extension. The specifics are that an error is marked
  * in the 32rd bit of the response, so we just check that here.
index 5c27b4aa969049ce5c6c4f97fc2ba08a41dde7b5..5dd22258cb3bc2b350331ce07431b8ecf6c45734 100644 (file)
@@ -1340,6 +1340,11 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
                if (debug_dump_wdg)
                        wmi_dump_wdg(&gblock[i]);
 
+               if (!gblock[i].instance_count) {
+                       dev_info(wmi_bus_dev, FW_INFO "%pUL has zero instances\n", &gblock[i].guid);
+                       continue;
+               }
+
                if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
                        continue;
 
index 8a2f18fa3faf510bed9e140815e45fe7306eeac4..9193c3b8edebe9bf0cebc9a0d9d7084518a91c2d 100644 (file)
@@ -140,6 +140,8 @@ static void pd_release(struct dtpm *dtpm)
        if (policy) {
                for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
                        per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
+
+               cpufreq_cpu_put(policy);
        }
        
        kfree(dtpm_cpu);
@@ -191,12 +193,16 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
                return 0;
 
        pd = em_cpu_get(cpu);
-       if (!pd || em_is_artificial(pd))
-               return -EINVAL;
+       if (!pd || em_is_artificial(pd)) {
+               ret = -EINVAL;
+               goto release_policy;
+       }
 
        dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
-       if (!dtpm_cpu)
-               return -ENOMEM;
+       if (!dtpm_cpu) {
+               ret = -ENOMEM;
+               goto release_policy;
+       }
 
        dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
        dtpm_cpu->cpu = cpu;
@@ -216,6 +222,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
        if (ret)
                goto out_dtpm_unregister;
 
+       cpufreq_cpu_put(policy);
        return 0;
 
 out_dtpm_unregister:
@@ -227,6 +234,8 @@ out_kfree_dtpm_cpu:
                per_cpu(dtpm_per_cpu, cpu) = NULL;
        kfree(dtpm_cpu);
 
+release_policy:
+       cpufreq_cpu_put(policy);
        return ret;
 }
 
index 9777babd5b95cd9fc3a4388745840ab1a7e2bbdc..ab30667f4f951c0d46ccbeddf365437652c269ba 100644 (file)
@@ -155,6 +155,8 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
        pc->chip.ops = &bcm2835_pwm_ops;
        pc->chip.npwm = 2;
 
+       platform_set_drvdata(pdev, pc);
+
        ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
        if (ret < 0)
                return dev_err_probe(&pdev->dev, ret,
index 73b6ac0c01f549a1ab20fcee5172b42a0232a35a..7d5a155073c6271866e0cf3ce9fff1e3e30c1669 100644 (file)
@@ -1678,7 +1678,6 @@ struct aac_dev
        u32                     handle_pci_error;
        bool                    init_reset;
        u8                      soft_reset_support;
-       u8                      use_map_queue;
 };
 
 #define aac_adapter_interrupt(dev) \
index 013a9a334972ebd3b8578d83c19639627cd8b38d..25cee03d7f9737f9314a9497b7369439ca5cbd61 100644 (file)
@@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
 struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
 {
        struct fib *fibptr;
-       u32 blk_tag;
-       int i;
 
-       blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
-       i = blk_mq_unique_tag_to_tag(blk_tag);
-       fibptr = &dev->fibs[i];
+       fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
        /*
         *      Null out fields that depend on being zero at the start of
         *      each I/O
index c4a36c0be527cd86b04713dbfcab4c1206e30618..68f4dbcfff49250a632a7c1e2e10f74e303a0e19 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <linux/compat.h>
 #include <linux/blkdev.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -505,15 +504,6 @@ common_config:
        return 0;
 }
 
-static void aac_map_queues(struct Scsi_Host *shost)
-{
-       struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
-
-       blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-                             aac->pdev, 0);
-       aac->use_map_queue = true;
-}
-
 /**
  *     aac_change_queue_depth          -       alter queue depths
  *     @sdev:  SCSI device we are considering
@@ -1498,7 +1488,6 @@ static const struct scsi_host_template aac_driver_template = {
        .bios_param                     = aac_biosparm,
        .shost_groups                   = aac_host_groups,
        .slave_configure                = aac_slave_configure,
-       .map_queues                     = aac_map_queues,
        .change_queue_depth             = aac_change_queue_depth,
        .sdev_groups                    = aac_dev_groups,
        .eh_abort_handler               = aac_eh_abort,
@@ -1786,8 +1775,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        shost->max_lun = AAC_MAX_LUN;
 
        pci_set_drvdata(pdev, shost);
-       shost->nr_hw_queues = aac->max_msix;
-       shost->host_tagset = 1;
 
        error = scsi_add_host(shost, &pdev->dev);
        if (error)
@@ -1919,7 +1906,6 @@ static void aac_remove_one(struct pci_dev *pdev)
        struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
 
        aac_cancel_rescan_worker(aac);
-       aac->use_map_queue = false;
        scsi_remove_host(shost);
 
        __aac_shutdown(aac);
index 61949f3741886ba1b439292b473bf27d50d1b227..11ef58204e96f179227c166433eb4932ed7dbafa 100644 (file)
@@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib)
 #endif
 
        u16 vector_no;
-       struct scsi_cmnd *scmd;
-       u32 blk_tag;
-       struct Scsi_Host *shost = dev->scsi_host_ptr;
-       struct blk_mq_queue_map *qmap;
 
        atomic_inc(&q->numpending);
 
@@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib)
                if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
                        && dev->sa_firmware)
                        vector_no = aac_get_vector(dev);
-               else {
-                       if (!fib->vector_no || !fib->callback_data) {
-                               if (shost && dev->use_map_queue) {
-                                       qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
-                                       vector_no = qmap->mq_map[raw_smp_processor_id()];
-                               }
-                               /*
-                                *      We hardcode the vector_no for
-                                *      reserved commands as a valid shost is
-                                *      absent during the init
-                                */
-                               else
-                                       vector_no = 0;
-                       } else {
-                               scmd = (struct scsi_cmnd *)fib->callback_data;
-                               blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
-                               vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
-                       }
-               }
+               else
+                       vector_no = fib->vector_no;
 
                if (native_hba) {
                        if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
index e48f14ad6dfd89293375c1402458b852e240f7e7..06acb5ff609ee719b0c72439dcd30d79468b32c6 100644 (file)
@@ -2710,6 +2710,7 @@ init_wrb_hndl_failed:
                kfree(pwrb_context->pwrb_handle_base);
                kfree(pwrb_context->pwrb_handle_basestd);
        }
+       kfree(phwi_ctxt->be_wrbq);
        return -ENOMEM;
 }
 
index 82672fcbc2aa27b7c6c252afcb73a0e46d33f57f..8280baa3254b0ba93cb8a80dc14f29fb0af3b375 100644 (file)
@@ -23,8 +23,9 @@
 static void intel_shim_vs_init(struct sdw_intel *sdw)
 {
        void __iomem *shim_vs = sdw->link_res->shim_vs;
-       u16 act = 0;
+       u16 act;
 
+       act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
        u16p_replace_bits(&act, 0x1, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
        act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
        act |=  SDW_SHIM2_INTEL_VS_ACTMCTL_DODS;
index 69719b335bcb1e4c9427d6c5331b5f5597e96725..f048b3d55b2edcb32269369aa7c2b5dda07414ea 100644 (file)
@@ -742,14 +742,15 @@ error_1:
  * sdw_ml_sync_bank_switch: Multilink register bank switch
  *
  * @bus: SDW bus instance
+ * @multi_link: whether this is a multi-link stream with hardware-based sync
  *
  * Caller function should free the buffers on error
  */
-static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
+static int sdw_ml_sync_bank_switch(struct sdw_bus *bus, bool multi_link)
 {
        unsigned long time_left;
 
-       if (!bus->multi_link)
+       if (!multi_link)
                return 0;
 
        /* Wait for completion of transfer */
@@ -847,7 +848,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
                        bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
 
                /* Check if bank switch was successful */
-               ret = sdw_ml_sync_bank_switch(bus);
+               ret = sdw_ml_sync_bank_switch(bus, multi_link);
                if (ret < 0) {
                        dev_err(bus->dev,
                                "multi link bank switch failed: %d\n", ret);
index 6aa8adbe4170cdbea30b64bd8a4011c8f47f128d..bad34998454a80ff36a8ee69fb2368e46888e2e3 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
 #include <trace/events/spi.h>
 
 /* SPI register offsets */
  */
 #define DMA_MIN_BYTES  16
 
-#define SPI_DMA_MIN_TIMEOUT    (msecs_to_jiffies(1000))
-#define SPI_DMA_TIMEOUT_PER_10K        (msecs_to_jiffies(4))
-
 #define AUTOSUSPEND_TIMEOUT    2000
 
 struct atmel_spi_caps {
@@ -279,6 +277,7 @@ struct atmel_spi {
        bool                    keep_cs;
 
        u32                     fifo_size;
+       bool                    last_polarity;
        u8                      native_cs_free;
        u8                      native_cs_for_gpio;
 };
@@ -291,6 +290,22 @@ struct atmel_spi_device {
 #define SPI_MAX_DMA_XFER       65535 /* true for both PDC and DMA */
 #define INVALID_DMA_ADDRESS    0xffffffff
 
+/*
+ * This frequency can be anything supported by the controller, but to avoid
+ * unnecessary delay, the highest possible frequency is chosen.
+ *
+ * This frequency is the highest possible which is not interfering with other
+ * chip select registers (see Note for Serial Clock Bit Rate configuration in
+ * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283)
+ */
+#define DUMMY_MSG_FREQUENCY    0x02
+/*
+ * 8 bits is the minimum data the controller is capable of sending.
+ *
+ * This message can be anything as it should not be treated by any SPI device.
+ */
+#define DUMMY_MSG              0xAA
+
 /*
  * Version 2 of the SPI controller has
  *  - CR.LASTXFER
@@ -304,6 +319,43 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
        return as->caps.is_spi2;
 }
 
+/*
+ * Send a dummy message.
+ *
+ * This is sometimes needed when using a CS GPIO to force clock transition when
+ * switching between devices with different polarities.
+ */
+static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select)
+{
+       u32 status;
+       u32 csr;
+
+       /*
+        * Set a clock frequency to allow sending message on SPI bus.
+        * The frequency here can be anything, but is needed for
+        * the controller to send the data.
+        */
+       csr = spi_readl(as, CSR0 + 4 * chip_select);
+       csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr);
+       spi_writel(as, CSR0 + 4 * chip_select, csr);
+
+       /*
+        * Read all data coming from SPI bus, needed to be able to send
+        * the message.
+        */
+       spi_readl(as, RDR);
+       while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
+               spi_readl(as, RDR);
+               cpu_relax();
+       }
+
+       spi_writel(as, TDR, DUMMY_MSG);
+
+       readl_poll_timeout_atomic(as->regs + SPI_SR, status,
+                                 (status & SPI_BIT(TXEMPTY)), 1, 1000);
+}
+
+
 /*
  * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
  * they assume that spi slave device state will not change on deselect, so
@@ -320,11 +372,17 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
  * Master on Chip Select 0.")  No workaround exists for that ... so for
  * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
  * and (c) will trigger that first erratum in some cases.
+ *
+ * When changing the clock polarity, the SPI controller waits for the next
+ * transmission to enforce the default clock state. This may be an issue when
+ * using a GPIO as Chip Select: the clock level is applied only when the first
+ * packet is sent, once the CS has already been asserted. The workaround is to
+ * avoid this by sending a first (dummy) message before toggling the CS state.
  */
-
 static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
 {
        struct atmel_spi_device *asd = spi->controller_state;
+       bool new_polarity;
        int chip_select;
        u32 mr;
 
@@ -353,6 +411,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
                }
 
                mr = spi_readl(as, MR);
+
+               /*
+                * Ensures the clock polarity is valid before we actually
+                * assert the CS to avoid spurious clock edges to be
+                * processed by the spi devices.
+                */
+               if (spi_get_csgpiod(spi, 0)) {
+                       new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0;
+                       if (new_polarity != as->last_polarity) {
+                               /*
+                                * Need to disable the GPIO before sending the dummy
+                                * message because it is already set by the spi core.
+                                */
+                               gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0);
+                               atmel_spi_send_dummy(as, spi, chip_select);
+                               as->last_polarity = new_polarity;
+                               gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1);
+                       }
+               }
        } else {
                u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
                int i;
@@ -1336,12 +1413,10 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
                }
 
                dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
-               ret_timeout = wait_for_completion_interruptible_timeout(&as->xfer_completion,
-                                                                       dma_timeout);
-               if (ret_timeout <= 0) {
-                       dev_err(&spi->dev, "spi transfer %s\n",
-                               !ret_timeout ? "timeout" : "canceled");
-                       as->done_status = ret_timeout < 0 ? ret_timeout : -EIO;
+               ret_timeout = wait_for_completion_timeout(&as->xfer_completion, dma_timeout);
+               if (!ret_timeout) {
+                       dev_err(&spi->dev, "spi transfer timeout\n");
+                       as->done_status = -EIO;
                }
 
                if (as->done_status)
index 1f2f8c717df61f8756b209ada178709f3808f3c6..a50eb4db79de8e93cb61a9ea50bc8913ed3e4f1f 100644 (file)
@@ -451,7 +451,6 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
                udelay(10);
 
        cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
-       spi_transfer_delay_exec(transfer);
 
        cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
        return transfer->len;
index 498e35c8db2c1d733cc33a197e5cb04d06fd6c43..272bc871a848b833e6e673740f4be5f8f3a16294 100644 (file)
@@ -659,11 +659,18 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
                ctrl |= (spi_imx->target_burst * 8 - 1)
                        << MX51_ECSPI_CTRL_BL_OFFSET;
        else {
-               if (spi_imx->count >= 512)
-                       ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
-               else
-                       ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+               if (spi_imx->usedma) {
+                       ctrl |= (spi_imx->bits_per_word *
+                               spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1)
                                << MX51_ECSPI_CTRL_BL_OFFSET;
+               } else {
+                       if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
+                               ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1)
+                                               << MX51_ECSPI_CTRL_BL_OFFSET;
+                       else
+                               ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+                                               << MX51_ECSPI_CTRL_BL_OFFSET;
+               }
        }
 
        /* set clock speed */
index 64f0e047c23d2a16d54099f486400eec8c361adf..4b10921276942ed13a43e531b723e746b76dd6fa 100644 (file)
@@ -60,7 +60,16 @@ static void optee_release_device(struct device *dev)
        kfree(optee_device);
 }
 
-static int optee_register_device(const uuid_t *device_uuid)
+static ssize_t need_supplicant_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       return 0;
+}
+
+static DEVICE_ATTR_RO(need_supplicant);
+
+static int optee_register_device(const uuid_t *device_uuid, u32 func)
 {
        struct tee_client_device *optee_device = NULL;
        int rc;
@@ -83,6 +92,10 @@ static int optee_register_device(const uuid_t *device_uuid)
                put_device(&optee_device->dev);
        }
 
+       if (func == PTA_CMD_GET_DEVICES_SUPP)
+               device_create_file(&optee_device->dev,
+                                  &dev_attr_need_supplicant);
+
        return rc;
 }
 
@@ -142,7 +155,7 @@ static int __optee_enumerate_devices(u32 func)
        num_devices = shm_size / sizeof(uuid_t);
 
        for (idx = 0; idx < num_devices; idx++) {
-               rc = optee_register_device(&device_uuid[idx]);
+               rc = optee_register_device(&device_uuid[idx], func);
                if (rc)
                        goto out_shm;
        }
index b94f567647cb653b7332d25d06fa0674de0a21c1..e6218766d0c804a6b7230ecbc5604b4f7296b374 100644 (file)
@@ -777,6 +777,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
        { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
        { "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
        { "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
+       { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
index 9837a27739fdf5bf76072fce0c9969ddf252af3c..e3f482fd3de4811a0684fbab3d39f90d49c808f4 100644 (file)
@@ -189,5 +189,6 @@ static int __init early_omap8250_setup(struct earlycon_device *device,
 OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
 OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
 OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
+OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup);
 
 #endif
index 2d42f485c987669280c692b71be8b440fe1aa17f..578f35895b273fdc4e43956db562cd4d7a607e98 100644 (file)
@@ -933,7 +933,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
        if (priv->habit & UART_HAS_RHR_IT_DIS) {
                reg = serial_in(p, UART_OMAP_IER2);
                reg &= ~UART_OMAP_IER2_RHR_IT_DIS;
-               serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
+               serial_out(p, UART_OMAP_IER2, reg);
        }
 
        dmaengine_tx_status(rxchan, cookie, &state);
@@ -1079,7 +1079,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
        if (priv->habit & UART_HAS_RHR_IT_DIS) {
                reg = serial_in(p, UART_OMAP_IER2);
                reg |= UART_OMAP_IER2_RHR_IT_DIS;
-               serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
+               serial_out(p, UART_OMAP_IER2, reg);
        }
 
        dma_async_issue_pending(dma->rxchan);
@@ -1298,10 +1298,12 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
 
        status = serial_port_in(port, UART_LSR);
 
-       if (priv->habit & UART_HAS_EFR2)
-               am654_8250_handle_rx_dma(up, iir, status);
-       else
-               status = omap_8250_handle_rx_dma(up, iir, status);
+       if ((iir & 0x3f) != UART_IIR_THRI) {
+               if (priv->habit & UART_HAS_EFR2)
+                       am654_8250_handle_rx_dma(up, iir, status);
+               else
+                       status = omap_8250_handle_rx_dma(up, iir, status);
+       }
 
        serial8250_modem_status(up);
        if (status & UART_LSR_THRE && up->dma->tx_err) {
index 61cc24cd90e4bcf6ad84579743b68d38e91e8fdb..b7635363373e201fe0e59885d1d6cfe7e5ebd017 100644 (file)
@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = {
 
 /* Deals with DMA transactions */
 
-struct pl011_sgbuf {
-       struct scatterlist sg;
-       char *buf;
+struct pl011_dmabuf {
+       dma_addr_t              dma;
+       size_t                  len;
+       char                    *buf;
 };
 
 struct pl011_dmarx_data {
        struct dma_chan         *chan;
        struct completion       complete;
        bool                    use_buf_b;
-       struct pl011_sgbuf      sgbuf_a;
-       struct pl011_sgbuf      sgbuf_b;
+       struct pl011_dmabuf     dbuf_a;
+       struct pl011_dmabuf     dbuf_b;
        dma_cookie_t            cookie;
        bool                    running;
        struct timer_list       timer;
@@ -241,7 +242,8 @@ struct pl011_dmarx_data {
 
 struct pl011_dmatx_data {
        struct dma_chan         *chan;
-       struct scatterlist      sg;
+       dma_addr_t              dma;
+       size_t                  len;
        char                    *buf;
        bool                    queued;
 };
@@ -366,32 +368,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
        enum dma_data_direction dir)
 {
-       dma_addr_t dma_addr;
-
-       sg->buf = dma_alloc_coherent(chan->device->dev,
-               PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
-       if (!sg->buf)
+       db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+                                    &db->dma, GFP_KERNEL);
+       if (!db->buf)
                return -ENOMEM;
-
-       sg_init_table(&sg->sg, 1);
-       sg_set_page(&sg->sg, phys_to_page(dma_addr),
-               PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
-       sg_dma_address(&sg->sg) = dma_addr;
-       sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+       db->len = PL011_DMA_BUFFER_SIZE;
 
        return 0;
 }
 
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
        enum dma_data_direction dir)
 {
-       if (sg->buf) {
+       if (db->buf) {
                dma_free_coherent(chan->device->dev,
-                       PL011_DMA_BUFFER_SIZE, sg->buf,
-                       sg_dma_address(&sg->sg));
+                                 PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
        }
 }
 
@@ -552,8 +546,8 @@ static void pl011_dma_tx_callback(void *data)
 
        uart_port_lock_irqsave(&uap->port, &flags);
        if (uap->dmatx.queued)
-               dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
-                            DMA_TO_DEVICE);
+               dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+                               dmatx->len, DMA_TO_DEVICE);
 
        dmacr = uap->dmacr;
        uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -639,18 +633,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
                        memcpy(&dmatx->buf[first], &xmit->buf[0], second);
        }
 
-       dmatx->sg.length = count;
-
-       if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+       dmatx->len = count;
+       dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+                                   DMA_TO_DEVICE);
+       if (dmatx->dma == DMA_MAPPING_ERROR) {
                uap->dmatx.queued = false;
                dev_dbg(uap->port.dev, "unable to map TX DMA\n");
                return -EBUSY;
        }
 
-       desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+       desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
-               dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+               dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                /*
                 * If DMA cannot be used right now, we complete this
@@ -813,8 +808,8 @@ __acquires(&uap->port.lock)
        dmaengine_terminate_async(uap->dmatx.chan);
 
        if (uap->dmatx.queued) {
-               dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-                            DMA_TO_DEVICE);
+               dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+                                uap->dmatx.len, DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                uap->dmacr &= ~UART011_TXDMAE;
                pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
        struct dma_chan *rxchan = uap->dmarx.chan;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_async_tx_descriptor *desc;
-       struct pl011_sgbuf *sgbuf;
+       struct pl011_dmabuf *dbuf;
 
        if (!rxchan)
                return -EIO;
 
        /* Start the RX DMA job */
-       sgbuf = uap->dmarx.use_buf_b ?
-               &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-       desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+       dbuf = uap->dmarx.use_buf_b ?
+               &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+       desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
                               bool readfifo)
 {
        struct tty_port *port = &uap->port.state->port;
-       struct pl011_sgbuf *sgbuf = use_buf_b ?
-               &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+       struct pl011_dmabuf *dbuf = use_buf_b ?
+               &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
        int dma_count = 0;
        u32 fifotaken = 0; /* only used for vdbg() */
 
@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
        if (uap->dmarx.poll_rate) {
                /* The data can be taken by polling */
-               dmataken = sgbuf->sg.length - dmarx->last_residue;
+               dmataken = dbuf->len - dmarx->last_residue;
                /* Recalculate the pending size */
                if (pending >= dmataken)
                        pending -= dmataken;
@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
                 * Note that tty_insert_flip_buf() tries to take as many chars
                 * as it can.
                 */
-               dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+               dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
                                pending);
 
                uap->port.icount.rx += dma_count;
@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
        /* Reset the last_residue for Rx DMA poll */
        if (uap->dmarx.poll_rate)
-               dmarx->last_residue = sgbuf->sg.length;
+               dmarx->last_residue = dbuf->len;
 
        /*
         * Only continue with trying to read the FIFO if all DMA chars have
@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 {
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_chan *rxchan = dmarx->chan;
-       struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-               &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+       struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+               &dmarx->dbuf_b : &dmarx->dbuf_a;
        size_t pending;
        struct dma_tx_state state;
        enum dma_status dmastat;
@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
        pl011_write(uap->dmacr, uap, REG_DMACR);
        uap->dmarx.running = false;
 
-       pending = sgbuf->sg.length - state.residue;
+       pending = dbuf->len - state.residue;
        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
        /* Then we terminate the transfer - we now know our residue */
        dmaengine_terminate_all(rxchan);
@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void *data)
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_chan *rxchan = dmarx->chan;
        bool lastbuf = dmarx->use_buf_b;
-       struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-               &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+       struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+               &dmarx->dbuf_b : &dmarx->dbuf_a;
        size_t pending;
        struct dma_tx_state state;
        int ret;
@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void *data)
         * the DMA irq handler. So we check the residue here.
         */
        rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
-       pending = sgbuf->sg.length - state.residue;
+       pending = dbuf->len - state.residue;
        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
        /* Then we terminate the transfer - we now know our residue */
        dmaengine_terminate_all(rxchan);
@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
        unsigned long flags;
        unsigned int dmataken = 0;
        unsigned int size = 0;
-       struct pl011_sgbuf *sgbuf;
+       struct pl011_dmabuf *dbuf;
        int dma_count;
        struct dma_tx_state state;
 
-       sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+       dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
        rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
        if (likely(state.residue < dmarx->last_residue)) {
-               dmataken = sgbuf->sg.length - dmarx->last_residue;
+               dmataken = dbuf->len - dmarx->last_residue;
                size = dmarx->last_residue - state.residue;
-               dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+               dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
                                size);
                if (dma_count == size)
                        dmarx->last_residue =  state.residue;
@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
                return;
        }
 
-       sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+       uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
 
        /* The DMA buffer is now the FIFO the TTY subsystem can use */
        uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1133,7 +1128,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
                goto skip_rx;
 
        /* Allocate and map DMA RX buffers */
-       ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+       ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
@@ -1141,12 +1136,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
                goto skip_rx;
        }
 
-       ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+       ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
                        "RX buffer B", ret);
-               pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+               pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
                                 DMA_FROM_DEVICE);
                goto skip_rx;
        }
@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
                /* In theory, this should already be done by pl011_dma_flush_buffer */
                dmaengine_terminate_all(uap->dmatx.chan);
                if (uap->dmatx.queued) {
-                       dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-                                    DMA_TO_DEVICE);
+                       dma_unmap_single(uap->dmatx.chan->device->dev,
+                                        uap->dmatx.dma, uap->dmatx.len,
+                                        DMA_TO_DEVICE);
                        uap->dmatx.queued = false;
                }
 
@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
        if (uap->using_rx_dma) {
                dmaengine_terminate_all(uap->dmarx.chan);
                /* Clean up the RX DMA */
-               pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
-               pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+               pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+               pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
                if (uap->dmarx.poll_rate)
                        del_timer_sync(&uap->dmarx.timer);
                uap->using_rx_dma = false;
index a6a7c405892e80e07a1ab4008a3c67619f707001..21b574f78b8615bfa3fdc1cb00790b13900349f5 100644 (file)
@@ -552,11 +552,19 @@ static void ma35d1serial_console_putchar(struct uart_port *port, unsigned char c
  */
 static void ma35d1serial_console_write(struct console *co, const char *s, u32 count)
 {
-       struct uart_ma35d1_port *up = &ma35d1serial_ports[co->index];
+       struct uart_ma35d1_port *up;
        unsigned long flags;
        int locked = 1;
        u32 ier;
 
+       if ((co->index < 0) || (co->index >= MA35_UART_NR)) {
+               pr_warn("Failed to write on console port %x, out of range\n",
+                       co->index);
+               return;
+       }
+
+       up = &ma35d1serial_ports[co->index];
+
        if (up->port.sysrq)
                locked = 0;
        else if (oops_in_progress)
index db2bb1c0d36c264648a16898904f7a50d07a317b..cf0c6120d30edee40728934c1e8f447ae990b2f2 100644 (file)
@@ -766,6 +766,18 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
                case SC16IS7XX_IIR_RTOI_SRC:
                case SC16IS7XX_IIR_XOFFI_SRC:
                        rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
+
+                       /*
+                        * There is a silicon bug that makes the chip report a
+                        * time-out interrupt but no data in the FIFO. This is
+                        * described in errata section 18.1.4.
+                        *
+                        * When this happens, read one byte from the FIFO to
+                        * clear the interrupt.
+                        */
+                       if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
+                               rxlen = 1;
+
                        if (rxlen)
                                sc16is7xx_handle_rx(port, rxlen, iir);
                        break;
index da2558e274b473663d3c543f9354ddcbdd19fe84..db9d9365ff55dc1b2de23b75994a3622ee4238fc 100644 (file)
@@ -8,6 +8,7 @@
  *     Vinayak Holikatti <h.vinayak@samsung.com>
  */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
@@ -213,6 +214,55 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
        }
 }
 
+/**
+ * ufshcd_parse_clock_min_max_freq  - Parse MIN and MAX clocks freq
+ * @hba: per adapter instance
+ *
+ * This function parses MIN and MAX frequencies of all clocks required
+ * by the host drivers.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_parse_clock_min_max_freq(struct ufs_hba *hba)
+{
+       struct list_head *head = &hba->clk_list_head;
+       struct ufs_clk_info *clki;
+       struct dev_pm_opp *opp;
+       unsigned long freq;
+       u8 idx = 0;
+
+       list_for_each_entry(clki, head, list) {
+               if (!clki->name)
+                       continue;
+
+               clki->clk = devm_clk_get(hba->dev, clki->name);
+               if (IS_ERR(clki->clk))
+                       continue;
+
+               /* Find Max Freq */
+               freq = ULONG_MAX;
+               opp = dev_pm_opp_find_freq_floor_indexed(hba->dev, &freq, idx);
+               if (IS_ERR(opp)) {
+                       dev_err(hba->dev, "Failed to find OPP for MAX frequency\n");
+                       return PTR_ERR(opp);
+               }
+               clki->max_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+               dev_pm_opp_put(opp);
+
+               /* Find Min Freq */
+               freq = 0;
+               opp = dev_pm_opp_find_freq_ceil_indexed(hba->dev, &freq, idx);
+               if (IS_ERR(opp)) {
+                       dev_err(hba->dev, "Failed to find OPP for MIN frequency\n");
+                       return PTR_ERR(opp);
+               }
+               clki->min_freq = dev_pm_opp_get_freq_indexed(opp, idx++);
+               dev_pm_opp_put(opp);
+       }
+
+       return 0;
+}
+
 static int ufshcd_parse_operating_points(struct ufs_hba *hba)
 {
        struct device *dev = hba->dev;
@@ -279,6 +329,10 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba)
                return ret;
        }
 
+       ret = ufshcd_parse_clock_min_max_freq(hba);
+       if (ret)
+               return ret;
+
        hba->use_pm_opp = true;
 
        return 0;
index ea85e2c701a15f304c2082a720aa076c32bf91a9..3c8a9dd585c09ea0bd5d1817ab3aadf60c0537b5 100644 (file)
@@ -92,6 +92,7 @@ static void hidg_release(struct device *dev)
 {
        struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
 
+       kfree(hidg->report_desc);
        kfree(hidg->set_report_buf);
        kfree(hidg);
 }
@@ -1287,9 +1288,9 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
        hidg->report_length = opts->report_length;
        hidg->report_desc_length = opts->report_desc_length;
        if (opts->report_desc) {
-               hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
-                                                opts->report_desc_length,
-                                                GFP_KERNEL);
+               hidg->report_desc = kmemdup(opts->report_desc,
+                                           opts->report_desc_length,
+                                           GFP_KERNEL);
                if (!hidg->report_desc) {
                        ret = -ENOMEM;
                        goto err_put_device;
index ded9531f141b1b94bd7505d10652f21ad0546cd5..d59f94464b870b76c21a5b0380460479fdd1e8ed 100644 (file)
@@ -1646,8 +1646,6 @@ static void gadget_unbind_driver(struct device *dev)
 
        dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
 
-       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-
        udc->allow_connect = false;
        cancel_work_sync(&udc->vbus_work);
        mutex_lock(&udc->connect_lock);
@@ -1667,6 +1665,8 @@ static void gadget_unbind_driver(struct device *dev)
        driver->is_bound = false;
        udc->driver = NULL;
        mutex_unlock(&udc_lock);
+
+       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
 }
 
 /* ------------------------------------------------------------------------- */
index 95ed9404f6f8520678ccf3a220a8dad8fff82d98..d6fc08e5db8fbd410c7b547782f44fa1eff23271 100644 (file)
@@ -535,8 +535,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        /* xHC spec requires PCI devices to support D3hot and D3cold */
        if (xhci->hci_version >= 0x120)
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
-       else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
-               xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
index 2e0451bd336e2bd5e424b65e1964d6ad42ebf60d..16a670828dde19b14d8049bb79c7e6daec210b72 100644 (file)
@@ -267,7 +267,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
        if (!partner)
                return;
 
-       adev = &partner->adev;
+       adev = &altmode->adev;
 
        if (is_typec_plug(adev->dev.parent)) {
                struct typec_plug *plug = to_typec_plug(adev->dev.parent);
@@ -497,7 +497,8 @@ static void typec_altmode_release(struct device *dev)
 {
        struct altmode *alt = to_altmode(to_typec_altmode(dev));
 
-       typec_altmode_put_partner(alt);
+       if (!is_typec_port(dev->parent))
+               typec_altmode_put_partner(alt);
 
        altmode_id_remove(alt->adev.dev.parent, alt->id);
        kfree(alt);
index 12ac3397f39b819aa766e6da8a90e906b4350988..26ba7da6b410621ea72e65d4bb90bd192e06dbda 100644 (file)
@@ -2815,13 +2815,18 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
        struct mlx5_control_vq *cvq = &mvdev->cvq;
        int err = 0;
 
-       if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
+       if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
+               u16 idx = cvq->vring.last_avail_idx;
+
                err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
                                        MLX5_CVQ_MAX_ENT, false,
                                        (struct vring_desc *)(uintptr_t)cvq->desc_addr,
                                        (struct vring_avail *)(uintptr_t)cvq->driver_addr,
                                        (struct vring_used *)(uintptr_t)cvq->device_addr);
 
+               if (!err)
+                       cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx;
+       }
        return err;
 }
 
index 9b04aad6ec35d7499da38d1209fb53a1cdae91a9..c328e694f6e7f0716a9eee53d21e1d6977b64e80 100644 (file)
@@ -261,7 +261,7 @@ void pds_vdpa_debugfs_add_vdpadev(struct pds_vdpa_aux *vdpa_aux)
        debugfs_create_file("config", 0400, vdpa_aux->dentry, vdpa_aux->pdsv, &config_fops);
 
        for (i = 0; i < vdpa_aux->pdsv->num_vqs; i++) {
-               char name[8];
+               char name[16];
 
                snprintf(name, sizeof(name), "vq%02d", i);
                debugfs_create_file(name, 0400, vdpa_aux->dentry,
index 52b2449182ad71976cc68cb58aa8b52a77ff5cea..25c0fe5ec3d5dfacdb53fa31a709851adb118942 100644 (file)
@@ -318,9 +318,8 @@ static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 featur
                return -EOPNOTSUPP;
        }
 
-       pdsv->negotiated_features = nego_features;
-
        driver_features = pds_vdpa_get_driver_features(vdpa_dev);
+       pdsv->negotiated_features = nego_features;
        dev_dbg(dev, "%s: %#llx => %#llx\n",
                __func__, driver_features, nego_features);
 
@@ -461,8 +460,10 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 
        pds_vdpa_cmd_set_status(pdsv, status);
 
-       /* Note: still working with FW on the need for this reset cmd */
        if (status == 0) {
+               struct vdpa_callback null_cb = { };
+
+               pds_vdpa_set_config_cb(vdpa_dev, &null_cb);
                pds_vdpa_cmd_reset(pdsv);
 
                for (i = 0; i < pdsv->num_vqs; i++) {
index fd1f655b4f1ff38d20df8d939e750e9982d02147..42837617a55b5464b7fe0f63b4fa1326ab3717cc 100644 (file)
@@ -268,6 +268,7 @@ config HUGETLBFS
 
 config HUGETLB_PAGE
        def_bool HUGETLBFS
+       select XARRAY_MULTI
 
 config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        def_bool HUGETLB_PAGE
index ed1644e7683f47445b48d5c78a34b0db30b3dd6d..d642d06a453be75b40663ef2a1594dc696085e77 100644 (file)
@@ -424,7 +424,7 @@ error_kill_call:
        if (call->async) {
                if (cancel_work_sync(&call->async_work))
                        afs_put_call(call);
-               afs_put_call(call);
+               afs_set_call_complete(call, ret, 0);
        }
 
        ac->error = ret;
index 47e7770d05831757d45c4aee9331f52a10c06e76..79495cd7a7949916e53650b0af98388a930ec4cd 100644 (file)
@@ -9,6 +9,7 @@
 #include "debug.h"
 #include "errcode.h"
 #include "error.h"
+#include "journal.h"
 #include "trace.h"
 
 #include <linux/prefetch.h>
@@ -424,14 +425,11 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
                BUG_ON(btree_node_read_in_flight(b) ||
                       btree_node_write_in_flight(b));
 
-               if (btree_node_dirty(b))
-                       bch2_btree_complete_write(c, b, btree_current_write(b));
-               clear_btree_node_dirty_acct(c, b);
-
                btree_node_data_free(c, b);
        }
 
-       BUG_ON(atomic_read(&c->btree_cache.dirty));
+       BUG_ON(!bch2_journal_error(&c->journal) &&
+              atomic_read(&c->btree_cache.dirty));
 
        list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
 
index 57c20390e10e3fe05394415d8ccabb43201c871b..5a720f0cd5a653eb7053325de344192dc55fba3e 100644 (file)
@@ -1704,8 +1704,8 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
        return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
 }
 
-void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
-                             struct btree_write *w)
+static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
+                                     struct btree_write *w)
 {
        unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
 
index 7e03dd76fb380498a42bcdef91857727403a4d8a..e0d7fa5b1dfb9ab292a010071da9ed0162d303c1 100644 (file)
@@ -134,9 +134,6 @@ void bch2_btree_node_read(struct bch_fs *, struct btree *, bool);
 int bch2_btree_root_read(struct bch_fs *, enum btree_id,
                         const struct bkey_i *, unsigned);
 
-void bch2_btree_complete_write(struct bch_fs *, struct btree *,
-                             struct btree_write *);
-
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 
 enum btree_write_flags {
index 37fbf22de8fcba305d717f41e4ae8a9461502d53..1b7a5668df7cc4694f73f7c287a1858f3b61074e 100644 (file)
@@ -992,8 +992,6 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
        list_for_each_entry_safe(ck, n, &items, list) {
                cond_resched();
 
-               bch2_journal_pin_drop(&c->journal, &ck->journal);
-
                list_del(&ck->list);
                kfree(ck->k);
                six_lock_exit(&ck->c.lock);
index 324767c0ddccd7457004a34e8ed6e49da8c54b85..25fdca00bf7bdeed69f9e114332746ccb1de0d23 100644 (file)
@@ -554,6 +554,19 @@ int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
                                                 BTREE_UPDATE_PREJOURNAL);
 }
 
+static noinline int bch2_btree_insert_clone_trans(struct btree_trans *trans,
+                                                 enum btree_id btree,
+                                                 struct bkey_i *k)
+{
+       struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
+       int ret = PTR_ERR_OR_ZERO(n);
+       if (ret)
+               return ret;
+
+       bkey_copy(n, k);
+       return bch2_btree_insert_trans(trans, btree, n, 0);
+}
+
 int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
                                            enum btree_id btree,
                                            struct bkey_i *k)
@@ -564,6 +577,9 @@ int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
        EBUG_ON(trans->nr_wb_updates > trans->wb_updates_size);
        EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
 
+       if (unlikely(trans->journal_replay_not_finished))
+               return bch2_btree_insert_clone_trans(trans, btree, k);
+
        trans_for_each_wb_update(trans, i) {
                if (i->btree == btree && bpos_eq(i->k.k.p, k->k.p)) {
                        bkey_copy(&i->k, k);
index 6697417273aa14e7c5de09fbc1622c84eff24949..26be38ab6ecb8fffa179683fae7f2ec76895ec57 100644 (file)
@@ -1056,6 +1056,17 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
        flags &= ~BCH_WATERMARK_MASK;
        flags |= watermark;
 
+       if (!(flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
+           watermark < c->journal.watermark) {
+               struct journal_res res = { 0 };
+
+               ret = drop_locks_do(trans,
+                       bch2_journal_res_get(&c->journal, &res, 1,
+                                            watermark|JOURNAL_RES_GET_CHECK));
+               if (ret)
+                       return ERR_PTR(ret);
+       }
+
        while (1) {
                nr_nodes[!!update_level] += 1 + split;
                update_level++;
index 71aa5e59787b8bc6dca216add572553363a44492..2418c528c5333b6e621df17e1579cd2205264e36 100644 (file)
@@ -471,7 +471,7 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,
         * we aren't using the extent overwrite path to delete, we're
         * just using the normal key deletion path:
         */
-       if (bkey_deleted(&n->k))
+       if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_IS_EXTENTS))
                n->k.size = 0;
 
        return bch2_trans_relock(trans) ?:
@@ -591,7 +591,7 @@ int bch2_data_update_init(struct btree_trans *trans,
                m->data_opts.rewrite_ptrs = 0;
                /* if iter == NULL, it's just a promote */
                if (iter)
-                       ret = bch2_extent_drop_ptrs(trans, iter, k, data_opts);
+                       ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
                goto done;
        }
 
index 1a0f2d5715692baa2f26a088c61b55742e03fec3..2bfff0da7000b38dc18a20cb8a51290fa0696432 100644 (file)
@@ -485,20 +485,15 @@ retry:
        return ret;
 }
 
-int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
+int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 snapshot)
 {
        struct btree_iter iter;
        struct bkey_s_c k;
-       u32 snapshot;
        int ret;
 
-       ret = bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot);
-       if (ret)
-               return ret;
-
        for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
-                          SPOS(dir.inum, 0, snapshot),
-                          POS(dir.inum, U64_MAX), 0, k, ret)
+                          SPOS(dir, 0, snapshot),
+                          POS(dir, U64_MAX), 0, k, ret)
                if (k.k->type == KEY_TYPE_dirent) {
                        ret = -ENOTEMPTY;
                        break;
@@ -508,6 +503,14 @@ int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
        return ret;
 }
 
+int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
+{
+       u32 snapshot;
+
+       return bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot) ?:
+               bch2_empty_dir_snapshot(trans, dir.inum, snapshot);
+}
+
 int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
 {
        struct btree_trans *trans = bch2_trans_get(c);
index cd262bf4d9c5365747562f22536309dc5853d070..1e3431990abd3549efb0d9216679c1c5ec54489a 100644 (file)
@@ -64,6 +64,7 @@ u64 bch2_dirent_lookup(struct bch_fs *, subvol_inum,
                       const struct bch_hash_info *,
                       const struct qstr *, subvol_inum *);
 
+int bch2_empty_dir_snapshot(struct btree_trans *, u64, u32);
 int bch2_empty_dir_trans(struct btree_trans *, subvol_inum);
 int bch2_readdir(struct bch_fs *, subvol_inum, struct dir_context *);
 
index f6c92df552702a7455baa39532bf3c6231ae69b4..9d8afcb5979a12456c032a00faf5514962b9aa05 100644 (file)
@@ -1294,7 +1294,8 @@ unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
                unsigned i = 0;
 
                bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-                       if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) {
+                       if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
+                           p.ptr.unwritten) {
                                rewrite_ptrs = 0;
                                goto incompressible;
                        }
index 5a39bcb597a33d42826a16a98da394de3fe23660..a70b7a03057d7fdd9921a7e3e4fdddd742866508 100644 (file)
@@ -413,7 +413,7 @@ retry:
 
        if ((arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) &&
            !arg.src_ptr)
-               snapshot_src.subvol = to_bch_ei(dir)->ei_inode.bi_subvol;
+               snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol;
 
        inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir),
                              dst_dentry, arg.mode|S_IFDIR,
index 4d51be813509891458735d494b44e36c043035c6..371565e02ff273cdded33d32ce3f913deccccad6 100644 (file)
@@ -1733,6 +1733,9 @@ static int bch2_unfreeze(struct super_block *sb)
        struct bch_fs *c = sb->s_fs_info;
        int ret;
 
+       if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+               return 0;
+
        down_write(&c->state_lock);
        ret = bch2_fs_read_write(c);
        up_write(&c->state_lock);
index c7849b0753e7a115d563aa4cfdf5bdf33b319ec0..9309cfeecd8d6194bc1819a259dac0882b5ea55e 100644 (file)
@@ -7,6 +7,7 @@
 #include "btree_update.h"
 #include "buckets.h"
 #include "compress.h"
+#include "dirent.h"
 #include "error.h"
 #include "extents.h"
 #include "extent_update.h"
@@ -1093,11 +1094,15 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
        if (ret)
                goto out;
 
-       if (fsck_err_on(S_ISDIR(inode.bi_mode), c,
-                       deleted_inode_is_dir,
-                       "directory %llu:%u in deleted_inodes btree",
-                       pos.offset, pos.snapshot))
-               goto delete;
+       if (S_ISDIR(inode.bi_mode)) {
+               ret = bch2_empty_dir_snapshot(trans, pos.offset, pos.snapshot);
+               if (fsck_err_on(ret == -ENOTEMPTY, c, deleted_inode_is_dir,
+                               "non empty directory %llu:%u in deleted_inodes btree",
+                               pos.offset, pos.snapshot))
+                       goto delete;
+               if (ret)
+                       goto out;
+       }
 
        if (fsck_err_on(!(inode.bi_flags & BCH_INODE_unlinked), c,
                        deleted_inode_not_unlinked,
index 489b34046e7807744bdc7b8462910e5d9d4dc53a..8cf238be6213ece57815b6dce5e46a5c62c2853a 100644 (file)
@@ -249,7 +249,7 @@ static bool journal_entry_want_write(struct journal *j)
        return ret;
 }
 
-static bool journal_entry_close(struct journal *j)
+bool bch2_journal_entry_close(struct journal *j)
 {
        bool ret;
 
@@ -383,7 +383,7 @@ static bool journal_quiesced(struct journal *j)
        bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
 
        if (!ret)
-               journal_entry_close(j);
+               bch2_journal_entry_close(j);
        return ret;
 }
 
@@ -436,7 +436,7 @@ retry:
 
        /*
         * Recheck after taking the lock, so we don't race with another thread
-        * that just did journal_entry_open() and call journal_entry_close()
+        * that just did journal_entry_open() and call bch2_journal_entry_close()
         * unnecessarily
         */
        if (journal_res_get_fast(j, res, flags)) {
@@ -1041,7 +1041,7 @@ void bch2_fs_journal_stop(struct journal *j)
        bch2_journal_reclaim_stop(j);
        bch2_journal_flush_all_pins(j);
 
-       wait_event(j->wait, journal_entry_close(j));
+       wait_event(j->wait, bch2_journal_entry_close(j));
 
        /*
         * Always write a new journal entry, to make sure the clock hands are up
index 4c513fca5ef2d1db0c1bc9673359235912505afc..2f768e11aec9a9aaed7e900a8d87f9fc0546373c 100644 (file)
@@ -266,6 +266,7 @@ static inline union journal_res_state journal_state_buf_put(struct journal *j, u
        return s;
 }
 
+bool bch2_journal_entry_close(struct journal *);
 void bch2_journal_buf_put_final(struct journal *, u64, bool);
 
 static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
index 0f17fc5f8d6844f774d71f5a361b55e6acbb866f..5de1b68fb8afc80a61c06263cd8010b93e9efec5 100644 (file)
@@ -1599,6 +1599,7 @@ static CLOSURE_CALLBACK(journal_write_done)
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);
 
+       bch2_journal_reclaim_fast(j);
        bch2_journal_space_available(j);
 
        closure_wake_up(&w->wait);
index e63c6eda86afeb9e9c0920554e9bef953b0a9a26..ec712104addb32c94a1baa350a4bb3a43304c8b2 100644 (file)
@@ -776,6 +776,9 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush,
                               (1U << JOURNAL_PIN_btree), 0, 0, 0))
                *did_work = true;
 
+       if (seq_to_flush > journal_cur_seq(j))
+               bch2_journal_entry_close(j);
+
        spin_lock(&j->lock);
        /*
         * If journal replay hasn't completed, the unreplayed journal entries
index 770ced1c62850d317eb991c8723401456735cc90..c7d9074c82d97bbd6d0b893c0e3059358b6014e5 100644 (file)
@@ -144,7 +144,7 @@ static int bch2_journal_replay(struct bch_fs *c)
        u64 start_seq   = c->journal_replay_seq_start;
        u64 end_seq     = c->journal_replay_seq_start;
        size_t i;
-       int ret;
+       int ret = 0;
 
        move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
        keys->gap = keys->nr;
index 6e1bfe9feb59e4abe96e1dc74b30196fa5766f48..37d16e04e6715a56c8fdd328803fcb796c629a43 100644 (file)
@@ -121,6 +121,14 @@ int bch2_trans_mark_reflink_v(struct btree_trans *trans,
 {
        check_indirect_extent_deleting(new, &flags);
 
+       if (old.k->type == KEY_TYPE_reflink_v &&
+           new->k.type == KEY_TYPE_reflink_v &&
+           old.k->u64s == new->k.u64s &&
+           !memcmp(bkey_s_c_to_reflink_v(old).v->start,
+                   bkey_i_to_reflink_v(new)->v.start,
+                   bkey_val_bytes(&new->k) - 8))
+               return 0;
+
        return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
 }
 
index ab743115f169e5fc1a7c665148d0c877800fefab..f3cb7115b530bb29dcd35453931a05753438f0fb 100644 (file)
@@ -276,8 +276,8 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
                if (!btree_type_has_ptrs(id))
                        continue;
 
-               for_each_btree_key(trans, iter, id, POS_MIN,
-                                  BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+               ret = for_each_btree_key2(trans, iter, id, POS_MIN,
+                                         BTREE_ITER_ALL_SNAPSHOTS, k, ({
                        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                        const union bch_extent_entry *entry;
                        struct extent_ptr_decoded p;
@@ -309,8 +309,8 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
                                nr_uncompressed_extents++;
                        else if (compressed)
                                nr_compressed_extents++;
-               }
-               bch2_trans_iter_exit(trans, &iter);
+                       0;
+               }));
        }
 
        bch2_trans_put(trans);
index 51453d4928fa400dcf0d08c2e01bb1080b59cb88..2833e8ef4c098f680a4883d41a1e925dc477bc2f 100644 (file)
@@ -199,7 +199,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
        start = round_down(start, fs_info->sectorsize);
 
        btrfs_free_reserved_data_space_noquota(fs_info, len);
-       btrfs_qgroup_free_data(inode, reserved, start, len);
+       btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
 }
 
 /*
index bbcc3df776461f5b6952422c246e65bbcbc1ccc6..62cb97f7c94fa26e0969707f595e07ffa9bb3937 100644 (file)
@@ -4799,6 +4799,32 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
        }
 }
 
+static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_root *gang[8];
+       int i;
+       int ret;
+
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       while (1) {
+               ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
+                                                (void **)gang, 0,
+                                                ARRAY_SIZE(gang),
+                                                BTRFS_ROOT_TRANS_TAG);
+               if (ret == 0)
+                       break;
+               for (i = 0; i < ret; i++) {
+                       struct btrfs_root *root = gang[i];
+
+                       btrfs_qgroup_free_meta_all_pertrans(root);
+                       radix_tree_tag_clear(&fs_info->fs_roots_radix,
+                                       (unsigned long)root->root_key.objectid,
+                                       BTRFS_ROOT_TRANS_TAG);
+               }
+       }
+       spin_unlock(&fs_info->fs_roots_radix_lock);
+}
+
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
                                   struct btrfs_fs_info *fs_info)
 {
@@ -4827,6 +4853,8 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
                                     EXTENT_DIRTY);
        btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
 
+       btrfs_free_all_qgroup_pertrans(fs_info);
+
        cur_trans->state =TRANS_STATE_COMPLETED;
        wake_up(&cur_trans->commit_wait);
 }
index 0455935ff558804b3e47291821a566e348d8a6d4..01423670bc8a2b5aba840a98687d6cec84a166b9 100644 (file)
@@ -1547,6 +1547,23 @@ out:
        return ret;
 }
 
+static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
+                                    struct btrfs_delayed_ref_head *href)
+{
+       u64 root = href->owning_root;
+
+       /*
+        * Don't check must_insert_reserved, as this is called from contexts
+        * where it has already been unset.
+        */
+       if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE ||
+           !href->is_data || !is_fstree(root))
+               return;
+
+       btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes,
+                                 BTRFS_QGROUP_RSV_DATA);
+}
+
 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
                                struct btrfs_delayed_ref_head *href,
                                struct btrfs_delayed_ref_node *node,
@@ -1569,7 +1586,6 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
                struct btrfs_squota_delta delta = {
                        .root = href->owning_root,
                        .num_bytes = node->num_bytes,
-                       .rsv_bytes = href->reserved_bytes,
                        .is_data = true,
                        .is_inc = true,
                        .generation = trans->transid,
@@ -1586,11 +1602,9 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
                                                 flags, ref->objectid,
                                                 ref->offset, &key,
                                                 node->ref_mod, href->owning_root);
+               free_head_ref_squota_rsv(trans->fs_info, href);
                if (!ret)
                        ret = btrfs_record_squota_delta(trans->fs_info, &delta);
-               else
-                       btrfs_qgroup_free_refroot(trans->fs_info, delta.root,
-                                                 delta.rsv_bytes, BTRFS_QGROUP_RSV_DATA);
        } else if (node->action == BTRFS_ADD_DELAYED_REF) {
                ret = __btrfs_inc_extent_ref(trans, node, parent, ref->root,
                                             ref->objectid, ref->offset,
@@ -1742,7 +1756,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
                struct btrfs_squota_delta delta = {
                        .root = href->owning_root,
                        .num_bytes = fs_info->nodesize,
-                       .rsv_bytes = 0,
                        .is_data = false,
                        .is_inc = true,
                        .generation = trans->transid,
@@ -1774,8 +1787,10 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
        int ret = 0;
 
        if (TRANS_ABORTED(trans)) {
-               if (insert_reserved)
+               if (insert_reserved) {
                        btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
+                       free_head_ref_squota_rsv(trans->fs_info, href);
+               }
                return 0;
        }
 
@@ -1871,6 +1886,8 @@ u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
                                  struct btrfs_delayed_ref_root *delayed_refs,
                                  struct btrfs_delayed_ref_head *head)
 {
+       u64 ret = 0;
+
        /*
         * We had csum deletions accounted for in our delayed refs rsv, we need
         * to drop the csum leaves for this update from our delayed_refs_rsv.
@@ -1885,14 +1902,13 @@ u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
 
                btrfs_delayed_refs_rsv_release(fs_info, 0, nr_csums);
 
-               return btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
+               ret = btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
        }
-       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
-           head->must_insert_reserved && head->is_data)
-               btrfs_qgroup_free_refroot(fs_info, head->owning_root,
-                                         head->reserved_bytes, BTRFS_QGROUP_RSV_DATA);
+       /* must_insert_reserved can be set only if we didn't run the head ref. */
+       if (head->must_insert_reserved)
+               free_head_ref_squota_rsv(fs_info, head);
 
-       return 0;
+       return ret;
 }
 
 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
@@ -2033,6 +2049,12 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
                 * spin lock.
                 */
                must_insert_reserved = locked_ref->must_insert_reserved;
+               /*
+                * Unsetting this on the head ref relinquishes ownership of
+                * the rsv_bytes, so it is critical that every possible code
+                * path from here forward frees all reserves including qgroup
+                * reserve.
+                */
                locked_ref->must_insert_reserved = false;
 
                extent_op = locked_ref->extent_op;
@@ -3292,7 +3314,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                struct btrfs_squota_delta delta = {
                        .root = delayed_ref_root,
                        .num_bytes = num_bytes,
-                       .rsv_bytes = 0,
                        .is_data = is_data,
                        .is_inc = false,
                        .generation = btrfs_extent_generation(leaf, ei),
@@ -4937,7 +4958,6 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                .root = root_objectid,
                .num_bytes = ins->offset,
                .generation = trans->transid,
-               .rsv_bytes = 0,
                .is_data = true,
                .is_inc = true,
        };
index e6230a6ffa9859fd26046b12e9cc295e5cf78d35..8f724c54fc8e9c8a38f720df77526f7b722b6f44 100644 (file)
@@ -2302,7 +2302,8 @@ static int try_release_extent_state(struct extent_io_tree *tree,
                ret = 0;
        } else {
                u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
-                                  EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
+                                  EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
+                                  EXTENT_QGROUP_RESERVED);
 
                /*
                 * At this point we can safely clear everything except the
index f47731c45bb50497c6a3e5059eed67526bf95be2..32611a4edd6b2388af70203ab33ec41f2f7f60c3 100644 (file)
@@ -3192,7 +3192,7 @@ static long btrfs_fallocate(struct file *file, int mode,
                        qgroup_reserved -= range->len;
                } else if (qgroup_reserved > 0) {
                        btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
-                                              range->start, range->len);
+                                              range->start, range->len, NULL);
                        qgroup_reserved -= range->len;
                }
                list_del(&range->list);
index 9f5a9894f88f49156e9ddb7c4baac8ef1be252df..fb3c3f43c3fa401da09ad815ae815e47e92087e3 100644 (file)
@@ -688,7 +688,7 @@ out:
         * And at reserve time, it's always aligned to page size, so
         * just free one page here.
         */
-       btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
+       btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
        btrfs_free_path(path);
        btrfs_end_transaction(trans);
        return ret;
@@ -5132,7 +5132,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
                 */
                if (state_flags & EXTENT_DELALLOC)
                        btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
-                                              end - start + 1);
+                                              end - start + 1, NULL);
 
                clear_extent_bit(io_tree, start, end,
                                 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
@@ -8059,7 +8059,7 @@ next:
                 *    reserved data space.
                 *    Since the IO will never happen for this page.
                 */
-               btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
+               btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
                if (!inode_evicting) {
                        clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
                                 EXTENT_DELALLOC | EXTENT_UPTODATE |
@@ -9491,7 +9491,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
        struct btrfs_path *path;
        u64 start = ins->objectid;
        u64 len = ins->offset;
-       int qgroup_released;
+       u64 qgroup_released = 0;
        int ret;
 
        memset(&stack_fi, 0, sizeof(stack_fi));
@@ -9504,9 +9504,9 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
        btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
        /* Encryption and other encoding is reserved and all 0 */
 
-       qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
-       if (qgroup_released < 0)
-               return ERR_PTR(qgroup_released);
+       ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
+       if (ret < 0)
+               return ERR_PTR(ret);
 
        if (trans) {
                ret = insert_reserved_file_extent(trans, inode,
@@ -10401,7 +10401,7 @@ out_delalloc_release:
        btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
 out_qgroup_free_data:
        if (ret < 0)
-               btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
+               btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
 out_free_data_space:
        /*
         * If btrfs_reserve_extent() succeeded, then we already decremented
index 4e50b62db2a8feba629ee2ceb2040e28b4c2485b..a1743904202b78a7c341aabf2c6af3756d15b2aa 100644 (file)
@@ -1290,6 +1290,15 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
                         * are limited to own subvolumes only
                         */
                        ret = -EPERM;
+               } else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) {
+                       /*
+                        * Snapshots must be made with the src_inode referring
+                        * to the subvolume inode, otherwise the permission
+                        * checking above is useless because we may have
+                        * permission on a lower directory but not the subvol
+                        * itself.
+                        */
+                       ret = -EINVAL;
                } else {
                        ret = btrfs_mksnapshot(&file->f_path, idmap,
                                               name, namelen,
index 574e8a55e24a2b08e6b3a411330a284854f3e5ab..a82e1417c4d278ffccb8939d2f56e79caa14f179 100644 (file)
@@ -152,11 +152,12 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
 {
        struct btrfs_ordered_extent *entry;
        int ret;
+       u64 qgroup_rsv = 0;
 
        if (flags &
            ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
                /* For nocow write, we can release the qgroup rsv right now */
-               ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
+               ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
                if (ret < 0)
                        return ERR_PTR(ret);
        } else {
@@ -164,7 +165,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
                 * The ordered extent has reserved qgroup space, release now
                 * and pass the reserved number for qgroup_record to free.
                 */
-               ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
+               ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
                if (ret < 0)
                        return ERR_PTR(ret);
        }
@@ -182,7 +183,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
        entry->inode = igrab(&inode->vfs_inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
-       entry->qgroup_rsv = ret;
+       entry->qgroup_rsv = qgroup_rsv;
        entry->flags = flags;
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
@@ -599,7 +600,9 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                        release = entry->disk_num_bytes;
                else
                        release = entry->num_bytes;
-               btrfs_delalloc_release_metadata(btrfs_inode, release, false);
+               btrfs_delalloc_release_metadata(btrfs_inode, release,
+                                               test_bit(BTRFS_ORDERED_IOERR,
+                                                        &entry->flags));
        }
 
        percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
index ce446d9d7f23da3f44cb9c7100c8e0d7b3866684..e46774e8f49fd6609afa61914adb82dd53dac83d 100644 (file)
@@ -4057,13 +4057,14 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
 
 /* Free ranges specified by @reserved, normally in error path */
 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
-                       struct extent_changeset *reserved, u64 start, u64 len)
+                                    struct extent_changeset *reserved,
+                                    u64 start, u64 len, u64 *freed_ret)
 {
        struct btrfs_root *root = inode->root;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct extent_changeset changeset;
-       int freed = 0;
+       u64 freed = 0;
        int ret;
 
        extent_changeset_init(&changeset);
@@ -4104,7 +4105,9 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
        }
        btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
                                  BTRFS_QGROUP_RSV_DATA);
-       ret = freed;
+       if (freed_ret)
+               *freed_ret = freed;
+       ret = 0;
 out:
        extent_changeset_release(&changeset);
        return ret;
@@ -4112,7 +4115,7 @@ out:
 
 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
                        struct extent_changeset *reserved, u64 start, u64 len,
-                       int free)
+                       u64 *released, int free)
 {
        struct extent_changeset changeset;
        int trace_op = QGROUP_RELEASE;
@@ -4128,7 +4131,7 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
        /* In release case, we shouldn't have @reserved */
        WARN_ON(!free && reserved);
        if (free && reserved)
-               return qgroup_free_reserved_data(inode, reserved, start, len);
+               return qgroup_free_reserved_data(inode, reserved, start, len, released);
        extent_changeset_init(&changeset);
        ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
                                       EXTENT_QGROUP_RESERVED, &changeset);
@@ -4143,7 +4146,8 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
                btrfs_qgroup_free_refroot(inode->root->fs_info,
                                inode->root->root_key.objectid,
                                changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
-       ret = changeset.bytes_changed;
+       if (released)
+               *released = changeset.bytes_changed;
 out:
        extent_changeset_release(&changeset);
        return ret;
@@ -4162,9 +4166,10 @@ out:
  * NOTE: This function may sleep for memory allocation.
  */
 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
-                       struct extent_changeset *reserved, u64 start, u64 len)
+                          struct extent_changeset *reserved,
+                          u64 start, u64 len, u64 *freed)
 {
-       return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
+       return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
 }
 
 /*
@@ -4182,9 +4187,9 @@ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
  *
  * NOTE: This function may sleep for memory allocation.
  */
-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
+int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
 {
-       return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
+       return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
 }
 
 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
@@ -4332,8 +4337,9 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
 
                qgroup_rsv_release(fs_info, qgroup, num_bytes,
                                BTRFS_QGROUP_RSV_META_PREALLOC);
-               qgroup_rsv_add(fs_info, qgroup, num_bytes,
-                               BTRFS_QGROUP_RSV_META_PERTRANS);
+               if (!sb_rdonly(fs_info->sb))
+                       qgroup_rsv_add(fs_info, qgroup, num_bytes,
+                                      BTRFS_QGROUP_RSV_META_PERTRANS);
 
                list_for_each_entry(glist, &qgroup->groups, next_group)
                        qgroup_iterator_add(&qgroup_list, glist->group);
@@ -4655,6 +4661,17 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
        *root = RB_ROOT;
 }
 
+void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
+{
+       if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
+               return;
+
+       if (!is_fstree(root))
+               return;
+
+       btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA);
+}
+
 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
                              struct btrfs_squota_delta *delta)
 {
@@ -4699,8 +4716,5 @@ int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
 
 out:
        spin_unlock(&fs_info->qgroup_lock);
-       if (!ret && delta->rsv_bytes)
-               btrfs_qgroup_free_refroot(fs_info, root, delta->rsv_bytes,
-                                         BTRFS_QGROUP_RSV_DATA);
        return ret;
 }
index 855a4f97876185099db6a188d70c75ace327071f..be18c862e64ede62e6db26877b7ca69fa3c3ad8a 100644 (file)
@@ -274,8 +274,6 @@ struct btrfs_squota_delta {
        u64 root;
        /* The number of bytes in the extent being counted. */
        u64 num_bytes;
-       /* The number of bytes reserved for this extent. */
-       u64 rsv_bytes;
        /* The generation the extent was created in. */
        u64 generation;
        /* Whether we are using or freeing the extent. */
@@ -358,10 +356,10 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
 /* New io_tree based accurate qgroup reserve API */
 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
                        struct extent_changeset **reserved, u64 start, u64 len);
-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
+int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
                           struct extent_changeset *reserved, u64 start,
-                          u64 len);
+                          u64 len, u64 *freed);
 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
                              enum btrfs_qgroup_rsv_type type, bool enforce);
 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
@@ -422,6 +420,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct extent_buffer *eb);
 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
 bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
+void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes);
 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
                              struct btrfs_squota_delta *delta);
 
index bfc0eb5e3b7c22f90ec83b4c5b53abd2fda5cd04..5b3333ceef04818dbf98270da4bb84c99e5c70f8 100644 (file)
@@ -37,8 +37,6 @@
 
 static struct kmem_cache *btrfs_trans_handle_cachep;
 
-#define BTRFS_ROOT_TRANS_TAG 0
-
 /*
  * Transaction states and transitions
  *
index 18c4f6e83b78839d3a2f65306eef097f0f28a944..2bf8bbdfd0b38b1b1caf3cf1cf0b6738d28f231f 100644 (file)
@@ -12,6 +12,9 @@
 #include "ctree.h"
 #include "misc.h"
 
+/* Radix-tree tag for roots that are part of the trasaction. */
+#define BTRFS_ROOT_TRANS_TAG                   0
+
 enum btrfs_trans_state {
        TRANS_STATE_RUNNING,
        TRANS_STATE_COMMIT_PREP,
index a5ade8c163754bf09036d81eff378f3ff5c545cd..5063434be0fc839d844f3d23810caa6d359bb350 100644 (file)
@@ -108,12 +108,6 @@ int debugfs_file_get(struct dentry *dentry)
                        kfree(fsd);
                        fsd = READ_ONCE(dentry->d_fsdata);
                }
-#ifdef CONFIG_LOCKDEP
-               fsd->lock_name = kasprintf(GFP_KERNEL, "debugfs:%pd", dentry);
-               lockdep_register_key(&fsd->key);
-               lockdep_init_map(&fsd->lockdep_map, fsd->lock_name ?: "debugfs",
-                                &fsd->key, 0);
-#endif
                INIT_LIST_HEAD(&fsd->cancellations);
                mutex_init(&fsd->cancellations_mtx);
        }
@@ -132,8 +126,6 @@ int debugfs_file_get(struct dentry *dentry)
        if (!refcount_inc_not_zero(&fsd->active_users))
                return -EIO;
 
-       lock_map_acquire_read(&fsd->lockdep_map);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(debugfs_file_get);
@@ -151,8 +143,6 @@ void debugfs_file_put(struct dentry *dentry)
 {
        struct debugfs_fsdata *fsd = READ_ONCE(dentry->d_fsdata);
 
-       lock_map_release(&fsd->lockdep_map);
-
        if (refcount_dec_and_test(&fsd->active_users))
                complete(&fsd->active_users_drained);
 }
index e4e7fe1bd9fbfaa316364404a3dcfe1cd9961bfe..034a617cb1a5e777d5e254bd3aa81b368c566ed2 100644 (file)
@@ -243,10 +243,6 @@ static void debugfs_release_dentry(struct dentry *dentry)
 
        /* check it wasn't a dir (no fsdata) or automount (no real_fops) */
        if (fsd && fsd->real_fops) {
-#ifdef CONFIG_LOCKDEP
-               lockdep_unregister_key(&fsd->key);
-               kfree(fsd->lock_name);
-#endif
                WARN_ON(!list_empty(&fsd->cancellations));
                mutex_destroy(&fsd->cancellations_mtx);
        }
@@ -755,9 +751,6 @@ static void __debugfs_file_removed(struct dentry *dentry)
        if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
                return;
 
-       lock_map_acquire(&fsd->lockdep_map);
-       lock_map_release(&fsd->lockdep_map);
-
        /* if we hit zero, just wait for all to finish */
        if (!refcount_dec_and_test(&fsd->active_users)) {
                wait_for_completion(&fsd->active_users_drained);
index 0c4c68cf161f8742cf25c072291a26095e35f74e..dae80c2a469ed0da1ae864ae426cedd2efbb6368 100644 (file)
@@ -7,7 +7,6 @@
 
 #ifndef _DEBUGFS_INTERNAL_H_
 #define _DEBUGFS_INTERNAL_H_
-#include <linux/lockdep.h>
 #include <linux/list.h>
 
 struct file_operations;
@@ -25,11 +24,6 @@ struct debugfs_fsdata {
                struct {
                        refcount_t active_users;
                        struct completion active_users_drained;
-#ifdef CONFIG_LOCKDEP
-                       struct lockdep_map lockdep_map;
-                       struct lock_class_key key;
-                       char *lock_name;
-#endif
 
                        /* protect cancellations */
                        struct mutex cancellations_mtx;
index 0166bb9ca160bdb5196aa9731b2c2daebd3c7116..6aa15dafc67786559d3b68ebfefd8f90e119b3fc 100644 (file)
@@ -349,9 +349,10 @@ static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
                return;
        }
        /*
-        * If i_disksize got extended due to writeback of delalloc blocks while
-        * the DIO was running we could fail to cleanup the orphan list in
-        * ext4_handle_inode_extension(). Do it now.
+        * If i_disksize got extended either due to writeback of delalloc
+        * blocks or extending truncate while the DIO was running we could fail
+        * to cleanup the orphan list in ext4_handle_inode_extension(). Do it
+        * now.
         */
        if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
                handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
@@ -386,10 +387,11 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
         * blocks. But the code in ext4_iomap_alloc() is careful to use
         * zeroed/unwritten extents if this is possible; thus we won't leave
         * uninitialized blocks in a file even if we didn't succeed in writing
-        * as much as we intended.
+        * as much as we intended. Also we can race with truncate or write
+        * expanding the file so we have to be a bit careful here.
         */
-       WARN_ON_ONCE(i_size_read(inode) < READ_ONCE(EXT4_I(inode)->i_disksize));
-       if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize))
+       if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
+           pos + size <= i_size_read(inode))
                return size;
        return ext4_handle_inode_extension(inode, pos, size);
 }
index 454d5612641ee3c32e71114e4d6148c52b484459..d72b5e3c92ec4088b878246f431059906d8cf931 100644 (file)
@@ -4478,6 +4478,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
        start = max(start, rounddown(ac->ac_o_ex.fe_logical,
                        (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
 
+       /* avoid unnecessary preallocation that may trigger assertions */
+       if (start + size > EXT_MAX_BLOCKS)
+               size = EXT_MAX_BLOCKS - start;
+
        /* don't cover already allocated blocks in selected range */
        if (ar->pleft && start <= ar->lleft) {
                size -= ar->lleft + 1 - start;
index 23904a6a9a96f74a45eb8c2f9bf72d795e05ac60..12ef91d170bb3091ac35a33d2b9dc38330b00948 100644 (file)
@@ -1222,6 +1222,7 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
        if (fc->dax) {
                fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
                kfree(fc->dax);
+               fc->dax = NULL;
        }
 }
 
index 1cdb6327511ef843db8936302fe7f141c4016186..a660f1f21540abbb3dd60e876ca81a587af3336e 100644 (file)
@@ -1448,7 +1448,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
        if (!ia)
                return -ENOMEM;
 
-       if (fopen_direct_io && fc->direct_io_relax) {
+       if (fopen_direct_io && fc->direct_io_allow_mmap) {
                res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
                if (res) {
                        fuse_io_free(ia);
@@ -1574,6 +1574,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ssize_t res;
        bool exclusive_lock =
                !(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
+               get_fuse_conn(inode)->direct_io_allow_mmap ||
                iocb->ki_flags & IOCB_APPEND ||
                fuse_direct_write_extending_i_size(iocb, from);
 
@@ -1581,6 +1582,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
         * Take exclusive lock if
         * - Parallel direct writes are disabled - a user space decision
         * - Parallel direct writes are enabled and i_size is being extended.
+        * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP).
         *   This might not be needed at all, but needs further investigation.
         */
        if (exclusive_lock)
@@ -2466,9 +2468,9 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 
        if (ff->open_flags & FOPEN_DIRECT_IO) {
                /* Can't provide the coherency needed for MAP_SHARED
-                * if FUSE_DIRECT_IO_RELAX isn't set.
+                * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
                 */
-               if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_relax)
+               if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
                        return -ENODEV;
 
                invalidate_inode_pages2(file->f_mapping);
index 6e6e721f421b9da154ff504cfe8fbcff0602bb0d..1df83eebda92771d20a42ea2aaefa118effcbc77 100644 (file)
@@ -63,6 +63,19 @@ struct fuse_forget_link {
        struct fuse_forget_link *next;
 };
 
+/* Submount lookup tracking */
+struct fuse_submount_lookup {
+       /** Refcount */
+       refcount_t count;
+
+       /** Unique ID, which identifies the inode between userspace
+        * and kernel */
+       u64 nodeid;
+
+       /** The request used for sending the FORGET message */
+       struct fuse_forget_link *forget;
+};
+
 /** FUSE inode */
 struct fuse_inode {
        /** Inode data */
@@ -158,6 +171,8 @@ struct fuse_inode {
         */
        struct fuse_inode_dax *dax;
 #endif
+       /** Submount specific lookup tracking */
+       struct fuse_submount_lookup *submount_lookup;
 };
 
 /** FUSE inode state bits */
@@ -797,8 +812,8 @@ struct fuse_conn {
        /* Is tmpfile not implemented by fs? */
        unsigned int no_tmpfile:1;
 
-       /* relax restrictions in FOPEN_DIRECT_IO mode */
-       unsigned int direct_io_relax:1;
+       /* Relax restrictions to allow shared mmap in FOPEN_DIRECT_IO mode */
+       unsigned int direct_io_allow_mmap:1;
 
        /* Is statx not implemented by fs? */
        unsigned int no_statx:1;
index 74d4f09d5827e8af92aef881eb8060fa69e4afca..2a6d44f91729bbd7e3bf1c955a952ecdd695bd0f 100644 (file)
@@ -68,6 +68,24 @@ struct fuse_forget_link *fuse_alloc_forget(void)
        return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
 }
 
+static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
+{
+       struct fuse_submount_lookup *sl;
+
+       sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
+       if (!sl)
+               return NULL;
+       sl->forget = fuse_alloc_forget();
+       if (!sl->forget)
+               goto out_free;
+
+       return sl;
+
+out_free:
+       kfree(sl);
+       return NULL;
+}
+
 static struct inode *fuse_alloc_inode(struct super_block *sb)
 {
        struct fuse_inode *fi;
@@ -83,6 +101,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->attr_version = 0;
        fi->orig_ino = 0;
        fi->state = 0;
+       fi->submount_lookup = NULL;
        mutex_init(&fi->mutex);
        spin_lock_init(&fi->lock);
        fi->forget = fuse_alloc_forget();
@@ -113,6 +132,17 @@ static void fuse_free_inode(struct inode *inode)
        kmem_cache_free(fuse_inode_cachep, fi);
 }
 
+static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
+                                        struct fuse_submount_lookup *sl)
+{
+       if (!refcount_dec_and_test(&sl->count))
+               return;
+
+       fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
+       sl->forget = NULL;
+       kfree(sl);
+}
+
 static void fuse_evict_inode(struct inode *inode)
 {
        struct fuse_inode *fi = get_fuse_inode(inode);
@@ -132,6 +162,11 @@ static void fuse_evict_inode(struct inode *inode)
                                          fi->nlookup);
                        fi->forget = NULL;
                }
+
+               if (fi->submount_lookup) {
+                       fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
+                       fi->submount_lookup = NULL;
+               }
        }
        if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
                WARN_ON(!list_empty(&fi->write_files));
@@ -330,6 +365,13 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
                fuse_dax_dontcache(inode, attr->flags);
 }
 
+static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
+                                     u64 nodeid)
+{
+       sl->nodeid = nodeid;
+       refcount_set(&sl->count, 1);
+}
+
 static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
                            struct fuse_conn *fc)
 {
@@ -392,12 +434,22 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
         */
        if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
            S_ISDIR(attr->mode)) {
+               struct fuse_inode *fi;
+
                inode = new_inode(sb);
                if (!inode)
                        return NULL;
 
                fuse_init_inode(inode, attr, fc);
-               get_fuse_inode(inode)->nodeid = nodeid;
+               fi = get_fuse_inode(inode);
+               fi->nodeid = nodeid;
+               fi->submount_lookup = fuse_alloc_submount_lookup();
+               if (!fi->submount_lookup) {
+                       iput(inode);
+                       return NULL;
+               }
+               /* Sets nlookup = 1 on fi->submount_lookup->nlookup */
+               fuse_init_submount_lookup(fi->submount_lookup, nodeid);
                inode->i_flags |= S_AUTOMOUNT;
                goto done;
        }
@@ -420,11 +472,11 @@ retry:
                iput(inode);
                goto retry;
        }
-done:
        fi = get_fuse_inode(inode);
        spin_lock(&fi->lock);
        fi->nlookup++;
        spin_unlock(&fi->lock);
+done:
        fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);
 
        return inode;
@@ -1230,8 +1282,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
                                fc->init_security = 1;
                        if (flags & FUSE_CREATE_SUPP_GROUP)
                                fc->create_supp_group = 1;
-                       if (flags & FUSE_DIRECT_IO_RELAX)
-                               fc->direct_io_relax = 1;
+                       if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
+                               fc->direct_io_allow_mmap = 1;
                } else {
                        ra_pages = fc->max_read / PAGE_SIZE;
                        fc->no_lock = 1;
@@ -1278,7 +1330,7 @@ void fuse_send_init(struct fuse_mount *fm)
                FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
                FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
                FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
-               FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_RELAX;
+               FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP;
 #ifdef CONFIG_FUSE_DAX
        if (fm->fc->dax)
                flags |= FUSE_MAP_ALIGNMENT;
@@ -1465,6 +1517,8 @@ static int fuse_fill_super_submount(struct super_block *sb,
        struct super_block *parent_sb = parent_fi->inode.i_sb;
        struct fuse_attr root_attr;
        struct inode *root;
+       struct fuse_submount_lookup *sl;
+       struct fuse_inode *fi;
 
        fuse_sb_defaults(sb);
        fm->sb = sb;
@@ -1487,12 +1541,27 @@ static int fuse_fill_super_submount(struct super_block *sb,
         * its nlookup should not be incremented.  fuse_iget() does
         * that, though, so undo it here.
         */
-       get_fuse_inode(root)->nlookup--;
+       fi = get_fuse_inode(root);
+       fi->nlookup--;
+
        sb->s_d_op = &fuse_dentry_operations;
        sb->s_root = d_make_root(root);
        if (!sb->s_root)
                return -ENOMEM;
 
+       /*
+        * Grab the parent's submount_lookup pointer and take a
+        * reference on the shared nlookup from the parent.  This is to
+        * prevent the last forget for this nodeid from getting
+        * triggered until all users have finished with it.
+        */
+       sl = parent_fi->submount_lookup;
+       WARN_ON(!sl);
+       if (sl) {
+               refcount_inc(&sl->count);
+               fi->submount_lookup = sl;
+       }
+
        return 0;
 }
 
index 8d6f934c3d9543d3c87a77f5ec3566df7a2b2a37..5e122586e06ed0f9b9a36ebfe089ca7b3a75aee8 100644 (file)
@@ -119,7 +119,7 @@ static int journal_submit_commit_record(journal_t *journal,
        struct commit_header *tmp;
        struct buffer_head *bh;
        struct timespec64 now;
-       blk_opf_t write_flags = REQ_OP_WRITE | REQ_SYNC;
+       blk_opf_t write_flags = REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS;
 
        *cbh = NULL;
 
@@ -270,6 +270,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
                        if (!ret)
                                ret = err;
                }
+               cond_resched();
                spin_lock(&journal->j_list_lock);
                jinode->i_flags &= ~JI_COMMIT_RUNNING;
                smp_mb();
@@ -395,8 +396,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                 */
                jbd2_journal_update_sb_log_tail(journal,
                                                journal->j_tail_sequence,
-                                               journal->j_tail,
-                                               REQ_SYNC);
+                                               journal->j_tail, 0);
                mutex_unlock(&journal->j_checkpoint_mutex);
        } else {
                jbd2_debug(3, "superblock not updated\n");
@@ -715,6 +715,7 @@ start_journal_io:
 
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
+
                                /*
                                 * Compute checksum.
                                 */
@@ -727,7 +728,8 @@ start_journal_io:
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
-                               submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
+                               submit_bh(REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS,
+                                         bh);
                        }
                        cond_resched();
 
index ed53188472f9aa9949d1c3c6338cba01f725b79a..206cb53ef2b06813a7344da309e0d811dd3e808c 100644 (file)
@@ -1100,8 +1100,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
         * space and if we lose sb update during power failure we'd replay
         * old transaction with possibly newly overwritten data.
         */
-       ret = jbd2_journal_update_sb_log_tail(journal, tid, block,
-                                             REQ_SYNC | REQ_FUA);
+       ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
        if (ret)
                goto out;
 
@@ -1775,8 +1774,7 @@ static int journal_reset(journal_t *journal)
                 */
                jbd2_journal_update_sb_log_tail(journal,
                                                journal->j_tail_sequence,
-                                               journal->j_tail,
-                                               REQ_SYNC | REQ_FUA);
+                                               journal->j_tail, REQ_FUA);
                mutex_unlock(&journal->j_checkpoint_mutex);
        }
        return jbd2_journal_start_thread(journal);
@@ -1798,9 +1796,16 @@ static int jbd2_write_superblock(journal_t *journal, blk_opf_t write_flags)
                return -EIO;
        }
 
-       trace_jbd2_write_superblock(journal, write_flags);
+       /*
+        * Always set high priority flags to exempt from block layer's
+        * QOS policies, e.g. writeback throttle.
+        */
+       write_flags |= JBD2_JOURNAL_REQ_FLAGS;
        if (!(journal->j_flags & JBD2_BARRIER))
                write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
+
+       trace_jbd2_write_superblock(journal, write_flags);
+
        if (buffer_write_io_error(bh)) {
                /*
                 * Oh, dear.  A previous attempt to write the journal
@@ -2050,7 +2055,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
        jbd2_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
        sb->s_errno    = cpu_to_be32(errcode);
 
-       jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
+       jbd2_write_superblock(journal, REQ_FUA);
 }
 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
 
@@ -2171,8 +2176,7 @@ int jbd2_journal_destroy(journal_t *journal)
                                ++journal->j_transaction_sequence;
                        write_unlock(&journal->j_state_lock);
 
-                       jbd2_mark_journal_empty(journal,
-                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+                       jbd2_mark_journal_empty(journal, REQ_PREFLUSH | REQ_FUA);
                        mutex_unlock(&journal->j_checkpoint_mutex);
                } else
                        err = -EIO;
@@ -2473,7 +2477,7 @@ int jbd2_journal_flush(journal_t *journal, unsigned int flags)
         * the magic code for a fully-recovered superblock.  Any future
         * commits of data to the journal will restore the current
         * s_start value. */
-       jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
+       jbd2_mark_journal_empty(journal, REQ_FUA);
 
        if (flags)
                err = __jbd2_journal_erase(journal, flags);
@@ -2519,7 +2523,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
        if (write) {
                /* Lock to make assertions happy... */
                mutex_lock_io(&journal->j_checkpoint_mutex);
-               jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
+               jbd2_mark_journal_empty(journal, REQ_FUA);
                mutex_unlock(&journal->j_checkpoint_mutex);
        }
 
index fdf2aad7347090b7ceecd97a5a9eb3dfdfc1093a..e6beaaf4f1700b0ac78d05128dc23fd74c902dcb 100644 (file)
@@ -26,8 +26,6 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
        int i;
        int flags = nfsexp_flags(rqstp, exp);
 
-       validate_process_creds();
-
        /* discard any old override before preparing the new set */
        revert_creds(get_cred(current_real_cred()));
        new = prepare_creds();
@@ -81,10 +79,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
        else
                new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
                                                        new->cap_permitted);
-       validate_process_creds();
        put_cred(override_creds(new));
        put_cred(new);
-       validate_process_creds();
        return 0;
 
 oom:
index fe61d9bbcc1faa2d704f9ec926812a022d78381d..5014ab87d313f6797af445967a05288ac6e0750c 100644 (file)
@@ -955,7 +955,6 @@ nfsd(void *vrqstp)
                rqstp->rq_server->sv_maxconn = nn->max_connections;
 
                svc_recv(rqstp);
-               validate_process_creds();
        }
 
        atomic_dec(&nfsdstats.th_cnt);
index fbbea7498f02b4845c2302f84f91c5b0f555b5a6..e01e4e2acbd9a26cf8c6617ce29e1aba7968bceb 100644 (file)
@@ -901,7 +901,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
        int host_err;
        bool retried = false;
 
-       validate_process_creds();
        /*
         * If we get here, then the client has already done an "open",
         * and (hopefully) checked permission - so allow OWNER_OVERRIDE
@@ -926,7 +925,6 @@ retry:
                }
                err = nfserrno(host_err);
        }
-       validate_process_creds();
        return err;
 }
 
@@ -943,12 +941,7 @@ int
 nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
                   struct file **filp)
 {
-       int err;
-
-       validate_process_creds();
-       err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
-       validate_process_creds();
-       return err;
+       return __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
 }
 
 /*
index 2c6078a6b8ecb5edd9b1c1b69b192dfef7c47b00..58ca7c936393c4982bb1431e4d75841fd4183553 100644 (file)
@@ -501,15 +501,38 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 
        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
-       if (!ret) {
-               mark_buffer_dirty(bh);
-               nilfs_mdt_mark_dirty(sufile);
-               kaddr = kmap_atomic(bh->b_page);
-               su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+       if (ret)
+               goto out_sem;
+
+       kaddr = kmap_atomic(bh->b_page);
+       su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+       if (unlikely(nilfs_segment_usage_error(su))) {
+               struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+
+               kunmap_atomic(kaddr);
+               brelse(bh);
+               if (nilfs_segment_is_active(nilfs, segnum)) {
+                       nilfs_error(sufile->i_sb,
+                                   "active segment %llu is erroneous",
+                                   (unsigned long long)segnum);
+               } else {
+                       /*
+                        * Segments marked erroneous are never allocated by
+                        * nilfs_sufile_alloc(); only active segments, ie,
+                        * the segments indexed by ns_segnum or ns_nextnum,
+                        * can be erroneous here.
+                        */
+                       WARN_ON_ONCE(1);
+               }
+               ret = -EIO;
+       } else {
                nilfs_segment_usage_set_dirty(su);
                kunmap_atomic(kaddr);
+               mark_buffer_dirty(bh);
+               nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
        }
+out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
 }
@@ -536,9 +559,14 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
 
        kaddr = kmap_atomic(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
-       WARN_ON(nilfs_segment_usage_error(su));
-       if (modtime)
+       if (modtime) {
+               /*
+                * Check segusage error and set su_lastmod only when updating
+                * this entry with a valid timestamp, not for cancellation.
+                */
+               WARN_ON_ONCE(nilfs_segment_usage_error(su));
                su->su_lastmod = cpu_to_le64(modtime);
+       }
        su->su_nblocks = cpu_to_le32(nblocks);
        kunmap_atomic(kaddr);
 
index 0f0667957c8100d79f1b41f593fb5dc36cebe20a..71400496ed36519d2524ab552efed3e150899a52 100644 (file)
@@ -716,7 +716,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
                        goto failed_sbh;
                }
                nilfs_release_super_block(nilfs);
-               sb_set_blocksize(sb, blocksize);
+               if (!sb_set_blocksize(sb, blocksize)) {
+                       nilfs_err(sb, "bad blocksize %d", blocksize);
+                       err = -EINVAL;
+                       goto out;
+               }
 
                err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
                if (err)
index 02dc608d40d81f90c9427695763c85d0fd111e93..3494a9cd8046c42a0f9b681c87285e922a8afa40 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -1088,8 +1088,6 @@ struct file *dentry_open(const struct path *path, int flags,
        int error;
        struct file *f;
 
-       validate_creds(cred);
-
        /* We must always pass in a valid mount pointer. */
        BUG_ON(!path->mnt);
 
@@ -1128,7 +1126,6 @@ struct file *dentry_create(const struct path *path, int flags, umode_t mode,
        struct file *f;
        int error;
 
-       validate_creds(cred);
        f = alloc_empty_file(flags, cred);
        if (IS_ERR(f))
                return f;
index ef2eb12906da88c6fe3a227e82598020f0badc44..435b61054b5b9e768ac34fafd4bf5a7a2c378f89 100644 (file)
@@ -1982,15 +1982,31 @@ static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
        struct pagemap_scan_private *p = walk->private;
        struct vm_area_struct *vma = walk->vma;
        unsigned long vma_category = 0;
+       bool wp_allowed = userfaultfd_wp_async(vma) &&
+           userfaultfd_wp_use_markers(vma);
 
-       if (userfaultfd_wp_async(vma) && userfaultfd_wp_use_markers(vma))
-               vma_category |= PAGE_IS_WPALLOWED;
-       else if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
-               return -EPERM;
+       if (!wp_allowed) {
+               /* User requested explicit failure over wp-async capability */
+               if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
+                       return -EPERM;
+               /*
+                * User requires wr-protect, and allows silently skipping
+                * unsupported vmas.
+                */
+               if (p->arg.flags & PM_SCAN_WP_MATCHING)
+                       return 1;
+               /*
+                * Then the request doesn't involve wr-protects at all,
+                * fall through to the rest checks, and allow vma walk.
+                */
+       }
 
        if (vma->vm_flags & VM_PFNMAP)
                return 1;
 
+       if (wp_allowed)
+               vma_category |= PAGE_IS_WPALLOWED;
+
        if (!pagemap_scan_is_interesting_vma(vma_category, p))
                return 1;
 
@@ -2140,7 +2156,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
                return 0;
        }
 
-       if (!p->vec_out) {
+       if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
                /* Fast path for performing exclusive WP */
                for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
                        if (pte_uffd_wp(ptep_get(pte)))
index 59f6b8e32cc97acda64aa7b95c45544a1e52ee0a..d64a306a414be0580e910842b19f150bf43863a9 100644 (file)
@@ -291,16 +291,23 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
        oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
 
-       rc = -EINVAL;
+
        if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
+               spin_unlock(&cfids->cfid_list_lock);
+               rc = -EINVAL;
+               goto oshr_free;
+       }
+
+       rc = smb2_parse_contexts(server, rsp_iov,
+                                &oparms.fid->epoch,
+                                oparms.fid->lease_key,
+                                &oplock, NULL, NULL);
+       if (rc) {
                spin_unlock(&cfids->cfid_list_lock);
                goto oshr_free;
        }
 
-       smb2_parse_contexts(server, o_rsp,
-                           &oparms.fid->epoch,
-                           oparms.fid->lease_key, &oplock,
-                           NULL, NULL);
+       rc = -EINVAL;
        if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
                spin_unlock(&cfids->cfid_list_lock);
                goto oshr_free;
index ea3a7a668b45f38ccfa6079449e666f2e6c9877b..2131638f26d0b4b89a89b7ea40a9064403872f8d 100644 (file)
@@ -1196,32 +1196,103 @@ const struct inode_operations cifs_symlink_inode_ops = {
        .listxattr = cifs_listxattr,
 };
 
+/*
+ * Advance the EOF marker to after the source range.
+ */
+static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
+                               struct cifs_tcon *src_tcon,
+                               unsigned int xid, loff_t src_end)
+{
+       struct cifsFileInfo *writeable_srcfile;
+       int rc = -EINVAL;
+
+       writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
+       if (writeable_srcfile) {
+               if (src_tcon->ses->server->ops->set_file_size)
+                       rc = src_tcon->ses->server->ops->set_file_size(
+                               xid, src_tcon, writeable_srcfile,
+                               src_inode->i_size, true /* no need to set sparse */);
+               else
+                       rc = -ENOSYS;
+               cifsFileInfo_put(writeable_srcfile);
+               cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
+       }
+
+       if (rc < 0)
+               goto set_failed;
+
+       netfs_resize_file(&src_cifsi->netfs, src_end);
+       fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
+       return 0;
+
+set_failed:
+       return filemap_write_and_wait(src_inode->i_mapping);
+}
+
+/*
+ * Flush out either the folio that overlaps the beginning of a range in which
+ * pos resides or the folio that overlaps the end of a range unless that folio
+ * is entirely within the range we're going to invalidate.  We extend the flush
+ * bounds to encompass the folio.
+ */
+static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
+                           bool first)
+{
+       struct folio *folio;
+       unsigned long long fpos, fend;
+       pgoff_t index = pos / PAGE_SIZE;
+       size_t size;
+       int rc = 0;
+
+       folio = filemap_get_folio(inode->i_mapping, index);
+       if (IS_ERR(folio))
+               return 0;
+
+       size = folio_size(folio);
+       fpos = folio_pos(folio);
+       fend = fpos + size - 1;
+       *_fstart = min_t(unsigned long long, *_fstart, fpos);
+       *_fend   = max_t(unsigned long long, *_fend, fend);
+       if ((first && pos == fpos) || (!first && pos == fend))
+               goto out;
+
+       rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
+out:
+       folio_put(folio);
+       return rc;
+}
+
 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
                struct file *dst_file, loff_t destoff, loff_t len,
                unsigned int remap_flags)
 {
        struct inode *src_inode = file_inode(src_file);
        struct inode *target_inode = file_inode(dst_file);
+       struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+       struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
        struct cifsFileInfo *smb_file_src = src_file->private_data;
-       struct cifsFileInfo *smb_file_target;
-       struct cifs_tcon *target_tcon;
+       struct cifsFileInfo *smb_file_target = dst_file->private_data;
+       struct cifs_tcon *target_tcon, *src_tcon;
+       unsigned long long destend, fstart, fend, new_size;
        unsigned int xid;
        int rc;
 
-       if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+       if (remap_flags & REMAP_FILE_DEDUP)
+               return -EOPNOTSUPP;
+       if (remap_flags & ~REMAP_FILE_ADVISORY)
                return -EINVAL;
 
        cifs_dbg(FYI, "clone range\n");
 
        xid = get_xid();
 
-       if (!src_file->private_data || !dst_file->private_data) {
+       if (!smb_file_src || !smb_file_target) {
                rc = -EBADF;
                cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
                goto out;
        }
 
-       smb_file_target = dst_file->private_data;
+       src_tcon = tlink_tcon(smb_file_src->tlink);
        target_tcon = tlink_tcon(smb_file_target->tlink);
 
        /*
@@ -1234,20 +1305,63 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
        if (len == 0)
                len = src_inode->i_size - off;
 
-       cifs_dbg(FYI, "about to flush pages\n");
-       /* should we flush first and last page first */
-       truncate_inode_pages_range(&target_inode->i_data, destoff,
-                                  PAGE_ALIGN(destoff + len)-1);
+       cifs_dbg(FYI, "clone range\n");
 
-       if (target_tcon->ses->server->ops->duplicate_extents)
+       /* Flush the source buffer */
+       rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
+                                         off + len - 1);
+       if (rc)
+               goto unlock;
+
+       /* The server-side copy will fail if the source crosses the EOF marker.
+        * Advance the EOF marker after the flush above to the end of the range
+        * if it's short of that.
+        */
+       if (src_cifsi->netfs.remote_i_size < off + len) {
+               rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
+               if (rc < 0)
+                       goto unlock;
+       }
+
+       new_size = destoff + len;
+       destend = destoff + len - 1;
+
+       /* Flush the folios at either end of the destination range to prevent
+        * accidental loss of dirty data outside of the range.
+        */
+       fstart = destoff;
+       fend = destend;
+
+       rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+       if (rc)
+               goto unlock;
+       rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+       if (rc)
+               goto unlock;
+
+       /* Discard all the folios that overlap the destination region. */
+       cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
+       truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+
+       fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
+                          i_size_read(target_inode), 0);
+
+       rc = -EOPNOTSUPP;
+       if (target_tcon->ses->server->ops->duplicate_extents) {
                rc = target_tcon->ses->server->ops->duplicate_extents(xid,
                        smb_file_src, smb_file_target, off, len, destoff);
-       else
-               rc = -EOPNOTSUPP;
+               if (rc == 0 && new_size > i_size_read(target_inode)) {
+                       truncate_setsize(target_inode, new_size);
+                       netfs_resize_file(&target_cifsi->netfs, new_size);
+                       fscache_resize_cookie(cifs_inode_cookie(target_inode),
+                                             new_size);
+               }
+       }
 
        /* force revalidate of size and timestamps of target file now
           that target is updated on the server */
        CIFS_I(target_inode)->time = 0;
+unlock:
        /* although unlocking in the reverse order from locking is not
           strictly necessary here it is a little cleaner to be consistent */
        unlock_two_nondirectories(src_inode, target_inode);
@@ -1263,10 +1377,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
 {
        struct inode *src_inode = file_inode(src_file);
        struct inode *target_inode = file_inode(dst_file);
+       struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
        struct cifsFileInfo *smb_file_src;
        struct cifsFileInfo *smb_file_target;
        struct cifs_tcon *src_tcon;
        struct cifs_tcon *target_tcon;
+       unsigned long long destend, fstart, fend;
        ssize_t rc;
 
        cifs_dbg(FYI, "copychunk range\n");
@@ -1306,13 +1422,41 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
        if (rc)
                goto unlock;
 
-       /* should we flush first and last page first */
-       truncate_inode_pages(&target_inode->i_data, 0);
+       /* The server-side copy will fail if the source crosses the EOF marker.
+        * Advance the EOF marker after the flush above to the end of the range
+        * if it's short of that.
+        */
+       if (src_cifsi->server_eof < off + len) {
+               rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
+               if (rc < 0)
+                       goto unlock;
+       }
+
+       destend = destoff + len - 1;
+
+       /* Flush the folios at either end of the destination range to prevent
+        * accidental loss of dirty data outside of the range.
+        */
+       fstart = destoff;
+       fend = destend;
+
+       rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+       if (rc)
+               goto unlock;
+       rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+       if (rc)
+               goto unlock;
+
+       /* Discard all the folios that overlap the destination region. */
+       truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
 
        rc = file_modified(dst_file);
-       if (!rc)
+       if (!rc) {
                rc = target_tcon->ses->server->ops->copychunk_range(xid,
                        smb_file_src, smb_file_target, off, len, destoff);
+               if (rc > 0 && destoff + rc > i_size_read(target_inode))
+                       truncate_setsize(target_inode, destoff + rc);
+       }
 
        file_accessed(src_file);
 
index f896f60c924bfa462e8fa764fdf806bcd67b0cfb..9dc6dc2754c2e9e16232e7053f4f5e460246174d 100644 (file)
@@ -402,13 +402,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
                        spin_unlock(&server->srv_lock);
                        cifs_swn_reset_server_dstaddr(server);
                        cifs_server_unlock(server);
-
-                       /* increase ref count which reconnect work will drop */
-                       spin_lock(&cifs_tcp_ses_lock);
-                       server->srv_count++;
-                       spin_unlock(&cifs_tcp_ses_lock);
-                       if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
-                               cifs_put_tcp_session(server, false);
+                       mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
                }
        } while (server->tcpStatus == CifsNeedReconnect);
 
@@ -538,13 +532,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
                spin_unlock(&server->srv_lock);
                cifs_swn_reset_server_dstaddr(server);
                cifs_server_unlock(server);
-
-               /* increase ref count which reconnect work will drop */
-               spin_lock(&cifs_tcp_ses_lock);
-               server->srv_count++;
-               spin_unlock(&cifs_tcp_ses_lock);
-               if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
-                       cifs_put_tcp_session(server, false);
+               mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
        } while (server->tcpStatus == CifsNeedReconnect);
 
        mutex_lock(&server->refpath_lock);
@@ -1620,25 +1608,22 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
        list_del_init(&server->tcp_ses_list);
        spin_unlock(&cifs_tcp_ses_lock);
 
-       /* For secondary channels, we pick up ref-count on the primary server */
-       if (SERVER_IS_CHAN(server))
-               cifs_put_tcp_session(server->primary_server, from_reconnect);
-
        cancel_delayed_work_sync(&server->echo);
 
-       if (from_reconnect) {
+       if (from_reconnect)
                /*
                 * Avoid deadlock here: reconnect work calls
                 * cifs_put_tcp_session() at its end. Need to be sure
                 * that reconnect work does nothing with server pointer after
                 * that step.
                 */
-               if (cancel_delayed_work(&server->reconnect))
-                       cifs_put_tcp_session(server, from_reconnect);
-       } else {
-               if (cancel_delayed_work_sync(&server->reconnect))
-                       cifs_put_tcp_session(server, from_reconnect);
-       }
+               cancel_delayed_work(&server->reconnect);
+       else
+               cancel_delayed_work_sync(&server->reconnect);
+
+       /* For secondary channels, we pick up ref-count on the primary server */
+       if (SERVER_IS_CHAN(server))
+               cifs_put_tcp_session(server->primary_server, from_reconnect);
 
        spin_lock(&server->srv_lock);
        server->tcpStatus = CifsExiting;
index 32dfa0f7a78c30013f9d5ead8c5f684b295183ec..e20b4354e703b8e9662808bf19441fe91a7f682b 100644 (file)
@@ -313,6 +313,9 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
 char *
 smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
 {
+       const int max_off = 4096;
+       const int max_len = 128 * 1024;
+
        *off = 0;
        *len = 0;
 
@@ -384,29 +387,20 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
         * Invalid length or offset probably means data area is invalid, but
         * we have little choice but to ignore the data area in this case.
         */
-       if (*off > 4096) {
-               cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
-               *len = 0;
-               *off = 0;
-       } else if (*off < 0) {
-               cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
-                        *off);
+       if (unlikely(*off < 0 || *off > max_off ||
+                    *len < 0 || *len > max_len)) {
+               cifs_dbg(VFS, "%s: invalid data area (off=%d len=%d)\n",
+                        __func__, *off, *len);
                *off = 0;
                *len = 0;
-       } else if (*len < 0) {
-               cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
-                        *len);
-               *len = 0;
-       } else if (*len > 128 * 1024) {
-               cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
+       } else if (*off == 0) {
                *len = 0;
        }
 
        /* return pointer to beginning of data area, ie offset from SMB start */
-       if ((*off != 0) && (*len != 0))
+       if (*off > 0 && *len > 0)
                return (char *)shdr + *off;
-       else
-               return NULL;
+       return NULL;
 }
 
 /*
index 45931115f475f0b9ea74769f620140af08726e9a..8f6f0a38b88684aa0287a9df8fe776b1760984ea 100644 (file)
@@ -2836,6 +2836,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                usleep_range(512, 2048);
        } while (++retry_count < 5);
 
+       if (!rc && !dfs_rsp)
+               rc = -EIO;
        if (rc) {
                if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
                        cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
@@ -3001,7 +3003,7 @@ static int smb2_query_reparse_point(const unsigned int xid,
        struct kvec *rsp_iov;
        struct smb2_ioctl_rsp *ioctl_rsp;
        struct reparse_data_buffer *reparse_buf;
-       u32 plen;
+       u32 off, count, len;
 
        cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
 
@@ -3082,16 +3084,22 @@ static int smb2_query_reparse_point(const unsigned int xid,
         */
        if (rc == 0) {
                /* See MS-FSCC 2.3.23 */
+               off = le32_to_cpu(ioctl_rsp->OutputOffset);
+               count = le32_to_cpu(ioctl_rsp->OutputCount);
+               if (check_add_overflow(off, count, &len) ||
+                   len > rsp_iov[1].iov_len) {
+                       cifs_tcon_dbg(VFS, "%s: invalid ioctl: off=%d count=%d\n",
+                                     __func__, off, count);
+                       rc = -EIO;
+                       goto query_rp_exit;
+               }
 
-               reparse_buf = (struct reparse_data_buffer *)
-                       ((char *)ioctl_rsp +
-                        le32_to_cpu(ioctl_rsp->OutputOffset));
-               plen = le32_to_cpu(ioctl_rsp->OutputCount);
-
-               if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
-                   rsp_iov[1].iov_len) {
-                       cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
-                                plen);
+               reparse_buf = (void *)((u8 *)ioctl_rsp + off);
+               len = sizeof(*reparse_buf);
+               if (count < len ||
+                   count < le16_to_cpu(reparse_buf->ReparseDataLength) + len) {
+                       cifs_tcon_dbg(VFS, "%s: invalid ioctl: off=%d count=%d\n",
+                                     __func__, off, count);
                        rc = -EIO;
                        goto query_rp_exit;
                }
@@ -4941,6 +4949,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
        struct smb2_hdr *shdr;
        unsigned int pdu_length = server->pdu_size;
        unsigned int buf_size;
+       unsigned int next_cmd;
        struct mid_q_entry *mid_entry;
        int next_is_large;
        char *next_buffer = NULL;
@@ -4969,14 +4978,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
        next_is_large = server->large_buf;
 one_more:
        shdr = (struct smb2_hdr *)buf;
-       if (shdr->NextCommand) {
+       next_cmd = le32_to_cpu(shdr->NextCommand);
+       if (next_cmd) {
+               if (WARN_ON_ONCE(next_cmd > pdu_length))
+                       return -1;
                if (next_is_large)
                        next_buffer = (char *)cifs_buf_get();
                else
                        next_buffer = (char *)cifs_small_buf_get();
-               memcpy(next_buffer,
-                      buf + le32_to_cpu(shdr->NextCommand),
-                      pdu_length - le32_to_cpu(shdr->NextCommand));
+               memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
        }
 
        mid_entry = smb2_find_mid(server, buf);
@@ -5000,8 +5010,8 @@ one_more:
        else
                ret = cifs_handle_standard(server, mid_entry);
 
-       if (ret == 0 && shdr->NextCommand) {
-               pdu_length -= le32_to_cpu(shdr->NextCommand);
+       if (ret == 0 && next_cmd) {
+               pdu_length -= next_cmd;
                server->large_buf = next_is_large;
                if (next_is_large)
                        server->bigbuf = buf = next_buffer;
index 395e1230ddbc99a87d2ba3c8349f17fbb015abfb..c571760ad39a100bd90fada8a0ad8905c6235e2b 100644 (file)
@@ -158,7 +158,7 @@ out:
 
 static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
-              struct TCP_Server_Info *server)
+              struct TCP_Server_Info *server, bool from_reconnect)
 {
        int rc = 0;
        struct nls_table *nls_codepage = NULL;
@@ -331,7 +331,7 @@ again:
                                 * as cifs_put_tcp_session takes a higher lock
                                 * i.e. cifs_tcp_ses_lock
                                 */
-                               cifs_put_tcp_session(server, 1);
+                               cifs_put_tcp_session(server, from_reconnect);
 
                                server->terminate = true;
                                cifs_signal_cifsd_for_reconnect(server, false);
@@ -499,7 +499,7 @@ static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
 {
        int rc;
 
-       rc = smb2_reconnect(smb2_command, tcon, server);
+       rc = smb2_reconnect(smb2_command, tcon, server, false);
        if (rc)
                return rc;
 
@@ -2236,17 +2236,18 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
                 posix->nlink, posix->mode, posix->reparse_tag);
 }
 
-void
-smb2_parse_contexts(struct TCP_Server_Info *server,
-                   struct smb2_create_rsp *rsp,
-                   unsigned int *epoch, char *lease_key, __u8 *oplock,
-                   struct smb2_file_all_info *buf,
-                   struct create_posix_rsp *posix)
+int smb2_parse_contexts(struct TCP_Server_Info *server,
+                       struct kvec *rsp_iov,
+                       unsigned int *epoch,
+                       char *lease_key, __u8 *oplock,
+                       struct smb2_file_all_info *buf,
+                       struct create_posix_rsp *posix)
 {
-       char *data_offset;
+       struct smb2_create_rsp *rsp = rsp_iov->iov_base;
        struct create_context *cc;
-       unsigned int next;
-       unsigned int remaining;
+       size_t rem, off, len;
+       size_t doff, dlen;
+       size_t noff, nlen;
        char *name;
        static const char smb3_create_tag_posix[] = {
                0x93, 0xAD, 0x25, 0x50, 0x9C,
@@ -2255,45 +2256,63 @@ smb2_parse_contexts(struct TCP_Server_Info *server,
        };
 
        *oplock = 0;
-       data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
-       remaining = le32_to_cpu(rsp->CreateContextsLength);
-       cc = (struct create_context *)data_offset;
+
+       off = le32_to_cpu(rsp->CreateContextsOffset);
+       rem = le32_to_cpu(rsp->CreateContextsLength);
+       if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
+               return -EINVAL;
+       cc = (struct create_context *)((u8 *)rsp + off);
 
        /* Initialize inode number to 0 in case no valid data in qfid context */
        if (buf)
                buf->IndexNumber = 0;
 
-       while (remaining >= sizeof(struct create_context)) {
-               name = le16_to_cpu(cc->NameOffset) + (char *)cc;
-               if (le16_to_cpu(cc->NameLength) == 4 &&
-                   strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
-                       *oplock = server->ops->parse_lease_buf(cc, epoch,
-                                                          lease_key);
-               else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
-                   strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
-                       parse_query_id_ctxt(cc, buf);
-               else if ((le16_to_cpu(cc->NameLength) == 16)) {
-                       if (posix &&
-                           memcmp(name, smb3_create_tag_posix, 16) == 0)
+       while (rem >= sizeof(*cc)) {
+               doff = le16_to_cpu(cc->DataOffset);
+               dlen = le32_to_cpu(cc->DataLength);
+               if (check_add_overflow(doff, dlen, &len) || len > rem)
+                       return -EINVAL;
+
+               noff = le16_to_cpu(cc->NameOffset);
+               nlen = le16_to_cpu(cc->NameLength);
+               if (noff + nlen >= doff)
+                       return -EINVAL;
+
+               name = (char *)cc + noff;
+               switch (nlen) {
+               case 4:
+                       if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
+                               *oplock = server->ops->parse_lease_buf(cc, epoch,
+                                                                      lease_key);
+                       } else if (buf &&
+                                  !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
+                               parse_query_id_ctxt(cc, buf);
+                       }
+                       break;
+               case 16:
+                       if (posix && !memcmp(name, smb3_create_tag_posix, 16))
                                parse_posix_ctxt(cc, buf, posix);
+                       break;
+               default:
+                       cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
+                                __func__, nlen, dlen);
+                       if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
+                               cifs_dump_mem("context data: ", cc, dlen);
+                       break;
                }
-               /* else {
-                       cifs_dbg(FYI, "Context not matched with len %d\n",
-                               le16_to_cpu(cc->NameLength));
-                       cifs_dump_mem("Cctxt name: ", name, 4);
-               } */
-
-               next = le32_to_cpu(cc->Next);
-               if (!next)
+
+               off = le32_to_cpu(cc->Next);
+               if (!off)
                        break;
-               remaining -= next;
-               cc = (struct create_context *)((char *)cc + next);
+               if (check_sub_overflow(rem, off, &rem))
+                       return -EINVAL;
+               cc = (struct create_context *)((u8 *)cc + off);
        }
 
        if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
                *oplock = rsp->OplockLevel;
 
-       return;
+       return 0;
 }
 
 static int
@@ -3124,8 +3143,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        }
 
 
-       smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
-                           oparms->fid->lease_key, oplock, buf, posix);
+       rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
+                                oparms->fid->lease_key, oplock, buf, posix);
 creat_exit:
        SMB2_open_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
@@ -3895,6 +3914,15 @@ void smb2_reconnect_server(struct work_struct *work)
        int rc;
        bool resched = false;
 
+       /* first check if ref count has reached 0, if not inc ref count */
+       spin_lock(&cifs_tcp_ses_lock);
+       if (!server->srv_count) {
+               spin_unlock(&cifs_tcp_ses_lock);
+               return;
+       }
+       server->srv_count++;
+       spin_unlock(&cifs_tcp_ses_lock);
+
        /* If server is a channel, select the primary channel */
        pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
 
@@ -3952,11 +3980,10 @@ void smb2_reconnect_server(struct work_struct *work)
                }
                spin_unlock(&ses->chan_lock);
        }
-
        spin_unlock(&cifs_tcp_ses_lock);
 
        list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-               rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+               rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
                if (!rc)
                        cifs_reopen_persistent_handles(tcon);
                else
@@ -3989,7 +4016,7 @@ void smb2_reconnect_server(struct work_struct *work)
        /* now reconnect sessions for necessary channels */
        list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
                tcon->ses = ses;
-               rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+               rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
                if (rc)
                        resched = true;
                list_del_init(&ses->rlist);
@@ -3999,13 +4026,8 @@ void smb2_reconnect_server(struct work_struct *work)
 
 done:
        cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
-       if (resched) {
+       if (resched)
                queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
-               mutex_unlock(&pserver->reconnect_mutex);
-
-               /* no need to put tcp session as we're retrying */
-               return;
-       }
        mutex_unlock(&pserver->reconnect_mutex);
 
        /* now we can safely release srv struct */
@@ -4029,12 +4051,7 @@ SMB2_echo(struct TCP_Server_Info *server)
            server->ops->need_neg(server)) {
                spin_unlock(&server->srv_lock);
                /* No need to send echo on newly established connections */
-               spin_lock(&cifs_tcp_ses_lock);
-               server->srv_count++;
-               spin_unlock(&cifs_tcp_ses_lock);
-               if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
-                       cifs_put_tcp_session(server, false);
-
+               mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
                return rc;
        }
        spin_unlock(&server->srv_lock);
index 46eff9ec302aadc5c1eccdc2e13168607f5304c1..0e371f7e2854b924053cbec235ef3b94f9244551 100644 (file)
@@ -251,11 +251,13 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
 
 extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
                                        enum securityEnum);
-extern void smb2_parse_contexts(struct TCP_Server_Info *server,
-                               struct smb2_create_rsp *rsp,
-                               unsigned int *epoch, char *lease_key,
-                               __u8 *oplock, struct smb2_file_all_info *buf,
-                               struct create_posix_rsp *posix);
+int smb2_parse_contexts(struct TCP_Server_Info *server,
+                       struct kvec *rsp_iov,
+                       unsigned int *epoch,
+                       char *lease_key, __u8 *oplock,
+                       struct smb2_file_all_info *buf,
+                       struct create_posix_rsp *posix);
+
 extern int smb3_encryption_required(const struct cifs_tcon *tcon);
 extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
                             struct kvec *iov, unsigned int min_buf_size);
index 9fbaaa387dcc50c05d22cc55c680ca4105c152cd..57f2343164a34d81ad9816c0dec7c8994b66beb0 100644 (file)
@@ -1145,7 +1145,7 @@ struct smb2_server_client_notification {
 #define SMB2_CREATE_SD_BUFFER                  "SecD" /* security descriptor */
 #define SMB2_CREATE_DURABLE_HANDLE_REQUEST     "DHnQ"
 #define SMB2_CREATE_DURABLE_HANDLE_RECONNECT   "DHnC"
-#define SMB2_CREATE_ALLOCATION_SIZE            "AISi"
+#define SMB2_CREATE_ALLOCATION_SIZE            "AlSi"
 #define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
 #define SMB2_CREATE_TIMEWARP_REQUEST           "TWrp"
 #define SMB2_CREATE_QUERY_ON_DISK_ID           "QFid"
@@ -1253,6 +1253,7 @@ struct create_mxac_rsp {
 #define SMB2_LEASE_WRITE_CACHING_LE            cpu_to_le32(0x04)
 
 #define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE   cpu_to_le32(0x02)
+#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE        cpu_to_le32(0x04)
 
 #define SMB2_LEASE_KEY_SIZE                    16
 
index 50c68beb71d6c49855b3f3dc8b8b2789e25e90c5..562b180459a1a82d9137c96a2832b85485561c7a 100644 (file)
@@ -102,9 +102,10 @@ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
        lease->new_state = 0;
        lease->flags = lctx->flags;
        lease->duration = lctx->duration;
+       lease->is_dir = lctx->is_dir;
        memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
        lease->version = lctx->version;
-       lease->epoch = 0;
+       lease->epoch = le16_to_cpu(lctx->epoch);
        INIT_LIST_HEAD(&opinfo->lease_entry);
        opinfo->o_lease = lease;
 
@@ -395,8 +396,8 @@ void close_id_del_oplock(struct ksmbd_file *fp)
 {
        struct oplock_info *opinfo;
 
-       if (S_ISDIR(file_inode(fp->filp)->i_mode))
-               return;
+       if (fp->reserve_lease_break)
+               smb_lazy_parent_lease_break_close(fp);
 
        opinfo = opinfo_get(fp);
        if (!opinfo)
@@ -543,12 +544,13 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
                        /* upgrading lease */
                        if ((atomic_read(&ci->op_count) +
                             atomic_read(&ci->sop_count)) == 1) {
-                               if (lease->state ==
-                                   (lctx->req_state & lease->state)) {
+                               if (lease->state != SMB2_LEASE_NONE_LE &&
+                                   lease->state == (lctx->req_state & lease->state)) {
                                        lease->state |= lctx->req_state;
                                        if (lctx->req_state &
                                                SMB2_LEASE_WRITE_CACHING_LE)
                                                lease_read_to_write(opinfo);
+
                                }
                        } else if ((atomic_read(&ci->op_count) +
                                    atomic_read(&ci->sop_count)) > 1) {
@@ -900,7 +902,8 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
                                        lease->new_state =
                                                SMB2_LEASE_READ_CACHING_LE;
                        } else {
-                               if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+                               if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE &&
+                                               !lease->is_dir)
                                        lease->new_state =
                                                SMB2_LEASE_READ_CACHING_LE;
                                else
@@ -1032,6 +1035,7 @@ static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
               SMB2_LEASE_KEY_SIZE);
        lease2->duration = lease1->duration;
        lease2->flags = lease1->flags;
+       lease2->epoch = lease1->epoch++;
 }
 
 static int add_lease_global_list(struct oplock_info *opinfo)
@@ -1081,6 +1085,89 @@ static void set_oplock_level(struct oplock_info *opinfo, int level,
        }
 }
 
+void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+                                     struct lease_ctx_info *lctx)
+{
+       struct oplock_info *opinfo;
+       struct ksmbd_inode *p_ci = NULL;
+
+       if (lctx->version != 2)
+               return;
+
+       p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
+       if (!p_ci)
+               return;
+
+       read_lock(&p_ci->m_lock);
+       list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
+               if (!opinfo->is_lease)
+                       continue;
+
+               if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE &&
+                   (!(lctx->flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE) ||
+                    !compare_guid_key(opinfo, fp->conn->ClientGUID,
+                                     lctx->parent_lease_key))) {
+                       if (!atomic_inc_not_zero(&opinfo->refcount))
+                               continue;
+
+                       atomic_inc(&opinfo->conn->r_count);
+                       if (ksmbd_conn_releasing(opinfo->conn)) {
+                               atomic_dec(&opinfo->conn->r_count);
+                               continue;
+                       }
+
+                       read_unlock(&p_ci->m_lock);
+                       oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
+                       opinfo_conn_put(opinfo);
+                       read_lock(&p_ci->m_lock);
+               }
+       }
+       read_unlock(&p_ci->m_lock);
+
+       ksmbd_inode_put(p_ci);
+}
+
+void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
+{
+       struct oplock_info *opinfo;
+       struct ksmbd_inode *p_ci = NULL;
+
+       rcu_read_lock();
+       opinfo = rcu_dereference(fp->f_opinfo);
+       rcu_read_unlock();
+
+       if (!opinfo->is_lease || opinfo->o_lease->version != 2)
+               return;
+
+       p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
+       if (!p_ci)
+               return;
+
+       read_lock(&p_ci->m_lock);
+       list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
+               if (!opinfo->is_lease)
+                       continue;
+
+               if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE) {
+                       if (!atomic_inc_not_zero(&opinfo->refcount))
+                               continue;
+
+                       atomic_inc(&opinfo->conn->r_count);
+                       if (ksmbd_conn_releasing(opinfo->conn)) {
+                               atomic_dec(&opinfo->conn->r_count);
+                               continue;
+                       }
+                       read_unlock(&p_ci->m_lock);
+                       oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
+                       opinfo_conn_put(opinfo);
+                       read_lock(&p_ci->m_lock);
+               }
+       }
+       read_unlock(&p_ci->m_lock);
+
+       ksmbd_inode_put(p_ci);
+}
+
 /**
  * smb_grant_oplock() - handle oplock/lease request on file open
  * @work:              smb work
@@ -1104,10 +1191,6 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
        bool prev_op_has_lease;
        __le32 prev_op_state = 0;
 
-       /* not support directory lease */
-       if (S_ISDIR(file_inode(fp->filp)->i_mode))
-               return 0;
-
        opinfo = alloc_opinfo(work, pid, tid);
        if (!opinfo)
                return -ENOMEM;
@@ -1364,6 +1447,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
                memcpy(buf->lcontext.LeaseKey, lease->lease_key,
                       SMB2_LEASE_KEY_SIZE);
                buf->lcontext.LeaseFlags = lease->flags;
+               buf->lcontext.Epoch = cpu_to_le16(++lease->epoch);
                buf->lcontext.LeaseState = lease->state;
                memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
                       SMB2_LEASE_KEY_SIZE);
@@ -1400,10 +1484,11 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
 /**
  * parse_lease_state() - parse lease context containted in file open request
  * @open_req:  buffer containing smb2 file open(create) request
+ * @is_dir:    whether leasing file is directory
  *
  * Return:  oplock state, -ENOENT if create lease context not found
  */
-struct lease_ctx_info *parse_lease_state(void *open_req)
+struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir)
 {
        struct create_context *cc;
        struct smb2_create_req *req = (struct smb2_create_req *)open_req;
@@ -1421,8 +1506,14 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
                struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
 
                memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
-               lreq->req_state = lc->lcontext.LeaseState;
+               if (is_dir) {
+                       lreq->req_state = lc->lcontext.LeaseState &
+                               ~SMB2_LEASE_WRITE_CACHING_LE;
+                       lreq->is_dir = true;
+               } else
+                       lreq->req_state = lc->lcontext.LeaseState;
                lreq->flags = lc->lcontext.LeaseFlags;
+               lreq->epoch = lc->lcontext.Epoch;
                lreq->duration = lc->lcontext.LeaseDuration;
                memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
                                SMB2_LEASE_KEY_SIZE);
index 4b0fe6da76940f54a915cb4bd20d3ce67539bd3e..5b93ea9196c013d0b757add55db43c143cd9706b 100644 (file)
@@ -34,7 +34,9 @@ struct lease_ctx_info {
        __le32                  flags;
        __le64                  duration;
        __u8                    parent_lease_key[SMB2_LEASE_KEY_SIZE];
+       __le16                  epoch;
        int                     version;
+       bool                    is_dir;
 };
 
 struct lease_table {
@@ -53,6 +55,7 @@ struct lease {
        __u8                    parent_lease_key[SMB2_LEASE_KEY_SIZE];
        int                     version;
        unsigned short          epoch;
+       bool                    is_dir;
        struct lease_table      *l_lb;
 };
 
@@ -108,7 +111,7 @@ void opinfo_put(struct oplock_info *opinfo);
 
 /* Lease related functions */
 void create_lease_buf(u8 *rbuf, struct lease *lease);
-struct lease_ctx_info *parse_lease_state(void *open_req);
+struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir);
 __u8 smb2_map_lease_to_oplock(__le32 lease_state);
 int lease_read_to_write(struct oplock_info *opinfo);
 
@@ -124,4 +127,7 @@ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
 int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
                        struct lease_ctx_info *lctx);
 void destroy_lease_table(struct ksmbd_conn *conn);
+void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+                                     struct lease_ctx_info *lctx);
+void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp);
 #endif /* __KSMBD_OPLOCK_H */
index aed7704a0672864609f6a9ab313efeca0cba9be0..27a9dce3e03abe4673ac7241aa828943d22754d8 100644 (file)
@@ -221,7 +221,8 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
        conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
 
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
-               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
+                       SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
 
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
            conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
@@ -245,7 +246,8 @@ void init_smb3_02_server(struct ksmbd_conn *conn)
        conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
 
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
-               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
+                       SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
 
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
            (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
@@ -270,7 +272,8 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
        conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
 
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
-               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+               conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
+                       SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
 
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
            (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
index d369b98a6e10373bbc331beba1daa4414db619a9..652ab429bf2e9cd945a772c6cddac46012fadf00 100644 (file)
@@ -2516,7 +2516,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
        da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
                XATTR_DOSINFO_ITIME;
 
-       rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, false);
+       rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, true);
        if (rc)
                ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
 }
@@ -2732,10 +2732,6 @@ int smb2_open(struct ksmbd_work *work)
                }
        }
 
-       req_op_level = req->RequestedOplockLevel;
-       if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
-               lc = parse_lease_state(req);
-
        if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
                pr_err("Invalid impersonationlevel : 0x%x\n",
                       le32_to_cpu(req->ImpersonationLevel));
@@ -3189,23 +3185,6 @@ int smb2_open(struct ksmbd_work *work)
                goto err_out;
        }
 
-       rc = ksmbd_vfs_getattr(&path, &stat);
-       if (rc)
-               goto err_out;
-
-       if (stat.result_mask & STATX_BTIME)
-               fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
-       else
-               fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
-       if (req->FileAttributes || fp->f_ci->m_fattr == 0)
-               fp->f_ci->m_fattr =
-                       cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
-
-       if (!created)
-               smb2_update_xattrs(tcon, &path, fp);
-       else
-               smb2_new_xattrs(tcon, &path, fp);
-
        if (file_present || created)
                ksmbd_vfs_kern_path_unlock(&parent_path, &path);
 
@@ -3215,6 +3194,10 @@ int smb2_open(struct ksmbd_work *work)
                need_truncate = 1;
        }
 
+       req_op_level = req->RequestedOplockLevel;
+       if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
+               lc = parse_lease_state(req, S_ISDIR(file_inode(filp)->i_mode));
+
        share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
        if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
            (req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
@@ -3225,6 +3208,13 @@ int smb2_open(struct ksmbd_work *work)
                }
        } else {
                if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
+                       /*
+                        * Compare parent lease using parent key. If there is no
+                        * a lease that has same parent key, Send lease break
+                        * notification.
+                        */
+                       smb_send_parent_lease_break_noti(fp, lc);
+
                        req_op_level = smb2_map_lease_to_oplock(lc->req_state);
                        ksmbd_debug(SMB,
                                    "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n",
@@ -3295,6 +3285,23 @@ int smb2_open(struct ksmbd_work *work)
                }
        }
 
+       rc = ksmbd_vfs_getattr(&path, &stat);
+       if (rc)
+               goto err_out1;
+
+       if (stat.result_mask & STATX_BTIME)
+               fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+       else
+               fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+       if (req->FileAttributes || fp->f_ci->m_fattr == 0)
+               fp->f_ci->m_fattr =
+                       cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
+
+       if (!created)
+               smb2_update_xattrs(tcon, &path, fp);
+       else
+               smb2_new_xattrs(tcon, &path, fp);
+
        memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
 
        rsp->StructureSize = cpu_to_le16(89);
@@ -7080,6 +7087,7 @@ skip:
                                                      smb2_remove_blocked_lock,
                                                      argv);
                                if (rc) {
+                                       kfree(argv);
                                        err = -ENOMEM;
                                        goto out;
                                }
@@ -8211,6 +8219,11 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
                            le32_to_cpu(req->LeaseState));
        }
 
+       if (ret < 0) {
+               rsp->hdr.Status = err;
+               goto err_out;
+       }
+
        lease_state = lease->state;
        opinfo->op_state = OPLOCK_STATE_NONE;
        wake_up_interruptible_all(&opinfo->oplock_q);
@@ -8218,11 +8231,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
        wake_up_interruptible_all(&opinfo->oplock_brk);
        opinfo_put(opinfo);
 
-       if (ret < 0) {
-               rsp->hdr.Status = err;
-               goto err_out;
-       }
-
        rsp->StructureSize = cpu_to_le16(36);
        rsp->Reserved = 0;
        rsp->Flags = 0;
index 9091dcd7a3102c82a24d04948847ed5edbb67d45..4277750a6da1b16e6e6f19d29f157f31124cff3b 100644 (file)
@@ -517,6 +517,9 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
                }
        }
 
+       /* Reserve lease break for parent dir at closing time */
+       fp->reserve_lease_break = true;
+
        /* Do we need to break any of a levelII oplock? */
        smb_break_all_levII_oplock(work, fp, 1);
 
index ddf233994ddbbf37c1657b925961a7f8be94f4f0..4e82ff627d1224d7972534a1233b0f952e94f9e4 100644 (file)
@@ -87,6 +87,17 @@ static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
        return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
 }
 
+struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
+{
+       struct ksmbd_inode *ci;
+
+       read_lock(&inode_hash_lock);
+       ci = __ksmbd_inode_lookup(d);
+       read_unlock(&inode_hash_lock);
+
+       return ci;
+}
+
 int ksmbd_query_inode_status(struct dentry *dentry)
 {
        struct ksmbd_inode *ci;
@@ -199,7 +210,7 @@ static void ksmbd_inode_free(struct ksmbd_inode *ci)
        kfree(ci);
 }
 
-static void ksmbd_inode_put(struct ksmbd_inode *ci)
+void ksmbd_inode_put(struct ksmbd_inode *ci)
 {
        if (atomic_dec_and_test(&ci->m_count))
                ksmbd_inode_free(ci);
index 8325cf4527c464c7db83b772e145f01849814faf..a528f0cc775ae0b6c60e05dfef3c2484fa92af3c 100644 (file)
@@ -105,6 +105,7 @@ struct ksmbd_file {
        struct ksmbd_readdir_data       readdir_data;
        int                             dot_dotdot[2];
        unsigned int                    f_state;
+       bool                            reserve_lease_break;
 };
 
 static inline void set_ctx_actor(struct dir_context *ctx,
@@ -138,6 +139,8 @@ struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id);
 struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
                                        u64 pid);
 void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
+struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d);
+void ksmbd_inode_put(struct ksmbd_inode *ci);
 struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
 struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
 struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry);
index 581ce9519339018d005137caf72295aaeb16b797..2dc730800f448d8cb44f2d5c4e625e607d1faf2f 100644 (file)
@@ -321,7 +321,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
                      compressed ? "" : "un", length);
        }
-       if (length < 0 || length > output->length ||
+       if (length <= 0 || length > output->length ||
                        (index + length) > msblk->bytes_used) {
                res = -EIO;
                goto out;
index 0b90869fd805cd62ab6db7cf924ee0cad8c86eaf..43e237864a422b6df5e85a0119008b4c154566bf 100644 (file)
@@ -546,6 +546,8 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
                if (strcmp(ei_child->name, name) != 0)
                        continue;
                ret = simple_lookup(dir, dentry, flags);
+               if (IS_ERR(ret))
+                       goto out;
                create_dir_dentry(ei, ei_child, ei_dentry, true);
                created = true;
                break;
@@ -568,6 +570,8 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
                        if (r <= 0)
                                continue;
                        ret = simple_lookup(dir, dentry, flags);
+                       if (IS_ERR(ret))
+                               goto out;
                        create_file_dentry(ei, i, ei_dentry, name, mode, cdata,
                                           fops, true);
                        break;
index 13ba34e6d64ff30f51de4367f3aa08f12796786e..2acf191eb89ede0a7df7d667e948f74228cc2ac6 100644 (file)
@@ -245,7 +245,7 @@ struct folio *ufs_get_locked_folio(struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct folio *folio = filemap_lock_folio(mapping, index);
-       if (!folio) {
+       if (IS_ERR(folio)) {
                folio = read_mapping_folio(mapping, index, NULL);
 
                if (IS_ERR(folio)) {
index 536a0b0091c3a9cf85c4007eac613191c114f75f..006b5c977ad7725f880f35a057bbc7dc9ef22fc6 100644 (file)
@@ -97,6 +97,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 
 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
                                     struct drm_atomic_state *state);
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+                                       struct drm_atomic_state *state);
 
 #define DRM_PLANE_COMMIT_ACTIVE_ONLY                   BIT(0)
 #define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET      BIT(1)
index 1abedb5b2e48fa298e022a502deb7ea39d38b59a..3d0fde57ba90eb7b6bf6a295181f06ab73cfb03b 100644 (file)
@@ -209,6 +209,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
 #define module_ffa_driver(__ffa_driver)        \
        module_driver(__ffa_driver, ffa_register, ffa_unregister)
 
+extern struct bus_type ffa_bus_type;
+
 /* FFA transport related */
 struct ffa_partition_info {
        u16 id;
index 6762dac3ef76153fe96bbd05a1050b9a31d1a43d..cff5bb08820ecfc0877f4f5428969c77849554a5 100644 (file)
@@ -3175,6 +3175,9 @@ enum bpf_text_poke_type {
 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *addr1, void *addr2);
 
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+                              struct bpf_prog *new, struct bpf_prog *old);
+
 void *bpf_arch_text_copy(void *dst, void *src, size_t len);
 int bpf_arch_text_invalidate(void *dst, size_t len);
 
index af8d353a4b86af3d4e571301238a4e6aa20b78f9..2976f534a7a32f050410125df97e9880cc429296 100644 (file)
@@ -109,14 +109,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
  * same context as task->real_cred.
  */
 struct cred {
-       atomic_t        usage;
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       atomic_t        subscribers;    /* number of processes subscribed */
-       void            *put_addr;
-       unsigned        magic;
-#define CRED_MAGIC     0x43736564
-#define CRED_MAGIC_DEAD        0x44656144
-#endif
+       atomic_long_t   usage;
        kuid_t          uid;            /* real UID of the task */
        kgid_t          gid;            /* real GID of the task */
        kuid_t          suid;           /* saved UID of the task */
@@ -172,46 +165,6 @@ extern int cred_fscmp(const struct cred *, const struct cred *);
 extern void __init cred_init(void);
 extern int set_cred_ucounts(struct cred *);
 
-/*
- * check for validity of credentials
- */
-#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
-extern void __validate_process_creds(struct task_struct *,
-                                    const char *, unsigned);
-
-extern bool creds_are_invalid(const struct cred *cred);
-
-static inline void __validate_creds(const struct cred *cred,
-                                   const char *file, unsigned line)
-{
-       if (unlikely(creds_are_invalid(cred)))
-               __invalid_creds(cred, file, line);
-}
-
-#define validate_creds(cred)                           \
-do {                                                   \
-       __validate_creds((cred), __FILE__, __LINE__);   \
-} while(0)
-
-#define validate_process_creds()                               \
-do {                                                           \
-       __validate_process_creds(current, __FILE__, __LINE__);  \
-} while(0)
-
-extern void validate_creds_for_do_exit(struct task_struct *);
-#else
-static inline void validate_creds(const struct cred *cred)
-{
-}
-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
-{
-}
-static inline void validate_process_creds(void)
-{
-}
-#endif
-
 static inline bool cap_ambient_invariant_ok(const struct cred *cred)
 {
        return cap_issubset(cred->cap_ambient,
@@ -229,7 +182,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
  */
 static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
 {
-       atomic_add(nr, &cred->usage);
+       atomic_long_add(nr, &cred->usage);
        return cred;
 }
 
@@ -264,7 +217,6 @@ static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
        struct cred *nonconst_cred = (struct cred *) cred;
        if (!cred)
                return cred;
-       validate_creds(cred);
        nonconst_cred->non_rcu = 0;
        return get_new_cred_many(nonconst_cred, nr);
 }
@@ -288,9 +240,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
        struct cred *nonconst_cred = (struct cred *) cred;
        if (!cred)
                return NULL;
-       if (!atomic_inc_not_zero(&nonconst_cred->usage))
+       if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
                return NULL;
-       validate_creds(cred);
        nonconst_cred->non_rcu = 0;
        return cred;
 }
@@ -312,8 +263,7 @@ static inline void put_cred_many(const struct cred *_cred, int nr)
        struct cred *cred = (struct cred *) _cred;
 
        if (cred) {
-               validate_creds(cred);
-               if (atomic_sub_and_test(nr, &cred->usage))
+               if (atomic_long_sub_and_test(nr, &cred->usage))
                        __put_cred(cred);
        }
 }
index ab2f17d9926b599c2457f6e3dc834d12b6d8e7dc..e00ddf1ed39c05c945c0f31703d52f77881d615d 100644 (file)
@@ -559,6 +559,8 @@ struct damon_ctx {
         * update
         */
        unsigned long next_ops_update_sis;
+       /* for waiting until the execution of the kdamond_fn is started */
+       struct completion kdamond_started;
 
 /* public: */
        struct task_struct *kdamond;
index 4cacc0e43b5139863d86c4dbcd85488b8fd9a7a4..be20cff4ba737038692c04cafd2c2abf0381694f 100644 (file)
@@ -454,7 +454,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
                memcpy(to, from, chunk);
                kunmap_local(from);
 
-               from += chunk;
+               to += chunk;
                offset += chunk;
                len -= chunk;
        } while (len > 0);
index d3acecc5db4b33ccdab8c345cab26873ddd3ede7..236ec7b63c5413407ae3dbe06622b8d3463ca7a0 100644 (file)
@@ -1268,10 +1268,7 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
        return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
 }
 
-static inline bool __vma_private_lock(struct vm_area_struct *vma)
-{
-       return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
-}
+bool __vma_private_lock(struct vm_area_struct *vma);
 
 /*
  * Safe version of huge_pte_offset() to check the locks.  See comments
index 805bb635cdf55805171c20f66312663a6d84fe8e..239a4f68801bb59812c33b80d88958065bf124a3 100644 (file)
@@ -434,6 +434,7 @@ enum {
        /* keep async read/write and isreg together and in order */
        REQ_F_SUPPORT_NOWAIT_BIT,
        REQ_F_ISREG_BIT,
+       REQ_F_POLL_NO_LAZY_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -501,6 +502,8 @@ enum {
        REQ_F_CLEAR_POLLIN      = BIT(REQ_F_CLEAR_POLLIN_BIT),
        /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
        REQ_F_HASH_LOCKED       = BIT(REQ_F_HASH_LOCKED_BIT),
+       /* don't use lazy poll wake for this request */
+       REQ_F_POLL_NO_LAZY      = BIT(REQ_F_POLL_NO_LAZY_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
index 6dcbb4eb80fb2085c578ef4c8fd40bfeba9d6db3..beb30719ee161bad0e01db32edfd14e610a6634c 100644 (file)
@@ -1374,6 +1374,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum2,                CSUM_V2)
 JBD2_FEATURE_INCOMPAT_FUNCS(csum3,             CSUM_V3)
 JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit,       FAST_COMMIT)
 
+/* Journal high priority write IO operation flags */
+#define JBD2_JOURNAL_REQ_FLAGS         (REQ_META | REQ_SYNC | REQ_IDLE)
+
 /*
  * Journal flag definitions
  */
index 6f3631425f386dad40e847aa592a31e12d1b5e9f..3f7b664d625b94e35539d1184bd2b5fcf109c4b1 100644 (file)
@@ -621,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
 
        u8         reserved_at_140[0x8];
        u8         bth_dst_qp[0x18];
-       u8         reserved_at_160[0x20];
+       u8         inner_esp_spi[0x20];
        u8         outer_esp_spi[0x20];
        u8         reserved_at_1a0[0x60];
 };
@@ -12001,6 +12001,13 @@ enum {
        MLX5_IPSEC_ASO_INC_SN            = 0x2,
 };
 
+enum {
+       MLX5_IPSEC_ASO_REPLAY_WIN_32BIT  = 0x0,
+       MLX5_IPSEC_ASO_REPLAY_WIN_64BIT  = 0x1,
+       MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+       MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
 struct mlx5_ifc_ipsec_aso_bits {
        u8         valid[0x1];
        u8         reserved_at_201[0x1];
index 418d26608ece70d12a5608dff42f0f4d04af5aea..da5219b48d52294a4442a27ea1f1a80b41e3e555 100644 (file)
@@ -886,8 +886,8 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
  */
 static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
 {
-       return vma->vm_start <= vma->vm_mm->brk &&
-               vma->vm_end >= vma->vm_mm->start_brk;
+       return vma->vm_start < vma->vm_mm->brk &&
+               vma->vm_end > vma->vm_mm->start_brk;
 }
 
 /*
@@ -901,8 +901,8 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
         * its "stack".  It's not even well-defined for programs written
         * languages like Go.
         */
-       return vma->vm_start <= vma->vm_mm->start_stack &&
-              vma->vm_end >= vma->vm_mm->start_stack;
+       return vma->vm_start <= vma->vm_mm->start_stack &&
+               vma->vm_end >= vma->vm_mm->start_stack;
 }
 
 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
index 9ae7def16cb2a9dd14f653eb78eefd753ce5c0c0..f4fe593c1400e662add6d60baa0cd794e78a172b 100644 (file)
@@ -232,22 +232,27 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
        if (folio_test_unevictable(folio) || !lrugen->enabled)
                return false;
        /*
-        * There are three common cases for this page:
-        * 1. If it's hot, e.g., freshly faulted in or previously hot and
-        *    migrated, add it to the youngest generation.
-        * 2. If it's cold but can't be evicted immediately, i.e., an anon page
-        *    not in swapcache or a dirty page pending writeback, add it to the
-        *    second oldest generation.
-        * 3. Everything else (clean, cold) is added to the oldest generation.
+        * There are four common cases for this page:
+        * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
+        *    generation, and it's protected over the rest below.
+        * 2. If it can't be evicted immediately, i.e., a dirty page pending
+        *    writeback, add it to the second youngest generation.
+        * 3. If it should be evicted first, e.g., cold and clean from
+        *    folio_rotate_reclaimable(), add it to the oldest generation.
+        * 4. Everything else falls between 2 & 3 above and is added to the
+        *    second oldest generation if it's considered inactive, or the
+        *    oldest generation otherwise. See lru_gen_is_active().
         */
        if (folio_test_active(folio))
                seq = lrugen->max_seq;
        else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
                 (folio_test_reclaim(folio) &&
                  (folio_test_dirty(folio) || folio_test_writeback(folio))))
-               seq = lrugen->min_seq[type] + 1;
-       else
+               seq = lrugen->max_seq - 1;
+       else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
                seq = lrugen->min_seq[type];
+       else
+               seq = lrugen->min_seq[type] + 1;
 
        gen = lru_gen_from_seq(seq);
        flags = (gen + 1UL) << LRU_GEN_PGOFF;
index 3c25226beeed4731616f640a4b9d9d5ae05f0054..9db36e1977125b91de428c32eeec1d842d80a695 100644 (file)
@@ -505,33 +505,37 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
  * the old generation, is incremented when all its bins become empty.
  *
  * There are four operations:
- * 1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in its
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
  *    current generation (old or young) and updates its "seg" to "head";
- * 2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in its
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
  *    current generation (old or young) and updates its "seg" to "tail";
- * 3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in the old
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
  *    generation, updates its "gen" to "old" and resets its "seg" to "default";
- * 4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin in the
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
  *    young generation, updates its "gen" to "young" and resets its "seg" to
  *    "default".
  *
  * The events that trigger the above operations are:
  * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
- * 2. The first attempt to reclaim an memcg below low, which triggers
+ * 2. The first attempt to reclaim a memcg below low, which triggers
  *    MEMCG_LRU_TAIL;
- * 3. The first attempt to reclaim an memcg below reclaimable size threshold,
- *    which triggers MEMCG_LRU_TAIL;
- * 4. The second attempt to reclaim an memcg below reclaimable size threshold,
- *    which triggers MEMCG_LRU_YOUNG;
- * 5. Attempting to reclaim an memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
+ *    threshold, which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
+ *    threshold, which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
  * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
- * 7. Offlining an memcg, which triggers MEMCG_LRU_OLD.
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
  *
- * Note that memcg LRU only applies to global reclaim, and the round-robin
- * incrementing of their max_seq counters ensures the eventual fairness to all
- * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * Notes:
+ * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
+ *    of their max_seq counters ensures the eventual fairness to all eligible
+ *    memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * 2. There are only two valid generations: old (seq) and young (seq+1).
+ *    MEMCG_NR_GENS is set to three so that when reading the generation counter
+ *    locklessly, a stale value (seq-1) does not wraparound to young.
  */
-#define MEMCG_NR_GENS  2
+#define MEMCG_NR_GENS  3
 #define MEMCG_NR_BINS  8
 
 struct lru_gen_memcg {
index 60ca768bc8679f4ff135f2d0364e46b5e7ab4fb9..dea043bc1e383acc4c18e83d506d1ffe5c489448 100644 (file)
@@ -1829,6 +1829,7 @@ extern bool pcie_ports_native;
 int pci_disable_link_state(struct pci_dev *pdev, int state);
 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
 int pci_enable_link_state(struct pci_dev *pdev, int state);
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
 void pcie_no_aspm(void);
 bool pcie_aspm_support_enabled(void);
 bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1839,6 +1840,8 @@ static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
 { return 0; }
 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
 { return 0; }
+static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
 static inline void pcie_no_aspm(void) { }
 static inline bool pcie_aspm_support_enabled(void) { return false; }
 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
index 63e630276499f88c337c85321d17aed1931aa583..ab1c7deff118f3d988565796bd47ce8b4a5ea7c2 100644 (file)
 /* Charging mode - 1=Barrel, 2=USB */
 #define ASUS_WMI_DEVID_CHARGE_MODE     0x0012006C
 
+/* MCU powersave mode */
+#define ASUS_WMI_DEVID_MCU_POWERSAVE   0x001200E2
+
 /* epu is connected? 1 == true */
 #define ASUS_WMI_DEVID_EGPU_CONNECTED  0x00090018
 /* egpu on/off */
index 0b4658a7eceb622346b5e2f757d8395c825e82a3..dee5ad6e48c5a064c70613948ce1a6f602350cf1 100644 (file)
@@ -175,6 +175,7 @@ struct stmmac_fpe_cfg {
        bool hs_enable;                         /* FPE handshake enable */
        enum stmmac_fpe_state lp_fpe_state;     /* Link Partner FPE state */
        enum stmmac_fpe_state lo_fpe_state;     /* Local station FPE state */
+       u32 fpe_csr;                            /* MAC_FPE_CTRL_STS reg cache */
 };
 
 struct stmmac_safety_feature_cfg {
index 68f3d315d2e18d93a356b0738e4ed855fac94591..b646b574b060d6ca45e545df8edbed032be4189e 100644 (file)
@@ -169,7 +169,7 @@ struct tcp_request_sock {
 #ifdef CONFIG_TCP_AO
        u8                              ao_keyid;
        u8                              ao_rcv_next;
-       u8                              maclen;
+       bool                            used_tcp_ao;
 #endif
 };
 
@@ -180,14 +180,10 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
 
 static inline bool tcp_rsk_used_ao(const struct request_sock *req)
 {
-       /* The real length of MAC is saved in the request socket,
-        * signing anything with zero-length makes no sense, so here is
-        * a little hack..
-        */
 #ifndef CONFIG_TCP_AO
        return false;
 #else
-       return tcp_rsk(req)->maclen != 0;
+       return tcp_rsk(req)->used_tcp_ao;
 #endif
 }
 
index ff1bd6b5f5b372449102ed50fa7edacc47d60c19..45110daaf8d3260ced995b66ba62669e8b29ddfa 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _LINUX_UNITS_H
 #define _LINUX_UNITS_H
 
+#include <linux/bits.h>
 #include <linux/math.h>
 
 /* Metric prefixes in accordance with Système international (d'unités) */
index 287e9d83fb8bc38ff6d7aba70c5d7fa44f48c64e..33a4c146dc19c45d5cb343fba0ffdf0a0b7cff36 100644 (file)
@@ -30,6 +30,7 @@
 #define VENDOR_ID_NVIDIA               0x0955
 #define VENDOR_ID_TPLINK               0x2357
 #define VENDOR_ID_DLINK                        0x2001
+#define VENDOR_ID_ASUS                 0x0b05
 
 #if IS_REACHABLE(CONFIG_USB_RTL8152)
 extern u8 rtl8152_get_version(struct usb_interface *intf);
index 82da55101b5a30b2a5512d964429d2c5f73d03fd..61ebe723ee4d5078afdeec1ec70d392d0ef54ec4 100644 (file)
@@ -31,17 +31,22 @@ struct prefix_info {
        __u8                    length;
        __u8                    prefix_len;
 
+       union __packed {
+               __u8            flags;
+               struct __packed {
 #if defined(__BIG_ENDIAN_BITFIELD)
-       __u8                    onlink : 1,
+                       __u8    onlink : 1,
                                autoconf : 1,
                                reserved : 6;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-       __u8                    reserved : 6,
+                       __u8    reserved : 6,
                                autoconf : 1,
                                onlink : 1;
 #else
 #error "Please fix <asm/byteorder.h>"
 #endif
+               };
+       };
        __be32                  valid;
        __be32                  prefered;
        __be32                  reserved2;
@@ -49,6 +54,9 @@ struct prefix_info {
        struct in6_addr         prefix;
 };
 
+/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
+static_assert(sizeof(struct prefix_info) == 32);
+
 #include <linux/ipv6.h>
 #include <linux/netdevice.h>
 #include <net/if_inet6.h>
index e18a4c0d69eedcce5c4c246e751f6d5efc8c52c5..c53244f204370442054501e7c9698b6b7224af8b 100644 (file)
  * struct genl_multicast_group - generic netlink multicast group
  * @name: name of the multicast group, names are per-family
  * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
  */
 struct genl_multicast_group {
        char                    name[GENL_NAMSIZ];
        u8                      flags;
+       u8                      cap_sys_admin:1;
 };
 
 struct genl_split_ops;
index 3e454c4d7ba623d6abcfcfb396c0f4390de84c5a..f07642264c1eb622e57b9ce0715e296360a4db6e 100644 (file)
 #define IF_RS_SENT     0x10
 #define IF_READY       0x80000000
 
-/* prefix flags */
-#define IF_PREFIX_ONLINK       0x01
-#define IF_PREFIX_AUTOCONF     0x02
-
 enum {
        INET6_IFADDR_STATE_PREDAD,
        INET6_IFADDR_STATE_DAD,
index fe1507c1db828b14e6d684f1f1bca514dbb4103d..692d5955911c7dbf967110be2dd98e62c75f20af 100644 (file)
@@ -62,6 +62,8 @@ struct nf_flowtable_type {
                                                  enum flow_offload_tuple_dir dir,
                                                  struct nf_flow_rule *flow_rule);
        void                            (*free)(struct nf_flowtable *ft);
+       void                            (*get)(struct nf_flowtable *ft);
+       void                            (*put)(struct nf_flowtable *ft);
        nf_hookfn                       *hook;
        struct module                   *owner;
 };
@@ -240,6 +242,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
        }
 
        list_add_tail(&block_cb->list, &block->cb_list);
+       up_write(&flow_table->flow_block_lock);
+
+       if (flow_table->type->get)
+               flow_table->type->get(flow_table);
+       return 0;
 
 unlock:
        up_write(&flow_table->flow_block_lock);
@@ -262,6 +269,9 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
                WARN_ON(true);
        }
        up_write(&flow_table->flow_block_lock);
+
+       if (flow_table->type->put)
+               flow_table->type->put(flow_table);
 }
 
 void flow_offload_route_init(struct flow_offload *flow,
index d2f0736b76b8b299b41e59adb63055bc52dc1f34..144ba48bb07bb9e48de07f7fe7eec60e8997c7f1 100644 (file)
@@ -1514,17 +1514,22 @@ static inline int tcp_full_space(const struct sock *sk)
        return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
 {
        int unused_mem = sk_unused_reserved_mem(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+       tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
        if (unused_mem)
                tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
                                         tcp_win_from_space(sk, unused_mem));
 }
 
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+       __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
+}
+
 void tcp_cleanup_rbuf(struct sock *sk, int copied);
 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
 
index b56be10838f09a2cb56ab511242d2b583eb4c33b..6477810806137dbf7f0262ada4a64ebb568c690b 100644 (file)
@@ -62,11 +62,17 @@ static inline int tcp_ao_maclen(const struct tcp_ao_key *key)
        return key->maclen;
 }
 
+/* Use tcp_ao_len_aligned() for TCP header calculations */
 static inline int tcp_ao_len(const struct tcp_ao_key *key)
 {
        return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr);
 }
 
+static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key)
+{
+       return round_up(tcp_ao_len(key), 4);
+}
+
 static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key)
 {
        return key->digest_size;
index 95896472a82bfb8d9a5d4ebbcd6ebd6f5d445ca6..565a850445414d4de6fae1de3d0c96ea80b8f3ab 100644 (file)
@@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
 {
        __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
                                umem->sgt_append.sgt.nents, pgsz);
+       biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
+       biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
+}
+
+static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
+{
+       return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
 }
 
 /**
@@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
  */
 #define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
        for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
-            __rdma_block_iter_next(biter);)
+            __rdma_umem_block_iter_next(biter);)
 
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
index fb1a2d6b196900d0b98ca454c4dbd2c97bfb9142..b7b6b58dd3486d98e5d641149b69dcf13e694292 100644 (file)
@@ -2850,6 +2850,7 @@ struct ib_block_iter {
        /* internal states */
        struct scatterlist *__sg;       /* sg holding the current aligned block */
        dma_addr_t __dma_addr;          /* unaligned DMA address of this block */
+       size_t __sg_numblocks;          /* ib_umem_num_dma_blocks() */
        unsigned int __sg_nents;        /* number of SG entries */
        unsigned int __sg_advance;      /* number of bytes to advance in sg in next step */
        unsigned int __pg_bit;          /* alignment of current block */
index db92a7202b342b3a1df3f7583c86b79f3ad4e3df..e7418d15fe3906507827025545dcd5348202ff30 100644 (file)
  *  - add FUSE_HAS_EXPIRE_ONLY
  *
  *  7.39
- *  - add FUSE_DIRECT_IO_RELAX
+ *  - add FUSE_DIRECT_IO_ALLOW_MMAP
  *  - add FUSE_STATX and related structures
  */
 
@@ -409,8 +409,7 @@ struct fuse_file_lock {
  * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
  *                     symlink and mknod (single group that matches parent)
  * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
- * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now
- *                       allow shared mmap
+ * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -449,7 +448,10 @@ struct fuse_file_lock {
 #define FUSE_HAS_INODE_DAX     (1ULL << 33)
 #define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
 #define FUSE_HAS_EXPIRE_ONLY   (1ULL << 35)
-#define FUSE_DIRECT_IO_RELAX   (1ULL << 36)
+#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
+
+/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
+#define FUSE_DIRECT_IO_RELAX   FUSE_DIRECT_IO_ALLOW_MMAP
 
 /**
  * CUSE INIT request/reply flags
index aba5657d287ef6d0f4a549ac5f27bcd5a30ab64b..9626a363f1213604f6a53b29af1545a015a0b2c1 100644 (file)
@@ -271,6 +271,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
        struct io_kiocb *req, *tmp;
        struct io_tw_state ts = { .locked = true, };
 
+       percpu_ref_get(&ctx->refs);
        mutex_lock(&ctx->uring_lock);
        llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
                req->io_task_work.func(req, &ts);
@@ -278,6 +279,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
                return;
        io_submit_flush_completions(ctx);
        mutex_unlock(&ctx->uring_lock);
+       percpu_ref_put(&ctx->refs);
 }
 
 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
@@ -3146,12 +3148,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
        init_completion(&exit.completion);
        init_task_work(&exit.task_work, io_tctx_exit_cb);
        exit.ctx = ctx;
-       /*
-        * Some may use context even when all refs and requests have been put,
-        * and they are free to do so while still holding uring_lock or
-        * completion_lock, see io_req_task_submit(). Apart from other work,
-        * this lock/unlock section also waits them to finish.
-        */
+
        mutex_lock(&ctx->uring_lock);
        while (!list_empty(&ctx->tctx_list)) {
                WARN_ON_ONCE(time_after(jiffies, timeout));
index 268788305b612cf3e8c2670ad5ac3bd71110fd9f..72b6af1d2ed3b0fc7cf629608173f750cb487247 100644 (file)
@@ -636,8 +636,8 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
        ibf = io_lookup_buf_free_entry(ctx, ring_size);
        if (!ibf) {
                ptr = io_mem_alloc(ring_size);
-               if (!ptr)
-                       return -ENOMEM;
+               if (IS_ERR(ptr))
+                       return PTR_ERR(ptr);
 
                /* Allocate and store deferred free entry */
                ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
@@ -756,6 +756,8 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 
        bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
 
+       if (!bl || !bl->is_mmap)
+               return NULL;
        /*
         * Ensure the list is fully setup. Only strictly needed for RCU lookup
         * via mmap, and in that case only for the array indexed groups. For
@@ -763,8 +765,6 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
         */
        if (!smp_load_acquire(&bl->is_ready))
                return NULL;
-       if (!bl || !bl->is_mmap)
-               return NULL;
 
        return bl->buf_ring;
 }
index d38d05edb4fa26c92f46b48bc89d0aa280c568ab..d59b74a99d4e4b444dcb2f86dc9d3594d838e1cf 100644 (file)
@@ -366,11 +366,16 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
 {
+       unsigned flags = 0;
+
        io_req_set_res(req, mask, 0);
        req->io_task_work.func = io_poll_task_func;
 
        trace_io_uring_task_add(req, mask);
-       __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
+
+       if (!(req->flags & REQ_F_POLL_NO_LAZY))
+               flags = IOU_F_TWQ_LAZY_WAKE;
+       __io_req_task_work_add(req, flags);
 }
 
 static inline void io_poll_execute(struct io_kiocb *req, int res)
@@ -526,10 +531,19 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
        poll->head = head;
        poll->wait.private = (void *) wqe_private;
 
-       if (poll->events & EPOLLEXCLUSIVE)
+       if (poll->events & EPOLLEXCLUSIVE) {
+               /*
+                * Exclusive waits may only wake a limited amount of entries
+                * rather than all of them, this may interfere with lazy
+                * wake if someone does wait(events > 1). Ensure we don't do
+                * lazy wake for those, as we need to process each one as they
+                * come in.
+                */
+               req->flags |= REQ_F_POLL_NO_LAZY;
                add_wait_queue_exclusive(head, &poll->wait);
-       else
+       } else {
                add_wait_queue(head, &poll->wait);
+       }
 }
 
 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
index 8625181fb87acfc81c1dd56ff043c1baa399a92a..08ac0d8e07ef84e8138579a0b7c6436a9bc7e425 100644 (file)
@@ -77,17 +77,10 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 
 int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
 
-#if defined(CONFIG_UNIX)
-static inline bool io_file_need_scm(struct file *filp)
-{
-       return !!unix_get_socket(filp);
-}
-#else
 static inline bool io_file_need_scm(struct file *filp)
 {
        return false;
 }
-#endif
 
 static inline int io_scm_file_account(struct io_ring_ctx *ctx,
                                      struct file *file)
index acbc2924ecd211e07c2aec13a96106f0fed63b02..7d3ef62e620a58068a77e0ada254fc7e3e47990d 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/nospec.h>
 
 #include <uapi/linux/io_uring.h>
-#include <uapi/asm-generic/ioctls.h>
+#include <asm/ioctls.h>
 
 #include "io_uring.h"
 #include "rsrc.h"
index 7aff28ded2f48fbee898ca291f2c29780fb2e9b9..2fd510256604e986e88295a8eb5b17f0d2a10073 100644 (file)
@@ -94,10 +94,8 @@ config KEXEC_JUMP
 config CRASH_DUMP
        bool "kernel crash dumps"
        depends on ARCH_SUPPORTS_CRASH_DUMP
-       depends on ARCH_SUPPORTS_KEXEC
        select CRASH_CORE
        select KEXEC_CORE
-       select KEXEC
        help
          Generate crash dump after being started by kexec.
          This should be normally only set in special crash dump kernels
index 2058e89b5ddd0091e49032b3982b8d7eaabc6b0d..c85ff9162a5cd44444746f0199e508d0f045b0c4 100644 (file)
@@ -1012,11 +1012,16 @@ static void prog_array_map_poke_untrack(struct bpf_map *map,
        mutex_unlock(&aux->poke_mutex);
 }
 
+void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+                                     struct bpf_prog *new, struct bpf_prog *old)
+{
+       WARN_ON_ONCE(1);
+}
+
 static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
                                    struct bpf_prog *old,
                                    struct bpf_prog *new)
 {
-       u8 *old_addr, *new_addr, *old_bypass_addr;
        struct prog_poke_elem *elem;
        struct bpf_array_aux *aux;
 
@@ -1025,7 +1030,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 
        list_for_each_entry(elem, &aux->poke_progs, list) {
                struct bpf_jit_poke_descriptor *poke;
-               int i, ret;
+               int i;
 
                for (i = 0; i < elem->aux->size_poke_tab; i++) {
                        poke = &elem->aux->poke_tab[i];
@@ -1044,21 +1049,10 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
                         *    activated, so tail call updates can arrive from here
                         *    while JIT is still finishing its final fixup for
                         *    non-activated poke entries.
-                        * 3) On program teardown, the program's kallsym entry gets
-                        *    removed out of RCU callback, but we can only untrack
-                        *    from sleepable context, therefore bpf_arch_text_poke()
-                        *    might not see that this is in BPF text section and
-                        *    bails out with -EINVAL. As these are unreachable since
-                        *    RCU grace period already passed, we simply skip them.
-                        * 4) Also programs reaching refcount of zero while patching
+                        * 3) Also programs reaching refcount of zero while patching
                         *    is in progress is okay since we're protected under
                         *    poke_mutex and untrack the programs before the JIT
-                        *    buffer is freed. When we're still in the middle of
-                        *    patching and suddenly kallsyms entry of the program
-                        *    gets evicted, we just skip the rest which is fine due
-                        *    to point 3).
-                        * 5) Any other error happening below from bpf_arch_text_poke()
-                        *    is a unexpected bug.
+                        *    buffer is freed.
                         */
                        if (!READ_ONCE(poke->tailcall_target_stable))
                                continue;
@@ -1068,39 +1062,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
                            poke->tail_call.key != key)
                                continue;
 
-                       old_bypass_addr = old ? NULL : poke->bypass_addr;
-                       old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
-                       new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
-
-                       if (new) {
-                               ret = bpf_arch_text_poke(poke->tailcall_target,
-                                                        BPF_MOD_JUMP,
-                                                        old_addr, new_addr);
-                               BUG_ON(ret < 0 && ret != -EINVAL);
-                               if (!old) {
-                                       ret = bpf_arch_text_poke(poke->tailcall_bypass,
-                                                                BPF_MOD_JUMP,
-                                                                poke->bypass_addr,
-                                                                NULL);
-                                       BUG_ON(ret < 0 && ret != -EINVAL);
-                               }
-                       } else {
-                               ret = bpf_arch_text_poke(poke->tailcall_bypass,
-                                                        BPF_MOD_JUMP,
-                                                        old_bypass_addr,
-                                                        poke->bypass_addr);
-                               BUG_ON(ret < 0 && ret != -EINVAL);
-                               /* let other CPUs finish the execution of program
-                                * so that it will not possible to expose them
-                                * to invalid nop, stack unwind, nop state
-                                */
-                               if (!ret)
-                                       synchronize_rcu();
-                               ret = bpf_arch_text_poke(poke->tailcall_target,
-                                                        BPF_MOD_JUMP,
-                                                        old_addr, NULL);
-                               BUG_ON(ret < 0 && ret != -EINVAL);
-                       }
+                       bpf_arch_poke_desc_update(poke, new, old);
                }
        }
 }
index cd3afe57ece3cc9a5a52c20243bdafd7fa987f4f..fe254ae035fe4956388897af24d38809530f8fb7 100644 (file)
@@ -371,14 +371,18 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
 {
-       const s32 off_min = S16_MIN, off_max = S16_MAX;
+       s64 off_min, off_max, off;
        s32 delta = end_new - end_old;
-       s32 off;
 
-       if (insn->code == (BPF_JMP32 | BPF_JA))
+       if (insn->code == (BPF_JMP32 | BPF_JA)) {
                off = insn->imm;
-       else
+               off_min = S32_MIN;
+               off_max = S32_MAX;
+       } else {
                off = insn->off;
+               off_min = S16_MIN;
+               off_max = S16_MAX;
+       }
 
        if (curr < pos && curr + off + 1 >= end_old)
                off += delta;
index 122dacb3a44390825054d67530f8f894f2f7fb04..66d1708042a72bf5d25b9ff1d7acb8d91183ffd7 100644 (file)
@@ -66,9 +66,15 @@ static struct freezer *parent_freezer(struct freezer *freezer)
 bool cgroup_freezing(struct task_struct *task)
 {
        bool ret;
+       unsigned int state;
 
        rcu_read_lock();
-       ret = task_freezer(task)->state & CGROUP_FREEZING;
+       /* Check if the cgroup is still FREEZING, but not FROZEN. The extra
+        * !FROZEN check is required, because the FREEZING bit is not cleared
+        * when the state FROZEN is reached.
+        */
+       state = task_freezer(task)->state;
+       ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
        rcu_read_unlock();
 
        return ret;
index efe87d501c8c3c002cbdd6124fc0e3026ad82bd7..d4313b53837e3de5bb42b553152b086b5d0657ef 100644 (file)
@@ -199,7 +199,7 @@ static __initdata char *suffix_tbl[] = {
  * It returns 0 on success and -EINVAL on failure.
  */
 static int __init parse_crashkernel_suffix(char *cmdline,
-                                          unsigned long long   *crash_size,
+                                          unsigned long long *crash_size,
                                           const char *suffix)
 {
        char *cur = cmdline;
@@ -268,9 +268,9 @@ static int __init __parse_crashkernel(char *cmdline,
                             unsigned long long *crash_base,
                             const char *suffix)
 {
-       char    *first_colon, *first_space;
-       char    *ck_cmdline;
-       char    *name = "crashkernel=";
+       char *first_colon, *first_space;
+       char *ck_cmdline;
+       char *name = "crashkernel=";
 
        BUG_ON(!crash_size || !crash_base);
        *crash_size = 0;
@@ -440,7 +440,7 @@ retry:
                return;
        }
 
-       if ((crash_base > CRASH_ADDR_LOW_MAX) &&
+       if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
             crash_low_size && reserve_crashkernel_low(crash_low_size)) {
                memblock_phys_free(crash_base, crash_size);
                return;
index 3c714cb31660d970cb7d19cd25b48161cf50a6df..c033a201c808e59763f36338854b04123e6a5ab9 100644 (file)
@@ -43,10 +43,6 @@ static struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
  */
 struct cred init_cred = {
        .usage                  = ATOMIC_INIT(4),
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       .subscribers            = ATOMIC_INIT(2),
-       .magic                  = CRED_MAGIC,
-#endif
        .uid                    = GLOBAL_ROOT_UID,
        .gid                    = GLOBAL_ROOT_GID,
        .suid                   = GLOBAL_ROOT_UID,
@@ -66,31 +62,6 @@ struct cred init_cred = {
        .ucounts                = &init_ucounts,
 };
 
-static inline void set_cred_subscribers(struct cred *cred, int n)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       atomic_set(&cred->subscribers, n);
-#endif
-}
-
-static inline int read_cred_subscribers(const struct cred *cred)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       return atomic_read(&cred->subscribers);
-#else
-       return 0;
-#endif
-}
-
-static inline void alter_cred_subscribers(const struct cred *_cred, int n)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       struct cred *cred = (struct cred *) _cred;
-
-       atomic_add(n, &cred->subscribers);
-#endif
-}
-
 /*
  * The RCU callback to actually dispose of a set of credentials
  */
@@ -100,20 +71,9 @@ static void put_cred_rcu(struct rcu_head *rcu)
 
        kdebug("put_cred_rcu(%p)", cred);
 
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       if (cred->magic != CRED_MAGIC_DEAD ||
-           atomic_read(&cred->usage) != 0 ||
-           read_cred_subscribers(cred) != 0)
-               panic("CRED: put_cred_rcu() sees %p with"
-                     " mag %x, put %p, usage %d, subscr %d\n",
-                     cred, cred->magic, cred->put_addr,
-                     atomic_read(&cred->usage),
-                     read_cred_subscribers(cred));
-#else
-       if (atomic_read(&cred->usage) != 0)
-               panic("CRED: put_cred_rcu() sees %p with usage %d\n",
-                     cred, atomic_read(&cred->usage));
-#endif
+       if (atomic_long_read(&cred->usage) != 0)
+               panic("CRED: put_cred_rcu() sees %p with usage %ld\n",
+                     cred, atomic_long_read(&cred->usage));
 
        security_cred_free(cred);
        key_put(cred->session_keyring);
@@ -137,16 +97,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
  */
 void __put_cred(struct cred *cred)
 {
-       kdebug("__put_cred(%p{%d,%d})", cred,
-              atomic_read(&cred->usage),
-              read_cred_subscribers(cred));
-
-       BUG_ON(atomic_read(&cred->usage) != 0);
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       BUG_ON(read_cred_subscribers(cred) != 0);
-       cred->magic = CRED_MAGIC_DEAD;
-       cred->put_addr = __builtin_return_address(0);
-#endif
+       kdebug("__put_cred(%p{%ld})", cred,
+              atomic_long_read(&cred->usage));
+
+       BUG_ON(atomic_long_read(&cred->usage) != 0);
        BUG_ON(cred == current->cred);
        BUG_ON(cred == current->real_cred);
 
@@ -164,9 +118,8 @@ void exit_creds(struct task_struct *tsk)
 {
        struct cred *real_cred, *cred;
 
-       kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
-              atomic_read(&tsk->cred->usage),
-              read_cred_subscribers(tsk->cred));
+       kdebug("exit_creds(%u,%p,%p,{%ld})", tsk->pid, tsk->real_cred, tsk->cred,
+              atomic_long_read(&tsk->cred->usage));
 
        real_cred = (struct cred *) tsk->real_cred;
        tsk->real_cred = NULL;
@@ -174,15 +127,10 @@ void exit_creds(struct task_struct *tsk)
        cred = (struct cred *) tsk->cred;
        tsk->cred = NULL;
 
-       validate_creds(cred);
        if (real_cred == cred) {
-               alter_cred_subscribers(cred, -2);
                put_cred_many(cred, 2);
        } else {
-               validate_creds(real_cred);
-               alter_cred_subscribers(real_cred, -1);
                put_cred(real_cred);
-               alter_cred_subscribers(cred, -1);
                put_cred(cred);
        }
 
@@ -230,10 +178,7 @@ struct cred *cred_alloc_blank(void)
        if (!new)
                return NULL;
 
-       atomic_set(&new->usage, 1);
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       new->magic = CRED_MAGIC;
-#endif
+       atomic_long_set(&new->usage, 1);
        if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
@@ -264,8 +209,6 @@ struct cred *prepare_creds(void)
        const struct cred *old;
        struct cred *new;
 
-       validate_process_creds();
-
        new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
        if (!new)
                return NULL;
@@ -276,8 +219,7 @@ struct cred *prepare_creds(void)
        memcpy(new, old, sizeof(struct cred));
 
        new->non_rcu = 0;
-       atomic_set(&new->usage, 1);
-       set_cred_subscribers(new, 0);
+       atomic_long_set(&new->usage, 1);
        get_group_info(new->group_info);
        get_uid(new->user);
        get_user_ns(new->user_ns);
@@ -300,7 +242,6 @@ struct cred *prepare_creds(void)
        if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
-       validate_creds(new);
        return new;
 
 error:
@@ -362,10 +303,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
                clone_flags & CLONE_THREAD
            ) {
                p->real_cred = get_cred_many(p->cred, 2);
-               alter_cred_subscribers(p->cred, 2);
-               kdebug("share_creds(%p{%d,%d})",
-                      p->cred, atomic_read(&p->cred->usage),
-                      read_cred_subscribers(p->cred));
+               kdebug("share_creds(%p{%ld})",
+                      p->cred, atomic_long_read(&p->cred->usage));
                inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
                return 0;
        }
@@ -404,8 +343,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
 
        p->cred = p->real_cred = get_cred(new);
        inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
-       alter_cred_subscribers(new, 2);
-       validate_creds(new);
        return 0;
 
 error_put:
@@ -457,17 +394,11 @@ int commit_creds(struct cred *new)
        struct task_struct *task = current;
        const struct cred *old = task->real_cred;
 
-       kdebug("commit_creds(%p{%d,%d})", new,
-              atomic_read(&new->usage),
-              read_cred_subscribers(new));
+       kdebug("commit_creds(%p{%ld})", new,
+              atomic_long_read(&new->usage));
 
        BUG_ON(task->cred != old);
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       BUG_ON(read_cred_subscribers(old) < 2);
-       validate_creds(old);
-       validate_creds(new);
-#endif
-       BUG_ON(atomic_read(&new->usage) < 1);
+       BUG_ON(atomic_long_read(&new->usage) < 1);
 
        get_cred(new); /* we will require a ref for the subj creds too */
 
@@ -502,14 +433,12 @@ int commit_creds(struct cred *new)
         * RLIMIT_NPROC limits on user->processes have already been checked
         * in set_user().
         */
-       alter_cred_subscribers(new, 2);
        if (new->user != old->user || new->user_ns != old->user_ns)
                inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
        rcu_assign_pointer(task->real_cred, new);
        rcu_assign_pointer(task->cred, new);
        if (new->user != old->user || new->user_ns != old->user_ns)
                dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
-       alter_cred_subscribers(old, -2);
 
        /* send notifications */
        if (!uid_eq(new->uid,   old->uid)  ||
@@ -539,14 +468,10 @@ EXPORT_SYMBOL(commit_creds);
  */
 void abort_creds(struct cred *new)
 {
-       kdebug("abort_creds(%p{%d,%d})", new,
-              atomic_read(&new->usage),
-              read_cred_subscribers(new));
+       kdebug("abort_creds(%p{%ld})", new,
+              atomic_long_read(&new->usage));
 
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       BUG_ON(read_cred_subscribers(new) != 0);
-#endif
-       BUG_ON(atomic_read(&new->usage) < 1);
+       BUG_ON(atomic_long_read(&new->usage) < 1);
        put_cred(new);
 }
 EXPORT_SYMBOL(abort_creds);
@@ -562,12 +487,8 @@ const struct cred *override_creds(const struct cred *new)
 {
        const struct cred *old = current->cred;
 
-       kdebug("override_creds(%p{%d,%d})", new,
-              atomic_read(&new->usage),
-              read_cred_subscribers(new));
-
-       validate_creds(old);
-       validate_creds(new);
+       kdebug("override_creds(%p{%ld})", new,
+              atomic_long_read(&new->usage));
 
        /*
         * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
@@ -576,18 +497,12 @@ const struct cred *override_creds(const struct cred *new)
         * we are only installing the cred into the thread-synchronous
         * '->cred' pointer, not the '->real_cred' pointer that is
         * visible to other threads under RCU.
-        *
-        * Also note that we did validate_creds() manually, not depending
-        * on the validation in 'get_cred()'.
         */
        get_new_cred((struct cred *)new);
-       alter_cred_subscribers(new, 1);
        rcu_assign_pointer(current->cred, new);
-       alter_cred_subscribers(old, -1);
 
-       kdebug("override_creds() = %p{%d,%d}", old,
-              atomic_read(&old->usage),
-              read_cred_subscribers(old));
+       kdebug("override_creds() = %p{%ld}", old,
+              atomic_long_read(&old->usage));
        return old;
 }
 EXPORT_SYMBOL(override_creds);
@@ -603,15 +518,10 @@ void revert_creds(const struct cred *old)
 {
        const struct cred *override = current->cred;
 
-       kdebug("revert_creds(%p{%d,%d})", old,
-              atomic_read(&old->usage),
-              read_cred_subscribers(old));
+       kdebug("revert_creds(%p{%ld})", old,
+              atomic_long_read(&old->usage));
 
-       validate_creds(old);
-       validate_creds(override);
-       alter_cred_subscribers(old, 1);
        rcu_assign_pointer(current->cred, old);
-       alter_cred_subscribers(override, -1);
        put_cred(override);
 }
 EXPORT_SYMBOL(revert_creds);
@@ -731,12 +641,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        kdebug("prepare_kernel_cred() alloc %p", new);
 
        old = get_task_cred(daemon);
-       validate_creds(old);
 
        *new = *old;
        new->non_rcu = 0;
-       atomic_set(&new->usage, 1);
-       set_cred_subscribers(new, 0);
+       atomic_long_set(&new->usage, 1);
        get_uid(new->user);
        get_user_ns(new->user_ns);
        get_group_info(new->group_info);
@@ -760,7 +668,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
                goto error;
 
        put_cred(old);
-       validate_creds(new);
        return new;
 
 error:
@@ -825,109 +732,3 @@ int set_create_files_as(struct cred *new, struct inode *inode)
        return security_kernel_create_files_as(new, inode);
 }
 EXPORT_SYMBOL(set_create_files_as);
-
-#ifdef CONFIG_DEBUG_CREDENTIALS
-
-bool creds_are_invalid(const struct cred *cred)
-{
-       if (cred->magic != CRED_MAGIC)
-               return true;
-       return false;
-}
-EXPORT_SYMBOL(creds_are_invalid);
-
-/*
- * dump invalid credentials
- */
-static void dump_invalid_creds(const struct cred *cred, const char *label,
-                              const struct task_struct *tsk)
-{
-       pr_err("%s credentials: %p %s%s%s\n",
-              label, cred,
-              cred == &init_cred ? "[init]" : "",
-              cred == tsk->real_cred ? "[real]" : "",
-              cred == tsk->cred ? "[eff]" : "");
-       pr_err("->magic=%x, put_addr=%p\n",
-              cred->magic, cred->put_addr);
-       pr_err("->usage=%d, subscr=%d\n",
-              atomic_read(&cred->usage),
-              read_cred_subscribers(cred));
-       pr_err("->*uid = { %d,%d,%d,%d }\n",
-               from_kuid_munged(&init_user_ns, cred->uid),
-               from_kuid_munged(&init_user_ns, cred->euid),
-               from_kuid_munged(&init_user_ns, cred->suid),
-               from_kuid_munged(&init_user_ns, cred->fsuid));
-       pr_err("->*gid = { %d,%d,%d,%d }\n",
-               from_kgid_munged(&init_user_ns, cred->gid),
-               from_kgid_munged(&init_user_ns, cred->egid),
-               from_kgid_munged(&init_user_ns, cred->sgid),
-               from_kgid_munged(&init_user_ns, cred->fsgid));
-#ifdef CONFIG_SECURITY
-       pr_err("->security is %p\n", cred->security);
-       if ((unsigned long) cred->security >= PAGE_SIZE &&
-           (((unsigned long) cred->security & 0xffffff00) !=
-            (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)))
-               pr_err("->security {%x, %x}\n",
-                      ((u32*)cred->security)[0],
-                      ((u32*)cred->security)[1]);
-#endif
-}
-
-/*
- * report use of invalid credentials
- */
-void __noreturn __invalid_creds(const struct cred *cred, const char *file, unsigned line)
-{
-       pr_err("Invalid credentials\n");
-       pr_err("At %s:%u\n", file, line);
-       dump_invalid_creds(cred, "Specified", current);
-       BUG();
-}
-EXPORT_SYMBOL(__invalid_creds);
-
-/*
- * check the credentials on a process
- */
-void __validate_process_creds(struct task_struct *tsk,
-                             const char *file, unsigned line)
-{
-       if (tsk->cred == tsk->real_cred) {
-               if (unlikely(read_cred_subscribers(tsk->cred) < 2 ||
-                            creds_are_invalid(tsk->cred)))
-                       goto invalid_creds;
-       } else {
-               if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 ||
-                            read_cred_subscribers(tsk->cred) < 1 ||
-                            creds_are_invalid(tsk->real_cred) ||
-                            creds_are_invalid(tsk->cred)))
-                       goto invalid_creds;
-       }
-       return;
-
-invalid_creds:
-       pr_err("Invalid process credentials\n");
-       pr_err("At %s:%u\n", file, line);
-
-       dump_invalid_creds(tsk->real_cred, "Real", tsk);
-       if (tsk->cred != tsk->real_cred)
-               dump_invalid_creds(tsk->cred, "Effective", tsk);
-       else
-               pr_err("Effective creds == Real creds\n");
-       BUG();
-}
-EXPORT_SYMBOL(__validate_process_creds);
-
-/*
- * check creds for do_exit()
- */
-void validate_creds_for_do_exit(struct task_struct *tsk)
-{
-       kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
-              tsk->real_cred, tsk->cred,
-              atomic_read(&tsk->cred->usage),
-              read_cred_subscribers(tsk->cred));
-
-       __validate_process_creds(tsk, __FILE__, __LINE__);
-}
-
-#endif /* CONFIG_DEBUG_CREDENTIALS */
index b704d83a28b29bace1741616eeff55a1cf03f413..9efd0d7775e7ce400a1c966ba09af73ec3887f3d 100644 (file)
@@ -1814,31 +1814,34 @@ static inline void perf_event__state_init(struct perf_event *event)
                                              PERF_EVENT_STATE_INACTIVE;
 }
 
-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
+static int __perf_event_read_size(u64 read_format, int nr_siblings)
 {
        int entry = sizeof(u64); /* value */
        int size = 0;
        int nr = 1;
 
-       if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(u64);
 
-       if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(u64);
 
-       if (event->attr.read_format & PERF_FORMAT_ID)
+       if (read_format & PERF_FORMAT_ID)
                entry += sizeof(u64);
 
-       if (event->attr.read_format & PERF_FORMAT_LOST)
+       if (read_format & PERF_FORMAT_LOST)
                entry += sizeof(u64);
 
-       if (event->attr.read_format & PERF_FORMAT_GROUP) {
+       if (read_format & PERF_FORMAT_GROUP) {
                nr += nr_siblings;
                size += sizeof(u64);
        }
 
-       size += entry * nr;
-       event->read_size = size;
+       /*
+        * Since perf_event_validate_size() limits this to 16k and inhibits
+        * adding more siblings, this will never overflow.
+        */
+       return size + nr * entry;
 }
 
 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
@@ -1888,8 +1891,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
  */
 static void perf_event__header_size(struct perf_event *event)
 {
-       __perf_event_read_size(event,
-                              event->group_leader->nr_siblings);
+       event->read_size =
+               __perf_event_read_size(event->attr.read_format,
+                                      event->group_leader->nr_siblings);
        __perf_event_header_size(event, event->attr.sample_type);
 }
 
@@ -1920,23 +1924,44 @@ static void perf_event__id_header_size(struct perf_event *event)
        event->id_header_size = size;
 }
 
+/*
+ * Check that adding an event to the group does not result in anybody
+ * overflowing the 64k event limit imposed by the output buffer.
+ *
+ * Specifically, check that the read_size for the event does not exceed 16k,
+ * read_size being the one term that grows with groups size. Since read_size
+ * depends on per-event read_format, also (re)check the existing events.
+ *
+ * This leaves 48k for the constant size fields and things like callchains,
+ * branch stacks and register sets.
+ */
 static bool perf_event_validate_size(struct perf_event *event)
 {
-       /*
-        * The values computed here will be over-written when we actually
-        * attach the event.
-        */
-       __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
-       __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
-       perf_event__id_header_size(event);
+       struct perf_event *sibling, *group_leader = event->group_leader;
+
+       if (__perf_event_read_size(event->attr.read_format,
+                                  group_leader->nr_siblings + 1) > 16*1024)
+               return false;
+
+       if (__perf_event_read_size(group_leader->attr.read_format,
+                                  group_leader->nr_siblings + 1) > 16*1024)
+               return false;
 
        /*
-        * Sum the lot; should not exceed the 64k limit we have on records.
-        * Conservative limit to allow for callchains and other variable fields.
+        * When creating a new group leader, group_leader->ctx is initialized
+        * after the size has been validated, but we cannot safely use
+        * for_each_sibling_event() until group_leader->ctx is set. A new group
+        * leader cannot have any siblings yet, so we can safely skip checking
+        * the non-existent siblings.
         */
-       if (event->read_size + event->header_size +
-           event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
-               return false;
+       if (event == group_leader)
+               return true;
+
+       for_each_sibling_event(sibling, group_leader) {
+               if (__perf_event_read_size(sibling->attr.read_format,
+                                          group_leader->nr_siblings + 1) > 16*1024)
+                       return false;
+       }
 
        return true;
 }
index ee9f43bed49a240ac60c9f4054c663374e36ccf0..aedc0832c9f4ded6578233a611ee362e0dad77ff 100644 (file)
@@ -824,8 +824,6 @@ void __noreturn do_exit(long code)
        ptrace_event(PTRACE_EVENT_EXIT, code);
        user_events_exit(tsk);
 
-       validate_creds_for_do_exit(tsk);
-
        io_uring_files_cancel();
        exit_signals(tsk);  /* sets PF_EXITING */
 
@@ -909,7 +907,6 @@ void __noreturn do_exit(long code)
        if (tsk->task_frag.page)
                put_page(tsk->task_frag.page);
 
-       validate_creds_for_do_exit(tsk);
        exit_task_stack_account(tsk);
 
        check_stack_usage();
index c450fa8b8b5ef4472cac53a70d99975ccca82c01..759006a9a91026a99d3a5022e605a94eea73a6a5 100644 (file)
@@ -201,7 +201,7 @@ void __thaw_task(struct task_struct *p)
        if (WARN_ON_ONCE(freezing(p)))
                goto unlock;
 
-       if (task_call_func(p, __restore_freezer_state, NULL))
+       if (!frozen(p) || task_call_func(p, __restore_freezer_state, NULL))
                goto unlock;
 
        wake_up_state(p, TASK_FROZEN);
index 866ef3663a0b65ac8f6d6dc3acebbbb051c8d67a..91be1bc50b60e96204d95ad792cc71e5152c5e64 100644 (file)
@@ -1844,8 +1844,8 @@ get_free_mem_region(struct device *dev, struct resource *base,
 
        write_lock(&resource_lock);
        for (addr = gfr_start(base, size, align, flags);
-            gfr_continue(base, addr, size, flags);
-            addr = gfr_next(addr, size, flags)) {
+            gfr_continue(base, addr, align, flags);
+            addr = gfr_next(addr, align, flags)) {
                if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
                    REGION_DISJOINT)
                        continue;
index 43cc47d7faafc7bd9c80d62fe4ad03f97b4164eb..5a114e752f1101707eed15fc03521501211260fa 100644 (file)
@@ -644,8 +644,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
 
        *cnt = rb_time_cnt(top);
 
-       /* If top and bottom counts don't match, this interrupted a write */
-       if (*cnt != rb_time_cnt(bottom))
+       /* If top, msb or bottom counts don't match, this interrupted a write */
+       if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom))
                return false;
 
        /* The shift to msb will lose its cnt bits */
@@ -706,6 +706,9 @@ static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
        unsigned long cnt2, top2, bottom2, msb2;
        u64 val;
 
+       /* Any interruptions in this function should cause a failure */
+       cnt = local_read(&t->cnt);
+
        /* The cmpxchg always fails if it interrupted an update */
         if (!__rb_time_read(t, &val, &cnt2))
                 return false;
@@ -713,17 +716,18 @@ static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
         if (val != expect)
                 return false;
 
-        cnt = local_read(&t->cnt);
         if ((cnt & 3) != cnt2)
                 return false;
 
         cnt2 = cnt + 1;
 
         rb_time_split(val, &top, &bottom, &msb);
+        msb = rb_time_val_cnt(msb, cnt);
         top = rb_time_val_cnt(top, cnt);
         bottom = rb_time_val_cnt(bottom, cnt);
 
         rb_time_split(set, &top2, &bottom2, &msb2);
+        msb2 = rb_time_val_cnt(msb2, cnt);
         top2 = rb_time_val_cnt(top2, cnt2);
         bottom2 = rb_time_val_cnt(bottom2, cnt2);
 
@@ -1787,6 +1791,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
                free_buffer_page(bpage);
        }
 
+       free_page((unsigned long)cpu_buffer->free_page);
+
        kfree(cpu_buffer);
 }
 
@@ -2407,7 +2413,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
         */
        barrier();
 
-       if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
+       if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
                /* Writer corrupted the read? */
                goto reset;
 
@@ -2981,25 +2987,6 @@ static unsigned rb_calculate_event_length(unsigned length)
        return length;
 }
 
-static u64 rb_time_delta(struct ring_buffer_event *event)
-{
-       switch (event->type_len) {
-       case RINGBUF_TYPE_PADDING:
-               return 0;
-
-       case RINGBUF_TYPE_TIME_EXTEND:
-               return rb_event_time_stamp(event);
-
-       case RINGBUF_TYPE_TIME_STAMP:
-               return 0;
-
-       case RINGBUF_TYPE_DATA:
-               return event->time_delta;
-       default:
-               return 0;
-       }
-}
-
 static inline bool
 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
                  struct ring_buffer_event *event)
@@ -3007,8 +2994,6 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
        unsigned long new_index, old_index;
        struct buffer_page *bpage;
        unsigned long addr;
-       u64 write_stamp;
-       u64 delta;
 
        new_index = rb_event_index(event);
        old_index = new_index + rb_event_ts_length(event);
@@ -3017,41 +3002,34 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 
        bpage = READ_ONCE(cpu_buffer->tail_page);
 
-       delta = rb_time_delta(event);
-
-       if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
-               return false;
-
-       /* Make sure the write stamp is read before testing the location */
-       barrier();
-
+       /*
+        * Make sure the tail_page is still the same and
+        * the next write location is the end of this event
+        */
        if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
                unsigned long write_mask =
                        local_read(&bpage->write) & ~RB_WRITE_MASK;
                unsigned long event_length = rb_event_length(event);
 
-               /* Something came in, can't discard */
-               if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
-                                      write_stamp, write_stamp - delta))
-                       return false;
-
                /*
-                * It's possible that the event time delta is zero
-                * (has the same time stamp as the previous event)
-                * in which case write_stamp and before_stamp could
-                * be the same. In such a case, force before_stamp
-                * to be different than write_stamp. It doesn't
-                * matter what it is, as long as its different.
+                * For the before_stamp to be different than the write_stamp
+                * to make sure that the next event adds an absolute
+                * value and does not rely on the saved write stamp, which
+                * is now going to be bogus.
+                *
+                * By setting the before_stamp to zero, the next event
+                * is not going to use the write_stamp and will instead
+                * create an absolute timestamp. This means there's no
+                * reason to update the wirte_stamp!
                 */
-               if (!delta)
-                       rb_time_set(&cpu_buffer->before_stamp, 0);
+               rb_time_set(&cpu_buffer->before_stamp, 0);
 
                /*
                 * If an event were to come in now, it would see that the
                 * write_stamp and the before_stamp are different, and assume
                 * that this event just added itself before updating
                 * the write stamp. The interrupting event will fix the
-                * write stamp for us, and use the before stamp as its delta.
+                * write stamp for us, and use an absolute timestamp.
                 */
 
                /*
@@ -3488,7 +3466,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
                return;
 
        /*
-        * If this interrupted another event, 
+        * If this interrupted another event,
         */
        if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
                goto out;
@@ -3582,7 +3560,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                 * absolute timestamp.
                 * Don't bother if this is the start of a new page (w == 0).
                 */
-               if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
+               if (!w) {
+                       /* Use the sub-buffer timestamp */
+                       info->delta = 0;
+               } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) {
                        info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
                        info->length += RB_LEN_TIME_EXTEND;
                } else {
@@ -3605,26 +3586,19 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
        /* See if we shot pass the end of this buffer page */
        if (unlikely(write > BUF_PAGE_SIZE)) {
-               /* before and after may now different, fix it up*/
-               b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
-               a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
-               if (a_ok && b_ok && info->before != info->after)
-                       (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
-                                             info->before, info->after);
-               if (a_ok && b_ok)
-                       check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
+               check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
                return rb_move_tail(cpu_buffer, tail, info);
        }
 
        if (likely(tail == w)) {
-               u64 save_before;
-               bool s_ok;
-
                /* Nothing interrupted us between A and C */
  /*D*/         rb_time_set(&cpu_buffer->write_stamp, info->ts);
-               barrier();
- /*E*/         s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
-               RB_WARN_ON(cpu_buffer, !s_ok);
+               /*
+                * If something came in between C and D, the write stamp
+                * may now not be in sync. But that's fine as the before_stamp
+                * will be different and then next event will just be forced
+                * to use an absolute timestamp.
+                */
                if (likely(!(info->add_timestamp &
                             (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
                        /* This did not interrupt any time update */
@@ -3632,24 +3606,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                else
                        /* Just use full timestamp for interrupting event */
                        info->delta = info->ts;
-               barrier();
                check_buffer(cpu_buffer, info, tail);
-               if (unlikely(info->ts != save_before)) {
-                       /* SLOW PATH - Interrupted between C and E */
-
-                       a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
-                       RB_WARN_ON(cpu_buffer, !a_ok);
-
-                       /* Write stamp must only go forward */
-                       if (save_before > info->after) {
-                               /*
-                                * We do not care about the result, only that
-                                * it gets updated atomically.
-                                */
-                               (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
-                                                     info->after, save_before);
-                       }
-               }
        } else {
                u64 ts;
                /* SLOW PATH - Interrupted between A and C */
@@ -3717,6 +3674,12 @@ rb_reserve_next_event(struct trace_buffer *buffer,
        int nr_loops = 0;
        int add_ts_default;
 
+       /* ring buffer does cmpxchg, make sure it is safe in NMI context */
+       if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
+           (unlikely(in_nmi()))) {
+               return NULL;
+       }
+
        rb_start_commit(cpu_buffer);
        /* The commit page can not change after this */
 
@@ -3740,6 +3703,8 @@ rb_reserve_next_event(struct trace_buffer *buffer,
        if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
                add_ts_default = RB_ADD_STAMP_ABSOLUTE;
                info.length += RB_LEN_TIME_EXTEND;
+               if (info.length > BUF_MAX_DATA_SIZE)
+                       goto out_fail;
        } else {
                add_ts_default = RB_ADD_STAMP_NONE;
        }
@@ -5121,7 +5086,8 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
        if (!iter)
                return NULL;
 
-       iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
+       /* Holds the entire event: data and meta data */
+       iter->event = kmalloc(BUF_PAGE_SIZE, flags);
        if (!iter->event) {
                kfree(iter);
                return NULL;
index 9aebf904ff9738137a314a78ead0092c009c4d38..199df497db07eb0b6f80d679c8ac77606996205e 100644 (file)
@@ -2360,13 +2360,7 @@ int is_tracing_stopped(void)
        return global_trace.stop_count;
 }
 
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
+static void tracing_start_tr(struct trace_array *tr)
 {
        struct trace_buffer *buffer;
        unsigned long flags;
@@ -2374,119 +2368,83 @@ void tracing_start(void)
        if (tracing_disabled)
                return;
 
-       raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-       if (--global_trace.stop_count) {
-               if (global_trace.stop_count < 0) {
+       raw_spin_lock_irqsave(&tr->start_lock, flags);
+       if (--tr->stop_count) {
+               if (WARN_ON_ONCE(tr->stop_count < 0)) {
                        /* Someone screwed up their debugging */
-                       WARN_ON_ONCE(1);
-                       global_trace.stop_count = 0;
+                       tr->stop_count = 0;
                }
                goto out;
        }
 
        /* Prevent the buffers from switching */
-       arch_spin_lock(&global_trace.max_lock);
+       arch_spin_lock(&tr->max_lock);
 
-       buffer = global_trace.array_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       buffer = global_trace.max_buffer.buffer;
+       buffer = tr->max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 #endif
 
-       arch_spin_unlock(&global_trace.max_lock);
-
- out:
-       raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_start_tr(struct trace_array *tr)
-{
-       struct trace_buffer *buffer;
-       unsigned long flags;
-
-       if (tracing_disabled)
-               return;
-
-       /* If global, we need to also start the max tracer */
-       if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-               return tracing_start();
-
-       raw_spin_lock_irqsave(&tr->start_lock, flags);
-
-       if (--tr->stop_count) {
-               if (tr->stop_count < 0) {
-                       /* Someone screwed up their debugging */
-                       WARN_ON_ONCE(1);
-                       tr->stop_count = 0;
-               }
-               goto out;
-       }
-
-       buffer = tr->array_buffer.buffer;
-       if (buffer)
-               ring_buffer_record_enable(buffer);
+       arch_spin_unlock(&tr->max_lock);
 
  out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
- * tracing_stop - quick stop of the tracer
+ * tracing_start - quick start of the tracer
  *
- * Light weight way to stop tracing. Use in conjunction with
- * tracing_start.
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
  */
-void tracing_stop(void)
+void tracing_start(void)
+
+{
+       return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
 {
        struct trace_buffer *buffer;
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-       if (global_trace.stop_count++)
+       raw_spin_lock_irqsave(&tr->start_lock, flags);
+       if (tr->stop_count++)
                goto out;
 
        /* Prevent the buffers from switching */
-       arch_spin_lock(&global_trace.max_lock);
+       arch_spin_lock(&tr->max_lock);
 
-       buffer = global_trace.array_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       buffer = global_trace.max_buffer.buffer;
+       buffer = tr->max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 #endif
 
-       arch_spin_unlock(&global_trace.max_lock);
+       arch_spin_unlock(&tr->max_lock);
 
  out:
-       raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+       raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
-static void tracing_stop_tr(struct trace_array *tr)
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
 {
-       struct trace_buffer *buffer;
-       unsigned long flags;
-
-       /* If global, we need to also stop the max tracer */
-       if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-               return tracing_stop();
-
-       raw_spin_lock_irqsave(&tr->start_lock, flags);
-       if (tr->stop_count++)
-               goto out;
-
-       buffer = tr->array_buffer.buffer;
-       if (buffer)
-               ring_buffer_record_disable(buffer);
-
- out:
-       raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+       return tracing_stop_tr(&global_trace);
 }
 
 static int trace_save_cmdline(struct task_struct *tsk)
@@ -2770,8 +2728,11 @@ void trace_buffered_event_enable(void)
        for_each_tracing_cpu(cpu) {
                page = alloc_pages_node(cpu_to_node(cpu),
                                        GFP_KERNEL | __GFP_NORETRY, 0);
-               if (!page)
-                       goto failed;
+               /* This is just an optimization and can handle failures */
+               if (!page) {
+                       pr_err("Failed to allocate event buffer\n");
+                       break;
+               }
 
                event = page_address(page);
                memset(event, 0, sizeof(*event));
@@ -2785,10 +2746,6 @@ void trace_buffered_event_enable(void)
                        WARN_ON_ONCE(1);
                preempt_enable();
        }
-
-       return;
- failed:
-       trace_buffered_event_disable();
 }
 
 static void enable_trace_buffered_event(void *data)
@@ -2823,11 +2780,9 @@ void trace_buffered_event_disable(void)
        if (--trace_buffered_event_ref)
                return;
 
-       preempt_disable();
        /* For each CPU, set the buffer as used. */
-       smp_call_function_many(tracing_buffer_mask,
-                              disable_trace_buffered_event, NULL, 1);
-       preempt_enable();
+       on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
+                        NULL, true);
 
        /* Wait for all current users to finish */
        synchronize_rcu();
@@ -2836,17 +2791,19 @@ void trace_buffered_event_disable(void)
                free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
                per_cpu(trace_buffered_event, cpu) = NULL;
        }
+
        /*
-        * Make sure trace_buffered_event is NULL before clearing
-        * trace_buffered_event_cnt.
+        * Wait for all CPUs that potentially started checking if they can use
+        * their event buffer only after the previous synchronize_rcu() call and
+        * they still read a valid pointer from trace_buffered_event. It must be
+        * ensured they don't see cleared trace_buffered_event_cnt else they
+        * could wrongly decide to use the pointed-to buffer which is now freed.
         */
-       smp_wmb();
+       synchronize_rcu();
 
-       preempt_disable();
-       /* Do the work on each cpu */
-       smp_call_function_many(tracing_buffer_mask,
-                              enable_trace_buffered_event, NULL, 1);
-       preempt_enable();
+       /* For each CPU, relinquish the buffer */
+       on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
+                        true);
 }
 
 static struct trace_buffer *temp_buffer;
@@ -4765,7 +4722,11 @@ static int s_show(struct seq_file *m, void *v)
                iter->leftover = ret;
 
        } else {
-               print_trace_line(iter);
+               ret = print_trace_line(iter);
+               if (ret == TRACE_TYPE_PARTIAL_LINE) {
+                       iter->seq.full = 0;
+                       trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+               }
                ret = trace_print_seq(m, &iter->seq);
                /*
                 * If we overflow the seq_file buffer, then it will
@@ -5007,6 +4968,12 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
        return 0;
 }
 
+int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
+{
+       tracing_release_file_tr(inode, filp);
+       return single_release(inode, filp);
+}
+
 static int tracing_mark_open(struct inode *inode, struct file *filp)
 {
        stream_open(inode, filp);
@@ -6387,13 +6354,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
        if (!tr->array_buffer.buffer)
                return 0;
 
+       /* Do not allow tracing while resizing ring buffer */
+       tracing_stop_tr(tr);
+
        ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
        if (ret < 0)
-               return ret;
+               goto out_start;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
-           !tr->current_trace->use_max_tr)
+       if (!tr->allocated_snapshot)
                goto out;
 
        ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
@@ -6418,7 +6387,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
                        WARN_ON(1);
                        tracing_disabled = 1;
                }
-               return ret;
+               goto out_start;
        }
 
        update_buffer_entries(&tr->max_buffer, cpu);
@@ -6427,7 +6396,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
        update_buffer_entries(&tr->array_buffer, cpu);
-
+ out_start:
+       tracing_start_tr(tr);
        return ret;
 }
 
index b7f4ea25a19431d1a78c5f21e15298dea3c51897..0489e72c8169c19754159efa623057ae72a0c5db 100644 (file)
@@ -617,6 +617,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 int tracing_open_file_tr(struct inode *inode, struct file *filp);
 int tracing_release_file_tr(struct inode *inode, struct file *filp);
+int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
 bool tracing_is_disabled(void);
 bool tracer_tracing_is_on(struct trace_array *tr);
 void tracer_tracing_on(struct trace_array *tr);
index 1abc07fba1b91ddea5b1f2c6446d1fe03275ba31..5ecf3c8bde205f360e880b608b11abcddb689a70 100644 (file)
@@ -5623,10 +5623,12 @@ static int event_hist_open(struct inode *inode, struct file *file)
 {
        int ret;
 
-       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       ret = tracing_open_file_tr(inode, file);
        if (ret)
                return ret;
 
+       /* Clear private_data to avoid warning in single_open() */
+       file->private_data = NULL;
        return single_open(file, hist_show, file);
 }
 
@@ -5634,7 +5636,7 @@ const struct file_operations event_hist_fops = {
        .open = event_hist_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = single_release,
+       .release = tracing_single_release_file_tr,
 };
 
 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
@@ -5900,10 +5902,12 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
 {
        int ret;
 
-       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       ret = tracing_open_file_tr(inode, file);
        if (ret)
                return ret;
 
+       /* Clear private_data to avoid warning in single_open() */
+       file->private_data = NULL;
        return single_open(file, hist_debug_show, file);
 }
 
@@ -5911,7 +5915,7 @@ const struct file_operations event_hist_debug_fops = {
        .open = event_hist_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = single_release,
+       .release = tracing_single_release_file_tr,
 };
 #endif
 
index d8b302d0108302d9ef2debe735c4b7778a217f90..3e7fa44dc2b24850f8b836f6bd223807fbcf0c48 100644 (file)
@@ -1587,11 +1587,12 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
 {
        struct print_entry *field;
        struct trace_seq *s = &iter->seq;
+       int max = iter->ent_size - offsetof(struct print_entry, buf);
 
        trace_assign_type(field, iter->ent);
 
        seq_print_ip_sym(s, field->ip, flags);
-       trace_seq_printf(s, ": %s", field->buf);
+       trace_seq_printf(s, ": %.*s", max, field->buf);
 
        return trace_handle_return(s);
 }
@@ -1600,10 +1601,11 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
                                         struct trace_event *event)
 {
        struct print_entry *field;
+       int max = iter->ent_size - offsetof(struct print_entry, buf);
 
        trace_assign_type(field, iter->ent);
 
-       trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
+       trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf);
 
        return trace_handle_return(&iter->seq);
 }
index 6e578f576a6f2b73b98817b0f5489a79c9d85524..2989b57e154a767dadc6054bed607808086b36d6 100644 (file)
@@ -1684,9 +1684,6 @@ static int wq_select_unbound_cpu(int cpu)
                pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
        }
 
-       if (cpumask_empty(wq_unbound_cpumask))
-               return cpu;
-
        new_cpu = __this_cpu_read(wq_rr_cpu_last);
        new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
        if (unlikely(new_cpu >= nr_cpu_ids)) {
@@ -6515,6 +6512,17 @@ static inline void wq_watchdog_init(void) { }
 
 #endif /* CONFIG_WQ_WATCHDOG */
 
+static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
+{
+       if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
+               pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
+                       cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
+               return;
+       }
+
+       cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
+}
+
 /**
  * workqueue_init_early - early init for workqueue subsystem
  *
@@ -6534,11 +6542,11 @@ void __init workqueue_init_early(void)
        BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-       cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
-       cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
-
+       cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+       restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
+       restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
        if (!cpumask_empty(&wq_cmdline_cpumask))
-               cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
+               restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
 
        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
index cc7d53d9dc0191c544b0e2a71d0a6500b854924c..4405f81248fbc72530d7f4882706d7499c5ea3c5 100644 (file)
@@ -1739,21 +1739,6 @@ config DEBUG_MAPLE_TREE
 
 endmenu
 
-config DEBUG_CREDENTIALS
-       bool "Debug credential management"
-       depends on DEBUG_KERNEL
-       help
-         Enable this to turn on some debug checking for credential
-         management.  The additional code keeps track of the number of
-         pointers from task_structs to any given cred struct, and checks to
-         see that this number never exceeds the usage count of the cred
-         struct.
-
-         Furthermore, if SELinux is enabled, this also checks that the
-         security pointer in the cred struct is never seen to be invalid.
-
-         If unsure, say N.
-
 source "kernel/rcu/Kconfig.debug"
 
 config DEBUG_WQ_FORCE_RR_CPU
index aa3f6815bb124053e06678fc17c6b6613375e2b9..ee272c4cefcc13907ce9f211f479615d2e3c9154 100644 (file)
@@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
        if (!masks)
                goto fail_node_to_cpumask;
 
-       /* Stabilize the cpumasks */
-       cpus_read_lock();
        build_node_to_cpumask(node_to_cpumask);
 
+       /*
+        * Make a local cache of 'cpu_present_mask', so the two stages
+        * spread can observe consistent 'cpu_present_mask' without holding
+        * cpu hotplug lock, then we can reduce deadlock risk with cpu
+        * hotplug code.
+        *
+        * Here CPU hotplug may happen when reading `cpu_present_mask`, and
+        * we can live with the case because it only affects that hotplug
+        * CPU is handled in the 1st or 2nd stage, and either way is correct
+        * from API user viewpoint since 2-stage spread is sort of
+        * optimization.
+        */
+       cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
        /* grouping present CPUs first */
        ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
-                                 cpu_present_mask, nmsk, masks);
+                                 npresmsk, nmsk, masks);
        if (ret < 0)
                goto fail_build_affinity;
        nr_present = ret;
@@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
                curgrp = 0;
        else
                curgrp = nr_present;
-       cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+       cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
        ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
                                  npresmsk, nmsk, masks);
        if (ret >= 0)
                nr_others = ret;
 
  fail_build_affinity:
-       cpus_read_unlock();
-
        if (ret >= 0)
                WARN_ON(nr_present + nr_others < numgrps);
 
index 89971a894b6058fd7b908c0e8ffa216cbdc887dd..57cd378c73d67fb369d39cfbb90259246531a274 100644 (file)
@@ -1201,13 +1201,6 @@ config ANON_VMA_NAME
          area from being merged with adjacent virtual memory areas due to the
          difference in their name.
 
-config USERFAULTFD
-       bool "Enable userfaultfd() system call"
-       depends on MMU
-       help
-         Enable the userfaultfd() system call that allows to intercept and
-         handle page faults in userland.
-
 config HAVE_ARCH_USERFAULTFD_WP
        bool
        help
@@ -1218,6 +1211,14 @@ config HAVE_ARCH_USERFAULTFD_MINOR
        help
          Arch has userfaultfd minor fault support
 
+menuconfig USERFAULTFD
+       bool "Enable userfaultfd() system call"
+       depends on MMU
+       help
+         Enable the userfaultfd() system call that allows to intercept and
+         handle page faults in userland.
+
+if USERFAULTFD
 config PTE_MARKER_UFFD_WP
        bool "Userfaultfd write protection support for shmem/hugetlbfs"
        default y
@@ -1227,6 +1228,7 @@ config PTE_MARKER_UFFD_WP
          Allows to create marker PTEs for userfaultfd write protection
          purposes.  It is required to enable userfaultfd write protection on
          file-backed memory types like shmem and hugetlbfs.
+endif # USERFAULTFD
 
 # multi-gen LRU {
 config LRU_GEN
index 6262d55904e744a4a41431127266971eb5ee3d8b..3a05e71509b9db527e7938896dcde44521fe1735 100644 (file)
@@ -445,6 +445,8 @@ struct damon_ctx *damon_new_ctx(void)
        if (!ctx)
                return NULL;
 
+       init_completion(&ctx->kdamond_started);
+
        ctx->attrs.sample_interval = 5 * 1000;
        ctx->attrs.aggr_interval = 100 * 1000;
        ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
@@ -668,11 +670,14 @@ static int __damon_start(struct damon_ctx *ctx)
        mutex_lock(&ctx->kdamond_lock);
        if (!ctx->kdamond) {
                err = 0;
+               reinit_completion(&ctx->kdamond_started);
                ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
                                nr_running_ctxs);
                if (IS_ERR(ctx->kdamond)) {
                        err = PTR_ERR(ctx->kdamond);
                        ctx->kdamond = NULL;
+               } else {
+                       wait_for_completion(&ctx->kdamond_started);
                }
        }
        mutex_unlock(&ctx->kdamond_lock);
@@ -1225,6 +1230,7 @@ static void damon_split_region_at(struct damon_target *t,
        new->age = r->age;
        new->last_nr_accesses = r->last_nr_accesses;
        new->nr_accesses_bp = r->nr_accesses_bp;
+       new->nr_accesses = r->nr_accesses;
 
        damon_insert_region(new, r, damon_next_region(r), t);
 }
@@ -1432,6 +1438,7 @@ static int kdamond_fn(void *data)
 
        pr_debug("kdamond (%d) starts\n", current->pid);
 
+       complete(&ctx->kdamond_started);
        kdamond_init_intervals_sis(ctx);
 
        if (ctx->ops.init)
index be667236b8e6e30713a205a4987d50c3b48ab463..fe0fe2562000b3c01d7c5ba01ec884c26fbdb7b8 100644 (file)
@@ -139,6 +139,13 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = {
  * damon_sysfs_before_damos_apply() understands the situation by showing the
  * 'finished' status and do nothing.
  *
+ * If DAMOS is not applied to any region due to any reasons including the
+ * access pattern, the watermarks, the quotas, and the filters,
+ * ->before_damos_apply() will not be called back.  Until the situation is
+ * changed, the update will not be finished.  To avoid this,
+ * damon_sysfs_after_sampling() set the status as 'finished' if more than two
+ * apply intervals of the scheme is passed while the state is 'idle'.
+ *
  *  Finally, the tried regions request handling finisher function
  *  (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
  */
@@ -154,6 +161,7 @@ struct damon_sysfs_scheme_regions {
        int nr_regions;
        unsigned long total_bytes;
        enum damos_sysfs_regions_upd_status upd_status;
+       unsigned long upd_timeout_jiffies;
 };
 
 static struct damon_sysfs_scheme_regions *
@@ -1854,7 +1862,9 @@ static int damon_sysfs_after_sampling(struct damon_ctx *ctx)
        for (i = 0; i < sysfs_schemes->nr; i++) {
                sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
                if (sysfs_regions->upd_status ==
-                               DAMOS_TRIED_REGIONS_UPD_STARTED)
+                               DAMOS_TRIED_REGIONS_UPD_STARTED ||
+                               time_after(jiffies,
+                                       sysfs_regions->upd_timeout_jiffies))
                        sysfs_regions->upd_status =
                                DAMOS_TRIED_REGIONS_UPD_FINISHED;
        }
@@ -1885,14 +1895,41 @@ int damon_sysfs_schemes_clear_regions(
        return 0;
 }
 
+static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx)
+{
+       struct damos *scheme;
+       int i = 0;
+
+       damon_for_each_scheme(scheme, ctx) {
+               if (i == n)
+                       return scheme;
+               i++;
+       }
+       return NULL;
+}
+
 static void damos_tried_regions_init_upd_status(
-               struct damon_sysfs_schemes *sysfs_schemes)
+               struct damon_sysfs_schemes *sysfs_schemes,
+               struct damon_ctx *ctx)
 {
        int i;
+       struct damos *scheme;
+       struct damon_sysfs_scheme_regions *sysfs_regions;
 
-       for (i = 0; i < sysfs_schemes->nr; i++)
-               sysfs_schemes->schemes_arr[i]->tried_regions->upd_status =
-                       DAMOS_TRIED_REGIONS_UPD_IDLE;
+       for (i = 0; i < sysfs_schemes->nr; i++) {
+               sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+               scheme = damos_sysfs_nth_scheme(i, ctx);
+               if (!scheme) {
+                       sysfs_regions->upd_status =
+                               DAMOS_TRIED_REGIONS_UPD_FINISHED;
+                       continue;
+               }
+               sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
+               sysfs_regions->upd_timeout_jiffies = jiffies +
+                       2 * usecs_to_jiffies(scheme->apply_interval_us ?
+                                       scheme->apply_interval_us :
+                                       ctx->attrs.sample_interval);
+       }
 }
 
 /* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
@@ -1902,7 +1939,7 @@ int damon_sysfs_schemes_update_regions_start(
 {
        damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
        damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
-       damos_tried_regions_init_upd_status(sysfs_schemes);
+       damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
        damos_regions_upd_total_bytes_only = total_bytes_only;
        ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
        ctx->callback.after_sampling = damon_sysfs_after_sampling;
index 32eedf3afd45883a7c34920c4a097906b743621e..f1c8c278310fd51384d0e5dcfd309c4aff9541ff 100644 (file)
@@ -3371,7 +3371,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
                }
        }
 
-       if (pmd_none(*vmf->pmd))
+       if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
                pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
 
        return false;
index 1169ef2f2176fa2cd1ca0bce159ce7d31abf0359..6feb3e0630d1865939517a966529c98196083074 100644 (file)
@@ -1182,6 +1182,13 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
        return (get_vma_private_data(vma) & flag) != 0;
 }
 
+bool __vma_private_lock(struct vm_area_struct *vma)
+{
+       return !(vma->vm_flags & VM_MAYSHARE) &&
+               get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
+               is_vma_resv_set(vma, HPAGE_RESV_OWNER);
+}
+
 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 {
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
index 1eacca03bedd20cd6ff6cffaa96dcffcde9e5397..5501363d6b3125be1d916c3a29dfd016d2c17233 100644 (file)
@@ -642,32 +642,16 @@ static struct kmemleak_object *__alloc_object(gfp_t gfp)
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
+               return NULL;
        }
 
-       return object;
-}
-
-static int __link_object(struct kmemleak_object *object, unsigned long ptr,
-                        size_t size, int min_count, bool is_phys)
-{
-
-       struct kmemleak_object *parent;
-       struct rb_node **link, *rb_parent;
-       unsigned long untagged_ptr;
-       unsigned long untagged_objp;
-
        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
-       object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
-       object->pointer = ptr;
-       object->size = kfence_ksize((void *)ptr) ?: size;
        object->excess_ref = 0;
-       object->min_count = min_count;
        object->count = 0;                      /* white color initially */
-       object->jiffies = jiffies;
        object->checksum = 0;
        object->del_state = 0;
 
@@ -692,6 +676,24 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
        /* kernel backtrace */
        object->trace_handle = set_track_prepare();
 
+       return object;
+}
+
+static int __link_object(struct kmemleak_object *object, unsigned long ptr,
+                        size_t size, int min_count, bool is_phys)
+{
+
+       struct kmemleak_object *parent;
+       struct rb_node **link, *rb_parent;
+       unsigned long untagged_ptr;
+       unsigned long untagged_objp;
+
+       object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
+       object->pointer = ptr;
+       object->size = kfence_ksize((void *)ptr) ?: size;
+       object->min_count = min_count;
+       object->jiffies = jiffies;
+
        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        /*
         * Only update min_addr and max_addr with object
@@ -1150,6 +1152,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 void __ref kmemleak_update_trace(const void *ptr)
 {
        struct kmemleak_object *object;
+       depot_stack_handle_t trace_handle;
        unsigned long flags;
 
        pr_debug("%s(0x%px)\n", __func__, ptr);
@@ -1166,8 +1169,9 @@ void __ref kmemleak_update_trace(const void *ptr)
                return;
        }
 
+       trace_handle = set_track_prepare();
        raw_spin_lock_irqsave(&object->lock, flags);
-       object->trace_handle = set_track_prepare();
+       object->trace_handle = trace_handle;
        raw_spin_unlock_irqrestore(&object->lock, flags);
 
        put_object(object);
index cf4d694280e98ae4d65ccd02d2bf1d6c4393a998..6214a1ab5654f442c3ad5ae9c39678f703a303fd 100644 (file)
@@ -335,6 +335,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
        struct folio *folio = NULL;
        LIST_HEAD(folio_list);
        bool pageout_anon_only_filter;
+       unsigned int batch_count = 0;
 
        if (fatal_signal_pending(current))
                return -EINTR;
@@ -416,6 +417,7 @@ huge_unlock:
 regular_folio:
 #endif
        tlb_change_page_size(tlb, PAGE_SIZE);
+restart:
        start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (!start_pte)
                return 0;
@@ -424,6 +426,15 @@ regular_folio:
        for (; addr < end; pte++, addr += PAGE_SIZE) {
                ptent = ptep_get(pte);
 
+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       batch_count = 0;
+                       if (need_resched()) {
+                               pte_unmap_unlock(start_pte, ptl);
+                               cond_resched();
+                               goto restart;
+                       }
+               }
+
                if (pte_none(ptent))
                        continue;
 
index 1c1061df9cd17cb664d5d6b9faf1ac79db2cef6a..b226090fd9061c6261ace5a09850ebcc0c183060 100644 (file)
@@ -3166,6 +3166,7 @@ __always_inline struct obj_cgroup *current_obj_cgroup(void)
        return NULL;
 
 from_memcg:
+       objcg = NULL;
        for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
                /*
                 * Memcg pointer is protected by scope (see set_active_memcg())
@@ -3176,7 +3177,6 @@ from_memcg:
                objcg = rcu_dereference_check(memcg->objcg, 1);
                if (likely(objcg))
                        break;
-               objcg = NULL;
        }
 
        return objcg;
index 1f18ed4a54971dd8737e7fb108d093bc702d777c..5c757fba8858a592281ea9ad479feee15314e46b 100644 (file)
@@ -1517,6 +1517,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                continue;
                } else {
                        /* We should have covered all the swap entry types */
+                       pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
                        WARN_ON_ONCE(1);
                }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
index ab41a511e20a8ed054a0e79db37dc1aa25e8ecd3..7a5fc89a865289b76af1df2e4c992a6dbf2ae2df 100644 (file)
@@ -1129,6 +1129,9 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
        kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
 }
 
+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
 int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
                       struct zone *zone, struct memory_group *group)
 {
@@ -1149,7 +1152,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
                         !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
                return -EINVAL;
 
-       mem_hotplug_begin();
 
        /* associate pfn range with the zone */
        move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
@@ -1208,7 +1210,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
        writeback_set_ratelimit();
 
        memory_notify(MEM_ONLINE, &arg);
-       mem_hotplug_done();
        return 0;
 
 failed_addition:
@@ -1217,7 +1218,6 @@ failed_addition:
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        remove_pfn_range_from_zone(zone, pfn, nr_pages);
-       mem_hotplug_done();
        return ret;
 }
 
@@ -1458,7 +1458,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
        /* create memory block devices after memory was added */
        ret = create_memory_block_devices(start, size, params.altmap, group);
        if (ret) {
-               arch_remove_memory(start, size, NULL);
+               arch_remove_memory(start, size, params.altmap);
                goto error_free;
        }
 
@@ -1863,6 +1863,9 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
        return 0;
 }
 
+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
 int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                        struct zone *zone, struct memory_group *group)
 {
@@ -1885,8 +1888,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                         !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
                return -EINVAL;
 
-       mem_hotplug_begin();
-
        /*
         * Don't allow to offline memory blocks that contain holes.
         * Consequently, memory blocks with holes can never get onlined
@@ -2031,7 +2032,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
 
        memory_notify(MEM_OFFLINE, &arg);
        remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
-       mem_hotplug_done();
        return 0;
 
 failed_removal_isolated:
@@ -2046,7 +2046,6 @@ failed_removal:
                 (unsigned long long) start_pfn << PAGE_SHIFT,
                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
                 reason);
-       mem_hotplug_done();
        return ret;
 }
 
index 91e2620148b2f6d789420e6736daef7a53e2cc5c..0d1ce70bce38028970802375b2409e9e6400364f 100644 (file)
@@ -1080,7 +1080,24 @@ whole_folios:
                                }
                                VM_BUG_ON_FOLIO(folio_test_writeback(folio),
                                                folio);
-                               truncate_inode_folio(mapping, folio);
+
+                               if (!folio_test_large(folio)) {
+                                       truncate_inode_folio(mapping, folio);
+                               } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
+                                       /*
+                                        * If we split a page, reset the loop so
+                                        * that we pick up the new sub pages.
+                                        * Otherwise the THP was entirely
+                                        * dropped or the target range was
+                                        * zeroed, so just continue the loop as
+                                        * is.
+                                        */
+                                       if (!folio_test_large(folio)) {
+                                               folio_unlock(folio);
+                                               index = start;
+                                               break;
+                                       }
+                               }
                        }
                        folio_unlock(folio);
                }
index 506f8220c5fe5384678182628b78d1519ea8fc26..9dd8977de5a22345bdc40cf56680438226b4f4a8 100644 (file)
@@ -4089,6 +4089,9 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
        else
                VM_WARN_ON_ONCE(true);
 
+       WRITE_ONCE(lruvec->lrugen.seg, seg);
+       WRITE_ONCE(lruvec->lrugen.gen, new);
+
        hlist_nulls_del_rcu(&lruvec->lrugen.list);
 
        if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
@@ -4099,9 +4102,6 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
        pgdat->memcg_lru.nr_memcgs[old]--;
        pgdat->memcg_lru.nr_memcgs[new]++;
 
-       lruvec->lrugen.gen = new;
-       WRITE_ONCE(lruvec->lrugen.seg, seg);
-
        if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
                WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
 
@@ -4124,11 +4124,11 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
 
                gen = get_memcg_gen(pgdat->memcg_lru.seq);
 
+               lruvec->lrugen.gen = gen;
+
                hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
                pgdat->memcg_lru.nr_memcgs[gen]++;
 
-               lruvec->lrugen.gen = gen;
-
                spin_unlock_irq(&pgdat->memcg_lru.lock);
        }
 }
@@ -4232,7 +4232,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
        }
 
        /* protected */
-       if (tier > tier_idx) {
+       if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) {
                int hist = lru_hist_from_seq(lrugen->min_seq[type]);
 
                gen = folio_inc_gen(lruvec, folio, false);
@@ -4598,7 +4598,12 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
        }
 
        /* try to scrape all its memory if this memcg was deleted */
-       *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+       if (!mem_cgroup_online(memcg)) {
+               *nr_to_scan = total;
+               return false;
+       }
+
+       *nr_to_scan = total >> sc->priority;
 
        /*
         * The aging tries to be lazy to reduce the overhead, while the eviction
@@ -4635,7 +4640,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
        DEFINE_MAX_SEQ(lruvec);
 
        if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
-               return 0;
+               return -1;
 
        if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
                return nr_to_scan;
@@ -4648,20 +4653,41 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
        return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
 }
 
-static unsigned long get_nr_to_reclaim(struct scan_control *sc)
+static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
 {
+       int i;
+       enum zone_watermarks mark;
+
        /* don't abort memcg reclaim to ensure fairness */
        if (!root_reclaim(sc))
-               return -1;
+               return false;
+
+       if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order)))
+               return true;
+
+       /* check the order to exclude compaction-induced reclaim */
+       if (!current_is_kswapd() || sc->order)
+               return false;
+
+       mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ?
+              WMARK_PROMO : WMARK_HIGH;
+
+       for (i = 0; i <= sc->reclaim_idx; i++) {
+               struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
+               unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;
+
+               if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
+                       return false;
+       }
 
-       return max(sc->nr_to_reclaim, compact_gap(sc->order));
+       /* kswapd should abort if all eligible zones are safe */
+       return true;
 }
 
 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
        long nr_to_scan;
        unsigned long scanned = 0;
-       unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
        int swappiness = get_swappiness(lruvec, sc);
 
        /* clean file folios are more likely to exist */
@@ -4683,13 +4709,13 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
                if (scanned >= nr_to_scan)
                        break;
 
-               if (sc->nr_reclaimed >= nr_to_reclaim)
+               if (should_abort_scan(lruvec, sc))
                        break;
 
                cond_resched();
        }
 
-       /* whether try_to_inc_max_seq() was successful */
+       /* whether this lruvec should be rotated */
        return nr_to_scan < 0;
 }
 
@@ -4698,14 +4724,9 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
        bool success;
        unsigned long scanned = sc->nr_scanned;
        unsigned long reclaimed = sc->nr_reclaimed;
-       int seg = lru_gen_memcg_seg(lruvec);
        struct mem_cgroup *memcg = lruvec_memcg(lruvec);
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
-       /* see the comment on MEMCG_NR_GENS */
-       if (!lruvec_is_sizable(lruvec, sc))
-               return seg != MEMCG_LRU_TAIL ? MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
-
        mem_cgroup_calculate_protection(NULL, memcg);
 
        if (mem_cgroup_below_min(NULL, memcg))
@@ -4713,7 +4734,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
 
        if (mem_cgroup_below_low(NULL, memcg)) {
                /* see the comment on MEMCG_NR_GENS */
-               if (seg != MEMCG_LRU_TAIL)
+               if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
                        return MEMCG_LRU_TAIL;
 
                memcg_memory_event(memcg, MEMCG_LOW);
@@ -4729,7 +4750,15 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
 
        flush_reclaim_state(sc);
 
-       return success ? MEMCG_LRU_YOUNG : 0;
+       if (success && mem_cgroup_online(memcg))
+               return MEMCG_LRU_YOUNG;
+
+       if (!success && lruvec_is_sizable(lruvec, sc))
+               return 0;
+
+       /* one retry if offlined or too small */
+       return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
+              MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
 }
 
 #ifdef CONFIG_MEMCG
@@ -4743,14 +4772,13 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
        struct lruvec *lruvec;
        struct lru_gen_folio *lrugen;
        struct mem_cgroup *memcg;
-       const struct hlist_nulls_node *pos;
-       unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
+       struct hlist_nulls_node *pos;
 
+       gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
        bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
 restart:
        op = 0;
        memcg = NULL;
-       gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
 
        rcu_read_lock();
 
@@ -4761,6 +4789,10 @@ restart:
                }
 
                mem_cgroup_put(memcg);
+               memcg = NULL;
+
+               if (gen != READ_ONCE(lrugen->gen))
+                       continue;
 
                lruvec = container_of(lrugen, struct lruvec, lrugen);
                memcg = lruvec_memcg(lruvec);
@@ -4777,7 +4809,7 @@ restart:
 
                rcu_read_lock();
 
-               if (sc->nr_reclaimed >= nr_to_reclaim)
+               if (should_abort_scan(lruvec, sc))
                        break;
        }
 
@@ -4788,7 +4820,7 @@ restart:
 
        mem_cgroup_put(memcg);
 
-       if (sc->nr_reclaimed >= nr_to_reclaim)
+       if (!is_a_nulls(pos))
                return;
 
        /* restart if raced with lru_gen_rotate_memcg() */
@@ -4845,16 +4877,14 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
        if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
                return;
        /*
-        * Determine the initial priority based on ((total / MEMCG_NR_GENS) >>
-        * priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, where the
-        * estimated reclaimed_to_scanned_ratio = inactive / total.
+        * Determine the initial priority based on
+        * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim,
+        * where reclaimed_to_scanned_ratio = inactive / total.
         */
        reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
        if (get_swappiness(lruvec, sc))
                reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
 
-       reclaimable /= MEMCG_NR_GENS;
-
        /* round down reclaimable and round up sc->nr_to_reclaim */
        priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
 
index b192e44a0e7ccc08815e9b1f826d60f0038a5fa2..33baad203277d1e335dfbc44b72a1c7f520c3736 100644 (file)
@@ -313,10 +313,10 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
         * 1. For pages accessed through page tables, hotter pages pushed out
         *    hot pages which refaulted immediately.
         * 2. For pages accessed multiple times through file descriptors,
-        *    numbers of accesses might have been out of the range.
+        *    they would have been protected by sort_folio().
         */
-       if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) {
-               folio_set_workingset(folio);
+       if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
+               set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
                mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
        }
 unlock:
index 9ba04a69ec2ae70e0f5907215288208062d34ad4..a852ec093fa8b9f8e314f12a51ad78e8f2b3c851 100644 (file)
@@ -1775,15 +1775,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                break;
        }
        case TIOCINQ: {
-               /*
-                * These two are safe on a single CPU system as only
-                * user tasks fiddle here
-                */
-               struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+               struct sk_buff *skb;
                long amount = 0;
 
+               spin_lock_irq(&sk->sk_receive_queue.lock);
+               skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
                        amount = skb->len - sizeof(struct ddpehdr);
+               spin_unlock_irq(&sk->sk_receive_queue.lock);
                rc = put_user(amount, (int __user *)argp);
                break;
        }
index 838ebf0cabbfb72f370d2794212d67466f79b81e..f81f8d56f5c0c574d60b7bb552c3cc62bca40f9e 100644 (file)
@@ -73,14 +73,17 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
        case SIOCINQ:
        {
                struct sk_buff *skb;
+               int amount;
 
                if (sock->state != SS_CONNECTED) {
                        error = -EINVAL;
                        goto done;
                }
+               spin_lock_irq(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
-               error = put_user(skb ? skb->len : 0,
-                                (int __user *)argp) ? -EFAULT : 0;
+               amount = skb ? skb->len : 0;
+               spin_unlock_irq(&sk->sk_receive_queue.lock);
+               error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
                goto done;
        }
        case ATM_SETSC:
index aff31cd944c29d60b3634ef9ac098aadb290bb0f..b240d9aae4a6435cdd973f27e54b48f7bd8f9298 100644 (file)
@@ -183,7 +183,7 @@ out:
 }
 
 static const struct genl_multicast_group dropmon_mcgrps[] = {
-       { .name = "events", },
+       { .name = "events", .cap_sys_admin = 1 },
 };
 
 static void send_dm_alert(struct work_struct *work)
@@ -1619,11 +1619,13 @@ static const struct genl_small_ops dropmon_ops[] = {
                .cmd = NET_DM_CMD_START,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = net_dm_cmd_trace,
+               .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = NET_DM_CMD_STOP,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = net_dm_cmd_trace,
+               .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = NET_DM_CMD_CONFIG_GET,
index 7e4d7c3bcc849a9211eca4246cda7fa76af13c36..1737884be52f85ac5818b870ae567ebc3501bfb6 100644 (file)
@@ -2602,6 +2602,22 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
        return 0;
 }
 
+static void sk_msg_reset_curr(struct sk_msg *msg)
+{
+       u32 i = msg->sg.start;
+       u32 len = 0;
+
+       do {
+               len += sk_msg_elem(msg, i)->length;
+               sk_msg_iter_var_next(i);
+               if (len >= msg->sg.size)
+                       break;
+       } while (i != msg->sg.end);
+
+       msg->sg.curr = i;
+       msg->sg.copybreak = 0;
+}
+
 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
        .func           = bpf_msg_cork_bytes,
        .gpl_only       = false,
@@ -2721,6 +2737,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
                      msg->sg.end - shift + NR_MSG_FRAG_IDS :
                      msg->sg.end - shift;
 out:
+       sk_msg_reset_curr(msg);
        msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
        msg->data_end = msg->data + bytes;
        return 0;
@@ -2857,6 +2874,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
                msg->sg.data[new] = rsge;
        }
 
+       sk_msg_reset_curr(msg);
        sk_msg_compute_data_pointers(msg);
        return 0;
 }
@@ -3025,6 +3043,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
 
        sk_mem_uncharge(msg->sk, len - pop);
        msg->sg.size -= (len - pop);
+       sk_msg_reset_curr(msg);
        sk_msg_compute_data_pointers(msg);
        return 0;
 }
index df81c1f0a57047e176b7c7e4809d2dae59ba6be5..552719c3bbc3d7869c49028f4c5c9102d1ae9b0a 100644 (file)
@@ -253,9 +253,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 {
        int max_clean = atomic_read(&tbl->gc_entries) -
                        READ_ONCE(tbl->gc_thresh2);
+       u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
        unsigned long tref = jiffies - 5 * HZ;
        struct neighbour *n, *tmp;
        int shrunk = 0;
+       int loop = 0;
 
        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 
@@ -278,11 +280,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
                                shrunk++;
                        if (shrunk >= max_clean)
                                break;
+                       if (++loop == 16) {
+                               if (ktime_get_ns() > tmax)
+                                       goto unlock;
+                               loop = 0;
+                       }
                }
        }
 
        WRITE_ONCE(tbl->last_flush, jiffies);
-
+unlock:
        write_unlock_bh(&tbl->lock);
 
        return shrunk;
index 880027ecf516503c6b98d1190aabca3c3be24e99..7dc47c17d8638a79960cee0a4cc8fd3fedd624fe 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/errqueue.h>
+#include <linux/io_uring.h>
 
 #include <linux/uaccess.h>
 
@@ -103,6 +104,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 
                if (fd < 0 || !(file = fget_raw(fd)))
                        return -EBADF;
+               /* don't allow io_uring files */
+               if (io_uring_get_socket(file)) {
+                       fput(file);
+                       return -EINVAL;
+               }
                *fpp++ = file;
                fpl->count++;
        }
index b157efea5dea88745f9a2ae547d39fdf7e622627..83af8aaeb893b1a89bc034ee0d034d4f96318c6f 100644 (file)
@@ -4522,8 +4522,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                /* GSO partial only requires that we trim off any excess that
                 * doesn't fit into an MSS sized block, so take care of that
                 * now.
+                * Cap len to not accidentally hit GSO_BY_FRAGS.
                 */
-               partial_segs = len / mss;
+               partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;
                if (partial_segs > 1)
                        mss *= partial_segs;
                else
index 22a26d1d29a09d234f18ce3b0f329e5047c0c046..5169c3c72cffe49cef613e69889d139db867ff74 100644 (file)
@@ -635,15 +635,18 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
        }
 
        if (dev->header_ops) {
+               int pull_len = tunnel->hlen + sizeof(struct iphdr);
+
                if (skb_cow_head(skb, 0))
                        goto free_skb;
 
                tnl_params = (const struct iphdr *)skb->data;
 
-               /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
-                * to gre header.
-                */
-               skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
+               if (!pskb_network_may_pull(skb, pull_len))
+                       goto free_skb;
+
+               /* ip_tunnel_xmit() needs skb->data pointing to gre header. */
+               skb_pull(skb, pull_len);
                skb_reset_mac_header(skb);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL &&
index 53bcc17c91e4c4fc0799f3f5d51b7ad5c8f32b26..ff6838ca2e58068d6ab435d2bb31babccb728c19 100644 (file)
@@ -3368,9 +3368,25 @@ int tcp_set_window_clamp(struct sock *sk, int val)
                        return -EINVAL;
                tp->window_clamp = 0;
        } else {
-               tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
-                       SOCK_MIN_RCVBUF / 2 : val;
-               tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
+               u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
+               u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
+                                               SOCK_MIN_RCVBUF / 2 : val;
+
+               if (new_window_clamp == old_window_clamp)
+                       return 0;
+
+               tp->window_clamp = new_window_clamp;
+               if (new_window_clamp < old_window_clamp) {
+                       /* need to apply the reserved mem provisioning only
+                        * when shrinking the window clamp
+                        */
+                       __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
+
+               } else {
+                       new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
+                       tp->rcv_ssthresh = max(new_rcv_ssthresh,
+                                              tp->rcv_ssthresh);
+               }
        }
        return 0;
 }
@@ -3594,6 +3610,10 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
                break;
 
        case TCP_AO_REPAIR:
+               if (!tcp_can_repair_sock(sk)) {
+                       err = -EPERM;
+                       break;
+               }
                err = tcp_ao_set_repair(sk, optval, optlen);
                break;
 #ifdef CONFIG_TCP_AO
@@ -4293,6 +4313,8 @@ zerocopy_rcv_out:
        }
 #endif
        case TCP_AO_REPAIR:
+               if (!tcp_can_repair_sock(sk))
+                       return -EPERM;
                return tcp_ao_get_repair(sk, optval, optlen);
        case TCP_AO_GET_KEYS:
        case TCP_AO_INFO: {
index 7696417d064011d3b437c2035b6fe1e5a3a0e6df..f8308d3f565e9d8e46d8fef608de000c2b552b5b 100644 (file)
@@ -851,7 +851,7 @@ void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
        const struct tcp_ao_hdr *aoh;
        struct tcp_ao_key *key;
 
-       treq->maclen = 0;
+       treq->used_tcp_ao = false;
 
        if (tcp_parse_auth_options(th, NULL, &aoh) || !aoh)
                return;
@@ -863,7 +863,7 @@ void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
 
        treq->ao_rcv_next = aoh->keyid;
        treq->ao_keyid = aoh->rnext_keyid;
-       treq->maclen = tcp_ao_maclen(key);
+       treq->used_tcp_ao = true;
 }
 
 static enum skb_drop_reason
@@ -1100,7 +1100,7 @@ void tcp_ao_connect_init(struct sock *sk)
                        ao_info->current_key = key;
                if (!ao_info->rnext_key)
                        ao_info->rnext_key = key;
-               tp->tcp_header_len += tcp_ao_len(key);
+               tp->tcp_header_len += tcp_ao_len_aligned(key);
 
                ao_info->lisn = htonl(tp->write_seq);
                ao_info->snd_sne = 0;
@@ -1346,7 +1346,7 @@ static int tcp_ao_parse_crypto(struct tcp_ao_add *cmd, struct tcp_ao_key *key)
        syn_tcp_option_space -= TCPOLEN_MSS_ALIGNED;
        syn_tcp_option_space -= TCPOLEN_TSTAMP_ALIGNED;
        syn_tcp_option_space -= TCPOLEN_WSCALE_ALIGNED;
-       if (tcp_ao_len(key) > syn_tcp_option_space) {
+       if (tcp_ao_len_aligned(key) > syn_tcp_option_space) {
                err = -EMSGSIZE;
                goto err_kfree;
        }
@@ -1608,6 +1608,15 @@ static int tcp_ao_add_cmd(struct sock *sk, unsigned short int family,
                if (!dev || !l3index)
                        return -EINVAL;
 
+               if (!bound_dev_if || bound_dev_if != cmd.ifindex) {
+                       /* tcp_ao_established_key() doesn't expect having
+                        * non peer-matching key on an established TCP-AO
+                        * connection.
+                        */
+                       if (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)))
+                               return -EINVAL;
+               }
+
                /* It's still possible to bind after adding keys or even
                 * re-bind to a different dev (with CAP_NET_RAW).
                 * So, no reason to return error here, rather try to be
index bcb55d98004c5213f0095613124d5193b15b2793..701cb87043f28079286044208128c2d687908991 100644 (file)
@@ -3871,8 +3871,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         * then we can probably ignore it.
         */
        if (before(ack, prior_snd_una)) {
+               u32 max_window;
+
+               /* do not accept ACK for bytes we never sent. */
+               max_window = min_t(u64, tp->max_window, tp->bytes_acked);
                /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
-               if (before(ack, prior_snd_una - tp->max_window)) {
+               if (before(ack, prior_snd_una - max_window)) {
                        if (!(flag & FLAG_NO_CHALLENGE_ACK))
                                tcp_send_challenge_ack(sk);
                        return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
@@ -4364,6 +4368,23 @@ EXPORT_SYMBOL(tcp_do_parse_auth_options);
  * up to bandwidth of 18Gigabit/sec. 8) ]
  */
 
+/* Estimates max number of increments of remote peer TSval in
+ * a replay window (based on our current RTO estimation).
+ */
+static u32 tcp_tsval_replay(const struct sock *sk)
+{
+       /* If we use usec TS resolution,
+        * then expect the remote peer to use the same resolution.
+        */
+       if (tcp_sk(sk)->tcp_usec_ts)
+               return inet_csk(sk)->icsk_rto * (USEC_PER_SEC / HZ);
+
+       /* RFC 7323 recommends a TSval clock between 1ms and 1sec.
+        * We know that some OS (including old linux) can use 1200 Hz.
+        */
+       return inet_csk(sk)->icsk_rto * 1200 / HZ;
+}
+
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
@@ -4371,7 +4392,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
        u32 seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
-       return (/* 1. Pure ACK with correct sequence number. */
+       return  /* 1. Pure ACK with correct sequence number. */
                (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
 
                /* 2. ... and duplicate ACK. */
@@ -4381,7 +4402,8 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
                !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
 
                /* 4. ... and sits in replay window. */
-               (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
+               (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <=
+               tcp_tsval_replay(sk);
 }
 
 static inline bool tcp_paws_discard(const struct sock *sk,
@@ -7182,11 +7204,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
                goto drop_and_release; /* Invalid TCP options */
        if (aoh) {
-               tcp_rsk(req)->maclen = aoh->length - sizeof(struct tcp_ao_hdr);
+               tcp_rsk(req)->used_tcp_ao = true;
                tcp_rsk(req)->ao_rcv_next = aoh->keyid;
                tcp_rsk(req)->ao_keyid = aoh->rnext_keyid;
+
        } else {
-               tcp_rsk(req)->maclen = 0;
+               tcp_rsk(req)->used_tcp_ao = false;
        }
 #endif
        tcp_rsk(req)->snt_isn = isn;
index 5f693bbd578d2261b78aa0be6bf69499bbd5117e..0c50c5a32b84a3b601510655ecaa39b46a8f0b34 100644 (file)
@@ -690,7 +690,7 @@ static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb,
 
        reply_options[0] = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key) << 16) |
                                 (aoh->rnext_keyid << 8) | keyid);
-       arg->iov[0].iov_len += round_up(tcp_ao_len(key), 4);
+       arg->iov[0].iov_len += tcp_ao_len_aligned(key);
        reply->doff = arg->iov[0].iov_len / 4;
 
        if (tcp_ao_hash_hdr(AF_INET, (char *)&reply_options[1],
@@ -978,7 +978,7 @@ static void tcp_v4_send_ack(const struct sock *sk,
                                          (tcp_ao_len(key->ao_key) << 16) |
                                          (key->ao_key->sndid << 8) |
                                          key->rcv_next);
-               arg.iov[0].iov_len += round_up(tcp_ao_len(key->ao_key), 4);
+               arg.iov[0].iov_len += tcp_ao_len_aligned(key->ao_key);
                rep.th.doff = arg.iov[0].iov_len / 4;
 
                tcp_ao_hash_hdr(AF_INET, (char *)&rep.opt[offset],
index a9807eeb311ca6a276a8ab87ed359819f816cf41..9e85f2a0bddd4978b1bde6add1efc6aad351db8b 100644 (file)
@@ -615,7 +615,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        ao_key = treq->af_specific->ao_lookup(sk, req,
                                tcp_rsk(req)->ao_keyid, -1);
        if (ao_key)
-               newtp->tcp_header_len += tcp_ao_len(ao_key);
+               newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
  #endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
index eb13a55d660c2376968f11ee3265280f8cc9e1bd..e3167ad965676facaacd8f82848c52cf966f97c3 100644 (file)
@@ -825,7 +825,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
                timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
                if (tcp_key_is_ao(key)) {
                        opts->options |= OPTION_AO;
-                       remaining -= tcp_ao_len(key->ao_key);
+                       remaining -= tcp_ao_len_aligned(key->ao_key);
                }
        }
 
@@ -915,7 +915,7 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                        ireq->tstamp_ok &= !ireq->sack_ok;
        } else if (tcp_key_is_ao(key)) {
                opts->options |= OPTION_AO;
-               remaining -= tcp_ao_len(key->ao_key);
+               remaining -= tcp_ao_len_aligned(key->ao_key);
                ireq->tstamp_ok &= !ireq->sack_ok;
        }
 
@@ -982,7 +982,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
                size += TCPOLEN_MD5SIG_ALIGNED;
        } else if (tcp_key_is_ao(key)) {
                opts->options |= OPTION_AO;
-               size += tcp_ao_len(key->ao_key);
+               size += tcp_ao_len_aligned(key->ao_key);
        }
 
        if (likely(tp->rx_opt.tstamp_ok)) {
@@ -3293,7 +3293,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
        if (skb_still_in_host_queue(sk, skb))
                return -EBUSY;
 
+start:
        if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
+               if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+                       TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+                       TCP_SKB_CB(skb)->seq++;
+                       goto start;
+               }
                if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
@@ -3720,7 +3726,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        if (tcp_rsk_used_ao(req)) {
 #ifdef CONFIG_TCP_AO
                struct tcp_ao_key *ao_key = NULL;
-               u8 maclen = tcp_rsk(req)->maclen;
                u8 keyid = tcp_rsk(req)->ao_keyid;
 
                ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
@@ -3730,13 +3735,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                 * for another peer-matching key, but the peer has requested
                 * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
                 */
-               if (unlikely(!ao_key || tcp_ao_maclen(ao_key) != maclen)) {
-                       u8 key_maclen = ao_key ? tcp_ao_maclen(ao_key) : 0;
-
+               if (unlikely(!ao_key)) {
                        rcu_read_unlock();
                        kfree_skb(skb);
-                       net_warn_ratelimited("TCP-AO: the keyid %u with maclen %u|%u from SYN packet is not present - not sending SYNACK\n",
-                                            keyid, maclen, key_maclen);
+                       net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
+                                            keyid);
                        return NULL;
                }
                key.ao_key = ao_key;
index 3aaea56b516601070a11ec2c00150b8268bd481d..2692a7b24c40977a44c33358f558090036338f2c 100644 (file)
@@ -6149,11 +6149,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
        pmsg->prefix_len = pinfo->prefix_len;
        pmsg->prefix_type = pinfo->type;
        pmsg->prefix_pad3 = 0;
-       pmsg->prefix_flags = 0;
-       if (pinfo->onlink)
-               pmsg->prefix_flags |= IF_PREFIX_ONLINK;
-       if (pinfo->autoconf)
-               pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
+       pmsg->prefix_flags = pinfo->flags;
 
        if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
                goto nla_put_failure;
index 28b01a068412ab673415bfd9d15b0fd15b3bdb19..7772f42ff2b940da5f53ee1f0ff0dfcdb187cbf0 100644 (file)
@@ -1511,13 +1511,9 @@ out:
                        if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
                                pn_leaf = fib6_find_prefix(info->nl_net, table,
                                                           pn);
-#if RT6_DEBUG >= 2
-                               if (!pn_leaf) {
-                                       WARN_ON(!pn_leaf);
+                               if (!pn_leaf)
                                        pn_leaf =
                                            info->nl_net->ipv6.fib6_null_entry;
-                               }
-#endif
                                fib6_info_hold(pn_leaf);
                                rcu_assign_pointer(pn->leaf, pn_leaf);
                        }
index 937a02c2e5345390ed592b19faa661cd703a23f0..8c6623496dd7e9daf4cb63528a4817b3c65a334a 100644 (file)
@@ -881,7 +881,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
        if (tcp_key_is_md5(key))
                tot_len += TCPOLEN_MD5SIG_ALIGNED;
        if (tcp_key_is_ao(key))
-               tot_len += tcp_ao_len(key->ao_key);
+               tot_len += tcp_ao_len_aligned(key->ao_key);
 
 #ifdef CONFIG_MPTCP
        if (rst && !tcp_key_is_md5(key)) {
index e502ec00b2fe1e4a806568fe078ef328e4f15ac7..0e4beae421f8302cb0bc6cb798daf36366da0e4b 100644 (file)
@@ -31,7 +31,7 @@ struct bpf_nf_link {
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 static const struct nf_defrag_hook *
 get_proto_defrag_hook(struct bpf_nf_link *link,
-                     const struct nf_defrag_hook __rcu *global_hook,
+                     const struct nf_defrag_hook __rcu **ptr_global_hook,
                      const char *mod)
 {
        const struct nf_defrag_hook *hook;
@@ -39,7 +39,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link,
 
        /* RCU protects us from races against module unloading */
        rcu_read_lock();
-       hook = rcu_dereference(global_hook);
+       hook = rcu_dereference(*ptr_global_hook);
        if (!hook) {
                rcu_read_unlock();
                err = request_module(mod);
@@ -47,7 +47,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link,
                        return ERR_PTR(err < 0 ? err : -EINVAL);
 
                rcu_read_lock();
-               hook = rcu_dereference(global_hook);
+               hook = rcu_dereference(*ptr_global_hook);
        }
 
        if (hook && try_module_get(hook->owner)) {
@@ -78,7 +78,7 @@ static int bpf_nf_enable_defrag(struct bpf_nf_link *link)
        switch (link->hook_ops.pf) {
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
        case NFPROTO_IPV4:
-               hook = get_proto_defrag_hook(link, nf_defrag_v4_hook, "nf_defrag_ipv4");
+               hook = get_proto_defrag_hook(link, &nf_defrag_v4_hook, "nf_defrag_ipv4");
                if (IS_ERR(hook))
                        return PTR_ERR(hook);
 
@@ -87,7 +87,7 @@ static int bpf_nf_enable_defrag(struct bpf_nf_link *link)
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        case NFPROTO_IPV6:
-               hook = get_proto_defrag_hook(link, nf_defrag_v6_hook, "nf_defrag_ipv6");
+               hook = get_proto_defrag_hook(link, &nf_defrag_v6_hook, "nf_defrag_ipv6");
                if (IS_ERR(hook))
                        return PTR_ERR(hook);
 
index c0a42989b982266aa7378f06c0a46b5668335d3a..c5c17c6e80eda1f5a33e035a427ee4b2be2f786f 100644 (file)
@@ -803,7 +803,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
 
 static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
                                                   const struct nlattr *nla,
-                                                  u8 genmask, u32 nlpid)
+                                                  int family, u8 genmask, u32 nlpid)
 {
        struct nftables_pernet *nft_net;
        struct nft_table *table;
@@ -811,6 +811,7 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
        nft_net = nft_pernet(net);
        list_for_each_entry(table, &nft_net->tables, list) {
                if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
+                   table->family == family &&
                    nft_active_genmask(table, genmask)) {
                        if (nft_table_has_owner(table) &&
                            nlpid && table->nlpid != nlpid)
@@ -1544,7 +1545,7 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
 
        if (nla[NFTA_TABLE_HANDLE]) {
                attr = nla[NFTA_TABLE_HANDLE];
-               table = nft_table_lookup_byhandle(net, attr, genmask,
+               table = nft_table_lookup_byhandle(net, attr, family, genmask,
                                                  NETLINK_CB(skb).portid);
        } else {
                attr = nla[NFTA_TABLE_NAME];
index b18a7903912597115c27e2857298c0211cca0fd0..c09dba57354c17e0776e4d5c3b52486719d20163 100644 (file)
@@ -280,10 +280,15 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                        priv->expr_array[i] = dynset_expr;
                        priv->num_exprs++;
 
-                       if (set->num_exprs &&
-                           dynset_expr->ops != set->exprs[i]->ops) {
-                               err = -EOPNOTSUPP;
-                               goto err_expr_free;
+                       if (set->num_exprs) {
+                               if (i >= set->num_exprs) {
+                                       err = -EINVAL;
+                                       goto err_expr_free;
+                               }
+                               if (dynset_expr->ops != set->exprs[i]->ops) {
+                                       err = -EOPNOTSUPP;
+                                       goto err_expr_free;
+                               }
                        }
                        i++;
                }
index 3fbaa7bf41f9c74956c826ee63797406bff9f297..6eb571d0c3fdfcb4ac6cef4b313f039ecfe2517b 100644 (file)
@@ -214,7 +214,7 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
 
                offset = i + priv->offset;
                if (priv->flags & NFT_EXTHDR_F_PRESENT) {
-                       *dest = 1;
+                       nft_reg_store8(dest, 1);
                } else {
                        if (priv->len % NFT_REG32_SIZE)
                                dest[priv->len / NFT_REG32_SIZE] = 0;
@@ -461,7 +461,7 @@ static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
                type = bufp[0];
 
                if (type == priv->type) {
-                       *dest = 1;
+                       nft_reg_store8(dest, 1);
                        return;
                }
 
index 1bfe258018da45fb8cf9f95fe0d3b871e5023f31..37cfe6dd712d8b138fc290abe66fd8d9b69963d6 100644 (file)
@@ -145,11 +145,15 @@ void nft_fib_store_result(void *reg, const struct nft_fib *priv,
        switch (priv->result) {
        case NFT_FIB_RESULT_OIF:
                index = dev ? dev->ifindex : 0;
-               *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
+               if (priv->flags & NFTA_FIB_F_PRESENT)
+                       nft_reg_store8(dreg, !!index);
+               else
+                       *dreg = index;
+
                break;
        case NFT_FIB_RESULT_OIFNAME:
                if (priv->flags & NFTA_FIB_F_PRESENT)
-                       *dreg = !!dev;
+                       nft_reg_store8(dreg, !!dev);
                else
                        strscpy_pad(reg, dev ? dev->name : "", IFNAMSIZ);
                break;
index 701977af3ee851947dbf3096e729f3161928b9d6..7252fcdae34993835d10d143c02755958ad9a7e2 100644 (file)
@@ -2043,6 +2043,9 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
 
                e = f->mt[r].e;
 
+               if (!nft_set_elem_active(&e->ext, iter->genmask))
+                       goto cont;
+
                iter->err = iter->fn(ctx, set, iter, &e->priv);
                if (iter->err < 0)
                        goto out;
index e85ce69924aee95d46ab597b1357ddd0d2c6c2b7..50332888c8d233aab0915a31f2f616f3171da45e 100644 (file)
@@ -76,18 +76,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
                 */
                return false;
 
-       filp = sk->sk_socket->file;
-       if (filp == NULL)
+       read_lock_bh(&sk->sk_callback_lock);
+       filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+       if (filp == NULL) {
+               read_unlock_bh(&sk->sk_callback_lock);
                return ((info->match ^ info->invert) &
                       (XT_OWNER_UID | XT_OWNER_GID)) == 0;
+       }
 
        if (info->match & XT_OWNER_UID) {
                kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
                kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
                if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
                     uid_lte(filp->f_cred->fsuid, uid_max)) ^
-                   !(info->invert & XT_OWNER_UID))
+                   !(info->invert & XT_OWNER_UID)) {
+                       read_unlock_bh(&sk->sk_callback_lock);
                        return false;
+               }
        }
 
        if (info->match & XT_OWNER_GID) {
@@ -112,10 +117,13 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        }
                }
 
-               if (match ^ !(info->invert & XT_OWNER_GID))
+               if (match ^ !(info->invert & XT_OWNER_GID)) {
+                       read_unlock_bh(&sk->sk_callback_lock);
                        return false;
+               }
        }
 
+       read_unlock_bh(&sk->sk_callback_lock);
        return true;
 }
 
index 92ef5ed2e7b0422e389d1a9c5696c6e2323a7450..9c7ffd10df2a72c00d626ab40ca95bb739425983 100644 (file)
@@ -1691,6 +1691,9 @@ static int genl_bind(struct net *net, int group)
                if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
                    !ns_capable(net->user_ns, CAP_NET_ADMIN))
                        ret = -EPERM;
+               if (grp->cap_sys_admin &&
+                   !ns_capable(net->user_ns, CAP_SYS_ADMIN))
+                       ret = -EPERM;
 
                break;
        }
index a84e00b5904be0fad471324d1492979403a2fab3..7adf48549a3b7d263ad5cd7dd3b665efebdc32f9 100644 (file)
@@ -4300,7 +4300,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
        struct sock *sk = sock->sk;
 
        if (sk)
-               atomic_inc(&pkt_sk(sk)->mapped);
+               atomic_long_inc(&pkt_sk(sk)->mapped);
 }
 
 static void packet_mm_close(struct vm_area_struct *vma)
@@ -4310,7 +4310,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
        struct sock *sk = sock->sk;
 
        if (sk)
-               atomic_dec(&pkt_sk(sk)->mapped);
+               atomic_long_dec(&pkt_sk(sk)->mapped);
 }
 
 static const struct vm_operations_struct packet_mmap_ops = {
@@ -4405,7 +4405,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 
        err = -EBUSY;
        if (!closing) {
-               if (atomic_read(&po->mapped))
+               if (atomic_long_read(&po->mapped))
                        goto out;
                if (packet_read_pending(rb))
                        goto out;
@@ -4508,7 +4508,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 
        err = -EBUSY;
        mutex_lock(&po->pg_vec_lock);
-       if (closing || atomic_read(&po->mapped) == 0) {
+       if (closing || atomic_long_read(&po->mapped) == 0) {
                err = 0;
                spin_lock_bh(&rb_queue->lock);
                swap(rb->pg_vec, pg_vec);
@@ -4526,9 +4526,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                po->prot_hook.func = (po->rx_ring.pg_vec) ?
                                                tpacket_rcv : packet_rcv;
                skb_queue_purge(rb_queue);
-               if (atomic_read(&po->mapped))
-                       pr_err("packet_mmap: vma is busy: %d\n",
-                              atomic_read(&po->mapped));
+               if (atomic_long_read(&po->mapped))
+                       pr_err("packet_mmap: vma is busy: %ld\n",
+                              atomic_long_read(&po->mapped));
        }
        mutex_unlock(&po->pg_vec_lock);
 
@@ -4606,7 +4606,7 @@ static int packet_mmap(struct file *file, struct socket *sock,
                }
        }
 
-       atomic_inc(&po->mapped);
+       atomic_long_inc(&po->mapped);
        vma->vm_ops = &packet_mmap_ops;
        err = 0;
 
index d29c94c45159dd488530bc6bb6a1b7d847d1d1e2..d5d70712007ad32bf1227ee59dcc12e12bbb77e4 100644 (file)
@@ -122,7 +122,7 @@ struct packet_sock {
        __be16                  num;
        struct packet_rollover  *rollover;
        struct packet_mclist    *mclist;
-       atomic_t                mapped;
+       atomic_long_t           mapped;
        enum tpacket_versions   tp_version;
        unsigned int            tp_hdrlen;
        unsigned int            tp_reserve;
index 81a794e36f535864869812c2003ff65d4c96efd0..c34e902855dbefdd37b86e2f9cb9ef067be87c3d 100644 (file)
@@ -31,7 +31,8 @@ enum psample_nl_multicast_groups {
 
 static const struct genl_multicast_group psample_nl_mcgrps[] = {
        [PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
-       [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
+       [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME,
+                                     .flags = GENL_UNS_ADMIN_PERM },
 };
 
 static struct genl_family psample_nl_family __ro_after_init;
index 0cc5a4e19900e10b31172433f36f5835101908ed..ecb91ad4ce639e7f8f3d2a9698e005cf696b123f 100644 (file)
@@ -1315,9 +1315,11 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        case TIOCINQ: {
                struct sk_buff *skb;
                long amount = 0L;
-               /* These two are safe on a single CPU system as only user tasks fiddle here */
+
+               spin_lock_irq(&sk->sk_receive_queue.lock);
                if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
                        amount = skb->len;
+               spin_unlock_irq(&sk->sk_receive_queue.lock);
                return put_user(amount, (unsigned int __user *) argp);
        }
 
index b3f4a503ee2ba4fd9620567208e2e77ed80b41b1..f69c47945175b657da6d7368c94693e81465296f 100644 (file)
@@ -286,9 +286,31 @@ static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
               !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
 }
 
+static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);
+
+static void tcf_ct_nf_get(struct nf_flowtable *ft)
+{
+       struct tcf_ct_flow_table *ct_ft =
+               container_of(ft, struct tcf_ct_flow_table, nf_ft);
+
+       tcf_ct_flow_table_get_ref(ct_ft);
+}
+
+static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);
+
+static void tcf_ct_nf_put(struct nf_flowtable *ft)
+{
+       struct tcf_ct_flow_table *ct_ft =
+               container_of(ft, struct tcf_ct_flow_table, nf_ft);
+
+       tcf_ct_flow_table_put(ct_ft);
+}
+
 static struct nf_flowtable_type flowtable_ct = {
        .gc             = tcf_ct_flow_is_outdated,
        .action         = tcf_ct_flow_table_fill_actions,
+       .get            = tcf_ct_nf_get,
+       .put            = tcf_ct_nf_put,
        .owner          = THIS_MODULE,
 };
 
@@ -337,9 +359,13 @@ err_alloc:
        return err;
 }
 
+static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
+{
+       refcount_inc(&ct_ft->ref);
+}
+
 static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
 {
-       struct flow_block_cb *block_cb, *tmp_cb;
        struct tcf_ct_flow_table *ct_ft;
        struct flow_block *block;
 
@@ -347,13 +373,9 @@ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
                             rwork);
        nf_flow_table_free(&ct_ft->nf_ft);
 
-       /* Remove any remaining callbacks before cleanup */
        block = &ct_ft->nf_ft.flow_block;
        down_write(&ct_ft->nf_ft.flow_block_lock);
-       list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
-               list_del(&block_cb->list);
-               flow_block_cb_free(block_cb);
-       }
+       WARN_ON(!list_empty(&block->cb_list));
        up_write(&ct_ft->nf_ft.flow_block_lock);
        kfree(ct_ft);
 
index 2a1388841951e4b6d447ab1f7b2dff4586ad4218..73eebddbbf41d37a12de450420f41ddcedeb26f3 100644 (file)
@@ -723,7 +723,7 @@ static void smcd_conn_save_peer_info(struct smc_sock *smc,
        int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
 
        smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
-       smc->conn.peer_token = clc->d0.token;
+       smc->conn.peer_token = ntohll(clc->d0.token);
        /* msg header takes up space in the buffer */
        smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
        atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
@@ -1415,7 +1415,7 @@ static int smc_connect_ism(struct smc_sock *smc,
                if (rc)
                        return rc;
        }
-       ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;
+       ini->ism_peer_gid[ini->ism_selected] = ntohll(aclc->d0.gid);
 
        /* there is only one lgr role for SMC-D; use server lock */
        mutex_lock(&smc_server_lgr_pending);
index 8deb46c28f1d55e81e996bd6ae06c1f09e30320b..72f4d81a3f41f27215baa9bacfd05b1e51356cf7 100644 (file)
@@ -1004,6 +1004,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
 {
        struct smc_connection *conn = &smc->conn;
        struct smc_clc_first_contact_ext_v2x fce;
+       struct smcd_dev *smcd = conn->lgr->smcd;
        struct smc_clc_msg_accept_confirm *clc;
        struct smc_clc_fce_gid_ext gle;
        struct smc_clc_msg_trail trl;
@@ -1021,17 +1022,15 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
                memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
                       sizeof(SMCD_EYECATCHER));
                clc->hdr.typev1 = SMC_TYPE_D;
-               clc->d0.gid =
-                       conn->lgr->smcd->ops->get_local_gid(conn->lgr->smcd);
-               clc->d0.token = conn->rmb_desc->token;
+               clc->d0.gid = htonll(smcd->ops->get_local_gid(smcd));
+               clc->d0.token = htonll(conn->rmb_desc->token);
                clc->d0.dmbe_size = conn->rmbe_size_comp;
                clc->d0.dmbe_idx = 0;
                memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
                if (version == SMC_V1) {
                        clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
                } else {
-                       clc_v2->d1.chid =
-                               htons(smc_ism_get_chid(conn->lgr->smcd));
+                       clc_v2->d1.chid = htons(smc_ism_get_chid(smcd));
                        if (eid && eid[0])
                                memcpy(clc_v2->d1.eid, eid, SMC_MAX_EID_LEN);
                        len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
index c5c8e7db775a7680ef7a375692b504bf243b4900..08155a96a02a17864712f5de157244cc0949a24f 100644 (file)
@@ -204,8 +204,8 @@ struct smcr_clc_msg_accept_confirm {        /* SMCR accept/confirm */
 } __packed;
 
 struct smcd_clc_msg_accept_confirm_common {    /* SMCD accept/confirm */
-       u64 gid;                /* Sender GID */
-       u64 token;              /* DMB token */
+       __be64 gid;             /* Sender GID */
+       __be64 token;           /* DMB token */
        u8 dmbe_idx;            /* DMBE index */
 #if defined(__BIG_ENDIAN_BITFIELD)
        u8 dmbe_size : 4,       /* buf size (compressed) */
index 7bfe7d9a32aa601d352038b105dbcfe3604a0534..04534ea537c8fd15082a2c5b23b02472f9175c3b 100644 (file)
@@ -40,9 +40,6 @@ static unsigned long number_cred_unused;
 
 static struct cred machine_cred = {
        .usage = ATOMIC_INIT(1),
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       .magic = CRED_MAGIC,
-#endif
 };
 
 /*
index 316f761879624d688af85b60527868d5f0d00a49..e37b4d2e2acde25d6879770629b3996d03860c56 100644 (file)
@@ -952,6 +952,8 @@ static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
                }
 
                sk_msg_page_add(msg_pl, page, part, off);
+               msg_pl->sg.copybreak = 0;
+               msg_pl->sg.curr = msg_pl->sg.end;
                sk_mem_charge(sk, part);
                *copied += part;
                try_to_copy -= part;
index f6dc896bf44c6e3b0952d783c9d633cd6596dbee..6df246b532606312f2cbf547c7332fa3463a7e3e 100644 (file)
@@ -59,8 +59,7 @@ static bool virtio_transport_can_zcopy(const struct virtio_transport *t_ops,
        t_ops = virtio_transport_get_ops(info->vsk);
 
        if (t_ops->can_msgzerocopy) {
-               int pages_in_iov = iov_iter_npages(iov_iter, MAX_SKB_FRAGS);
-               int pages_to_send = min(pages_in_iov, MAX_SKB_FRAGS);
+               int pages_to_send = iov_iter_npages(iov_iter, MAX_SKB_FRAGS);
 
                /* +1 is for packet header. */
                return t_ops->can_msgzerocopy(pages_to_send + 1);
@@ -844,7 +843,7 @@ static s64 virtio_transport_has_space(struct vsock_sock *vsk)
        struct virtio_vsock_sock *vvs = vsk->trans;
        s64 bytes;
 
-       bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+       bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
        if (bytes < 0)
                bytes = 0;
 
index ae9f8cb611f6ca9b1bd0693fd5771198f7555d8f..3da0b52f308d4a4f4cd946301987fb246e25b76b 100644 (file)
@@ -947,7 +947,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 
        rcu_read_lock();
        if (xsk_check_common(xs))
-               goto skip_tx;
+               goto out;
 
        pool = xs->pool;
 
@@ -959,12 +959,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
                        xsk_generic_xmit(sk);
        }
 
-skip_tx:
        if (xs->rx && !xskq_prod_is_empty(xs->rx))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (xs->tx && xsk_tx_writeable(xs))
                mask |= EPOLLOUT | EPOLLWRNORM;
-
+out:
        rcu_read_unlock();
        return mask;
 }
index d83ba5d8f3f49f6c5e7349fefff382c0264fafb8..f27d552aec43f2b41aacf195069510c3121be03b 100755 (executable)
@@ -138,15 +138,11 @@ $total_size = 0;
 while (my $line = <STDIN>) {
        if ($line =~ m/$funcre/) {
                $func = $1;
-               next if $line !~ m/^($xs*)/;
+               next if $line !~ m/^($x*)/;
                if ($total_size > $min_stack) {
                        push @stack, "$intro$total_size\n";
                }
-
-               $addr = $1;
-               $addr =~ s/ /0/g;
-               $addr = "0x$addr";
-
+               $addr = "0x$1";
                $intro = "$addr $func [$file]:";
                my $padlen = 56 - length($intro);
                while ($padlen > 0) {
index bd07477dd1440fb21137d856b887b62e151d4082..5ffb2364409b1746c5f1b7e8c4c2e3a85ea2c5a4 100755 (executable)
@@ -1,8 +1,8 @@
 #!/usr/bin/env python3
 # SPDX-License-Identifier: GPL-2.0-only
 
+import fnmatch
 import os
-import glob
 import re
 import argparse
 
@@ -81,10 +81,20 @@ def print_compat(filename, compatibles):
        else:
                print(*compatibles, sep='\n')
 
+def glob_without_symlinks(root, glob):
+       for path, dirs, files in os.walk(root):
+               # Ignore hidden directories
+               for d in dirs:
+                       if fnmatch.fnmatch(d, ".*"):
+                               dirs.remove(d)
+               for f in files:
+                       if fnmatch.fnmatch(f, glob):
+                               yield os.path.join(path, f)
+
 def files_to_parse(path_args):
        for f in path_args:
                if os.path.isdir(f):
-                       for filename in glob.iglob(f + "/**/*.c", recursive=True):
+                       for filename in glob_without_symlinks(f, "*.c"):
                                yield filename
                else:
                        yield f
index 16376c5cfec641340d697b2de3cb91f9aef2dc07..0eabc5f4f8ca225f8ac360d7995dab380e20d363 100644 (file)
@@ -36,26 +36,26 @@ def for_each_bus():
     for kobj in kset_for_each_object(gdb.parse_and_eval('bus_kset')):
         subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
         subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
-        yield subsys_priv['bus']
+        yield subsys_priv
 
 
 def for_each_class():
     for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')):
         subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
         subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
-        yield subsys_priv['class']
+        yield subsys_priv
 
 
 def get_bus_by_name(name):
     for item in for_each_bus():
-        if item['name'].string() == name:
+        if item['bus']['name'].string() == name:
             return item
     raise gdb.GdbError("Can't find bus type {!r}".format(name))
 
 
 def get_class_by_name(name):
     for item in for_each_class():
-        if item['name'].string() == name:
+        if item['class']['name'].string() == name:
             return item
     raise gdb.GdbError("Can't find device class {!r}".format(name))
 
@@ -70,13 +70,13 @@ def klist_for_each(klist):
 
 
 def bus_for_each_device(bus):
-    for kn in klist_for_each(bus['p']['klist_devices']):
+    for kn in klist_for_each(bus['klist_devices']):
         dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_bus')
         yield dp['device']
 
 
 def class_for_each_device(cls):
-    for kn in klist_for_each(cls['p']['klist_devices']):
+    for kn in klist_for_each(cls['klist_devices']):
         dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_class')
         yield dp['device']
 
@@ -103,7 +103,7 @@ class LxDeviceListBus(gdb.Command):
     def invoke(self, arg, from_tty):
         if not arg:
             for bus in for_each_bus():
-                gdb.write('bus {}:\t{}\n'.format(bus['name'].string(), bus))
+                gdb.write('bus {}:\t{}\n'.format(bus['bus']['name'].string(), bus))
                 for dev in bus_for_each_device(bus):
                     _show_device(dev, level=1)
         else:
@@ -123,7 +123,7 @@ class LxDeviceListClass(gdb.Command):
     def invoke(self, arg, from_tty):
         if not arg:
             for cls in for_each_class():
-                gdb.write("class {}:\t{}\n".format(cls['name'].string(), cls))
+                gdb.write("class {}:\t{}\n".format(cls['class']['name'].string(), cls))
                 for dev in class_for_each_device(cls):
                     _show_device(dev, level=1)
         else:
index 17ec19e9b5bf6a93e95e224cc21c88320c5ad4ba..aa5ab6251f763b9860a6128e8582d1ef4af91f6a 100644 (file)
@@ -13,7 +13,7 @@
 
 import gdb
 
-from linux import utils
+from linux import utils, lists
 
 
 task_type = utils.CachedType("struct task_struct")
@@ -22,19 +22,15 @@ task_type = utils.CachedType("struct task_struct")
 def task_lists():
     task_ptr_type = task_type.get_type().pointer()
     init_task = gdb.parse_and_eval("init_task").address
-    t = g = init_task
+    t = init_task
 
     while True:
-        while True:
-            yield t
+        thread_head = t['signal']['thread_head']
+        for thread in lists.list_for_each_entry(thread_head, task_ptr_type, 'thread_node'):
+            yield thread
 
-            t = utils.container_of(t['thread_group']['next'],
-                                   task_ptr_type, "thread_group")
-            if t == g:
-                break
-
-        t = g = utils.container_of(g['tasks']['next'],
-                                   task_ptr_type, "tasks")
+        t = utils.container_of(t['tasks']['next'],
+                               task_ptr_type, "tasks")
         if t == init_task:
             return
 
index 598ef5465f8256214cb0ad29d5fe91a37872df2e..3edb156ae52c30d8e314ed9097bdae8b3cadb02b 100644 (file)
@@ -322,7 +322,7 @@ int main(int argc, char **argv)
                                     CMS_NOSMIMECAP | use_keyid |
                                     use_signed_attrs),
                    "CMS_add1_signer");
-               ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
+               ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) != 1,
                    "CMS_final");
 
 #else
@@ -341,10 +341,10 @@ int main(int argc, char **argv)
                        b = BIO_new_file(sig_file_name, "wb");
                        ERR(!b, "%s", sig_file_name);
 #ifndef USE_PKCS7
-                       ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
+                       ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) != 1,
                            "%s", sig_file_name);
 #else
-                       ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
+                       ERR(i2d_PKCS7_bio(b, pkcs7) != 1,
                            "%s", sig_file_name);
 #endif
                        BIO_free(b);
@@ -374,9 +374,9 @@ int main(int argc, char **argv)
 
        if (!raw_sig) {
 #ifndef USE_PKCS7
-               ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
+               ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) != 1, "%s", dest_name);
 #else
-               ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
+               ERR(i2d_PKCS7_bio(bd, pkcs7) != 1, "%s", dest_name);
 #endif
        } else {
                BIO *b;
@@ -396,7 +396,7 @@ int main(int argc, char **argv)
        ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
        ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
 
-       ERR(BIO_free(bd) < 0, "%s", dest_name);
+       ERR(BIO_free(bd) != 1, "%s", dest_name);
 
        /* Finally, if we're signing in place, replace the original. */
        if (replace_orig)
index feda711c6b7b84c19099568b96b3b5cbf121886d..340b2bbbb2dd357a5b2704f205a7dc70b28bdbe4 100644 (file)
@@ -1660,8 +1660,6 @@ static int inode_has_perm(const struct cred *cred,
        struct inode_security_struct *isec;
        u32 sid;
 
-       validate_creds(cred);
-
        if (unlikely(IS_PRIVATE(inode)))
                return 0;
 
@@ -3056,8 +3054,6 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
        struct inode_security_struct *isec;
        u32 sid;
 
-       validate_creds(cred);
-
        ad.type = LSM_AUDIT_DATA_DENTRY;
        ad.u.dentry = dentry;
        sid = cred_sid(cred);
@@ -3101,8 +3097,6 @@ static int selinux_inode_permission(struct inode *inode, int mask)
        if (!mask)
                return 0;
 
-       validate_creds(cred);
-
        if (unlikely(IS_PRIVATE(inode)))
                return 0;
 
index 20bb2d7c8d4bf6cde42153cdbb7a16a0cd2bbb75..6d0c9c37796c229e90936c5b42ae9980be69063f 100644 (file)
@@ -253,6 +253,7 @@ static const char * const snd_pcm_state_names[] = {
        STATE(DRAINING),
        STATE(PAUSED),
        STATE(SUSPENDED),
+       STATE(DISCONNECTED),
 };
 
 static const char * const snd_pcm_access_names[] = {
index b59b78a09224090ad04b23940b0c3a2ec4c526da..b8bff5522bce20e601ee363e0edd1bdd7b3261f2 100644 (file)
@@ -397,7 +397,6 @@ static int snd_pcmtst_pcm_close(struct snd_pcm_substream *substream)
        struct pcmtst_buf_iter *v_iter = substream->runtime->private_data;
 
        timer_shutdown_sync(&v_iter->timer_instance);
-       v_iter->substream = NULL;
        playback_capture_test = !v_iter->is_buf_corrupted;
        kfree(v_iter);
        return 0;
@@ -435,6 +434,7 @@ static int snd_pcmtst_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                // We can't call timer_shutdown_sync here, as it is forbidden to sleep here
                v_iter->suspend = true;
+               timer_delete(&v_iter->timer_instance);
                break;
        }
 
@@ -512,12 +512,22 @@ static int snd_pcmtst_ioctl(struct snd_pcm_substream *substream, unsigned int cm
        return snd_pcm_lib_ioctl(substream, cmd, arg);
 }
 
+static int snd_pcmtst_sync_stop(struct snd_pcm_substream *substream)
+{
+       struct pcmtst_buf_iter *v_iter = substream->runtime->private_data;
+
+       timer_delete_sync(&v_iter->timer_instance);
+
+       return 0;
+}
+
 static const struct snd_pcm_ops snd_pcmtst_playback_ops = {
        .open =         snd_pcmtst_pcm_open,
        .close =        snd_pcmtst_pcm_close,
        .trigger =      snd_pcmtst_pcm_trigger,
        .hw_params =    snd_pcmtst_pcm_hw_params,
        .ioctl =        snd_pcmtst_ioctl,
+       .sync_stop =    snd_pcmtst_sync_stop,
        .hw_free =      snd_pcmtst_pcm_hw_free,
        .prepare =      snd_pcmtst_pcm_prepare,
        .pointer =      snd_pcmtst_pcm_pointer,
@@ -530,6 +540,7 @@ static const struct snd_pcm_ops snd_pcmtst_capture_ops = {
        .hw_params =    snd_pcmtst_pcm_hw_params,
        .hw_free =      snd_pcmtst_pcm_hw_free,
        .ioctl =        snd_pcmtst_ioctl,
+       .sync_stop =    snd_pcmtst_sync_stop,
        .prepare =      snd_pcmtst_pcm_prepare,
        .pointer =      snd_pcmtst_pcm_pointer,
 };
index 1cde2a69bdb4baa0d95d34dde3e1a86933931268..78cee53fee02aad32dce7340be21901f0d394bb2 100644 (file)
@@ -1993,7 +1993,10 @@ static const struct snd_pci_quirk force_connect_list[] = {
        SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
+       SND_PCI_QUIRK(0x1043, 0x86ae, "ASUS", 1),  /* Z170 PRO */
+       SND_PCI_QUIRK(0x1043, 0x86c7, "ASUS", 1),  /* Z170M PLUS */
        SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+       SND_PCI_QUIRK(0x8086, 0x2060, "Intel NUC5CPYB", 1),
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
        {}
 };
index f9ddacfd920e20ced7150cecb4a083721e2bd026..e45d4c405f8ff1916fcf54ea9d3c231210f1d21c 100644 (file)
@@ -9705,6 +9705,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+       SND_PCI_QUIRK(0x1028, 0x0beb, "Dell XPS 15 9530 (2023)", ALC289_FIXUP_DELL_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
@@ -9794,6 +9795,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
        SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
@@ -9963,6 +9965,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY),
        SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS UM3504DA", ALC294_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x1970, "ASUS UX550VE", ALC289_FIXUP_ASUS_GA401),
@@ -10204,6 +10207,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x387d, "Yoga S780-16 pro Quad AAC", ALC287_FIXUP_TAS2781_I2C),
        SND_PCI_QUIRK(0x17aa, 0x387e, "Yoga S780-16 pro Quad YC", ALC287_FIXUP_TAS2781_I2C),
        SND_PCI_QUIRK(0x17aa, 0x3881, "YB9 dual power mode2 YC", ALC287_FIXUP_TAS2781_I2C),
+       SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
        SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
        SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
        SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
@@ -10271,6 +10275,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
        SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
 
 #if 0
        /* Below is a quirk table taken from the old code.
@@ -12196,6 +12202,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x3364, "Lenovo ThinkCentre M90 Gen5", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
index fb802802939e170f73e87c85ec223ebf40fab4e8..63a90c7e897685e05c8b325c2f9d89942bec05ce 100644 (file)
@@ -455,9 +455,9 @@ static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
                status = efi.get_variable(efi_name, &efi_guid, &attr,
                        &tas_priv->cali_data.total_sz,
                        tas_priv->cali_data.data);
-               if (status != EFI_SUCCESS)
-                       return -EINVAL;
        }
+       if (status != EFI_SUCCESS)
+               return -EINVAL;
 
        tmp_val = (unsigned int *)tas_priv->cali_data.data;
 
@@ -550,11 +550,6 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
        tas2781_save_calibration(tas_priv);
 
 out:
-       if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
-               /*If DSP FW fail, kcontrol won't be created */
-               tasdevice_config_info_remove(tas_priv);
-               tasdevice_dsp_remove(tas_priv);
-       }
        mutex_unlock(&tas_priv->codec_lock);
        if (fmw)
                release_firmware(fmw);
@@ -612,9 +607,13 @@ static void tas2781_hda_unbind(struct device *dev,
 {
        struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
        struct hda_component *comps = master_data;
+       comps = &comps[tas_priv->index];
 
-       if (comps[tas_priv->index].dev == dev)
-               memset(&comps[tas_priv->index], 0, sizeof(*comps));
+       if (comps->dev == dev) {
+               comps->dev = NULL;
+               memset(comps->name, 0, sizeof(comps->name));
+               comps->playback_hook = NULL;
+       }
 
        tasdevice_config_info_remove(tas_priv);
        tasdevice_dsp_remove(tas_priv);
@@ -675,14 +674,14 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt)
 
        pm_runtime_put_autosuspend(tas_priv->dev);
 
+       tas2781_reset(tas_priv);
+
        ret = component_add(tas_priv->dev, &tas2781_hda_comp_ops);
        if (ret) {
                dev_err(tas_priv->dev, "Register component failed: %d\n", ret);
                pm_runtime_disable(tas_priv->dev);
-               goto err;
        }
 
-       tas2781_reset(tas_priv);
 err:
        if (ret)
                tas2781_hda_remove(&clt->dev);
index 20cee7104c2b3095c1dfb226eec36a526f1490cd..3bc4b2e41650e693c23d7a1972e134beae23194a 100644 (file)
@@ -103,6 +103,20 @@ static const struct config_entry config_table[] = {
                        {}
                },
        },
+       {
+               .flags = FLAG_AMD_LEGACY,
+               .device = ACP_PCI_DEV_ID,
+               .dmi_table = (const struct dmi_system_id []) {
+                       {
+                               .matches = {
+                                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "HUAWEI"),
+                                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HVY-WXX9"),
+                                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "M1010"),
+                               },
+                       },
+                       {}
+               },
+       },
        {
                .flags = FLAG_AMD_LEGACY,
                .device = ACP_PCI_DEV_ID,
index 15a864dcd7bd3a526ccd5a568c9dc46ad2e394c8..d83cb6e4c62aecc6e54a700e5d22f136253e42fb 100644 (file)
@@ -283,6 +283,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E1504FA"),
+               }
+       },
        {
                .driver_data = &acp6x_card,
                .matches = {
@@ -367,6 +374,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "8A3E"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+                       DMI_MATCH(DMI_BOARD_NAME, "8B2F"),
+               }
+       },
        {
                .driver_data = &acp6x_card,
                .matches = {
@@ -381,6 +395,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"),
+               }
+       },
        {}
 };
 
index 0b40fdfb1825bf48bae092f84c6b6c57da1d91e7..d8ec325b9cc906dfedb9366ec5433ba35390f6f0 100644 (file)
@@ -578,7 +578,7 @@ static int cs43130_set_sp_fmt(int dai_id, unsigned int bitwidth_sclk,
                break;
        case SND_SOC_DAIFMT_LEFT_J:
                hi_size = bitwidth_sclk;
-               frm_delay = 2;
+               frm_delay = 0;
                frm_phase = 1;
                break;
        case SND_SOC_DAIFMT_DSP_A:
@@ -1682,7 +1682,7 @@ static ssize_t hpload_dc_r_show(struct device *dev,
        return cs43130_show_dc(dev, buf, HP_RIGHT);
 }
 
-static u16 const cs43130_ac_freq[CS43130_AC_FREQ] = {
+static const u16 cs43130_ac_freq[CS43130_AC_FREQ] = {
        24,
        43,
        93,
@@ -2362,7 +2362,7 @@ static const struct regmap_config cs43130_regmap = {
        .use_single_write       = true,
 };
 
-static u16 const cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
+static const u16 cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
        50,
        120,
 };
index 4c44059427793822d6b8e0836a05d7c1273db42d..6bc068cdcbe2a89e1488530bfa4e6e9c49c0651c 100644 (file)
@@ -696,7 +696,7 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
                aad_pdata->mic_det_thr =
                        da7219_aad_fw_mic_det_thr(dev, fw_val32);
        else
-               aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
+               aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_200_OHMS;
 
        if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
                aad_pdata->jack_ins_deb =
index 355f30779a3487efc74d247eeaf63664051b39ff..b075689db2dcaa4afea6566e767ffbe8230c6df1 100644 (file)
@@ -132,6 +132,9 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = {
                .sig_bits = 24,
        },
 },
+};
+
+static struct snd_soc_dai_driver hdac_hda_hdmi_dais[] = {
 {
        .id = HDAC_HDMI_0_DAI_ID,
        .name = "intel-hdmi-hifi1",
@@ -607,8 +610,16 @@ static const struct snd_soc_component_driver hdac_hda_codec = {
        .endianness             = 1,
 };
 
+static const struct snd_soc_component_driver hdac_hda_hdmi_codec = {
+       .probe                  = hdac_hda_codec_probe,
+       .remove                 = hdac_hda_codec_remove,
+       .idle_bias_on           = false,
+       .endianness             = 1,
+};
+
 static int hdac_hda_dev_probe(struct hdac_device *hdev)
 {
+       struct hdac_hda_priv *hda_pvt = dev_get_drvdata(&hdev->dev);
        struct hdac_ext_link *hlink;
        int ret;
 
@@ -621,9 +632,15 @@ static int hdac_hda_dev_probe(struct hdac_device *hdev)
        snd_hdac_ext_bus_link_get(hdev->bus, hlink);
 
        /* ASoC specific initialization */
-       ret = devm_snd_soc_register_component(&hdev->dev,
-                                        &hdac_hda_codec, hdac_hda_dais,
-                                        ARRAY_SIZE(hdac_hda_dais));
+       if (hda_pvt->need_display_power)
+               ret = devm_snd_soc_register_component(&hdev->dev,
+                                               &hdac_hda_hdmi_codec, hdac_hda_hdmi_dais,
+                                               ARRAY_SIZE(hdac_hda_hdmi_dais));
+       else
+               ret = devm_snd_soc_register_component(&hdev->dev,
+                                               &hdac_hda_codec, hdac_hda_dais,
+                                               ARRAY_SIZE(hdac_hda_dais));
+
        if (ret < 0) {
                dev_err(&hdev->dev, "failed to register HDA codec %d\n", ret);
                return ret;
index 82f9873ffada0af35a6729ee5ee530055d82417b..124c2e144f337357af954e1ee4bc9d7b1e3c4b37 100644 (file)
@@ -2021,6 +2021,11 @@ static int tx_macro_probe(struct platform_device *pdev)
 
        tx->dev = dev;
 
+       /* Set active_decimator default value */
+       tx->active_decimator[TX_MACRO_AIF1_CAP] = -1;
+       tx->active_decimator[TX_MACRO_AIF2_CAP] = -1;
+       tx->active_decimator[TX_MACRO_AIF3_CAP] = -1;
+
        /* set MCLK and NPL rates */
        clk_set_rate(tx->mclk, MCLK_FREQ);
        clk_set_rate(tx->npl, MCLK_FREQ);
index ff3024899f456dc8f20ee196215d23017abefd16..7199d734c79f2c4919f38b7db1f950858232dbd9 100644 (file)
@@ -184,6 +184,7 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
        struct soc_bytes_ext *params = (void *)kcontrol->private_value;
        int i, reg;
        u16 reg_val, *val;
+       __be16 tmp;
 
        val = (u16 *)ucontrol->value.bytes.data;
        reg = NAU8822_REG_EQ1;
@@ -192,8 +193,8 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
                /* conversion of 16-bit integers between native CPU format
                 * and big endian format
                 */
-               reg_val = cpu_to_be16(reg_val);
-               memcpy(val + i, &reg_val, sizeof(reg_val));
+               tmp = cpu_to_be16(reg_val);
+               memcpy(val + i, &tmp, sizeof(tmp));
        }
 
        return 0;
@@ -216,6 +217,7 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
        void *data;
        u16 *val, value;
        int i, reg, ret;
+       __be16 *tmp;
 
        data = kmemdup(ucontrol->value.bytes.data,
                params->max, GFP_KERNEL | GFP_DMA);
@@ -228,7 +230,8 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
                /* conversion of 16-bit integers between native CPU format
                 * and big endian format
                 */
-               value = be16_to_cpu(*(val + i));
+               tmp = (__be16 *)(val + i);
+               value = be16_to_cpup(tmp);
                ret = snd_soc_component_write(component, reg + i, value);
                if (ret) {
                        dev_err(component->dev,
index 7938b52d741d8cd6f354ca61a149db09b41cede2..a0d01d71d8b56f83bc93d04b85681bd1887116ac 100644 (file)
@@ -448,6 +448,7 @@ struct rt5645_priv {
        struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
        struct rt5645_eq_param_s *eq_param;
        struct timer_list btn_check_timer;
+       struct mutex jd_mutex;
 
        int codec_type;
        int sysclk;
@@ -3193,6 +3194,8 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
                                rt5645_enable_push_button_irq(component, true);
                        }
                } else {
+                       if (rt5645->en_button_func)
+                               rt5645_enable_push_button_irq(component, false);
                        snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
                        snd_soc_dapm_sync(dapm);
                        rt5645->jack_type = SND_JACK_HEADPHONE;
@@ -3295,6 +3298,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
        if (!rt5645->component)
                return;
 
+       mutex_lock(&rt5645->jd_mutex);
+
        switch (rt5645->pdata.jd_mode) {
        case 0: /* Not using rt5645 JD */
                if (rt5645->gpiod_hp_det) {
@@ -3321,7 +3326,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
 
        if (!val && (rt5645->jack_type == 0)) { /* jack in */
                report = rt5645_jack_detect(rt5645->component, 1);
-       } else if (!val && rt5645->jack_type != 0) {
+       } else if (!val && rt5645->jack_type == SND_JACK_HEADSET) {
                /* for push button and jack out */
                btn_type = 0;
                if (snd_soc_component_read(rt5645->component, RT5645_INT_IRQ_ST) & 0x4) {
@@ -3377,6 +3382,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
                rt5645_jack_detect(rt5645->component, 0);
        }
 
+       mutex_unlock(&rt5645->jd_mutex);
+
        snd_soc_jack_report(rt5645->hp_jack, report, SND_JACK_HEADPHONE);
        snd_soc_jack_report(rt5645->mic_jack, report, SND_JACK_MICROPHONE);
        if (rt5645->en_button_func)
@@ -4150,6 +4157,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c)
        }
        timer_setup(&rt5645->btn_check_timer, rt5645_btn_check_callback, 0);
 
+       mutex_init(&rt5645->jd_mutex);
        INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
        INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
 
index 044b6f604c090a69401597a5d2427cad86c46466..260bac695b20ab7f93ed1584f1437fd6f219dc92 100644 (file)
@@ -186,7 +186,7 @@ SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
 
 /* Boost mixer */
 static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
-SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1),
+SOC_DAPM_SINGLE("PGA Switch", WM8974_INPPGA, 6, 1, 1),
 };
 
 /* Input PGA */
@@ -246,8 +246,8 @@ static const struct snd_soc_dapm_route wm8974_dapm_routes[] = {
 
        /* Boost Mixer */
        {"ADC", NULL, "Boost Mixer"},
-       {"Boost Mixer", "Aux Switch", "Aux Input"},
-       {"Boost Mixer", NULL, "Input PGA"},
+       {"Boost Mixer", NULL, "Aux Input"},
+       {"Boost Mixer", "PGA Switch", "Input PGA"},
        {"Boost Mixer", NULL, "MICP"},
 
        /* Input PGA */
index 236b12b69ae5171374eaa7cf6008e375391b5074..c01e31175015cc2f354175dec019fac591a98b4b 100644 (file)
@@ -1451,12 +1451,12 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
                ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
                                          &region->base_addr);
                if (ret < 0)
-                       return ret;
+                       goto err;
 
                ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
                                          &offset);
                if (ret < 0)
-                       return ret;
+                       goto err;
 
                region->cumulative_size = offset;
 
@@ -1467,6 +1467,10 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
        }
 
        return 0;
+
+err:
+       kfree(buf->regions);
+       return ret;
 }
 
 static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
index 725c530a363607aefd02b6a325ad0859ec9995f7..be342ee03fb9cb6c225ea5bb42eae22d36254542 100644 (file)
@@ -360,6 +360,7 @@ config SND_SOC_IMX_HDMI
 config SND_SOC_IMX_RPMSG
        tristate "SoC Audio support for i.MX boards with rpmsg"
        depends on RPMSG
+       depends on OF && I2C
        select SND_SOC_IMX_PCM_RPMSG
        select SND_SOC_IMX_AUDIO_RPMSG
        help
index 79e7c6b98a754fb9e2f56b1dec0a1e27030f227d..32bbe5056a63520ec953d3a7425129f18234c1a5 100644 (file)
@@ -673,6 +673,20 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
                           FSL_SAI_CR3_TRCE_MASK,
                           FSL_SAI_CR3_TRCE((dl_cfg[dl_cfg_idx].mask[tx] & trce_mask)));
 
+       /*
+        * When the TERE and FSD_MSTR enabled before configuring the word width
+        * There will be no frame sync clock issue, because word width impact
+        * the generation of frame sync clock.
+        *
+        * TERE enabled earlier only for i.MX8MP case for the hardware limitation,
+        * We need to disable FSD_MSTR before configuring word width, then enable
+        * FSD_MSTR bit for this specific case.
+        */
+       if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
+           !sai->is_consumer_mode)
+               regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+                                  FSL_SAI_CR4_FSD_MSTR, 0);
+
        regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
                           FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
                           FSL_SAI_CR4_CHMOD_MASK,
@@ -680,6 +694,13 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
        regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
                           FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
                           FSL_SAI_CR5_FBT_MASK, val_cr5);
+
+       /* Enable FSD_MSTR after configuring word width */
+       if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
+           !sai->is_consumer_mode)
+               regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+                                  FSL_SAI_CR4_FSD_MSTR, FSL_SAI_CR4_FSD_MSTR);
+
        regmap_write(sai->regmap, FSL_SAI_xMR(tx),
                     ~0UL - ((1 << min(channels, slots)) - 1));
 
index fa0a15263c66dc117dcdd7886de6e9e19660ff22..f0fb33d719c25135722014f9763c65df3289ed7e 100644 (file)
@@ -358,7 +358,7 @@ static int fsl_xcvr_en_aud_pll(struct fsl_xcvr *xcvr, u32 freq)
        struct device *dev = &xcvr->pdev->dev;
        int ret;
 
-       freq = xcvr->soc_data->spdif_only ? freq / 10 : freq;
+       freq = xcvr->soc_data->spdif_only ? freq / 5 : freq;
        clk_disable_unprepare(xcvr->phy_clk);
        ret = clk_set_rate(xcvr->phy_clk, freq);
        if (ret < 0) {
@@ -409,11 +409,21 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
        bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        u32 m_ctl = 0, v_ctl = 0;
        u32 r = substream->runtime->rate, ch = substream->runtime->channels;
-       u32 fout = 32 * r * ch * 10 * 2;
+       u32 fout = 32 * r * ch * 10;
        int ret = 0;
 
        switch (xcvr->mode) {
        case FSL_XCVR_MODE_SPDIF:
+               if (xcvr->soc_data->spdif_only && tx) {
+                       ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL_SET,
+                                                FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM,
+                                                FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM);
+                       if (ret < 0) {
+                               dev_err(dai->dev, "Failed to set bypass fem: %d\n", ret);
+                               return ret;
+                       }
+               }
+               fallthrough;
        case FSL_XCVR_MODE_ARC:
                if (tx) {
                        ret = fsl_xcvr_en_aud_pll(xcvr, fout);
index 6c6ef63cd5d9ef1ae8a7e306e7f0147d42b45c9e..6e172719c9795b1aa0932f7c7bde8285dbd83c55 100644 (file)
@@ -154,6 +154,8 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
                card->dapm_widgets = skl_hda_widgets;
                card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
                if (!ctx->idisp_codec) {
+                       card->dapm_routes = &skl_hda_map[IDISP_ROUTE_COUNT];
+                       num_route -= IDISP_ROUTE_COUNT;
                        for (i = 0; i < IDISP_DAI_COUNT; i++) {
                                skl_hda_be_dai_links[i].codecs = &snd_soc_dummy_dlc;
                                skl_hda_be_dai_links[i].num_codecs = 1;
index 3312ad8a563b3fb812ba4070fd0914b9a07848fd..4e428472977326eb883223d580f9364225fbbc7c 100644 (file)
@@ -1546,7 +1546,7 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
 {
        struct device *dev = card->dev;
        struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev);
-       int sdw_be_num = 0, ssp_num = 0, dmic_num = 0, hdmi_num = 0, bt_num = 0;
+       int sdw_be_num = 0, ssp_num = 0, dmic_num = 0, bt_num = 0;
        struct mc_private *ctx = snd_soc_card_get_drvdata(card);
        struct snd_soc_acpi_mach_params *mach_params = &mach->mach_params;
        const struct snd_soc_acpi_link_adr *adr_link = mach_params->links;
@@ -1564,6 +1564,7 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
        char *codec_name, *codec_dai_name;
        int i, j, be_id = 0;
        int codec_index;
+       int hdmi_num;
        int ret;
 
        ret = get_dailink_info(dev, adr_link, &sdw_be_num, &codec_conf_num);
@@ -1584,14 +1585,13 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
                ssp_num = hweight_long(ssp_mask);
        }
 
-       if (mach_params->codec_mask & IDISP_CODEC_MASK) {
+       if (mach_params->codec_mask & IDISP_CODEC_MASK)
                ctx->hdmi.idisp_codec = true;
 
-               if (sof_sdw_quirk & SOF_SDW_TGL_HDMI)
-                       hdmi_num = SOF_TGL_HDMI_COUNT;
-               else
-                       hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
-       }
+       if (sof_sdw_quirk & SOF_SDW_TGL_HDMI)
+               hdmi_num = SOF_TGL_HDMI_COUNT;
+       else
+               hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
 
        /* enable dmic01 & dmic16k */
        if (sof_sdw_quirk & SOF_SDW_PCH_DMIC || mach_params->dmic_num)
@@ -1601,7 +1601,8 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
                bt_num = 1;
 
        dev_dbg(dev, "sdw %d, ssp %d, dmic %d, hdmi %d, bt: %d\n",
-               sdw_be_num, ssp_num, dmic_num, hdmi_num, bt_num);
+               sdw_be_num, ssp_num, dmic_num,
+               ctx->hdmi.idisp_codec ? hdmi_num : 0, bt_num);
 
        /* allocate BE dailinks */
        num_links = sdw_be_num + ssp_num + dmic_num + hdmi_num + bt_num;
index d0c02e8a67854c3d6c38102c9bde12238a422ba2..174aae6e0398f2c311f2e53121681fe74e1bd6be 100644 (file)
@@ -240,8 +240,10 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
        snd_pcm_set_sync(substream);
 
        mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
-       if (!mconfig)
+       if (!mconfig) {
+               kfree(dma_params);
                return -EINVAL;
+       }
 
        skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
 
@@ -1462,6 +1464,7 @@ int skl_platform_register(struct device *dev)
                dais = krealloc(skl->dais, sizeof(skl_fe_dai) +
                                sizeof(skl_platform_dai), GFP_KERNEL);
                if (!dais) {
+                       kfree(skl->dais);
                        ret = -ENOMEM;
                        goto err;
                }
@@ -1474,8 +1477,10 @@ int skl_platform_register(struct device *dev)
 
        ret = devm_snd_soc_register_component(dev, &skl_component,
                                         skl->dais, num_dais);
-       if (ret)
+       if (ret) {
+               kfree(skl->dais);
                dev_err(dev, "soc component registration failed %d\n", ret);
+       }
 err:
        return ret;
 }
index 7a425271b08b1686fc8310879a47e52be456da30..fd9624ad5f72b07e2a58e5ba220de2893aa7d301 100644 (file)
@@ -1003,8 +1003,10 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
 
        reply.size = (reply.header >> 32) & IPC_DATA_OFFSET_SZ_MASK;
        buf = krealloc(reply.data, reply.size, GFP_KERNEL);
-       if (!buf)
+       if (!buf) {
+               kfree(reply.data);
                return -ENOMEM;
+       }
        *payload = buf;
        *bytes = reply.size;
 
index d93b18f07be59a9218281c3f370983e6104d7970..39cb0b889aff91326fccf4e0aea681707f89b17a 100644 (file)
@@ -27,6 +27,23 @@ struct sc8280xp_snd_data {
 static int sc8280xp_snd_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct sc8280xp_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+       struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+       struct snd_soc_card *card = rtd->card;
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case WSA_CODEC_DMA_RX_1:
+               /*
+                * set limit of 0dB on Digital Volume for Speakers,
+                * this can prevent damage of speakers to some extent without
+                * active speaker protection
+                */
+               snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 84);
+               snd_soc_limit_volume(card, "WSA_RX1 Digital Volume", 84);
+               break;
+       default:
+               break;
+       }
 
        return qcom_snd_wcd_jack_setup(rtd, &data->jack, &data->jack_setup);
 }
index 55b009d3c6815434c82d0250dd6dae5158d33dde..2d25748ca70662bf771c6896297ccb6a0fb0798f 100644 (file)
@@ -661,7 +661,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card,
        kctl = snd_soc_card_get_kcontrol(card, name);
        if (kctl) {
                struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
-               if (max <= mc->max) {
+               if (max <= mc->max - mc->min) {
                        mc->platform_max = max;
                        ret = 0;
                }
index 323e4d7b6adfe12162a8c1ac43bbb4caa2d5cdd2..f6d1b2e11795fb07d21ef33369c5c95a22a7aa49 100644 (file)
@@ -704,11 +704,6 @@ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
                        if (snd_soc_dai_active(dai) == 0 &&
                            (dai->rate || dai->channels || dai->sample_bits))
                                soc_pcm_set_dai_params(dai, NULL);
-
-                       if (snd_soc_dai_stream_active(dai, substream->stream) ==  0) {
-                               if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
-                                       snd_soc_dai_digital_mute(dai, 1, substream->stream);
-                       }
                }
        }
 
@@ -947,8 +942,10 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
                if (snd_soc_dai_active(dai) == 1)
                        soc_pcm_set_dai_params(dai, NULL);
 
-               if (snd_soc_dai_stream_active(dai, substream->stream) == 1)
-                       snd_soc_dai_digital_mute(dai, 1, substream->stream);
+               if (snd_soc_dai_stream_active(dai, substream->stream) == 1) {
+                       if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
+                               snd_soc_dai_digital_mute(dai, 1, substream->stream);
+               }
        }
 
        /* run the stream event */
index ba4ef290b6343fee3b8267180ffdb352fef6b77a..2c7a5e7a364cf53351e901d907252f258fee2c71 100644 (file)
@@ -493,6 +493,7 @@ static int sof_ipc3_widget_setup_comp_mixer(struct snd_sof_widget *swidget)
 static int sof_ipc3_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
 {
        struct snd_soc_component *scomp = swidget->scomp;
+       struct snd_sof_pipeline *spipe = swidget->spipe;
        struct sof_ipc_pipe_new *pipeline;
        struct snd_sof_widget *comp_swidget;
        int ret;
@@ -545,6 +546,7 @@ static int sof_ipc3_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
                swidget->dynamic_pipeline_widget);
 
        swidget->core = pipeline->core;
+       spipe->core_mask |= BIT(pipeline->core);
 
        return 0;
 
index 938efaceb81cf107bdb2fceeaed58727ddd19fbc..b4cdcec33e120944522c572acaf722a2e82a0acb 100644 (file)
@@ -89,7 +89,7 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
        struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
        struct sof_ipc4_gain *gain = swidget->private;
        struct sof_ipc4_msg *msg = &cdata->msg;
-       struct sof_ipc4_gain_data data;
+       struct sof_ipc4_gain_params params;
        bool all_channels_equal = true;
        u32 value;
        int ret, i;
@@ -109,20 +109,20 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
         */
        for (i = 0; i < scontrol->num_channels; i++) {
                if (all_channels_equal) {
-                       data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
-                       data.init_val = cdata->chanv[0].value;
+                       params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+                       params.init_val = cdata->chanv[0].value;
                } else {
-                       data.channels = cdata->chanv[i].channel;
-                       data.init_val = cdata->chanv[i].value;
+                       params.channels = cdata->chanv[i].channel;
+                       params.init_val = cdata->chanv[i].value;
                }
 
                /* set curve type and duration from topology */
-               data.curve_duration_l = gain->data.curve_duration_l;
-               data.curve_duration_h = gain->data.curve_duration_h;
-               data.curve_type = gain->data.curve_type;
+               params.curve_duration_l = gain->data.params.curve_duration_l;
+               params.curve_duration_h = gain->data.params.curve_duration_h;
+               params.curve_type = gain->data.params.curve_type;
 
-               msg->data_ptr = &data;
-               msg->data_size = sizeof(data);
+               msg->data_ptr = &params;
+               msg->data_size = sizeof(params);
 
                ret = sof_ipc4_set_get_kcontrol_data(scontrol, true, lock);
                msg->data_ptr = NULL;
index b24a64377f687463c74da373257759f0cbce1665..e012b6e166accd07e5d46cf99b2938b14e58888e 100644 (file)
@@ -130,18 +130,18 @@ static const struct sof_topology_token comp_ext_tokens[] = {
 
 static const struct sof_topology_token gain_tokens[] = {
        {SOF_TKN_GAIN_RAMP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
-               get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
+               get_token_u32, offsetof(struct sof_ipc4_gain_params, curve_type)},
        {SOF_TKN_GAIN_RAMP_DURATION,
                SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
-               offsetof(struct sof_ipc4_gain_data, curve_duration_l)},
+               offsetof(struct sof_ipc4_gain_params, curve_duration_l)},
        {SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
-               get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
+               get_token_u32, offsetof(struct sof_ipc4_gain_params, init_val)},
 };
 
 /* SRC */
 static const struct sof_topology_token src_tokens[] = {
        {SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
-               offsetof(struct sof_ipc4_src, sink_rate)},
+               offsetof(struct sof_ipc4_src_data, sink_rate)},
 };
 
 static const struct sof_token_info ipc4_token_list[SOF_TOKEN_COUNT] = {
@@ -656,6 +656,7 @@ static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
 {
        struct snd_soc_component *scomp = swidget->scomp;
        struct sof_ipc4_pipeline *pipeline;
+       struct snd_sof_pipeline *spipe = swidget->spipe;
        int ret;
 
        pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
@@ -670,6 +671,7 @@ static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
        }
 
        swidget->core = pipeline->core_id;
+       spipe->core_mask |= BIT(pipeline->core_id);
 
        if (pipeline->use_chain_dma) {
                dev_dbg(scomp->dev, "Set up chain DMA for %s\n", swidget->widget->name);
@@ -718,15 +720,15 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
 
        swidget->private = gain;
 
-       gain->data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
-       gain->data.init_val = SOF_IPC4_VOL_ZERO_DB;
+       gain->data.params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+       gain->data.params.init_val = SOF_IPC4_VOL_ZERO_DB;
 
-       ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, &gain->base_config);
+       ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, &gain->data.base_config);
        if (ret)
                goto err;
 
-       ret = sof_update_ipc_object(scomp, &gain->data, SOF_GAIN_TOKENS, swidget->tuples,
-                                   swidget->num_tuples, sizeof(gain->data), 1);
+       ret = sof_update_ipc_object(scomp, &gain->data.params, SOF_GAIN_TOKENS,
+                                   swidget->tuples, swidget->num_tuples, sizeof(gain->data), 1);
        if (ret) {
                dev_err(scomp->dev, "Parsing gain tokens failed\n");
                goto err;
@@ -734,8 +736,8 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
 
        dev_dbg(scomp->dev,
                "pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x\n",
-               swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l,
-               gain->data.init_val);
+               swidget->widget->name, gain->data.params.curve_type,
+               gain->data.params.curve_duration_l, gain->data.params.init_val);
 
        ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
        if (ret)
@@ -797,6 +799,7 @@ err:
 static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
 {
        struct snd_soc_component *scomp = swidget->scomp;
+       struct snd_sof_pipeline *spipe = swidget->spipe;
        struct sof_ipc4_src *src;
        int ret;
 
@@ -808,18 +811,21 @@ static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
 
        swidget->private = src;
 
-       ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt, &src->base_config);
+       ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt,
+                                    &src->data.base_config);
        if (ret)
                goto err;
 
-       ret = sof_update_ipc_object(scomp, src, SOF_SRC_TOKENS, swidget->tuples,
+       ret = sof_update_ipc_object(scomp, &src->data, SOF_SRC_TOKENS, swidget->tuples,
                                    swidget->num_tuples, sizeof(*src), 1);
        if (ret) {
                dev_err(scomp->dev, "Parsing SRC tokens failed\n");
                goto err;
        }
 
-       dev_dbg(scomp->dev, "SRC sink rate %d\n", src->sink_rate);
+       spipe->core_mask |= BIT(swidget->core);
+
+       dev_dbg(scomp->dev, "SRC sink rate %d\n", src->data.sink_rate);
 
        ret = sof_ipc4_widget_setup_msg(swidget, &src->msg);
        if (ret)
@@ -864,6 +870,7 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
 {
        struct snd_soc_component *scomp = swidget->scomp;
        struct sof_ipc4_fw_module *fw_module;
+       struct snd_sof_pipeline *spipe = swidget->spipe;
        struct sof_ipc4_process *process;
        void *cfg;
        int ret;
@@ -920,6 +927,9 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
 
        sof_ipc4_widget_update_kcontrol_module_id(swidget);
 
+       /* set pipeline core mask to keep track of the core the module is scheduled to run on */
+       spipe->core_mask |= BIT(swidget->core);
+
        return 0;
 free_base_cfg_ext:
        kfree(process->base_config_ext);
@@ -1816,7 +1826,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
        u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
        int ret;
 
-       ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &gain->base_config,
+       ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &gain->data.base_config,
                                            pipeline_params, available_fmt);
        if (ret < 0)
                return ret;
@@ -1826,7 +1836,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
        out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
        out_ref_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
 
-       ret = sof_ipc4_init_output_audio_fmt(sdev, &gain->base_config, available_fmt,
+       ret = sof_ipc4_init_output_audio_fmt(sdev, &gain->data.base_config, available_fmt,
                                             out_ref_rate, out_ref_channels, out_ref_valid_bits);
        if (ret < 0) {
                dev_err(sdev->dev, "Failed to initialize output format for %s",
@@ -1835,7 +1845,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
        }
 
        /* update pipeline memory usage */
-       sof_ipc4_update_resource_usage(sdev, swidget, &gain->base_config);
+       sof_ipc4_update_resource_usage(sdev, swidget, &gain->data.base_config);
 
        return 0;
 }
@@ -1891,7 +1901,7 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
        u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
        int output_format_index, input_format_index;
 
-       input_format_index = sof_ipc4_init_input_audio_fmt(sdev, swidget, &src->base_config,
+       input_format_index = sof_ipc4_init_input_audio_fmt(sdev, swidget, &src->data.base_config,
                                                           pipeline_params, available_fmt);
        if (input_format_index < 0)
                return input_format_index;
@@ -1921,7 +1931,7 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
         */
        out_ref_rate = params_rate(fe_params);
 
-       output_format_index = sof_ipc4_init_output_audio_fmt(sdev, &src->base_config,
+       output_format_index = sof_ipc4_init_output_audio_fmt(sdev, &src->data.base_config,
                                                             available_fmt, out_ref_rate,
                                                             out_ref_channels, out_ref_valid_bits);
        if (output_format_index < 0) {
@@ -1931,10 +1941,10 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
        }
 
        /* update pipeline memory usage */
-       sof_ipc4_update_resource_usage(sdev, swidget, &src->base_config);
+       sof_ipc4_update_resource_usage(sdev, swidget, &src->data.base_config);
 
        out_audio_fmt = &available_fmt->output_pin_fmts[output_format_index].audio_fmt;
-       src->sink_rate = out_audio_fmt->sampling_frequency;
+       src->data.sink_rate = out_audio_fmt->sampling_frequency;
 
        /* update pipeline_params for sink widgets */
        return sof_ipc4_update_hw_params(sdev, pipeline_params, out_audio_fmt);
@@ -2314,9 +2324,8 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
        {
                struct sof_ipc4_gain *gain = swidget->private;
 
-               ipc_size = sizeof(struct sof_ipc4_base_module_cfg) +
-                          sizeof(struct sof_ipc4_gain_data);
-               ipc_data = gain;
+               ipc_size = sizeof(gain->data);
+               ipc_data = &gain->data;
 
                msg = &gain->msg;
                break;
@@ -2335,8 +2344,8 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
        {
                struct sof_ipc4_src *src = swidget->private;
 
-               ipc_size = sizeof(struct sof_ipc4_base_module_cfg) + sizeof(src->sink_rate);
-               ipc_data = src;
+               ipc_size = sizeof(src->data);
+               ipc_data = &src->data;
 
                msg = &src->msg;
                break;
index 0a57b8ab3e08f09db7020e83946aac45f9969e89..dce174a190ddc630068ee5c6393a83dfb2dd2d05 100644 (file)
@@ -361,7 +361,7 @@ struct sof_ipc4_control_msg_payload {
 } __packed;
 
 /**
- * struct sof_ipc4_gain_data - IPC gain blob
+ * struct sof_ipc4_gain_params - IPC gain parameters
  * @channels: Channels
  * @init_val: Initial value
  * @curve_type: Curve type
@@ -369,24 +369,32 @@ struct sof_ipc4_control_msg_payload {
  * @curve_duration_l: Curve duration low part
  * @curve_duration_h: Curve duration high part
  */
-struct sof_ipc4_gain_data {
+struct sof_ipc4_gain_params {
        uint32_t channels;
        uint32_t init_val;
        uint32_t curve_type;
        uint32_t reserved;
        uint32_t curve_duration_l;
        uint32_t curve_duration_h;
-} __aligned(8);
+} __packed __aligned(4);
 
 /**
- * struct sof_ipc4_gain - gain config data
+ * struct sof_ipc4_gain_data - IPC gain init blob
  * @base_config: IPC base config data
+ * @params: Initial parameters for the gain module
+ */
+struct sof_ipc4_gain_data {
+       struct sof_ipc4_base_module_cfg base_config;
+       struct sof_ipc4_gain_params params;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_gain - gain config data
  * @data: IPC gain blob
  * @available_fmt: Available audio format
  * @msg: message structure for gain
  */
 struct sof_ipc4_gain {
-       struct sof_ipc4_base_module_cfg base_config;
        struct sof_ipc4_gain_data data;
        struct sof_ipc4_available_audio_format available_fmt;
        struct sof_ipc4_msg msg;
@@ -404,16 +412,24 @@ struct sof_ipc4_mixer {
        struct sof_ipc4_msg msg;
 };
 
-/**
- * struct sof_ipc4_src SRC config data
+/*
+ * struct sof_ipc4_src_data - IPC data for SRC
  * @base_config: IPC base config data
  * @sink_rate: Output rate for sink module
+ */
+struct sof_ipc4_src_data {
+       struct sof_ipc4_base_module_cfg base_config;
+       uint32_t sink_rate;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_src - SRC config data
+ * @data: IPC base config data
  * @available_fmt: Available audio format
  * @msg: IPC4 message struct containing header and data info
  */
 struct sof_ipc4_src {
-       struct sof_ipc4_base_module_cfg base_config;
-       uint32_t sink_rate;
+       struct sof_ipc4_src_data data;
        struct sof_ipc4_available_audio_format available_fmt;
        struct sof_ipc4_msg msg;
 };
index b69fa788b16f71f951c158e894cd1dceb97f8335..e0d88e7aa8ca0291f2763619f567a667bbda39e6 100644 (file)
@@ -597,6 +597,9 @@ static struct snd_sof_dsp_ops sof_mt8186_ops = {
 
 static struct snd_sof_of_mach sof_mt8186_machs[] = {
        {
+               .compatible = "google,steelix",
+               .sof_tplg_filename = "sof-mt8186-google-steelix.tplg"
+       }, {
                .compatible = "mediatek,mt8186",
                .sof_tplg_filename = "sof-mt8186.tplg",
        },
index 563fe6f7789f735eb68aa1ead4aac6ec9df63297..77cc64ac71131b6ed2eba300714aab90b887984c 100644 (file)
@@ -46,6 +46,7 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
                                    struct snd_sof_widget *swidget)
 {
        const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+       struct snd_sof_pipeline *spipe = swidget->spipe;
        struct snd_sof_widget *pipe_widget;
        int err = 0;
        int ret;
@@ -87,15 +88,22 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
        }
 
        /*
-        * disable widget core. continue to route setup status and complete flag
-        * even if this fails and return the appropriate error
+        * decrement ref count for cores associated with all modules in the pipeline and clear
+        * the complete flag
         */
-       ret = snd_sof_dsp_core_put(sdev, swidget->core);
-       if (ret < 0) {
-               dev_err(sdev->dev, "error: failed to disable target core: %d for widget %s\n",
-                       swidget->core, swidget->widget->name);
-               if (!err)
-                       err = ret;
+       if (swidget->id == snd_soc_dapm_scheduler) {
+               int i;
+
+               for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
+                       ret = snd_sof_dsp_core_put(sdev, i);
+                       if (ret < 0) {
+                               dev_err(sdev->dev, "failed to disable target core: %d for pipeline %s\n",
+                                       i, swidget->widget->name);
+                               if (!err)
+                                       err = ret;
+                       }
+               }
+               swidget->spipe->complete = 0;
        }
 
        /*
@@ -108,10 +116,6 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
                        err = ret;
        }
 
-       /* clear pipeline complete */
-       if (swidget->id == snd_soc_dapm_scheduler)
-               swidget->spipe->complete = 0;
-
        if (!err)
                dev_dbg(sdev->dev, "widget %s freed\n", swidget->widget->name);
 
@@ -134,8 +138,10 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
                                     struct snd_sof_widget *swidget)
 {
        const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+       struct snd_sof_pipeline *spipe = swidget->spipe;
        bool use_count_decremented = false;
        int ret;
+       int i;
 
        /* skip if there is no private data */
        if (!swidget->private)
@@ -166,19 +172,23 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
                        goto use_count_dec;
        }
 
-       /* enable widget core */
-       ret = snd_sof_dsp_core_get(sdev, swidget->core);
-       if (ret < 0) {
-               dev_err(sdev->dev, "error: failed to enable target core for widget %s\n",
-                       swidget->widget->name);
-               goto pipe_widget_free;
+       /* update ref count for cores associated with all modules in the pipeline */
+       if (swidget->id == snd_soc_dapm_scheduler) {
+               for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
+                       ret = snd_sof_dsp_core_get(sdev, i);
+                       if (ret < 0) {
+                               dev_err(sdev->dev, "failed to enable target core %d for pipeline %s\n",
+                                       i, swidget->widget->name);
+                               goto pipe_widget_free;
+                       }
+               }
        }
 
        /* setup widget in the DSP */
        if (tplg_ops && tplg_ops->widget_setup) {
                ret = tplg_ops->widget_setup(sdev, swidget);
                if (ret < 0)
-                       goto core_put;
+                       goto pipe_widget_free;
        }
 
        /* send config for DAI components */
@@ -208,15 +218,22 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
        return 0;
 
 widget_free:
-       /* widget use_count and core ref_count will both be decremented by sof_widget_free() */
+       /* widget use_count will be decremented by sof_widget_free() */
        sof_widget_free_unlocked(sdev, swidget);
        use_count_decremented = true;
-core_put:
-       if (!use_count_decremented)
-               snd_sof_dsp_core_put(sdev, swidget->core);
 pipe_widget_free:
-       if (swidget->id != snd_soc_dapm_scheduler)
+       if (swidget->id != snd_soc_dapm_scheduler) {
                sof_widget_free_unlocked(sdev, swidget->spipe->pipe_widget);
+       } else {
+               int j;
+
+               /* decrement ref count for all cores that were updated previously */
+               for_each_set_bit(j, &spipe->core_mask, sdev->num_cores) {
+                       if (j >= i)
+                               break;
+                       snd_sof_dsp_core_put(sdev, j);
+               }
+       }
 use_count_dec:
        if (!use_count_decremented)
                swidget->use_count--;
index 5d5eeb1a1a6f0d26838004838bcbe8c856fe4579..a6d6bcd00ceeceaca5cc7c6b07bb4045403aca27 100644 (file)
@@ -480,6 +480,7 @@ struct snd_sof_widget {
  * @paused_count: Count of number of PCM's that have started and have currently paused this
                  pipeline
  * @complete: flag used to indicate that pipeline set up is complete.
+ * @core_mask: Mask containing target cores for all modules in the pipeline
  * @list: List item in sdev pipeline_list
  */
 struct snd_sof_pipeline {
@@ -487,6 +488,7 @@ struct snd_sof_pipeline {
        int started_count;
        int paused_count;
        int complete;
+       unsigned long core_mask;
        struct list_head list;
 };
 
index a3a3af252259d9f447da23381bc9cb37d2e22311..37ec671a2d766fb34cc78e08d7a5aeb8f619381b 100644 (file)
@@ -1736,8 +1736,10 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
        /* perform pcm set op */
        if (ipc_pcm_ops && ipc_pcm_ops->pcm_setup) {
                ret = ipc_pcm_ops->pcm_setup(sdev, spcm);
-               if (ret < 0)
+               if (ret < 0) {
+                       kfree(spcm);
                        return ret;
+               }
        }
 
        dai_drv->dobj.private = spcm;
index 898bc3baca7b994ef202e80ecbff93288ddc7bba..c8d48566e17598df4e7c390240512ac25c84128d 100644 (file)
@@ -2978,6 +2978,7 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
 #define SND_DJM_850_IDX                0x2
 #define SND_DJM_900NXS2_IDX    0x3
 #define SND_DJM_750MK2_IDX     0x4
+#define SND_DJM_450_IDX                0x5
 
 
 #define SND_DJM_CTL(_name, suffix, _default_value, _windex) { \
@@ -3108,6 +3109,31 @@ static const struct snd_djm_ctl snd_djm_ctls_250mk2[] = {
 };
 
 
+// DJM-450
+static const u16 snd_djm_opts_450_cap1[] = {
+       0x0103, 0x0100, 0x0106, 0x0107, 0x0108, 0x0109, 0x010d, 0x010a };
+
+static const u16 snd_djm_opts_450_cap2[] = {
+       0x0203, 0x0200, 0x0206, 0x0207, 0x0208, 0x0209, 0x020d, 0x020a };
+
+static const u16 snd_djm_opts_450_cap3[] = {
+       0x030a, 0x0311, 0x0312, 0x0307, 0x0308, 0x0309, 0x030d };
+
+static const u16 snd_djm_opts_450_pb1[] = { 0x0100, 0x0101, 0x0104 };
+static const u16 snd_djm_opts_450_pb2[] = { 0x0200, 0x0201, 0x0204 };
+static const u16 snd_djm_opts_450_pb3[] = { 0x0300, 0x0301, 0x0304 };
+
+static const struct snd_djm_ctl snd_djm_ctls_450[] = {
+       SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
+       SND_DJM_CTL("Ch1 Input",   450_cap1, 2, SND_DJM_WINDEX_CAP),
+       SND_DJM_CTL("Ch2 Input",   450_cap2, 2, SND_DJM_WINDEX_CAP),
+       SND_DJM_CTL("Ch3 Input",   450_cap3, 0, SND_DJM_WINDEX_CAP),
+       SND_DJM_CTL("Ch1 Output",   450_pb1, 0, SND_DJM_WINDEX_PB),
+       SND_DJM_CTL("Ch2 Output",   450_pb2, 1, SND_DJM_WINDEX_PB),
+       SND_DJM_CTL("Ch3 Output",   450_pb3, 2, SND_DJM_WINDEX_PB)
+};
+
+
 // DJM-750
 static const u16 snd_djm_opts_750_cap1[] = {
        0x0101, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a, 0x010f };
@@ -3203,6 +3229,7 @@ static const struct snd_djm_device snd_djm_devices[] = {
        [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
        [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
        [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
+       [SND_DJM_450_IDX] = SND_DJM_DEVICE(450),
 };
 
 
@@ -3454,6 +3481,9 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
        case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
                err = snd_djm_controls_create(mixer, SND_DJM_250MK2_IDX);
                break;
+       case USB_ID(0x2b73, 0x0013): /* Pioneer DJ DJM-450 */
+               err = snd_djm_controls_create(mixer, SND_DJM_450_IDX);
+               break;
        case USB_ID(0x08e4, 0x017f): /* Pioneer DJ DJM-750 */
                err = snd_djm_controls_create(mixer, SND_DJM_750_IDX);
                break;
index 649ebdef9c3fa0ac5fd052e397ff4ce23f229ee1..1685d7ea6a9f70021d8ea3fcb10ad5eb316176d8 100644 (file)
@@ -6,7 +6,6 @@
  *
  * Yes, this is unfortunate.  A better solution is in the works.
  */
-NORETURN(__invalid_creds)
 NORETURN(__kunit_abort)
 NORETURN(__module_put_and_kthread_exit)
 NORETURN(__reiserfs_panic)
index a343823c8ddfc9e0023b55b2c648db99d8de7ba7..61c2c96cc0701b886d7c1daecd92cfa25581d1f7 100644 (file)
@@ -434,6 +434,11 @@ static void json_print_metric(void *ps __maybe_unused, const char *group,
        strbuf_release(&buf);
 }
 
+static bool json_skip_duplicate_pmus(void *ps __maybe_unused)
+{
+       return false;
+}
+
 static bool default_skip_duplicate_pmus(void *ps)
 {
        struct print_state *print_state = ps;
@@ -503,6 +508,7 @@ int cmd_list(int argc, const char **argv)
                        .print_end = json_print_end,
                        .print_event = json_print_event,
                        .print_metric = json_print_metric,
+                       .skip_duplicate_pmus = json_skip_duplicate_pmus,
                };
                ps = &json_ps;
        } else {
index e2848a9d48487b6b69e1eabad5a0241485263255..afcdad58ef89c24d5a8b90dc41c3611ccd639fa4 100644 (file)
         "MetricName": "slots_lost_misspeculation_fraction",
         "MetricExpr": "100 * ((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots))",
         "BriefDescription": "Fraction of slots lost due to misspeculation",
+        "DefaultMetricgroupName": "TopdownL1",
         "MetricGroup": "Default;TopdownL1",
         "ScaleUnit": "1percent of slots"
     },
         "MetricName": "retired_fraction",
         "MetricExpr": "100 * (OP_RETIRED / (CPU_CYCLES * #slots))",
         "BriefDescription": "Fraction of slots retiring, useful work",
+        "DefaultMetricgroupName": "TopdownL1",
         "MetricGroup": "Default;TopdownL1",
        "ScaleUnit": "1percent of slots"
     },
index 0484736d9fe440f312999bd33a60c0765bdd3032..ca3e0404f18720d7a3cc2376896195f55cf1192d 100644 (file)
@@ -225,7 +225,7 @@ static struct metric *metric__new(const struct pmu_metric *pm,
 
        m->pmu = pm->pmu ?: "cpu";
        m->metric_name = pm->metric_name;
-       m->default_metricgroup_name = pm->default_metricgroup_name;
+       m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
        m->modifier = NULL;
        if (modifier) {
                m->modifier = strdup(modifier);
index 90f3c9802ffb80fcb1c8a083835e1447087fcae6..95dc58b94178bf00b447b04440644bba1cd2209d 100644 (file)
@@ -62,5 +62,6 @@ cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
 cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
 cxl_core-y += config_check.o
 cxl_core-y += cxl_core_test.o
+cxl_core-y += cxl_core_exports.o
 
 obj-m += test/
diff --git a/tools/testing/cxl/cxl_core_exports.c b/tools/testing/cxl/cxl_core_exports.c
new file mode 100644 (file)
index 0000000..077e688
--- /dev/null
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "cxl.h"
+
+/* Exporting of cxl_core symbols that are only used by cxl_test */
+EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, CXL);
index b8854629990227d9f3f984298db0e70ce95849d4..f4e517a0c7740ffa2dfb4889231d42fad438a5a9 100644 (file)
@@ -669,10 +669,11 @@ static int mock_decoder_commit(struct cxl_decoder *cxld)
                return 0;
 
        dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
-       if (port->commit_end + 1 != id) {
+       if (cxl_num_decoders_committed(port) != id) {
                dev_dbg(&port->dev,
                        "%s: out of order commit, expected decoder%d.%d\n",
-                       dev_name(&cxld->dev), port->id, port->commit_end + 1);
+                       dev_name(&cxld->dev), port->id,
+                       cxl_num_decoders_committed(port));
                return -EBUSY;
        }
 
index fd26189d53be8ea63d3a1c8203e2ebeef93f5a0e..b8419f460368a4338bc40d70f2f786cea8a993f6 100644 (file)
@@ -924,7 +924,7 @@ static __init int ndtest_init(void)
 
        nfit_test_setup(ndtest_resource_lookup, NULL);
 
-       rc = class_regster(&ndtest_dimm_class);
+       rc = class_register(&ndtest_dimm_class);
        if (rc)
                goto err_register;
 
index 3b2061d1c1a527c9868d8a80c18a1a91781f27e4..8247a7c69c36d7d446083d422ada0c5c6d897625 100644 (file)
@@ -155,12 +155,10 @@ ifneq ($(KBUILD_OUTPUT),)
   abs_objtree := $(realpath $(abs_objtree))
   BUILD := $(abs_objtree)/kselftest
   KHDR_INCLUDES := -isystem ${abs_objtree}/usr/include
-  KHDR_DIR := ${abs_objtree}/usr/include
 else
   BUILD := $(CURDIR)
   abs_srctree := $(shell cd $(top_srcdir) && pwd)
   KHDR_INCLUDES := -isystem ${abs_srctree}/usr/include
-  KHDR_DIR := ${abs_srctree}/usr/include
   DEFAULT_INSTALL_HDR_PATH := 1
 endif
 
@@ -174,7 +172,7 @@ export KHDR_INCLUDES
 # all isn't the first target in the file.
 .DEFAULT_GOAL := all
 
-all: kernel_header_files
+all:
        @ret=1;                                                 \
        for TARGET in $(TARGETS); do                            \
                BUILD_TARGET=$$BUILD/$$TARGET;                  \
@@ -185,23 +183,6 @@ all: kernel_header_files
                ret=$$((ret * $$?));                            \
        done; exit $$ret;
 
-kernel_header_files:
-       @ls $(KHDR_DIR)/linux/*.h >/dev/null 2>/dev/null;                          \
-       if [ $$? -ne 0 ]; then                                                     \
-            RED='\033[1;31m';                                                  \
-            NOCOLOR='\033[0m';                                                 \
-            echo;                                                              \
-            echo -e "$${RED}error$${NOCOLOR}: missing kernel header files.";   \
-            echo "Please run this and try again:";                             \
-            echo;                                                              \
-            echo "    cd $(top_srcdir)";                                       \
-            echo "    make headers";                                           \
-            echo;                                                              \
-           exit 1;                                                                \
-       fi
-
-.PHONY: kernel_header_files
-
 run_tests: all
        @for TARGET in $(TARGETS); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
index 2e70a6048278459e889574972ce29221510221a3..49a29dbc19107229c52b1b2bdd055854295fca4f 100644 (file)
@@ -50,7 +50,6 @@ CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_XXHASH=y
 CONFIG_DCB=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
 CONFIG_DEBUG_MEMORY_INIT=y
index fc6b2954e8f50adf1d3ba28da95be0cb5ea9de51..59993fc9c0d7e2b33d9ac6e4fa1caf434398cc04 100644 (file)
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
 #include <test_progs.h>
 #include <network_helpers.h>
+#include "tailcall_poke.skel.h"
+
 
 /* test_tailcall_1 checks basic functionality by patching multiple locations
  * in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -1105,6 +1108,85 @@ out:
        bpf_object__close(tgt_obj);
 }
 
+#define JMP_TABLE "/sys/fs/bpf/jmp_table"
+
+static int poke_thread_exit;
+
+static void *poke_update(void *arg)
+{
+       __u32 zero = 0, prog1_fd, prog2_fd, map_fd;
+       struct tailcall_poke *call = arg;
+
+       map_fd = bpf_map__fd(call->maps.jmp_table);
+       prog1_fd = bpf_program__fd(call->progs.call1);
+       prog2_fd = bpf_program__fd(call->progs.call2);
+
+       while (!poke_thread_exit) {
+               bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
+               bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
+       }
+
+       return NULL;
+}
+
+/*
+ * We are trying to hit prog array update during another program load
+ * that shares the same prog array map.
+ *
+ * For that we share the jmp_table map between two skeleton instances
+ * by pinning the jmp_table to same path. Then first skeleton instance
+ * periodically updates jmp_table in 'poke update' thread while we load
+ * the second skeleton instance in the main thread.
+ */
+static void test_tailcall_poke(void)
+{
+       struct tailcall_poke *call, *test;
+       int err, cnt = 10;
+       pthread_t thread;
+
+       unlink(JMP_TABLE);
+
+       call = tailcall_poke__open_and_load();
+       if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
+               return;
+
+       err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
+       if (!ASSERT_OK(err, "bpf_map__pin"))
+               goto out;
+
+       err = pthread_create(&thread, NULL, poke_update, call);
+       if (!ASSERT_OK(err, "new toggler"))
+               goto out;
+
+       while (cnt--) {
+               test = tailcall_poke__open();
+               if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
+                       break;
+
+               err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
+               if (!ASSERT_OK(err, "bpf_map__pin")) {
+                       tailcall_poke__destroy(test);
+                       break;
+               }
+
+               bpf_program__set_autoload(test->progs.test, true);
+               bpf_program__set_autoload(test->progs.call1, false);
+               bpf_program__set_autoload(test->progs.call2, false);
+
+               err = tailcall_poke__load(test);
+               tailcall_poke__destroy(test);
+               if (!ASSERT_OK(err, "tailcall_poke__load"))
+                       break;
+       }
+
+       poke_thread_exit = 1;
+       ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
+
+out:
+       bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
+       tailcall_poke__destroy(call);
+}
+
 void test_tailcalls(void)
 {
        if (test__start_subtest("tailcall_1"))
@@ -1139,4 +1221,6 @@ void test_tailcalls(void)
                test_tailcall_bpf2bpf_fentry_fexit();
        if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
                test_tailcall_bpf2bpf_fentry_entry();
+       if (test__start_subtest("tailcall_poke"))
+               test_tailcall_poke();
 }
diff --git a/tools/testing/selftests/bpf/progs/tailcall_poke.c b/tools/testing/selftests/bpf/progs/tailcall_poke.c
new file mode 100644 (file)
index 0000000..c78b94b
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+       __uint(max_entries, 1);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test, int a)
+{
+       bpf_tail_call_static(ctx, &jmp_table, 0);
+       return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call1, int a)
+{
+       return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call2, int a)
+{
+       return 0;
+}
index 0617275d93cc70d6495680e9abc15148b628feb1..0f456dbab62f37ada813badfda6c3a1a07174412 100644 (file)
@@ -46,7 +46,6 @@ CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_XXHASH=y
 CONFIG_DCB=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEFAULT_FQ_CODEL=y
index 050e9751321cff1015474c3023df28c2194d5a52..ad9202335656cc82e8475cf74aba72b6adf7e2b0 100644 (file)
@@ -293,15 +293,13 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
                                    __u64 bitmap_size, __u32 flags,
                                    struct __test_metadata *_metadata)
 {
-       unsigned long i, count, nbits = bitmap_size * BITS_PER_BYTE;
+       unsigned long i, nbits = bitmap_size * BITS_PER_BYTE;
        unsigned long nr = nbits / 2;
        __u64 out_dirty = 0;
 
        /* Mark all even bits as dirty in the mock domain */
-       for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
-               if (!(i % 2))
-                       set_bit(i, (unsigned long *)bitmap);
-       ASSERT_EQ(nr, count);
+       for (i = 0; i < nbits; i += 2)
+               set_bit(i, (unsigned long *)bitmap);
 
        test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
                                       bitmap, &out_dirty);
@@ -311,9 +309,10 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
        memset(bitmap, 0, bitmap_size);
        test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
                                  flags);
-       for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
+       /* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
+       for (i = 0; i < nbits; i++) {
                ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap));
-       ASSERT_EQ(count, out_dirty);
+       }
 
        memset(bitmap, 0, bitmap_size);
        test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
index 52c59bad721395f9ad6644dcc3b5b3d7aeb2f62c..963435959a92f054e291d82049366ac1bd346bf8 100644 (file)
@@ -224,7 +224,7 @@ else
 LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
 endif
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-       -Wno-gnu-variable-sized-type-not-at-end -MD\
+       -Wno-gnu-variable-sized-type-not-at-end -MD -MP \
        -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
        -fno-builtin-strnlen \
        -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
index 18ac5c1952a3a382590d799c8959f600cafc7cd2..83e25bccc139decff79249e99a336ef2cb8cc820 100644 (file)
@@ -259,7 +259,7 @@ int main(int argc, char **argv)
        __TEST_REQUIRE(token == MAGIC_TOKEN,
                       "This test must be run with the magic token %d.\n"
                       "This is done by nx_huge_pages_test.sh, which\n"
-                      "also handles environment setup for the test.");
+                      "also handles environment setup for the test.", MAGIC_TOKEN);
 
        run_test(reclaim_period_ms, false, reboot_permissions);
        run_test(reclaim_period_ms, true, reboot_permissions);
index 118e0964bda9468364e38c08e18a2b1f84de4b10..aa646e0661f36cac428395667f47ed11013ef083 100644 (file)
@@ -44,26 +44,10 @@ endif
 selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
 top_srcdir = $(selfdir)/../../..
 
-ifeq ("$(origin O)", "command line")
-  KBUILD_OUTPUT := $(O)
+ifeq ($(KHDR_INCLUDES),)
+KHDR_INCLUDES := -isystem $(top_srcdir)/usr/include
 endif
 
-ifneq ($(KBUILD_OUTPUT),)
-  # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
-  # expand a shell special character '~'. We use a somewhat tedious way here.
-  abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
-  $(if $(abs_objtree),, \
-    $(error failed to create output directory "$(KBUILD_OUTPUT)"))
-  # $(realpath ...) resolves symlinks
-  abs_objtree := $(realpath $(abs_objtree))
-  KHDR_DIR := ${abs_objtree}/usr/include
-else
-  abs_srctree := $(shell cd $(top_srcdir) && pwd)
-  KHDR_DIR := ${abs_srctree}/usr/include
-endif
-
-KHDR_INCLUDES := -isystem $(KHDR_DIR)
-
 # The following are built by lib.mk common compile rules.
 # TEST_CUSTOM_PROGS should be used by tests that require
 # custom build rule and prevent common build rule use.
@@ -74,25 +58,7 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
 TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
 TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
 
-all: kernel_header_files $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) \
-     $(TEST_GEN_FILES)
-
-kernel_header_files:
-       @ls $(KHDR_DIR)/linux/*.h >/dev/null 2>/dev/null;                      \
-       if [ $$? -ne 0 ]; then                                                 \
-            RED='\033[1;31m';                                                  \
-            NOCOLOR='\033[0m';                                                 \
-            echo;                                                              \
-            echo -e "$${RED}error$${NOCOLOR}: missing kernel header files.";   \
-            echo "Please run this and try again:";                             \
-            echo;                                                              \
-            echo "    cd $(top_srcdir)";                                       \
-            echo "    make headers";                                           \
-            echo;                                                              \
-           exit 1; \
-       fi
-
-.PHONY: kernel_header_files
+all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 
 define RUN_TESTS
        BASE_DIR="$(selfdir)";                  \
index 78dfec8bc676fa0b72be20f0e3c5c8739b07ea23..dede0bcf97a383f4882761427c0c2e7b7b495ca2 100644 (file)
@@ -60,7 +60,7 @@ TEST_GEN_FILES += mrelease_test
 TEST_GEN_FILES += mremap_dontunmap
 TEST_GEN_FILES += mremap_test
 TEST_GEN_FILES += on-fault-limit
-TEST_GEN_PROGS += pagemap_ioctl
+TEST_GEN_FILES += pagemap_ioctl
 TEST_GEN_FILES += thuge-gen
 TEST_GEN_FILES += transhuge-stress
 TEST_GEN_FILES += uffd-stress
@@ -72,7 +72,7 @@ TEST_GEN_FILES += mdwe_test
 TEST_GEN_FILES += hugetlb_fault_after_madv
 
 ifneq ($(ARCH),arm64)
-TEST_GEN_PROGS += soft-dirty
+TEST_GEN_FILES += soft-dirty
 endif
 
 ifeq ($(ARCH),x86_64)
index 7324ce5363c0c98103aced7d686f373089db60de..6f2f839904416c4d1dce542b2dd04250e2bddaa2 100644 (file)
@@ -1680,6 +1680,8 @@ int main(int argc, char **argv)
 {
        int err;
 
+       ksft_print_header();
+
        pagesize = getpagesize();
        thpsize = read_pmd_pagesize();
        if (thpsize)
@@ -1689,7 +1691,6 @@ int main(int argc, char **argv)
                                                    ARRAY_SIZE(hugetlbsizes));
        detect_huge_zeropage();
 
-       ksft_print_header();
        ksft_set_plan(ARRAY_SIZE(anon_test_cases) * tests_per_anon_test_case() +
                      ARRAY_SIZE(anon_thp_test_cases) * tests_per_anon_thp_test_case() +
                      ARRAY_SIZE(non_anon_test_cases) * tests_per_non_anon_test_case());
index befab43719badff842fd771eb52f54fbc98b510c..d59517ed3d48bdb25c94cecbae991fea1acb5108 100644 (file)
@@ -36,6 +36,7 @@ int pagemap_fd;
 int uffd;
 int page_size;
 int hpage_size;
+const char *progname;
 
 #define LEN(region)    ((region.end - region.start)/page_size)
 
@@ -1149,11 +1150,11 @@ int sanity_tests(void)
        munmap(mem, mem_size);
 
        /* 9. Memory mapped file */
-       fd = open(__FILE__, O_RDONLY);
+       fd = open(progname, O_RDONLY);
        if (fd < 0)
                ksft_exit_fail_msg("%s Memory mapped file\n", __func__);
 
-       ret = stat(__FILE__, &sbuf);
+       ret = stat(progname, &sbuf);
        if (ret < 0)
                ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
 
@@ -1472,12 +1473,14 @@ static void transact_test(int page_size)
                              extra_thread_faults);
 }
 
-int main(void)
+int main(int argc, char *argv[])
 {
        int mem_size, shmid, buf_size, fd, i, ret;
        char *mem, *map, *fmem;
        struct stat sbuf;
 
+       progname = argv[0];
+
        ksft_print_header();
 
        if (init_uffd())
index 486800a7024b373f167f12b48ce1f13b095e6098..3b1b9e8dd70c5d85e76364be4f39f431f4f4872f 100644 (file)
@@ -115,8 +115,6 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 
 static const struct file_operations stat_fops_per_vm;
 
-static struct file_operations kvm_chardev_ops;
-
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
 #ifdef CONFIG_KVM_COMPAT
@@ -1157,9 +1155,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
        if (!kvm)
                return ERR_PTR(-ENOMEM);
 
-       /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
-       __module_get(kvm_chardev_ops.owner);
-
        KVM_MMU_LOCK_INIT(kvm);
        mmgrab(current->mm);
        kvm->mm = current->mm;
@@ -1279,7 +1274,6 @@ out_err_no_irq_srcu:
 out_err_no_srcu:
        kvm_arch_free_vm(kvm);
        mmdrop(current->mm);
-       module_put(kvm_chardev_ops.owner);
        return ERR_PTR(r);
 }
 
@@ -1348,7 +1342,6 @@ static void kvm_destroy_vm(struct kvm *kvm)
        preempt_notifier_dec();
        hardware_disable_all();
        mmdrop(mm);
-       module_put(kvm_chardev_ops.owner);
 }
 
 void kvm_get_kvm(struct kvm *kvm)
@@ -3887,7 +3880,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static const struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
@@ -4081,6 +4074,7 @@ static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations kvm_vcpu_stats_fops = {
+       .owner = THIS_MODULE,
        .read = kvm_vcpu_stats_read,
        .release = kvm_vcpu_stats_release,
        .llseek = noop_llseek,
@@ -4431,7 +4425,7 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static const struct file_operations kvm_device_fops = {
+static struct file_operations kvm_device_fops = {
        .unlocked_ioctl = kvm_device_ioctl,
        .release = kvm_device_release,
        KVM_COMPAT(kvm_device_ioctl),
@@ -4759,6 +4753,7 @@ static int kvm_vm_stats_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations kvm_vm_stats_fops = {
+       .owner = THIS_MODULE,
        .read = kvm_vm_stats_read,
        .release = kvm_vm_stats_release,
        .llseek = noop_llseek,
@@ -5060,7 +5055,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 }
 #endif
 
-static const struct file_operations kvm_vm_fops = {
+static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .llseek         = noop_llseek,
@@ -6095,6 +6090,9 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
                goto err_async_pf;
 
        kvm_chardev_ops.owner = module;
+       kvm_vm_fops.owner = module;
+       kvm_vcpu_fops.owner = module;
+       kvm_device_fops.owner = module;
 
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;