Merge tag 'staging-5.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 16 Jul 2020 18:17:41 +0000 (11:17 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 16 Jul 2020 18:17:41 +0000 (11:17 -0700)
Pull IIO and staging driver fixes from Greg KH:
 "Here are some IIO and staging driver fixes for 5.8-rc6.

  The majority of fixes are for IIO drivers, resolving a number of small
  reported issues, and there are some counter fixes in here too that
  were tied to the IIO fixes. There's only one staging driver fix here,
  a comedi fix found by code inspection.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'staging-5.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  staging: comedi: verify array index is correct before using it
  iio: adc: ad7780: Fix a resource handling path in 'ad7780_probe()'
  iio:pressure:ms5611 Fix buffer element alignment
  iio:humidity:hts221 Fix alignment and data leak issues
  iio:humidity:hdc100x Fix alignment and data leak issues
  iio:magnetometer:ak8974: Fix alignment and data leak issues
  iio: adc: adi-axi-adc: Fix object reference counting
  iio: pressure: zpa2326: handle pm_runtime_get_sync failure
  counter: 104-quad-8: Add lock guards - filter clock prescaler
  counter: 104-quad-8: Add lock guards - differential encoder
  iio: core: add missing IIO_MOD_H2/ETHANOL string identifiers
  iio: magnetometer: ak8974: Fix runtime PM imbalance on error
  iio: mma8452: Add missed iio_device_unregister() call in mma8452_probe()
  iio:health:afe4404 Fix timestamp alignment and prevent data leak.
  iio:health:afe4403 Fix timestamp alignment and prevent data leak.

647 files changed:
.gitignore
.mailmap
Documentation/admin-guide/README.rst
Documentation/arm64/cpu-feature-registers.rst
Documentation/arm64/silicon-errata.rst
Documentation/block/bfq-iosched.rst
Documentation/core-api/dma-api.rst
Documentation/dev-tools/kunit/faq.rst
Documentation/devicetree/bindings/Makefile
Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml
Documentation/devicetree/bindings/clock/imx27-clock.yaml
Documentation/devicetree/bindings/clock/imx31-clock.yaml
Documentation/devicetree/bindings/clock/imx5-clock.yaml
Documentation/devicetree/bindings/display/bridge/sii902x.txt
Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
Documentation/devicetree/bindings/display/imx/ldb.txt
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt
Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.txt
Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
Documentation/devicetree/bindings/sound/audio-graph-card.txt
Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
Documentation/devicetree/bindings/thermal/thermal-sensor.yaml
Documentation/devicetree/bindings/thermal/thermal-zones.yaml
Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml
Documentation/devicetree/bindings/timer/csky,mptimer.txt
Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
Documentation/devicetree/writing-schema.rst
Documentation/i2c/slave-eeprom-backend.rst
Documentation/kbuild/modules.rst
Documentation/kbuild/reproducible-builds.rst
Documentation/mips/ingenic-tcu.rst
Documentation/networking/arcnet.rst
Documentation/networking/ax25.rst
Documentation/networking/can_ucan_protocol.rst
Documentation/networking/dsa/dsa.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/ipvs-sysctl.rst
Documentation/networking/rxrpc.rst
Documentation/process/changes.rst
Documentation/process/coding-style.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/include/asm/elf.h
arch/arc/include/asm/irqflags-compact.h
arch/arc/kernel/entry.S
arch/arc/kernel/head.S
arch/arc/kernel/setup.c
arch/arm/boot/dts/omap3-n900.dts
arch/arm/kernel/asm-offsets.c
arch/arm/xen/enlighten.c
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/vdso/clocksource.h
arch/arm64/include/asm/vdso/compat_gettimeofday.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp-init.S
arch/arm64/kvm/pmu.c
arch/arm64/kvm/pvtime.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/vgic/vgic-v4.c
arch/m68k/kernel/setup_no.c
arch/m68k/mm/mcfmmu.c
arch/mips/boot/dts/ingenic/gcw0.dts
arch/mips/include/asm/unroll.h
arch/mips/kernel/traps.c
arch/mips/kvm/emulate.c
arch/mips/lantiq/xway/sysctrl.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/mm/book3s64/pkeys.c
arch/riscv/Kconfig
arch/riscv/include/asm/gdb_xml.h
arch/riscv/include/asm/kgdb.h
arch/riscv/kernel/kgdb.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/setup.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/maccess.c
arch/s390/pci/pci_event.c
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64_compat.S
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/idtentry.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/ldt.c
arch/x86/kernel/traps.c
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/xen-asm_64.S
arch/xtensa/kernel/perf_event.c
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/xtensa_ksyms.c
block/bio-integrity.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/keyslot-manager.c
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_hash.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/public_key.c
drivers/acpi/dptf/dptf_power.c
drivers/acpi/fan.c
drivers/block/nbd.c
drivers/block/virtio_blk.c
drivers/char/tpm/st33zp24/i2c.c
drivers/char/tpm/st33zp24/spi.c
drivers/char/tpm/st33zp24/st33zp24.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_tis.c
drivers/char/tpm/tpm_tis_core.c
drivers/char/tpm/tpm_tis_spi_main.c
drivers/clk/Kconfig
drivers/clk/clk-ast2600.c
drivers/clk/mvebu/Kconfig
drivers/clocksource/arm_arch_timer.c
drivers/dma-buf/dma-buf.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/fsl-edma-common.c
drivers/dma/fsl-edma-common.h
drivers/dma/fsl-edma.c
drivers/dma/idxd/cdev.c
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/irq.c
drivers/dma/idxd/sysfs.c
drivers/dma/imx-sdma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma.h
drivers/dma/mcf-edma.c
drivers/dma/sh/usb-dmac.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/k3-udma-private.c
drivers/dma/ti/k3-udma.c
drivers/firmware/psci/psci_checker.c
drivers/gpio/gpio-arizona.c
drivers/gpio/gpio-pca953x.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/shaders/README [new file with mode: 0644]
drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm [new file with mode: 0644]
drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/debugfs.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.h
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/mediatek/Kconfig
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
drivers/gpu/drm/meson/meson_registers.h
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/msm_submitqueue.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/hv/vmbus_drv.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/bt1-pvt.c
drivers/hwmon/max6697.c
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/pmbus_core.c
drivers/i2c/Kconfig
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/siw/siw_main.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/elants_i2c.c
drivers/iommu/Kconfig
drivers/iommu/amd/amd_iommu.h
drivers/iommu/arm-smmu-qcom.c
drivers/iommu/iommu.c
drivers/iommu/sun50i-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-riscv-intc.c
drivers/md/dm-rq.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-metadata.c
drivers/md/dm-zoned-reclaim.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/message/fusion/mptbase.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/owl-mmc.c
drivers/mmc/host/sdhci-msm.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/xway_nand.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port.h
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ipa/gsi.c
drivers/net/ipa/ipa_cmd.c
drivers/net/ipa/ipa_cmd.h
drivers/net/ipa/ipa_data-sdm845.c
drivers/net/ipa/ipa_endpoint.c
drivers/net/ipa/ipa_gsi.c
drivers/net/ipa/ipa_gsi.h
drivers/net/ipa/ipa_qmi_msg.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/tun.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/smsc95xx.c
drivers/net/wan/lapbether.c
drivers/net/wireguard/device.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/receive.c
drivers/nvdimm/security.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/pinctrl-amd.h
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/intel_speed_select_if/isst_if_common.h
drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
drivers/platform/x86/thinkpad_acpi.c
drivers/s390/cio/vfio_ccw_chp.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_transport_spi.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-pxa2xx.c
drivers/thermal/cpufreq_cooling.c
drivers/thermal/imx_thermal.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/intel/int340x_thermal/int3403_thermal.c
drivers/thermal/mtk_thermal.c
drivers/thermal/qcom/tsens.c
drivers/thermal/rcar_gen3_thermal.c
drivers/thermal/sprd_thermal.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/xilinx_uartps.c
drivers/xen/xenbus/xenbus_client.c
fs/afs/fs_operation.c
fs/afs/write.c
fs/autofs/waitq.c
fs/btrfs/ctree.c
fs/btrfs/discard.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ref-verify.c
fs/btrfs/space-info.c
fs/btrfs/super.c
fs/btrfs/volumes.h
fs/cachefiles/rdwr.c
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/transport.c
fs/exfat/dir.c
fs/exfat/exfat_fs.h
fs/exfat/file.c
fs/exfat/namei.c
fs/exfat/super.c
fs/file_table.c
fs/gfs2/aops.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/main.c
fs/gfs2/ops_fstype.c
fs/gfs2/recovery.c
fs/gfs2/super.c
fs/io_uring.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsd.h
fs/nfsd/vfs.c
fs/proc/proc_sysctl.c
fs/read_write.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
include/crypto/if_alg.h
include/linux/bits.h
include/linux/blkdev.h
include/linux/bpf-netns.h
include/linux/bpf.h
include/linux/btf.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/dma-direct.h
include/linux/dma-mapping.h
include/linux/filter.h
include/linux/fs.h
include/linux/ieee80211.h
include/linux/if_vlan.h
include/linux/input/elan-i2c-ids.h
include/linux/kallsyms.h
include/linux/kgdb.h
include/linux/lsm_hook_defs.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mod_devicetable.h
include/linux/pci.h
include/linux/scatterlist.h
include/linux/sched/jobctl.h
include/linux/serial_core.h
include/linux/skmsg.h
include/linux/task_work.h
include/net/dst.h
include/net/flow_dissector.h
include/net/genetlink.h
include/net/inet_ecn.h
include/net/ip_tunnels.h
include/net/netns/bpf.h
include/net/pkt_sched.h
include/net/sock.h
include/net/xsk_buff_pool.h
include/sound/compress_driver.h
include/uapi/linux/bpf.h
include/uapi/linux/idxd.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/io_uring.h
init/Kconfig
kernel/bpf/btf.c
kernel/bpf/net_namespace.c
kernel/bpf/reuseport_array.c
kernel/bpf/ringbuf.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/debug/gdbstub.c
kernel/dma/direct.c
kernel/dma/mapping.c
kernel/dma/pool.c
kernel/fork.c
kernel/kallsyms.c
kernel/kprobes.c
kernel/module.c
kernel/padata.c
kernel/rcu/rcuperf.c
kernel/signal.c
kernel/task_work.c
lib/Kconfig.kgdb
lib/packing.c
mm/cma.c
mm/filemap.c
mm/hugetlb.c
mm/migrate.c
mm/mremap.c
mm/page_alloc.c
net/8021q/vlan_dev.c
net/bpf/test_run.c
net/bpfilter/bpfilter_kern.c
net/bridge/br_mrp.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/bridge/br_private_mrp.h
net/core/dev_addr_lists.c
net/core/filter.c
net/core/flow_dissector.c
net/core/skmsg.c
net/core/sock.c
net/core/sock_map.c
net/core/sysctl_net_core.c
net/ethtool/netlink.c
net/hsr/hsr_device.c
net/ipv4/icmp.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/icmp.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/route.c
net/ipv6/sit.c
net/l2tp/l2tp_core.c
net/llc/af_llc.c
net/mac80211/mesh_hwmp.c
net/mac80211/rx.c
net/mac80211/status.c
net/mac80211/tx.c
net/mptcp/options.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/nf_conntrack_core.c
net/netlink/genetlink.c
net/qrtr/qrtr.c
net/rds/connection.c
net/rds/rds.h
net/rds/send.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_ct.c
net/sched/act_ctinfo.c
net/sched/act_mpls.c
net/sched/act_skbedit.c
net/sched/cls_api.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/em_ipset.c
net/sched/em_ipt.c
net/sched/em_meta.c
net/sched/sch_atm.c
net/sched/sch_cake.c
net/sched/sch_dsmark.c
net/sched/sch_teql.c
net/smc/smc_clc.c
net/smc/smc_clc.h
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_ib.c
net/smc/smc_ib.h
net/smc/smc_ism.c
net/smc/smc_ism.h
net/smc/smc_llc.c
net/smc/smc_pnet.c
net/smc/smc_wr.c
net/sunrpc/svcsock.c
net/tipc/link.c
net/wireless/nl80211.c
net/xdp/xsk_buff_pool.c
net/xfrm/xfrm_interface.c
samples/vfs/test-statx.c
scripts/Makefile.extrawarn
scripts/Makefile.lib
scripts/dtc/checks.c
scripts/dtc/dtc.h
scripts/dtc/flattree.c
scripts/dtc/libfdt/fdt_rw.c
scripts/dtc/libfdt/fdt_sw.c
scripts/dtc/libfdt/libfdt.h
scripts/dtc/treesource.c
scripts/dtc/version_gen.h
scripts/dtc/yamltree.c
scripts/gcc-plugins/Kconfig
scripts/kconfig/qconf.cc
scripts/kconfig/qconf.h
security/integrity/iint.c
security/integrity/ima/ima.h
security/integrity/ima/ima_crypto.c
security/security.c
sound/core/compress_offload.c
sound/drivers/opl3/opl3_synth.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/renoir/Makefile
sound/soc/codecs/rt5682.c
sound/soc/fsl/fsl_mqs.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/pcm.c
sound/usb/quirks-table.h
tools/arch/x86/lib/memcpy_64.S
tools/include/linux/bits.h
tools/include/uapi/linux/bpf.h
tools/lib/bpf/bpf.h
tools/lib/bpf/hashmap.h
tools/lib/bpf/libbpf.c
tools/lib/traceevent/kbuffer-parse.c
tools/lib/traceevent/kbuffer.h
tools/perf/arch/x86/util/intel-pt.c
tools/perf/builtin-record.c
tools/perf/builtin-script.c
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/scripts/python/flamegraph.py
tools/perf/ui/browsers/hists.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/intel-pt.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_config.py
tools/testing/kunit/kunit_parser.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/kunit/test_data/test_insufficient_memory.log [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
tools/testing/selftests/bpf/progs/fentry_test.c
tools/testing/selftests/bpf/progs/fexit_test.c
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/kmod/kmod.sh
tools/testing/selftests/kselftest.h
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/tpm2/test_smoke.sh
tools/testing/selftests/tpm2/test_space.sh
tools/testing/selftests/tpm2/tpm2.py
tools/testing/selftests/tpm2/tpm2_tests.py
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/helpers.h [new file with mode: 0644]
tools/testing/selftests/x86/single_step_syscall.c
tools/testing/selftests/x86/syscall_arg_fault.c
tools/testing/selftests/x86/syscall_nt.c
tools/testing/selftests/x86/test_vsyscall.c
tools/testing/selftests/x86/unwind_vdso.c
virt/kvm/kvm_main.c

index 87b9dd8a163b818bba39afd79b818da7e1a46339..d5f4804ed07cd36336a5e80f2a24e45104f902cf 100644 (file)
@@ -143,6 +143,9 @@ x509.genkey
 /allrandom.config
 /allyes.config
 
+# Kconfig savedefconfig output
+/defconfig
+
 # Kdevelop4
 *.kdev4
 
index c69d9c734fb5e7e2268870ed680e34a0b29b9bf3..6da12dfd10dc942c6dac2d7df8aabcd6f87d1802 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -90,11 +90,16 @@ Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
+Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
+Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
 Henrik Rydberg <rydberg@bitmath.org>
index 5fb52690002386bf9a2a570a3a85f714b6a72672..5aad534233cd807af4b3a2be583a0a28389a3ce5 100644 (file)
@@ -258,7 +258,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
 
- - Make sure you have at least gcc 4.6 available.
+ - Make sure you have at least gcc 4.9 available.
    For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
 
    Please note that you can still run a.out user programs with this kernel.
index 314fa5bc2655da3b30fdde5c12e602ae2633ccc5..f28853f80089bffd0882dc6be9e1b18898f8ef2f 100644 (file)
@@ -171,6 +171,7 @@ infrastructure:
 
 
   3) ID_AA64PFR1_EL1 - Processor Feature Register 1
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
@@ -181,6 +182,7 @@ infrastructure:
 
 
   4) MIDR_EL1 - Main ID Register
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
index 936cf2a59ca4b3fdc361e845cc61042fafcb8c5b..3f7c3a7e8a2b09a68dc70d6821b23f3b976d719e 100644 (file)
@@ -147,6 +147,14 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
 +----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1463225       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1418040       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1530923       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+
index 0d237d4028600b387f741efb79f46a83437a39d5..19d4d1570cee75ce477e3dc81f8fd23e4ac47635 100644 (file)
@@ -492,13 +492,6 @@ set max_budget to higher values than those to which BFQ would have set
 it with auto-tuning. An alternative way to achieve this goal is to
 just increase the value of timeout_sync, leaving max_budget equal to 0.
 
-weights
--------
-
-Read-only parameter, used to show the weights of the currently active
-BFQ queues.
-
-
 4. Group scheduling with BFQ
 ============================
 
@@ -566,7 +559,7 @@ Parameters to set
 For each group, there is only the following parameter to set.
 
 weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the
-group inside its parent. Available values: 1..10000 (default 100). The
+group inside its parent. Available values: 1..1000 (default 100). The
 linear mapping between ioprio and weights, described at the beginning
 of the tunable section, is still valid, but all weights higher than
 IOPRIO_BE_NR*10 are mapped to ioprio 0.
index 2d8d2fed731720b130248caccf8badc22b5ffec2..f41620439ef349b0592d177052b7d6fa85fcf152 100644 (file)
@@ -204,6 +204,14 @@ Returns the maximum size of a mapping for the device. The size parameter
 of the mapping functions like dma_map_single(), dma_map_page() and
 others should not be larger than the returned value.
 
+::
+
+       bool
+       dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+Returns %true if dma_sync_single_for_{device,cpu} calls are required to
+transfer memory ownership.  Returns %false if those calls can be skipped.
+
 ::
 
        unsigned long
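
The dma-api.rst hunk above documents the new dma_need_sync() helper. As a minimal usage sketch only (not taken from this merge; the rx_ring structure and function names are invented for illustration), a driver could cache the helper's result at mapping time so the per-buffer dma_sync_single_for_cpu() call is skipped on cache-coherent platforms::

       /* Illustrative only: cache dma_need_sync() once per mapping. */
       #include <linux/dma-mapping.h>

       struct rx_ring {
               struct device *dev;
               void *buf;
               dma_addr_t dma;
               size_t len;
               bool need_sync;         /* cached result of dma_need_sync() */
       };

       static int rx_ring_map(struct rx_ring *ring)
       {
               ring->dma = dma_map_single(ring->dev, ring->buf, ring->len,
                                          DMA_FROM_DEVICE);
               if (dma_mapping_error(ring->dev, ring->dma))
                       return -ENOMEM;

               /* Decide once whether ownership transfers need explicit syncs. */
               ring->need_sync = dma_need_sync(ring->dev, ring->dma);
               return 0;
       }

       static void rx_ring_buffer_done(struct rx_ring *ring)
       {
               /* Hand the buffer back to the CPU only when the platform requires it. */
               if (ring->need_sync)
                       dma_sync_single_for_cpu(ring->dev, ring->dma, ring->len,
                                               DMA_FROM_DEVICE);
       }
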
index ea55b2467653f008d0ea78adbf8c1d82d2447dd4..1628862e70245186509754e5f2b1e3b87ec7ac56 100644 (file)
@@ -61,3 +61,43 @@ test, or an end-to-end test.
   kernel by installing a production configuration of the kernel on production
   hardware with a production userspace and then trying to exercise some behavior
   that depends on interactions between the hardware, the kernel, and userspace.
+
+KUnit isn't working, what should I do?
+======================================
+
+Unfortunately, there are a number of things which can break, but here are some
+things to try.
+
+1. Try running ``./tools/testing/kunit/kunit.py run`` with the ``--raw_output``
+   parameter. This might show details or error messages hidden by the kunit_tool
+   parser.
+2. Instead of running ``kunit.py run``, try running ``kunit.py config``,
+   ``kunit.py build``, and ``kunit.py exec`` independently. This can help track
+   down where an issue is occurring. (If you think the parser is at fault, you
+   can run it manually against stdin or a file with ``kunit.py parse``.)
+3. Running the UML kernel directly can often reveal issues or error messages
+   kunit_tool ignores. This should be as simple as running ``./vmlinux`` after
+   building the UML kernel (e.g., by using ``kunit.py build``). Note that UML
+   has some unusual requirements (such as the host having a tmpfs filesystem
+   mounted), and has had issues in the past when built statically and the host
+   has KASLR enabled. (On older host kernels, you may need to run ``setarch
+   `uname -m` -R ./vmlinux`` to disable KASLR.)
+4. Make sure the kernel .config has ``CONFIG_KUNIT=y`` and at least one test
+   (e.g. ``CONFIG_KUNIT_EXAMPLE_TEST=y``). kunit_tool will keep its .config
+   around, so you can see what config was used after running ``kunit.py run``.
+   It also preserves any config changes you might make, so you can
+   enable/disable things with ``make ARCH=um menuconfig`` or similar, and then
+   re-run kunit_tool.
+5. Try to run ``make ARCH=um defconfig`` before running ``kunit.py run``. This
+   may help clean up any residual config items which could be causing problems.
+6. Finally, try running KUnit outside UML. KUnit and KUnit tests can run be
+   built into any kernel, or can be built as a module and loaded at runtime.
+   Doing so should allow you to determine if UML is causing the issue you're
+   seeing. When tests are built-in, they will execute when the kernel boots, and
+   modules will automatically execute associated tests when loaded. Test results
+   can be collected from ``/sys/kernel/debug/kunit/<test suite>/results``, and
+   can be parsed with ``kunit.py parse``. For more details, see "KUnit on
+   non-UML architectures" in :doc:`usage`.
+
+If none of the above tricks help, you are always welcome to email any issues to
+kunit-dev@googlegroups.com.
index a63898954068d4a62007c240cbf9fa4861f8baa1..91c4d00e96d3c9b4fa73692c35cc809f1fffac4f 100644 (file)
@@ -2,7 +2,6 @@
 DT_DOC_CHECKER ?= dt-doc-validate
 DT_EXTRACT_EX ?= dt-extract-example
 DT_MK_SCHEMA ?= dt-mk-schema
-DT_MK_SCHEMA_USERONLY_FLAG := $(if $(DT_SCHEMA_FILES), -u)
 
 DT_SCHEMA_MIN_VERSION = 2020.5
 
@@ -35,21 +34,40 @@ quiet_cmd_mk_schema = SCHEMA  $@
 
 DT_DOCS = $(shell $(find_cmd) | sed -e 's|^$(srctree)/||')
 
-DT_SCHEMA_FILES ?= $(DT_DOCS)
-
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
-
 override DTC_FLAGS := \
        -Wno-avoid_unnecessary_addr_size \
-       -Wno-graph_child_address
+       -Wno-graph_child_address \
+       -Wno-interrupt_provider
 
 $(obj)/processed-schema-examples.yaml: $(DT_DOCS) check_dtschema_version FORCE
        $(call if_changed,mk_schema)
 
-$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := $(DT_MK_SCHEMA_USERONLY_FLAG)
+ifeq ($(DT_SCHEMA_FILES),)
+
+# Unless DT_SCHEMA_FILES is specified, use the full schema for dtbs_check too.
+# Just copy processed-schema-examples.yaml
+
+$(obj)/processed-schema.yaml: $(obj)/processed-schema-examples.yaml FORCE
+       $(call if_changed,copy)
+
+DT_SCHEMA_FILES = $(DT_DOCS)
+
+else
+
+# If DT_SCHEMA_FILES is specified, use it for processed-schema.yaml
+
+$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := -u
 $(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) check_dtschema_version FORCE
        $(call if_changed,mk_schema)
 
-extra-y += processed-schema.yaml
+endif
+
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
+extra-$(CHECK_DTBS) += processed-schema.yaml
+
+# Hack: avoid 'Argument list too long' error for 'make clean'. Remove most of
+# build artifacts here before they are processed by scripts/Makefile.clean
+clean-files = $(shell find $(obj) \( -name '*.example.dts' -o \
+                       -name '*.example.dt.yaml' \) -delete 2>/dev/null)
index 715047444391a837f357b86db7b8242f3e8e00f9..10b8459e49f8c22a4ff12c2485b00030dc72df64 100644 (file)
@@ -47,7 +47,7 @@ Required properties:
                          &lsio_mu1 1 2
                          &lsio_mu1 1 3
                          &lsio_mu1 3 3>;
-               See Documentation/devicetree/bindings/mailbox/fsl,mu.txt
+               See Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
                for detailed mailbox binding.
 
 Note: Each mu which supports general interrupt should have an alias correctly
index c4c9119e4a206551208cf04b31fa0cfafe6a5051..a0c6c5d2b70fbaa7446a8252707ad521e84e9271 100644 (file)
@@ -80,14 +80,14 @@ examples:
         ranges = <1 0x00000000 0x42000000 0x02000000>,
                  <5 0x00000000 0x46000000 0x01000000>;
 
-        ethernet@1,01f00000 {
+        ethernet@1,1f00000 {
             compatible = "smsc,lan9115";
             reg = <1 0x01f00000 0x1000>;
             interrupts = <0 48 4>;
             phy-mode = "mii";
         };
 
-        uart@5,00200000 {
+        serial@5,200000 {
             compatible = "ns16550a";
             reg = <5 0x00200000 0x20>;
             interrupts = <0 49 4>;
index b5f3ed084ea0ba4f456ea5c7fff1233f16d38b56..a75365453dbce252d72e191b920b9daf98d2513b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX27
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index 1b6f75d3928a8881ad73e87daa6f0831d2d12120..a25a374b3b2aab1767e6b649a5af280d0697738a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX31
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index f5c2b3d7a910bc5f866b776aa5da0f798e5123c8..4d9e7c73dce919558c0a0166769a040cc8741425 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX5
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index 6e14e087c0d0ad9d00bbd90d002b1aaf14e8b2e7..0d1db3f9da84f19eb9400ab4d02da2856e4d2687 100644 (file)
@@ -37,7 +37,7 @@ Optional properties:
        simple-card or audio-graph-card binding. See their binding
        documents on how to describe the way the sii902x device is
        connected to the rest of the audio system:
-       Documentation/devicetree/bindings/sound/simple-card.txt
+       Documentation/devicetree/bindings/sound/simple-card.yaml
        Documentation/devicetree/bindings/sound/audio-graph-card.txt
        Note: In case of the audio-graph-card binding the used port
        index should be 3.
index 5bf77f6dd19db0ea3eab3cb3f837f54089c2a547..5a99490c17b9b5cd85a08fbdf33a76bf7cdbfbe4 100644 (file)
@@ -68,7 +68,7 @@ Required properties:
   datasheet
 - clocks : phandle to the PRE axi clock input, as described
   in Documentation/devicetree/bindings/clock/clock-bindings.txt and
-  Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+  Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 - clock-names: should be "axi"
 - interrupts: should contain the PRE interrupt
 - fsl,iram: phandle pointing to the mmio-sram device node, that should be
@@ -94,7 +94,7 @@ Required properties:
   datasheet
 - clocks : phandles to the PRG ipg and axi clock inputs, as described
   in Documentation/devicetree/bindings/clock/clock-bindings.txt and
-  Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+  Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 - clock-names: should be "ipg" and "axi"
 - fsl,pres: phandles to the PRE units attached to this PRG, with the fixed
   PRE as the first entry and the muxable PREs following.
index 38c637fa39ddf4efaef296a3878859dfb598e82c..8e6e7d797943ff4635dd8260e346a8bc109819b7 100644 (file)
@@ -30,8 +30,8 @@ Required properties:
                 "di2_sel" - IPU2 DI0 mux
                 "di3_sel" - IPU2 DI1 mux
         The needed clock numbers for each are documented in
-        Documentation/devicetree/bindings/clock/imx5-clock.txt, and in
-        Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+        Documentation/devicetree/bindings/clock/imx5-clock.yaml, and in
+        Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 
 Optional properties:
  - pinctrl-names : should be "default" on i.MX53, not used on i.MX6q
index 41fd5713c156445a19937cd6f4f50e7ee59b484e..be69e0cc50fcd08235666ce045889aca05fc7c24 100644 (file)
@@ -33,7 +33,7 @@ additionalProperties: false
 
 examples:
   - |
-    sysreg {
+    sysreg@0 {
         compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
         reg = <0x00000 0x1000>;
 
index ec8ae742d4da2a443a74f9e93801331953f8b9a3..7204da5eb4c5934cbc3d7ebbf9a2e11f5cd72bc3 100644 (file)
@@ -24,7 +24,7 @@ properties:
     description: |
       Should contain a list of phandles pointing to display interface port
       of vop devices. vop definitions as defined in
-      Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+      Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
 
 required:
   - compatible
index ba455589f869355dc534c5421ff25a7aa96bf4a4..e1c49b660d3a3dc4cdcaaf298b872f64ced242ec 100644 (file)
@@ -12,7 +12,7 @@ Required properties for the top level node:
    Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
 - #interrupt-cells : Specifies the number of cells needed to encode an
    interrupt. Should be 2. The first cell defines the interrupt number,
-   the second encodes the triger flags encoded as described in
+   the second encodes the trigger flags encoded as described in
    Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
 - compatible:
   - "mediatek,mt7621-gpio" for Mediatek controllers
index e13405355166ddbcc7c2612883134101a254c70f..e6bbcae4d07fb21b595259e4289f154d45b7913f 100644 (file)
@@ -10,7 +10,7 @@ Interrupt number definition:
  16-31  : private  irq, and we use 16 as the co-processor timer.
  31-1024: common irq for soc ip.
 
-Interrupt triger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
+Interrupt trigger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
  IRQ_TYPE_LEVEL_HIGH (default)
  IRQ_TYPE_LEVEL_LOW
  IRQ_TYPE_EDGE_RISING
index 4438432bfe9b3d396f5371bb6ba026c558a6fa64..ad76edccf881661b627afdb1a95c2791d2896ce8 100644 (file)
@@ -87,7 +87,7 @@ Example:
                ranges;
 
                /* APU<->RPU0 IPI mailbox controller */
-               ipi_mailbox_rpu0: mailbox@ff90400 {
+               ipi_mailbox_rpu0: mailbox@ff990400 {
                        reg = <0xff990400 0x20>,
                              <0xff990420 0x20>,
                              <0xff990080 0x20>,
index 8c4d649cdd8ff474466770bacb660bba5d87dea7..2d7cdf19a0d0fc625463a0882d3ed60aa03677bd 100644 (file)
@@ -8,7 +8,7 @@ The embedded controller requires the SPI controller driver to signal readiness
 to receive a transfer (that is, when TX FIFO contains the response data) by
 strobing the ACK pin with the ready signal. See the "ready-gpios" property of the
 SSP binding as documented in:
-<Documentation/devicetree/bindings/spi/spi-pxa2xx.txt>.
+<Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml>.
 
 Example:
        &ssp3 {
index 219bcbd0d34478ba5ceb6ffe16aa7f52e7ef1b36..9ef5bacda8c18ce4dca582547fada019f00fd794 100644 (file)
@@ -3,7 +3,7 @@ MediaTek SoC built-in Bluetooth Devices
 
 This device is a serial attached device to BTIF device and thus it must be a
 child node of the serial node with BTIF. The dt-bindings details for BTIF
-device can be known via Documentation/devicetree/bindings/serial/8250.txt.
+device can be known via Documentation/devicetree/bindings/serial/8250.yaml.
 
 Required properties:
 
index b68613188c1916f475231c405d96eca48f56de85..1b8e8b4a63797dfe8b402090f7352c8bfaeefa54 100644 (file)
@@ -114,7 +114,7 @@ with values derived from the SoC user manual.
    [flags]>
 
 On other mach-shmobile platforms GPIO is handled by the gpio-rcar driver.
-Please refer to Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+Please refer to Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
 for documentation of the GPIO device tree bindings on those platforms.
 
 
index 269682619a702dc38d864d1ca78b4775465c8a2e..d5f6919a2d69eca67b86468a339c75b7ca3e4407 100644 (file)
@@ -5,7 +5,7 @@ It is based on common bindings for device graphs.
 see ${LINUX}/Documentation/devicetree/bindings/graph.txt
 
 Basically, Audio Graph Card property is same as Simple Card.
-see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt
+see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.yaml
 
 Below are same as Simple-Card.
 
index 4d51f3f5ea98c3188da684d3584ea6af79cca814..a6ffcdec6f6aeebe7424c398e595be53a1eb3ac1 100644 (file)
@@ -5,7 +5,7 @@ codec or external codecs.
 
 sti sound drivers allows to expose sti SoC audio interface through the
 generic ASoC simple card. For details about sound card declaration please refer to
-Documentation/devicetree/bindings/sound/simple-card.txt.
+Documentation/devicetree/bindings/sound/simple-card.yaml.
 
 1) sti-uniperiph-dai: audio dai device.
 ---------------------------------------
index 790311a42bf1ae5f1cd03455695d950b03a4a523..c8c1e913f4e787396b5d9c7fee6a9c587492dec4 100644 (file)
@@ -19,7 +19,7 @@ Required properties:
 
 SPI Controller nodes must be child of GENI based Qualcomm Universal
 Peripharal. Please refer GENI based QUP wrapper controller node bindings
-described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt.
+described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml.
 
 SPI slave nodes must be children of the SPI master node and conform to SPI bus
 binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
index fcd25a0af38c93fed51926389cf419b56ab9eb93..727d04550324c137d816c2070db01972bb05b335 100644 (file)
@@ -41,7 +41,7 @@ examples:
     #include <dt-bindings/interrupt-controller/arm-gic.h>
 
     // Example 1: SDM845 TSENS
-    soc: soc@0 {
+    soc: soc {
             #address-cells = <2>;
             #size-cells = <2>;
 
index b8515d3eeaa2b4231328fe105a598a1e7edbc0cf..3ec9cc87ec502eaae50dd537d4adb2e74c408517 100644 (file)
@@ -224,7 +224,7 @@ examples:
     #include <dt-bindings/thermal/thermal.h>
 
     // Example 1: SDM845 TSENS
-    soc: soc@0 {
+    soc {
             #address-cells = <2>;
             #size-cells = <2>;
 
index 25b9209c2e5d9f3e54beb0590b2be7bd3532a352..ea14de80ec759d95666437c650eeddcb5d338e15 100644 (file)
@@ -35,7 +35,7 @@ examples:
     #include <dt-bindings/soc/ti,sci_pm_domain.h>
     vtm: thermal@42050000 {
         compatible = "ti,am654-vtm";
-        reg = <0x0 0x42050000 0x0 0x25c>;
+        reg = <0x42050000 0x25c>;
         power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>;
         #thermal-sensor-cells = <1>;
     };
index 15cfec08fbb8e901a4392098057c24f788587bc8..f5c7e99cf52bf47b07564f0f9f32d05b5075be01 100644 (file)
@@ -8,7 +8,7 @@ regs is accessed by cpu co-processor 4 registers with mtcr/mfcr.
  - PTIM_CTLR "cr<0, 14>" Control reg to start reset timer.
  - PTIM_TSR  "cr<1, 14>" Interrupt cleanup status reg.
  - PTIM_CCVR "cr<3, 14>" Current counter value reg.
- - PTIM_LVR  "cr<6, 14>" Window value reg to triger next event.
+ - PTIM_LVR  "cr<6, 14>" Window value reg to trigger next event.
 
 ==============================
 timer node bindings definition
index e4e83d3971ac0071ffab2ea84aff12d1fad0676d..8b019ac05bbe0dfb1096fa60b857ed848682d14f 100644 (file)
@@ -127,8 +127,8 @@ examples:
                 #address-cells = <1>;
                 #size-cells = <0>;
 
-                string@0409 {
-                        reg = <0x0409>;
+                string@409 {
+                        reg = <0x409>;
                         manufacturer = "ASPEED";
                         product = "USB Virtual Hub";
                         serial-number = "0000";
index 220cf464ed778b5289d6589c867788ad62287b64..8c74a99f95e23f4bdd304284b4179100a6686c48 100644 (file)
@@ -1,4 +1,4 @@
-:orphan:
+.. SPDX-License-Identifier: GPL-2.0
 
 Writing DeviceTree Bindings in json-schema
 ==========================================
@@ -124,9 +124,12 @@ dtc must also be built with YAML output support enabled. This requires that
 libyaml and its headers be installed on the host system. For some distributions
 that involves installing the development package, such as:
 
-Debian:
+Debian::
+
   apt-get install libyaml-dev
-Fedora:
+
+Fedora::
+
   dnf -y install libyaml-devel
 
 Running checks
index 0b8cd83698e0f00d944c08f174a266ff93720b4f..38d951f103023a4637b2d7110886148f2af00b8a 100644 (file)
@@ -1,14 +1,26 @@
 ==============================
-Linux I2C slave eeprom backend
+Linux I2C slave EEPROM backend
 ==============================
 
-by Wolfram Sang <wsa@sang-engineering.com> in 2014-15
+by Wolfram Sang <wsa@sang-engineering.com> in 2014-20
 
-This is a proof-of-concept backend which acts like an EEPROM on the connected
-I2C bus. The memory contents can be modified from userspace via this file
-located in sysfs::
+This backend simulates an EEPROM on the connected I2C bus. Its memory contents
+can be accessed from userspace via this file located in sysfs::
 
        /sys/bus/i2c/devices/<device-directory>/slave-eeprom
 
+The following types are available: 24c02, 24c32, 24c64, and 24c512. Read-only
+variants are also supported. The name needed for instantiating has the form
+'slave-<type>[ro]'. Examples follow:
+
+24c02, read/write, address 0x64:
+  # echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-1/new_device
+
+24c512, read-only, address 0x42:
+  # echo slave-24c512ro 0x1042 > /sys/bus/i2c/devices/i2c-1/new_device
+
+You can also preload data during boot if a device-property named
+'firmware-name' contains a valid filename (DT or ACPI only).
+
 As of 2015, Linux doesn't support poll on binary sysfs files, so there is no
 notification when another master changed the content.
index a45cccff467d8e931bb2e209893d072cbd2266ea..85ccc878895e75deecb87215515e44602c3d12c9 100644 (file)
@@ -182,7 +182,8 @@ module 8123.ko, which is built from the following files::
        8123_pci.c
        8123_bin.o_shipped      <= Binary blob
 
---- 3.1 Shared Makefile
+3.1 Shared Makefile
+-------------------
 
        An external module always includes a wrapper makefile that
        supports building the module using "make" with no arguments.
@@ -470,9 +471,9 @@ build.
 
        The syntax of the Module.symvers file is::
 
-       <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
+               <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
 
-       0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
+               0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
 
        The fields are separated by tabs and values may be empty (e.g.
        if no namespace is defined for an exported symbol).
index 503393854e2e2a7bbe12d51de0c848f293ea4fe4..3b25655e441bc77cfcb825df10f6d64e2de1e6a8 100644 (file)
@@ -101,7 +101,7 @@ Structure randomisation
 
 If you enable ``CONFIG_GCC_PLUGIN_RANDSTRUCT``, you will need to
 pre-generate the random seed in
-``scripts/gcc-plgins/randomize_layout_seed.h`` so the same value
+``scripts/gcc-plugins/randomize_layout_seed.h`` so the same value
 is used in rebuilds.
 
 Debug info conflicts
index c5a646b14450c68012eb10dceab933ce04e4ee74..2b75760619b43f14bca318596cca5acdbfcb9195 100644 (file)
@@ -68,4 +68,4 @@ and frameworks can be controlled from the same registers, all of these
 drivers access their registers through the same regmap.
 
 For more information regarding the devicetree bindings of the TCU drivers,
-have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.txt.
+have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.yaml.
index e93d9820f0f1715c60fc47b9796c90f813d14f97..82fce606c0f0bc88edd87b82da972e682983c6ca 100644 (file)
@@ -434,7 +434,7 @@ can set up your network then:
        ifconfig arc0 insight
        route add insight arc0
        route add freedom arc0  /* I would use the subnet here (like I said
-                                       to to in "single protocol" above),
+                                       to in "single protocol" above),
                                        but the rest of the subnet
                                        unfortunately lies across the PPP
                                        link on freedom, which confuses
index 824afd7002dbeda6f55ad4cc4f3a2610ad5d5e57..f060cfb1445a38a6e0d81c68e5184815976b7fed 100644 (file)
@@ -6,7 +6,7 @@ AX.25
 
 To use the amateur radio protocols within Linux you will need to get a
 suitable copy of the AX.25 Utilities. More detailed information about
-AX.25, NET/ROM and ROSE, associated programs and and utilities can be
+AX.25, NET/ROM and ROSE, associated programs and utilities can be
 found on http://www.linux-ax25.org.
 
 There is an active mailing list for discussing Linux amateur radio matters
index 4cef88d24fc7569536b58fd5378574aa5f9a9c3a..638ac1ee7914f1b6104d8b7119a9d94ef253f264 100644 (file)
@@ -144,7 +144,7 @@ UCAN_COMMAND_SET_BITTIMING
 
 *Host2Dev; mandatory*
 
-Setup bittiming by sending the the structure
+Setup bittiming by sending the structure
 ``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for
 details)
 
@@ -232,7 +232,7 @@ UCAN_IN_TX_COMPLETE
   zero
 
 The CAN device has sent a message to the CAN bus. It answers with a
-list of of tuples <echo-ids, flags>.
+list of tuples <echo-ids, flags>.
 
 The echo-id identifies the frame from (echos the id from a previous
 UCAN_OUT_TX message). The flag indicates the result of the
index 563d56c6a25c924e41d513272aee32d57ab535c1..a8d15dd2b42b72eef0f9cbe55b7f431033b1be2a 100644 (file)
@@ -95,7 +95,7 @@ Ethernet switch.
 Networking stack hooks
 ----------------------
 
-When a master netdev is used with DSA, a small hook is placed in in the
+When a master netdev is used with DSA, a small hook is placed in the
 networking stack is in order to have the DSA subsystem process the Ethernet
 switch specific tagging protocol. DSA accomplishes this by registering a
 specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
index b72f89d5694c9064460b7617cb5946333a2c1d22..837d51f9e1fab7c0999a51184f95971fb43c1b9b 100644 (file)
@@ -741,7 +741,7 @@ tcp_fastopen - INTEGER
 
        Default: 0x1
 
-       Note that that additional client or server features are only
+       Note that additional client or server features are only
        effective if the basic support (0x1 and 0x2) are enabled respectively.
 
 tcp_fastopen_blackhole_timeout_sec - INTEGER
index be36c4600e8f5b9995d4a4bee022c036c01efe6c..2afccc63856ee07357d6d800d9f200b3f6549d94 100644 (file)
@@ -114,7 +114,7 @@ drop_entry - INTEGER
        modes (when there is no enough available memory, the strategy
        is enabled and the variable is automatically set to 2,
        otherwise the strategy is disabled and the variable is set to
-       1), and 3 means that that the strategy is always enabled.
+       1), and 3 means that the strategy is always enabled.
 
 drop_packet - INTEGER
        - 0  - disabled (default)
index 68552b92dc44249862c17b22cef6313be2968608..39c2249c7aa78d2fbda659d2f49676e60ede034b 100644 (file)
@@ -186,7 +186,7 @@ About the AF_RXRPC driver:
      time [tunable] after the last connection using it discarded, in case a new
      connection is made that could use it.
 
- (#) A client-side connection is only shared between calls if they have have
+ (#) A client-side connection is only shared between calls if they have
      the same key struct describing their security (and assuming the calls
      would otherwise share the connection).  Non-secured calls would also be
      able to share connections with each other.
index 5cfb54c2aaa6e639e771d65a7682cef85c53c868..8f68e728ae6ba5f3550763cfda0b293cb4601781 100644 (file)
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.8              gcc --version
+GNU C                  4.9              gcc --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
index 2657a55c6f120d1c01e3f168cc189d3483264269..1bee6f8affdb8f371d34094bf66111470c25a05b 100644 (file)
@@ -319,6 +319,26 @@ If you are afraid to mix up your local variable names, you have another
 problem, which is called the function-growth-hormone-imbalance syndrome.
 See chapter 6 (Functions).
 
+For symbol names and documentation, avoid introducing new usage of
+'master / slave' (or 'slave' independent of 'master') and 'blacklist /
+whitelist'.
+
+Recommended replacements for 'master / slave' are:
+    '{primary,main} / {secondary,replica,subordinate}'
+    '{initiator,requester} / {target,responder}'
+    '{controller,host} / {device,worker,proxy}'
+    'leader / follower'
+    'director / performer'
+
+Recommended replacements for 'blacklist/whitelist' are:
+    'denylist / allowlist'
+    'blocklist / passlist'
+
+Exceptions for introducing new usage is to maintain a userspace ABI/API,
+or when updating code for an existing (as of 2020) hardware or protocol
+specification that mandates those terms. For new specifications
+translate specification usage of the terminology to the kernel coding
+standard where possible.
 
 5) Typedefs
 -----------
index 426f94582b7a1a58a21e7e703beb3bc052440f90..320788f81a051ac8db3703a2e791ff8879d05915 100644 (file)
@@ -4339,14 +4339,15 @@ Errors:
 #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
 
   struct kvm_vmx_nested_state_hdr {
-       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
-       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
+
+       __u32 flags;
+       __u64 preemption_timer_deadline;
   };
 
   struct kvm_vmx_nested_state_data {
index 496fd4eafb68cccc69880a392d540abec8e078df..5d62356f11ee36851144f4455c8dec0ca955b0d5 100644 (file)
@@ -2929,6 +2929,7 @@ F:        include/uapi/linux/atm*
 
 ATMEL MACB ETHERNET DRIVER
 M:     Nicolas Ferre <nicolas.ferre@microchip.com>
+M:     Claudiu Beznea <claudiu.beznea@microchip.com>
 S:     Supported
 F:     drivers/net/ethernet/cadence/
 
@@ -3306,7 +3307,7 @@ X:        arch/riscv/net/bpf_jit_comp32.c
 
 BPF JIT for S390
 M:     Ilya Leoshkevich <iii@linux.ibm.com>
-M:     Heiko Carstens <heiko.carstens@de.ibm.com>
+M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
@@ -3946,7 +3947,7 @@ L:        linux-crypto@vger.kernel.org
 S:     Supported
 F:     drivers/char/hw_random/cctrng.c
 F:     drivers/char/hw_random/cctrng.h
-F:     Documentation/devicetree/bindings/rng/arm-cctrng.txt
+F:     Documentation/devicetree/bindings/rng/arm-cctrng.yaml
 W:     https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
 
 CEC FRAMEWORK
@@ -5111,7 +5112,7 @@ M:        Vinod Koul <vkoul@kernel.org>
 L:     dmaengine@vger.kernel.org
 S:     Maintained
 Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
-T:     git git://git.infradead.org/users/vkoul/slave-dma.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
 F:     Documentation/devicetree/bindings/dma/
 F:     Documentation/driver-api/dmaengine/
 F:     drivers/dma/
@@ -5490,7 +5491,7 @@ F:        include/uapi/drm/r128_drm.h
 DRM DRIVER FOR RAYDIUM RM67191 PANELS
 M:     Robert Chiras <robert.chiras@nxp.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
+F:     Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
 F:     drivers/gpu/drm/panel/panel-raydium-rm67191.c
 
 DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
@@ -14574,8 +14575,8 @@ RENESAS R-CAR THERMAL DRIVERS
 M:     Niklas Söderlund <niklas.soderlund@ragnatech.se>
 L:     linux-renesas-soc@vger.kernel.org
 S:     Supported
-F:     Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
-F:     Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+F:     Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+F:     Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
 F:     drivers/thermal/rcar_gen3_thermal.c
 F:     drivers/thermal/rcar_thermal.c
 
@@ -14831,7 +14832,7 @@ S:      Maintained
 F:     drivers/video/fbdev/savage/
 
 S390
-M:     Heiko Carstens <heiko.carstens@de.ibm.com>
+M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 M:     Christian Borntraeger <borntraeger@de.ibm.com>
 L:     linux-s390@vger.kernel.org
@@ -14862,7 +14863,7 @@ F:      drivers/s390/block/dasd*
 F:     include/linux/dasd_mod.h
 
 S390 IOMMU (PCI)
-M:     Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -14890,7 +14891,7 @@ F:      drivers/s390/net/
 
 S390 PCI SUBSYSTEM
 M:     Niklas Schnelle <schnelle@linux.ibm.com>
-M:     Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
index a60c98519c37bc656471bdbc2d3bc0b0aad8ecb1..0b5f8538bde50b7c86c98416c119f15889ef1122 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -970,8 +970,8 @@ LDFLAGS_vmlinux     += --pack-dyn-relocs=relr
 endif
 
 # Align the bit size of userspace programs with the kernel
-KBUILD_USERCFLAGS  += $(filter -m32 -m64, $(KBUILD_CFLAGS))
-KBUILD_USERLDFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
 
 # make the checker run with the right architecture
 CHECKFLAGS += --arch=$(ARCH)
index fddc700297278bf4c0c69033eb7ce9d51497d141..197896cfbd23e04d8c19bfbdddbb194e3a5aa209 100644 (file)
@@ -170,6 +170,15 @@ config ARC_CPU_HS
 
 endchoice
 
+config ARC_TUNE_MCPU
+       string "Override default -mcpu compiler flag"
+       default ""
+       help
+         Override the default -mcpu=xxx compiler flag (which is set depending
+         on the ISA version) with the specified value.
+         NOTE: If the specified flag isn't supported by the current compiler,
+         the ISA default value will be used as a fallback.
+
 config CPU_BIG_ENDIAN
        bool "Enable Big Endian Mode"
        help
@@ -465,6 +474,12 @@ config ARC_IRQ_NO_AUTOSAVE
          This is programmable and can be optionally disabled in which case
          software INTERRUPT_PROLOGUE/EPILGUE do the needed work
 
+config ARC_LPB_DISABLE
+       bool "Disable loop buffer (LPB)"
+       help
+         On HS cores, the loop buffer (LPB) is programmable at runtime and
+         can optionally be disabled.
+
 endif # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
index 20e9ab6cc521f9ba8314d683d098224bbfebed37..d00f8b8afd08eb62cd28c62d1132d113886061cf 100644 (file)
@@ -10,8 +10,25 @@ CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
 endif
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
-cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=hs38
+
+tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT)  := -mcpu=arc700
+tune-mcpu-def-$(CONFIG_ISA_ARCV2)      := -mcpu=hs38
+
+ifeq ($(CONFIG_ARC_TUNE_MCPU),"")
+cflags-y                               += $(tune-mcpu-def-y)
+else
+tune-mcpu                              := $(shell echo $(CONFIG_ARC_TUNE_MCPU))
+tune-mcpu-ok                           := $(call cc-option-yn, $(tune-mcpu))
+ifeq ($(tune-mcpu-ok),y)
+cflags-y                               += $(tune-mcpu)
+else
+# The flag provided by the 'CONFIG_ARC_TUNE_MCPU' option isn't known to this compiler
+# (probably the compiler is too old). Use the ISA default mcpu flag instead as a safe option.
+$(warning ** WARNING ** CONFIG_ARC_TUNE_MCPU flag '$(tune-mcpu)' is unknown, fallback to '$(tune-mcpu-def-y)')
+cflags-y                               += $(tune-mcpu-def-y)
+endif
+endif
+
 
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
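
The hunk above probes the user-supplied -mcpu value with cc-option-yn and silently falls back to the ISA default when the toolchain rejects it. The same probe-then-fall-back idea can be sketched as a standalone C program; the compiler name, the TUNE_MCPU environment variable and the flags here are illustrative assumptions, not anything kbuild actually uses:

    #include <stdio.h>
    #include <stdlib.h>

    /* Ask the system compiler to build an empty translation unit with the
     * candidate flag; a zero exit status means the flag is accepted. */
    static int cc_accepts(const char *flag)
    {
            char cmd[256];

            snprintf(cmd, sizeof(cmd),
                     "cc %s -c -x c /dev/null -o /dev/null 2>/dev/null", flag);
            return system(cmd) == 0;
    }

    int main(void)
    {
            const char *tune = getenv("TUNE_MCPU");   /* e.g. "-mcpu=hs4x" */
            const char *fallback = "-mcpu=hs38";

            puts(tune && cc_accepts(tune) ? tune : fallback);
            return 0;
    }
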
index c77a0e3671acce9e30b96f89df9303c795317b99..0284ace0e1ab4242e0ab13c808f4ad3b4d10d3d2 100644 (file)
@@ -19,7 +19,7 @@
 #define  R_ARC_32_PCREL                0x31
 
 /*to set parameters in the core dumps */
-#define ELF_ARCH               EM_ARCOMPACT
+#define ELF_ARCH               EM_ARC_INUSE
 #define ELF_CLASS              ELFCLASS32
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
index 7fc73fef5e29e290524143a6d2fc899823d6df56..863d63ad18d6f2c27f61bb8ab54c6c96dcf4289e 100644 (file)
@@ -90,6 +90,9 @@ static inline void arch_local_irq_restore(unsigned long flags)
 /*
  * Unconditionally Enable IRQs
  */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+extern void arch_local_irq_enable(void);
+#else
 static inline void arch_local_irq_enable(void)
 {
        unsigned long temp;
@@ -102,7 +105,7 @@ static inline void arch_local_irq_enable(void)
        : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
        : "cc", "memory");
 }
-
+#endif
 
 /*
  * Unconditionally Disable IRQs
index 60406ec62eb81fb9b72f0d768000fb60d2de6cde..ea00c8a17f079c6ddbd1b08e3642f603023626a9 100644 (file)
@@ -165,7 +165,6 @@ END(EV_Extension)
 tracesys:
        ; save EFA in case tracer wants the PC of traced task
        ; using ERET won't work since next-PC has already committed
-       lr  r12, [efa]
        GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
        st  r12, [r11, THREAD_FAULT_ADDR]       ; thread.fault_address
 
@@ -208,15 +207,9 @@ tracesys_exit:
 ; Breakpoint TRAP
 ; ---------------------------------------------
 trap_with_param:
-
-       ; stop_pc info by gdb needs this info
-       lr  r0, [efa]
+       mov r0, r12     ; EFA in case ptracer/gdb wants stop_pc
        mov r1, sp
 
-       ; Now that we have read EFA, it is safe to do "fake" rtie
-       ;   and get out of CPU exception mode
-       FAKE_RET_FROM_EXCPN
-
        ; Save callee regs in case gdb wants to have a look
        ; SP will grow up by size of CALLEE Reg-File
        ; NOTE: clobbers r12
@@ -243,6 +236,10 @@ ENTRY(EV_Trap)
 
        EXCEPTION_PROLOGUE
 
+       lr  r12, [efa]
+
+       FAKE_RET_FROM_EXCPN
+
        ;============ TRAP 1   :breakpoints
        ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
        bmsk.f 0, r10, 7
@@ -250,9 +247,6 @@ ENTRY(EV_Trap)
 
        ;============ TRAP  (no param): syscall top level
 
-       ; First return from Exception to pure K mode (Exception/IRQs renabled)
-       FAKE_RET_FROM_EXCPN
-
        ; If syscall tracing ongoing, invoke pre-post-hooks
        GET_CURR_THR_INFO_FLAGS   r10
        btst r10, TIF_SYSCALL_TRACE
index 6eb23f1545eec4af6a21aeebb5cd2b063722ab44..17fd1ed700ccab4285973c15fa83b143ad695cea 100644 (file)
        bclr    r5, r5, STATUS_AD_BIT
 #endif
        kflag   r5
+
+#ifdef CONFIG_ARC_LPB_DISABLE
+       lr      r5, [ARC_REG_LPB_BUILD]
+       breq    r5, 0, 1f               ; LPB doesn't exist
+       mov     r5, 1
+       sr      r5, [ARC_REG_LPB_CTRL]
+1:
+#endif /* CONFIG_ARC_LPB_DISABLE */
 #endif
        ; Config DSP_CTRL properly, so kernel may use integer multiply,
        ; multiply-accumulate, and divide operations
index dad8a656a2f1b7a70ab30a0db62c19270b267a86..41f07b3e594e0f16f605504c754dc28cc5525a92 100644 (file)
@@ -58,10 +58,12 @@ static const struct id_to_str arc_legacy_rel[] = {
        { 0x00,         NULL   }
 };
 
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_hs_ver54_rel[] = {
        /* UARCH.MAJOR, Release */
        {  0,           "R3.10a"},
        {  1,           "R3.50a"},
+       {  2,           "R3.60a"},
+       {  3,           "R4.00a"},
        {  0xFF,        NULL   }
 };
 
@@ -117,12 +119,6 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
        struct bcr_uarch_build_arcv2 uarch;
        const struct id_to_str *tbl;
 
-       /*
-        * Up until (including) the first core4 release (0x54) things were
-        * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
-        * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
-        */
-
        if (cpu->core.family < 0x54) { /* includes arc700 */
 
                for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
@@ -143,11 +139,10 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
        }
 
        /*
-        * However the subsequent HS release (same 0x54) allow HS38 or HS48
-        * configurations and encode this info in a different BCR.
-        * The BCR was introduced in 0x54 so can't be read unconditionally.
+        * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+        * ARCVER 0x54, which introduced AUX MICRO_ARCH_BUILD; subsequent
+        * releases only update the latter.
         */
-
        READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
 
        if (uarch.prod == 4) {
@@ -158,7 +153,7 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
                cpu->name = "HS38";
        }
 
-       for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+       for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
                if (uarch.maj == tbl->id) {
                        cpu->release = tbl->str;
                        break;
index 4089d97405c950e0148180334d0e5fbe88154df8..3dbcae3d60d285af39798a7746ab32f6cb7f71e6 100644 (file)
                        linux,code = <SW_FRONT_PROXIMITY>;
                        linux,can-disable;
                };
+
+               machine_cover {
+                       label = "Machine Cover";
+                       gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
+                       linux,input-type = <EV_SW>;
+                       linux,code = <SW_MACHINE_COVER>;
+                       linux,can-disable;
+               };
        };
 
        isp1707: isp1707 {
        pinctrl-0 = <&mmc1_pins>;
        vmmc-supply = <&vmmc1>;
        bus-width = <4>;
-       /* For debugging, it is often good idea to remove this GPIO.
-          It means you can remove back cover (to reboot by removing
-          battery) and still use the MMC card. */
-       cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
 };
 
 /* most boards use vaux3, only some old versions use vmmc2 instead */
index c036a4a2f8e213fb650a1759781afa6cbba31005..a1570c8bab25acd9bdbfbc7f26db705fa923f873 100644 (file)
 #if defined(__APCS_26__)
 #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
 #endif
-/*
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- *           miscompiles find_get_entry(), and can result in EXT3 and EXT4
- *           filesystem corruption (possibly other FS too).
- */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
 
 int main(void)
 {
index fd4e1ce1daf96ae57aa70c84305781325252ec94..e93145d72c26e04533f4b61c4dee167a4b10f87f 100644 (file)
@@ -241,7 +241,6 @@ static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
  */
-#define GRANT_TABLE_PHYSADDR 0
 void __init xen_early_init(void)
 {
        of_scan_flat_dt(fdt_find_hyper_node, NULL);
index 5e5dc05d63a06473050a1a55c7b0763484d73ebb..12f0eb56a1cc30e5a38c6db1d0342e47a0e3908d 100644 (file)
@@ -73,11 +73,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        ".pushsection .altinstructions,\"a\"\n"                         \
        ALTINSTR_ENTRY(feature)                                         \
        ".popsection\n"                                                 \
-       ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       ".subsection 1\n"                                               \
        "663:\n\t"                                                      \
        newinstr "\n"                                                   \
        "664:\n\t"                                                      \
-       ".popsection\n\t"                                               \
+       ".previous\n\t"                                                 \
        ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
        ".org   . - (662b-661b) + (664b-663b)\n"                        \
        ".endif\n"
@@ -117,9 +117,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 662:   .pushsection .altinstructions, "a"
        altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
        .popsection
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
 663:   \insn2
-664:   .popsection
+664:   .previous
        .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
        .endif
@@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        .pushsection .altinstructions, "a"
        altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
        .popsection
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
        .align 2        /* So GAS knows label 661 is suitably aligned */
 661:
 .endm
@@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 .macro alternative_else
 662:
        .if .Lasm_alt_mode==0
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
        .else
-       .popsection
+       .previous
        .endif
 663:
 .endm
@@ -192,7 +192,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 .macro alternative_endif
 664:
        .if .Lasm_alt_mode==0
-       .popsection
+       .previous
        .endif
        .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
index a358e97572c14c58d55d732926cf3f211a71a410..6647ae4f02318637afa16bee6eb5e0065b239a97 100644 (file)
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
        return read_sysreg_s(SYS_ICC_PMR_EL1);
 }
 
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
 {
        write_sysreg_s(val, SYS_ICC_PMR_EL1);
 }
index 7ae54d7d333a5ed5563543df487b8f97662669bb..9f0ec21d6327f49b8e06549ebfb2e218733a4638 100644 (file)
@@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround {
        u64 (*read_cntvct_el0)(void);
        int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
        int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
+       bool disable_compat_vdso;
 };
 
 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
index 5d1f4ae42799b2a86459c5892d633b24d09939e1..f7c3d1ff091d28602045e3b3ecf6dbc556ace8cd 100644 (file)
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
                cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
 {
        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
               cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
index a87a93f67671d9d017afd936003ab4caf4f14bc3..7219cddeba669f844a8e9611b3ceda597e65c021 100644 (file)
@@ -86,6 +86,7 @@
 #define QCOM_CPU_PART_FALKOR           0xC00
 #define QCOM_CPU_PART_KRYO             0x200
 #define QCOM_CPU_PART_KRYO_3XX_SILVER  0x803
+#define QCOM_CPU_PART_KRYO_4XX_GOLD    0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER  0x805
 
 #define NVIDIA_CPU_PART_DENVER         0x003
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
index 2e7e0f452301820efc4ccf45730cd5d627778d65..4d867c6446c4844c035a1a38d5660e5b70f7085c 100644 (file)
@@ -67,7 +67,7 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_HYP               __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC          __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO            __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE                __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE                __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
 
 #define PAGE_S2_MEMATTR(attr)                                          \
        ({                                                              \
index df6ea65c1decad7423bebcb6d4b71dd95ee1579f..b054d9febfb5432c46d21af10af4316134987c58 100644 (file)
@@ -2,7 +2,10 @@
 #ifndef __ASM_VDSOCLOCKSOURCE_H
 #define __ASM_VDSOCLOCKSOURCE_H
 
-#define VDSO_ARCH_CLOCKMODES   \
-       VDSO_CLOCKMODE_ARCHTIMER
+#define VDSO_ARCH_CLOCKMODES                                   \
+       /* vdso clocksource for both 32 and 64bit tasks */      \
+       VDSO_CLOCKMODE_ARCHTIMER,                               \
+       /* vdso clocksource for 64bit tasks only */             \
+       VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT
 
 #endif
index b6907ae78e5303bc45804faeb83d3d002d7d28ad..9a625e8947ff0a8233d6a4e5e49228c57ec57e7b 100644 (file)
@@ -111,7 +111,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
         * update. Return something. Core will do another round and then
         * see the mode change and fallback to the syscall.
         */
-       if (clock_mode == VDSO_CLOCKMODE_NONE)
+       if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
                return 0;
 
        /*
@@ -152,6 +152,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
        return ret;
 }
 
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+       return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+}
+#define vdso_clocksource_ok    vdso_clocksource_ok
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
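
The shape of the change above is "handle only the clock modes the fast path understands, and make the caller fall back for anything else". A standalone sketch of that shape, with invented names and a dummy counter value (the real code reads the architected timer, not a constant):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    enum clock_mode { MODE_NONE, MODE_ARCHTIMER, MODE_ARCHTIMER_NOCOMPAT };

    /* Fast path: valid only for the one mode this caller may use. */
    static int fast_gettime(enum clock_mode mode, uint64_t *out)
    {
            if (mode != MODE_ARCHTIMER)
                    return -1;              /* caller must fall back */
            *out = 123456789ULL;            /* stand-in for the counter read */
            return 0;
    }

    int main(void)
    {
            uint64_t t;

            if (fast_gettime(MODE_ARCHTIMER_NOCOMPAT, &t) != 0) {
                    struct timespec ts;     /* fallback path, e.g. the syscall */

                    clock_gettime(CLOCK_MONOTONIC, &ts);
                    t = (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
            }
            printf("%llu\n", (unsigned long long)t);
            return 0;
    }
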
index d1757ef1b1e749692958681620382d09a3597ce5..73039949b5ce2f6227f11d0d1591967c38bda8f6 100644 (file)
@@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature)
  */
 static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
-       unsigned long replptr;
-
-       if (kernel_text_address(pc))
-               return true;
-
-       replptr = (unsigned long)ALT_REPL_PTR(alt);
-       if (pc >= replptr && pc <= (replptr + alt->alt_len))
-               return false;
-
-       /*
-        * Branching into *another* alternate sequence is doomed, and
-        * we're not even trying to fix it up.
-        */
-       BUG();
+       unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+       return !(pc >= replptr && pc <= (replptr + alt->alt_len));
 }
 
 #define align_down(x, a)       ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
index cf50c53e9357ea4fb036acff9685d80e75cf9823..79728bfb5351ffd9cdf258ab9c1c366b0cac3e0f 100644 (file)
@@ -472,12 +472,7 @@ static bool
 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
 {
-       u32 midr = read_cpuid_id();
-       /* Cortex-A76 r0p0 - r3p1 */
-       struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-
-       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-       return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+       return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
 }
 #endif
 
@@ -728,6 +723,8 @@ static const struct midr_range erratum_1418040_list[] = {
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+       /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+       MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
 };
 #endif
@@ -772,11 +769,23 @@ static const struct midr_range erratum_speculative_at_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+       /* Kryo4xx Silver (rdpe => r1p0) */
+       MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
        {},
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+       /* Cortex-A76 r0p0 - r3p1 */
+       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+       /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+       MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -916,6 +925,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
+               .midr_range_list = erratum_1463225,
        },
 #endif
 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
index 9f63053a63a981efffb0192b697d99aac825e779..9fae0efc80c1763c27326f549f8abfa113a190c9 100644 (file)
@@ -1408,6 +1408,8 @@ static bool cpu_has_broken_dbm(void)
        static const struct midr_range cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1024718
                MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 -r1p0
+               /* Kryo4xx Silver (rdpe => r1p0) */
+               MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
                {},
        };
index 3dbdf9752b118fd45784a48a7bdd85c6b25e2f1b..d3be9dbf549007e19e347ca9978694a410d12a4f 100644 (file)
@@ -57,7 +57,7 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
        /*
         * The CPU masked interrupts, and we are leaving them masked during
         * do_debug_exception(). Update PMR as if we had called
-        * local_mask_daif().
+        * local_daif_mask().
         */
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
index 5304d193c79dd3a67bca8d72ec9afbf1d530df2e..35de8ba60e3d5801b2539ac4354834cd7538b835 100644 (file)
@@ -126,8 +126,10 @@ alternative_else_nop_endif
        add     \dst, \dst, #(\sym - .entry.tramp.text)
        .endm
 
-       // This macro corrupts x0-x3. It is the caller's duty
-       // to save/restore them if required.
+       /*
+        * This macro corrupts x0-x3. It is the caller's duty to save/restore
+        * them if required.
+        */
        .macro  apply_ssbd, state, tmp1, tmp2
 #ifdef CONFIG_ARM64_SSBD
 alternative_cb arm64_enable_wa2_handling
@@ -167,13 +169,28 @@ alternative_cb_end
        stp     x28, x29, [sp, #16 * 14]
 
        .if     \el == 0
+       .if     \regsize == 32
+       /*
+        * If we're returning from a 32-bit task on a system affected by
+        * 1418040 then re-enable userspace access to the virtual counter.
+        */
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+       mrs     x0, cntkctl_el1
+       orr     x0, x0, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
+       msr     cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+       .endif
        clear_gp_regs
        mrs     x21, sp_el0
        ldr_this_cpu    tsk, __entry_task, x20
        msr     sp_el0, tsk
 
-       // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
-       // when scheduling.
+       /*
+        * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+        * when scheduling.
+        */
        ldr     x19, [tsk, #TSK_TI_FLAGS]
        disable_step_tsk x19, x20
 
@@ -320,6 +337,14 @@ alternative_else_nop_endif
        tst     x22, #PSR_MODE32_BIT            // native task?
        b.eq    3f
 
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+       mrs     x0, cntkctl_el1
+       bic     x0, x0, #2                      // ARCH_TIMER_USR_VCT_ACCESS_EN
+       msr     cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -331,21 +356,6 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if_not ARM64_WORKAROUND_1418040
-       b       4f
-alternative_else_nop_endif
-       /*
-        * if (x22.mode32 == cntkctl_el1.el0vcten)
-        *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
-        */
-       mrs     x1, cntkctl_el1
-       eon     x0, x1, x22, lsr #3
-       tbz     x0, #1, 4f
-       eor     x1, x1, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
-       msr     cntkctl_el1, x1
-4:
-#endif
        scs_save tsk, x0
 
        /* No kernel C function calls after this as user keys are set. */
@@ -377,11 +387,11 @@ alternative_else_nop_endif
        .if     \el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-       bne     5f
+       bne     4f
        msr     far_el1, x30
        tramp_alias     x30, tramp_exit_native
        br      x30
-5:
+4:
        tramp_alias     x30, tramp_exit_compat
        br      x30
 #endif
index 43119922341f81272568fc6049f0ec163d26eaa8..1a157ca33262d11de7d74eaf4d994cb9bf3ceeca 100644 (file)
@@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
        if (!kgdb_single_step)
                return DBG_HOOK_ERROR;
 
-       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+       kgdb_handle_exception(0, SIGTRAP, 0, regs);
        return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
index cbe49cd117cfec755cf6135849e1823779a8a86d..5290f17a4d8041deb1943ea4a87f1b9d37577886 100644 (file)
@@ -122,7 +122,7 @@ void *alloc_insn_page(void)
 {
        return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
                        GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
-                       NUMA_NO_NODE, __func__);
+                       NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 /* arm kprobe: install breakpoint in text */
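
The kprobes hunk above tags the allocation with __builtin_return_address(0) instead of the fixed __func__ string, so the owner recorded for the mapping is the actual caller. The builtin can be tried in plain userspace C; the noinline attribute keeps a real call frame so the value is meaningful:

    #include <stdio.h>

    __attribute__((noinline))
    static void *my_caller(void)
    {
            /* Address just after the call instruction in the caller. */
            return __builtin_return_address(0);
    }

    int main(void)
    {
            printf("called from %p (inside main)\n", my_caller());
            return 0;
    }
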
index 6827da7f3aa54c85d263f46d51f05194795c8a87..5423ffe0a987602f45bf15026e229bf5f5ae698e 100644 (file)
@@ -165,9 +165,6 @@ SECTIONS
                *(.altinstructions)
                __alt_instructions_end = .;
        }
-       .altinstr_replacement : {
-               *(.altinstr_replacement)
-       }
 
        . = ALIGN(SEGMENT_ALIGN);
        __inittext_end = .;
index 6e6ed5581eed157220a1b1ffcd660da21f38d62e..e76c0e89d48e0313b281f46bcdfeb6b2c70fc681 100644 (file)
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
 
 1:     cmp     x0, #HVC_RESET_VECTORS
        b.ne    1f
-reset:
+
        /*
-        * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-        * case we coming via HVC_SOFT_RESTART.
+        * Set the HVC_RESET_VECTORS return code before entering the common
+        * path so that we do not clobber x0-x2 in case we are coming via
+        * HVC_SOFT_RESTART.
         */
+       mov     x0, xzr
+reset:
+       /* Reset kvm back to the hyp stub. */
        mrs     x5, sctlr_el2
        mov_q   x6, SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
        /* Install stub vectors */
        adr_l   x5, __hyp_stub_vectors
        msr     vbar_el2, x5
-       mov     x0, xzr
        eret
 
 1:     /* Bad stub call */
index b5ae3a5d509e40f27a83d02093435e1ad93cdebd..3c224162b3ddd7b150bb9d695e282b93350c9b8d 100644 (file)
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 }
 
 /*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
        if (!has_vhe())
                return;
 
+       preempt_disable();
        host = this_cpu_ptr(&kvm_host_data);
        events_guest = host->pmu_events.events_guest;
        events_host = host->pmu_events.events_host;
 
        kvm_vcpu_pmu_enable_el0(events_guest);
        kvm_vcpu_pmu_disable_el0(events_host);
+       preempt_enable();
 }
 
 /*
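
The fix above brackets the per-CPU access with preempt_disable()/preempt_enable() because one of the callers can be preempted, and a migration between fetching the per-CPU pointer and using it would touch the wrong CPU's data. A hedged sketch of the general pattern, with a hypothetical per-CPU structure rather than the kvm_host_data used here:

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/types.h>

    struct my_pmu_events {
            u64 events_guest;
            u64 events_host;
    };

    static DEFINE_PER_CPU(struct my_pmu_events, my_pmu_events);

    static void my_restore_guest_events(void)
    {
            struct my_pmu_events *ev;

            preempt_disable();                      /* pin to this CPU */
            ev = this_cpu_ptr(&my_pmu_events);
            /* ... program counters from ev->events_guest / ev->events_host ... */
            preempt_enable();
    }
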
index 1e0f4c2848889030192e60a344c315b727f668d8..f7b52ce1557ec393635d1fb71aa4a76274627335 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
 
 #include <asm/kvm_mmu.h>
 #include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
        return base;
 }
 
+static bool kvm_arm_pvtime_supported(void)
+{
+       return !!sched_info_on();
+}
+
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
 {
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
        int ret = 0;
        int idx;
 
-       if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+       if (!kvm_arm_pvtime_supported() ||
+           attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;
 
        if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
        u64 __user *user = (u64 __user *)attr->addr;
        u64 ipa;
 
-       if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+       if (!kvm_arm_pvtime_supported() ||
+           attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;
 
        ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 {
        switch (attr->attr) {
        case KVM_ARM_VCPU_PVTIME_IPA:
-               return 0;
+               if (kvm_arm_pvtime_supported())
+                       return 0;
        }
        return -ENXIO;
 }
index d3b2090237274f8ffe8f4d89de92074390d4075c..6ed36be51b4b2d432889b4f22e3098484cdcc8a5 100644 (file)
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-       int ret = -EINVAL;
+       int ret;
        bool loaded;
        u32 pstate;
 
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
        if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
            test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-               if (kvm_vcpu_enable_ptrauth(vcpu))
+               if (kvm_vcpu_enable_ptrauth(vcpu)) {
+                       ret = -EINVAL;
                        goto out;
+               }
        }
 
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+                               ret = -EINVAL;
                                goto out;
+                       }
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
index 27ac833e5ec7c2f7fd3c58ab2bde385022f8b629..b5fa73c9fd35526cc4d4b30b94c30f970750af9e 100644 (file)
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
            !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
                disable_irq_nosync(irq);
 
+       /*
+        * The v4.1 doorbell can fire concurrently with the vPE being
+        * made non-resident. Ensure we only update pending_last
+        * *after* the non-residency sequence has completed.
+        */
+       raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
        vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+       raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
 
index e779b19e01939b632d377751e13b76f3a995b5ab..f66f4b1d062ed636abd3e4ea380c162132125bac 100644 (file)
@@ -138,7 +138,8 @@ void __init setup_arch(char **cmdline_p)
        pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
                 __bss_stop, memory_start, memory_start, memory_end);
 
-       memblock_add(memory_start, memory_end - memory_start);
+       memblock_add(_rambase, memory_end - _rambase);
+       memblock_reserve(_rambase, memory_start - _rambase);
 
        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
index 29f47923aa4629cab0ce77e4d0396d7f92332700..7d04210d34f026da98f2e083df3a598624eab815 100644 (file)
@@ -174,7 +174,7 @@ void __init cf_bootmem_alloc(void)
        m68k_memory[0].addr = _rambase;
        m68k_memory[0].size = _ramend - _rambase;
 
-       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+       memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
 
        /* compute total pages in system */
        num_pages = PFN_DOWN(_ramend - _rambase);
index 8d22828787d8c123abc3be02458620b823bda927..bc72304a2440bf89090408317259c78571421cf2 100644 (file)
@@ -92,7 +92,7 @@
                        "MIC1N", "Built-in Mic";
                simple-audio-card,pin-switches = "Speaker", "Headphones";
 
-               simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_HIGH>;
+               simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_LOW>;
                simple-audio-card,aux-devs = <&speaker_amp>, <&headphones_amp>;
 
                simple-audio-card,bitclock-master = <&dai_codec>;
index c628747d4ecd1895e9cbde95bbbf4179a0cea573..7dd4a80e05d6ddef8d132d09acad6251210c92cf 100644 (file)
                                                                \
        /*                                                      \
         * We can't unroll if the number of iterations isn't    \
-        * compile-time constant. Unfortunately GCC versions    \
-        * up until 4.6 tend to miss obvious constants & cause  \
+        * compile-time constant. Unfortunately clang versions  \
+        * up until 8.0 tend to miss obvious constants & cause  \
         * this check to fail, even though they go on to        \
         * generate reasonable code for the switch statement,   \
         * so we skip the sanity check for those compilers.     \
         */                                                     \
-       BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 ||            \
-                     CONFIG_CLANG_VERSION >= 80000) &&         \
-                    !__builtin_constant_p(times));             \
+       BUILD_BUG_ON(!__builtin_constant_p(times));             \
                                                                \
        switch (times) {                                        \
        case 32: fn(__VA_ARGS__); /* fall through */            \
index 7c32c956156a0cc6c14f73e882eafe7384d1de4b..f655af68176c85aec5fbb47b9eb2f237dc6d4ca4 100644 (file)
@@ -723,12 +723,14 @@ static int simulate_loongson3_cpucfg(struct pt_regs *regs,
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
                /* Do not emulate on unsupported core models. */
-               if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data))
+               preempt_disable();
+               if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+                       preempt_enable();
                        return -1;
-
+               }
                regs->regs[rd] = loongson3_cpucfg_read_synthesized(
                        &current_cpu_data, sel);
-
+               preempt_enable();
                return 0;
        }
 
@@ -2169,6 +2171,7 @@ static void configure_status(void)
 
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);
+       back_to_back_c0_hazard();
 }
 
 unsigned int hwrena;
index 5ae82d925197102b240ed9970b02a3413d3a6914..d242300cacc04371038bf59d954ccc026a59c3dc 100644 (file)
@@ -1722,6 +1722,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                          vcpu->arch.gprs[rt], *(u32 *)data);
                break;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
        case sdl_op:
                run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                                        vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -1815,6 +1816,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(u64 *)data);
                break;
+#endif
 
 #ifdef CONFIG_CPU_LOONGSON64
        case sdc2_op:
@@ -2002,6 +2004,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                }
                break;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
        case ldl_op:
                run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                                        vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -2073,6 +2076,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                        break;
                }
                break;
+#endif
 
 #ifdef CONFIG_CPU_LOONGSON64
        case ldc2_op:
index aa37545ebe8f72d73cd5e9281342695ef05ae553..b10342018d199a4932743217deea517b71c26acc 100644 (file)
@@ -514,8 +514,8 @@ void __init ltq_soc_init(void)
                clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
                               PMU_PPE_DP | PMU_PPE_TC);
                clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
-               clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
-               clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
                clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
                clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
                clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
@@ -538,8 +538,8 @@ void __init ltq_soc_init(void)
                                PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
                                PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
                                PMU_PPE_QSB | PMU_PPE_TOP);
-               clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
-               clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
                clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
                clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
                clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
index fa080694e581edcf96f9a2c21487cf179a8d3680..0fc8bad878b2d629eb82d6580d954a0159fc437d 100644 (file)
@@ -2551,7 +2551,7 @@ EXC_VIRT_NONE(0x5400, 0x100)
 INT_DEFINE_BEGIN(denorm_exception)
        IVEC=0x1500
        IHSRR=1
-       IBRANCH_COMMON=0
+       IBRANCH_TO_COMMON=0
        IKVM_REAL=1
 INT_DEFINE_END(denorm_exception)
 
index 1199fc2bfaec9185ec36f8f6bc564287beacfb68..ca5fcb4bff32604cd32be9a9086ec88d37d4304f 100644 (file)
@@ -353,9 +353,6 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
        int pkey_shift;
        u64 amr;
 
-       if (!is_pkey_enabled(pkey))
-               return true;
-
        pkey_shift = pkeyshift(pkey);
        if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
                return true;
index 128192e14ff2a83e6b29490bf4b6d78feaabd660..3230c1d48562662575ca20926bc1bd7b0c1102f3 100644 (file)
@@ -23,6 +23,8 @@ config RISCV
        select ARCH_HAS_SET_DIRECT_MAP
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+       select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+       select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
index 041b45f5b99719f9430122250094312277eae380..09342111f22771c091309ac712f15a0a8374bfad 100644 (file)
@@ -3,8 +3,7 @@
 #ifndef __ASM_GDB_XML_H_
 #define __ASM_GDB_XML_H_
 
-#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
-static const char riscv_gdb_stub_feature[64] =
+const char riscv_gdb_stub_feature[64] =
                        "PacketSize=800;qXfer:features:read+;";
 
 static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
index 8177a457caffa53a0dbb545718ccfbe971f5f495..46677daf708bd0b06a763a8297dfa88f369712d0 100644 (file)
@@ -19,7 +19,6 @@
 
 #ifndef        __ASSEMBLY__
 
-extern int kgdb_has_hit_break(unsigned long addr);
 extern unsigned long kgdb_compiled_break;
 
 static inline void arch_kgdb_breakpoint(void)
@@ -106,7 +105,9 @@ static inline void arch_kgdb_breakpoint(void)
 #define DBG_REG_BADADDR_OFF 34
 #define DBG_REG_CAUSE_OFF 35
 
-#include <asm/gdb_xml.h>
+extern const char riscv_gdb_stub_feature[64];
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
 
 #endif
 #endif
index c3275f42d1ac815104c112ebfc6dfd3630716d53..963ed7edcff264e6dcfc4891a77bb774c0655d26 100644 (file)
@@ -44,18 +44,18 @@ DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
 DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
 DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
 
-int decode_register_index(unsigned long opcode, int offset)
+static int decode_register_index(unsigned long opcode, int offset)
 {
        return (opcode >> offset) & 0x1F;
 }
 
-int decode_register_index_short(unsigned long opcode, int offset)
+static int decode_register_index_short(unsigned long opcode, int offset)
 {
        return ((opcode >> offset) & 0x7) + 8;
 }
 
 /* Calculate the new address for after a step */
-int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
 {
        unsigned long pc = regs->epc;
        unsigned long *regs_ptr = (unsigned long *)regs;
@@ -136,7 +136,7 @@ int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
        return 0;
 }
 
-int do_single_step(struct pt_regs *regs)
+static int do_single_step(struct pt_regs *regs)
 {
        /* Determine where the target instruction will send us to */
        unsigned long addr = 0;
@@ -320,7 +320,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
        return err;
 }
 
-int kgdb_riscv_kgdbbreak(unsigned long addr)
+static int kgdb_riscv_kgdbbreak(unsigned long addr)
 {
        if (stepped_address == addr)
                return KGDB_SW_SINGLE_STEP;
index 46038bc58c9e589b79666da39abe1bc495d2cc48..0cf9a82326a85b4f254f46bd0648096621577831 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -14,7 +15,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
@@ -31,9 +31,9 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -51,14 +51,11 @@ CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=y
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -77,6 +74,8 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -96,7 +95,6 @@ CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -130,6 +128,7 @@ CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
@@ -144,6 +143,7 @@ CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_VTI=m
@@ -151,7 +151,10 @@ CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -317,6 +320,7 @@ CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
@@ -341,6 +345,7 @@ CONFIG_NET_SCH_CODEL=m
 CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
@@ -364,6 +369,7 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -374,6 +380,7 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 # CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -435,6 +442,7 @@ CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -448,6 +456,8 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -481,7 +491,6 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -514,6 +523,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -561,6 +571,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -608,6 +620,7 @@ CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
 CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
@@ -650,8 +663,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_FORTIFY_SOURCE=y
@@ -675,8 +688,11 @@ CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -685,6 +701,7 @@ CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -701,6 +718,7 @@ CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
@@ -719,6 +737,9 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
@@ -774,6 +795,7 @@ CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
+CONFIG_TEST_LOCKUP=m
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -786,7 +808,9 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
+# CONFIG_RCU_TRACE is not set
 CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_IRQSOFF_TRACER=y
@@ -808,10 +832,12 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_MIN_HEAP=y
 CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
index 7cd0648c1f4e10f3b5239301ec4abecd33e21f2b..5df9759e8ff6700e6771f9999604b3192e545568 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -13,7 +14,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
@@ -30,9 +30,9 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -41,7 +41,6 @@ CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
-# CONFIG_NUMA_EMU is not set
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
@@ -51,14 +50,11 @@ CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=m
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -74,6 +70,8 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -91,7 +89,6 @@ CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -125,6 +122,7 @@ CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
@@ -139,6 +137,7 @@ CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_VTI=m
@@ -146,7 +145,10 @@ CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -311,6 +313,7 @@ CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
@@ -335,6 +338,7 @@ CONFIG_NET_SCH_CODEL=m
 CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
@@ -358,6 +362,7 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -368,6 +373,7 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 # CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_UEVENT_HELPER=y
@@ -430,6 +436,7 @@ CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -444,6 +451,8 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -477,7 +486,6 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -510,6 +518,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -557,6 +566,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -600,6 +611,7 @@ CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
 CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
@@ -642,8 +654,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
@@ -667,8 +679,11 @@ CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_OFB=m
@@ -678,6 +693,7 @@ CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -694,6 +710,7 @@ CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
@@ -712,6 +729,9 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
@@ -725,6 +745,7 @@ CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CORDIC=m
+CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
@@ -739,10 +760,12 @@ CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_TEST_LOCKUP=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
index 20c51e5d93530ac536beeead8d2ded1bde02518a..4091c50449cd06238578d5b640bb364269c5a293 100644 (file)
@@ -30,6 +30,7 @@ CONFIG_IBM_PARTITION=y
 # CONFIG_BOUNCE is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
+# CONFIG_ETHTOOL_NETLINK is not set
 CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_RAM=y
 # CONFIG_BLK_DEV_XPRAM is not set
@@ -55,6 +56,8 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_MONWRITER is not set
 # CONFIG_S390_VMUR is not set
 # CONFIG_HID is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VHOST_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
@@ -62,7 +65,9 @@ CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
+# CONFIG_ZLIB_DFLTCC is not set
 CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
index cee3cb6455a29885605c54c14a6bce32bff2fae4..6ea0820e7c7f05cd8b113bb7c287b592bc69b68f 100644 (file)
 #define KVM_USER_MEM_SLOTS 32
 
 /*
- * These seem to be used for allocating ->chip in the routing table,
- * which we don't use. 4096 is an out-of-thin-air value. If we need
- * to look at ->chip later on, we'll need to revisit this.
+ * These seem to be used for allocating ->chip in the routing table, which we
+ * don't use. 1 is as small as we can get to reduce the needed memory. If we
+ * need to look at ->chip later on, we'll need to revisit this.
  */
 #define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
 #define KVM_HALT_POLL_NS_DEFAULT 50000
 
 /* s390-specific vcpu->requests bit members */
index 85a711d783eb45b8cdd46e02d9589dc8ae2fc451..4f9e4626df553f999c16214f118f7522b31228ce 100644 (file)
@@ -881,12 +881,21 @@ out:
        return err;
 }
 
+static bool is_callchain_event(struct perf_event *event)
+{
+       u64 sample_type = event->attr.sample_type;
+
+       return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
+                             PERF_SAMPLE_STACK_USER);
+}
+
 static int cpumsf_pmu_event_init(struct perf_event *event)
 {
        int err;
 
        /* No support for taken branch sampling */
-       if (has_branch_stack(event))
+       /* No support for callchain, stacks and registers */
+       if (has_branch_stack(event) || is_callchain_event(event))
                return -EOPNOTSUPP;
 
        switch (event->attr.type) {
index 5853c9872dfebf7d8f328975f647eab3d2b57429..07aa15ba43b3e5937605a7448bea23f015080111 100644 (file)
@@ -1100,6 +1100,7 @@ void __init setup_arch(char **cmdline_p)
        if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
                nospec_auto_detect();
 
+       jump_label_init();
        parse_early_param();
 #ifdef CONFIG_CRASH_DUMP
        /* Deactivate elfcorehdr= kernel parameter */
index 82df06d720e8c254e4134489b045a34b5485fe18..3b5a4d25ca9b5e069975516050f0517543157ac4 100644 (file)
@@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
                                             _PAGE_YOUNG);
 #ifdef CONFIG_MEM_SOFT_DIRTY
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
-                                            _PAGE_DIRTY);
+                                            _PAGE_SOFT_DIRTY);
 #endif
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
                                             _PAGE_NOEXEC);
index 22a0be655f27abaf4e6aced1f09559b9676e8f7d..1d17413b319a456669efafdcb72fc18b24b741dd 100644 (file)
@@ -62,11 +62,15 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
        long copied;
 
        spin_lock_irqsave(&s390_kernel_write_lock, flags);
-       while (size) {
-               copied = s390_kernel_write_odd(tmp, src, size);
-               tmp += copied;
-               src += copied;
-               size -= copied;
+       if (!(flags & PSW_MASK_DAT)) {
+               memcpy(dst, src, size);
+       } else {
+               while (size) {
+                       copied = s390_kernel_write_odd(tmp, src, size);
+                       tmp += copied;
+                       src += copied;
+                       size -= copied;
+               }
        }
        spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
 
index 08e1d619398ea993716a1fa2ad8ff776cc196342..fdebd286f40236d80939cff20c5d55b417e3fc9b 100644 (file)
@@ -94,7 +94,18 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
                }
                zdev->fh = ccdf->fh;
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
-               zpci_create_device(zdev);
+               ret = zpci_enable_device(zdev);
+               if (ret)
+                       break;
+
+               pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+               if (!pdev)
+                       break;
+
+               pci_bus_add_device(pdev);
+               pci_lock_rescan_remove();
+               pci_bus_add_devices(zdev->zbus->bus);
+               pci_unlock_rescan_remove();
                break;
        case 0x0302: /* Reserved -> Standby */
                if (!zdev) {
index bd3f14175193c35515fc003f47d18b528bf77953..e83b3f14897cc43ba1ea31a820b5014197ad7adf 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+/* Check that the stack and regs on entry from user mode are sane. */
+static void check_user_regs(struct pt_regs *regs)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
+               /*
+                * Make sure that the entry code gave us a sensible EFLAGS
+                * register.  Native because we want to check the actual CPU
+                * state, not the interrupt state as imagined by Xen.
+                */
+               unsigned long flags = native_save_fl();
+               WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
+                                     X86_EFLAGS_NT));
+
+               /* We think we came from user mode. Make sure pt_regs agrees. */
+               WARN_ON_ONCE(!user_mode(regs));
+
+               /*
+                * All entries from user mode (except #DF) should be on the
+                * normal thread stack and should have user pt_regs in the
+                * correct location.
+                */
+               WARN_ON_ONCE(!on_thread_stack());
+               WARN_ON_ONCE(regs != task_pt_regs(current));
+       }
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 /**
  * enter_from_user_mode - Establish state when coming from user mode
@@ -127,9 +153,6 @@ static long syscall_trace_enter(struct pt_regs *regs)
        unsigned long ret = 0;
        u32 work;
 
-       if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
-               BUG_ON(regs != task_pt_regs(current));
-
        work = READ_ONCE(ti->flags);
 
        if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
@@ -346,6 +369,8 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
        struct thread_info *ti;
 
+       check_user_regs(regs);
+
        enter_from_user_mode();
        instrumentation_begin();
 
@@ -409,6 +434,8 @@ static void do_syscall_32_irqs_on(struct pt_regs *regs)
 /* Handles int $0x80 */
 __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 {
+       check_user_regs(regs);
+
        enter_from_user_mode();
        instrumentation_begin();
 
@@ -460,6 +487,8 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
                                        vdso_image_32.sym_int80_landing_pad;
        bool success;
 
+       check_user_regs(regs);
+
        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
@@ -510,6 +539,18 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
 #endif
 }
+
+/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
+__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
+{
+       /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
+       regs->sp = regs->bp;
+
+       /* SYSENTER clobbers EFLAGS.IF.  Assume it was set in usermode. */
+       regs->flags |= X86_EFLAGS_IF;
+
+       return do_fast_syscall_32(regs);
+}
 #endif
 
 SYSCALL_DEFINE0(ni_syscall)
@@ -553,6 +594,7 @@ SYSCALL_DEFINE0(ni_syscall)
 bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 {
        if (user_mode(regs)) {
+               check_user_regs(regs);
                enter_from_user_mode();
                return false;
        }
@@ -686,6 +728,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
  */
 void noinstr idtentry_enter_user(struct pt_regs *regs)
 {
+       check_user_regs(regs);
        enter_from_user_mode();
 }
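
The new check_user_regs() guards its assertions with if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) rather than #ifdef, so the checks are always compiled and type-checked but fold away when the option is off. A minimal userspace sketch of that gating style, with DEBUG_ENTRY as a stand-in constant rather than the kernel macro:

#include <assert.h>
#include <stdio.h>

#define DEBUG_ENTRY 1   /* stand-in for IS_ENABLED(CONFIG_DEBUG_ENTRY); set to 0 and the block folds away */

static void check_state(unsigned long flags, int from_user)
{
        if (DEBUG_ENTRY) {
                /* Cheap invariants, still parsed and type-checked even when disabled. */
                assert(from_user);
                assert(!(flags & 0x4));   /* illustrative "must not leak in" flag */
        }
}

int main(void)
{
        check_state(0x0, 1);
        printf("invariants hold\n");
        return 0;
}
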
 
index 024d7d276cd40bac51d405357209c9dd4634f0f1..2d0bd5d5f0328da9ed7e63e1e8c20a9308822e60 100644 (file)
@@ -933,9 +933,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 
 .Lsysenter_past_esp:
        pushl   $__USER_DS              /* pt_regs->ss */
-       pushl   %ebp                    /* pt_regs->sp (stashed in bp) */
+       pushl   $0                      /* pt_regs->sp (placeholder) */
        pushfl                          /* pt_regs->flags (except IF = 0) */
-       orl     $X86_EFLAGS_IF, (%esp)  /* Fix IF */
        pushl   $__USER_CS              /* pt_regs->cs */
        pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
        pushl   %eax                    /* pt_regs->orig_ax */
@@ -965,7 +964,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
 .Lsysenter_flags_fixed:
 
        movl    %esp, %eax
-       call    do_fast_syscall_32
+       call    do_SYSENTER_32
        /* XEN PV guests always use IRET path */
        ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
                    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
index 0f974ae01e62b832b572e7dbb621fec1493375f3..541fdaf6404533bd2d2558bd0ebc426324f2c96c 100644 (file)
@@ -57,29 +57,30 @@ SYM_CODE_START(entry_SYSENTER_compat)
 
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
-       /*
-        * User tracing code (ptrace or signal handlers) might assume that
-        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
-        * syscall.  Just in case the high bits are nonzero, zero-extend
-        * the syscall number.  (This could almost certainly be deleted
-        * with no ill effects.)
-        */
-       movl    %eax, %eax
-
        /* Construct struct pt_regs on stack */
        pushq   $__USER32_DS            /* pt_regs->ss */
-       pushq   %rbp                    /* pt_regs->sp (stashed in bp) */
+       pushq   $0                      /* pt_regs->sp = 0 (placeholder) */
 
        /*
         * Push flags.  This is nasty.  First, interrupts are currently
-        * off, but we need pt_regs->flags to have IF set.  Second, even
-        * if TF was set when SYSENTER started, it's clear by now.  We fix
-        * that later using TIF_SINGLESTEP.
+        * off, but we need pt_regs->flags to have IF set.  Second, if TF
+        * was set in usermode, it's still set, and we're singlestepping
+        * through this code.  do_SYSENTER_32() will fix up IF.
         */
        pushfq                          /* pt_regs->flags (except IF = 0) */
-       orl     $X86_EFLAGS_IF, (%rsp)  /* Fix saved flags */
        pushq   $__USER32_CS            /* pt_regs->cs */
        pushq   $0                      /* pt_regs->ip = 0 (placeholder) */
+SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+
+       /*
+        * User tracing code (ptrace or signal handlers) might assume that
+        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+        * syscall.  Just in case the high bits are nonzero, zero-extend
+        * the syscall number.  (This could almost certainly be deleted
+        * with no ill effects.)
+        */
+       movl    %eax, %eax
+
        pushq   %rax                    /* pt_regs->orig_ax */
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
@@ -135,7 +136,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
 .Lsysenter_flags_fixed:
 
        movq    %rsp, %rdi
-       call    do_fast_syscall_32
+       call    do_SYSENTER_32
        /* XEN PV guests always use IRET path */
        ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
                    "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
index 2bdc72e6890eca655f89a35dbb20e206bc30bf23..6035df1b49e1a76d2e68dcb1502feae70916a003 100644 (file)
@@ -377,7 +377,8 @@ void __init hyperv_init(void)
 
        hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
                        VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
-                       VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __func__);
+                       VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+                       __builtin_return_address(0));
        if (hv_hypercall_pg == NULL) {
                wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
                goto remove_cpuhp_state;
index 42159f45bf9c429e1f726943fc6cd61ed2f64b55..845e7481ab776e91c8d2a6afe8d942638ef7ddea 100644 (file)
@@ -623,6 +623,11 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
  * MXCSR and XCR definitions:
  */
 
+static inline void ldmxcsr(u32 mxcsr)
+{
+       asm volatile("ldmxcsr %0" :: "m" (mxcsr));
+}
+
 extern unsigned int mxcsr_feature_mask;
 
 #define XCR_XFEATURE_ENABLED_MASK      0x00000000
index cf51c50eb356dd3a67884324bd90a2c39b90d47e..eeac6dc2adaa3bcd1775dfc8bebdf70ea12b3d8b 100644 (file)
@@ -353,10 +353,6 @@ static __always_inline void __##func(struct pt_regs *regs)
 
 #else  /* CONFIG_X86_64 */
 
-/* Maps to a regular IDTENTRY on 32bit for now */
-# define DECLARE_IDTENTRY_IST          DECLARE_IDTENTRY
-# define DEFINE_IDTENTRY_IST           DEFINE_IDTENTRY
-
 /**
  * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
  * @vector:    Vector number (ignored for C)
@@ -387,28 +383,18 @@ __visible noinstr void func(struct pt_regs *regs,                 \
 #endif /* !CONFIG_X86_64 */
 
 /* C-Code mapping */
+#define DECLARE_IDTENTRY_NMI           DECLARE_IDTENTRY_RAW
+#define DEFINE_IDTENTRY_NMI            DEFINE_IDTENTRY_RAW
+
+#ifdef CONFIG_X86_64
 #define DECLARE_IDTENTRY_MCE           DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE            DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE_USER       DEFINE_IDTENTRY_NOIST
 
-#define DECLARE_IDTENTRY_NMI           DECLARE_IDTENTRY_RAW
-#define DEFINE_IDTENTRY_NMI            DEFINE_IDTENTRY_RAW
-
 #define DECLARE_IDTENTRY_DEBUG         DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG          DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG_USER     DEFINE_IDTENTRY_NOIST
-
-/**
- * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points
- * @vector:    Vector number (ignored for C)
- * @func:      Function name of the entry point
- *
- * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM
- * indirection magic.
- */
-#define DECLARE_IDTENTRY_XEN(vector, func)                             \
-       asmlinkage void xen_asm_exc_xen##func(void);                    \
-       asmlinkage void asm_exc_xen##func(void)
+#endif
 
 #else /* !__ASSEMBLY__ */
 
@@ -455,9 +441,6 @@ __visible noinstr void func(struct pt_regs *regs,                   \
 # define DECLARE_IDTENTRY_MCE(vector, func)                            \
        DECLARE_IDTENTRY(vector, func)
 
-# define DECLARE_IDTENTRY_DEBUG(vector, func)                          \
-       DECLARE_IDTENTRY(vector, func)
-
 /* No ASM emitted for DF as this goes through a C shim */
 # define DECLARE_IDTENTRY_DF(vector, func)
 
@@ -469,10 +452,6 @@ __visible noinstr void func(struct pt_regs *regs,                  \
 /* No ASM code emitted for NMI */
 #define DECLARE_IDTENTRY_NMI(vector, func)
 
-/* XEN NMI and DB wrapper */
-#define DECLARE_IDTENTRY_XEN(vector, func)                             \
-       idtentry vector asm_exc_xen##func exc_##func has_error_code=0
-
 /*
  * ASM code to emit the common vector entry stubs where each stub is
  * packed into 8 bytes.
@@ -565,16 +544,28 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_BP,         exc_int3);
 DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF,    exc_page_fault);
 
 #ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_64
 DECLARE_IDTENTRY_MCE(X86_TRAP_MC,      exc_machine_check);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_MC,      exc_machine_check);
+#endif
 #endif
 
 /* NMI */
 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,     exc_nmi);
-DECLARE_IDTENTRY_XEN(X86_TRAP_NMI,     nmi);
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,     xenpv_exc_nmi);
+#endif
 
 /* #DB */
+#ifdef CONFIG_X86_64
 DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB,    exc_debug);
-DECLARE_IDTENTRY_XEN(X86_TRAP_DB,      debug);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB,      exc_debug);
+#endif
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB,      xenpv_exc_debug);
+#endif
 
 /* #DF */
 DECLARE_IDTENTRY_DF(X86_TRAP_DF,       exc_double_fault);
index 17c5a038f42d3978d1b06d7cec5f8c8afd92eaaf..0780f97c185088ce214bf7f57d917fda7aff730b 100644 (file)
@@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data {
 };
 
 struct kvm_vmx_nested_state_hdr {
-       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
-       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
+
+       __u32 flags;
+       __u64 preemption_timer_deadline;
 };
 
 struct kvm_svm_nested_state_data {
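
The kvm_vmx_nested_state_hdr change moves the newly added flags and preemption_timer_deadline fields behind the pre-existing ones; the usual motivation for such a reorder is that appending fields keeps the offsets old userspace was compiled against stable, while inserting them up front shifts everything. A simplified stand-in (not the real KVM layout) that makes the offset shift visible:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins, not the actual UAPI structures. */
struct v1     { uint64_t vmxon_pa; uint64_t vmcs12_pa; };                 /* original layout */
struct insert { uint32_t flags; uint64_t vmxon_pa; uint64_t vmcs12_pa; }; /* new field in front */
struct append { uint64_t vmxon_pa; uint64_t vmcs12_pa; uint32_t flags; }; /* new field at the end */

int main(void)
{
        printf("v1:     vmxon_pa at offset %zu\n", offsetof(struct v1, vmxon_pa));
        printf("insert: vmxon_pa at offset %zu\n", offsetof(struct insert, vmxon_pa));
        printf("append: vmxon_pa at offset %zu\n", offsetof(struct append, vmxon_pa));
        return 0;
}
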
index c25a67a34bd3d9a9b60b9cf65d76e3833baa282f..0ab48f1cdf848faf6e9e6429d85f4dc815324f68 100644 (file)
@@ -49,6 +49,13 @@ enum split_lock_detect_state {
 static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
 static u64 msr_test_ctrl_cache __ro_after_init;
 
+/*
+ * With a name like MSR_TEST_CTL it should go without saying, but don't touch
+ * MSR_TEST_CTL unless the CPU is one of the whitelisted models.  Writing it
+ * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
+ */
+static bool cpu_model_supports_sld __ro_after_init;
+
 /*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
@@ -1071,7 +1078,8 @@ static void sld_update_msr(bool on)
 
 static void split_lock_init(void)
 {
-       split_lock_verify_msr(sld_state != sld_off);
+       if (cpu_model_supports_sld)
+               split_lock_verify_msr(sld_state != sld_off);
 }
 
 static void split_lock_warn(unsigned long ip)
@@ -1177,5 +1185,6 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
                return;
        }
 
+       cpu_model_supports_sld = true;
        split_lock_setup();
 }
index fbe89a92ff361d0b7f72e02207a0a5c9abcd7645..14e4b4d17ee5bcbe909f47c5db3c550e9c07c08b 100644 (file)
@@ -1901,6 +1901,8 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
 
 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
+       WARN_ON_ONCE(user_mode(regs));
+
        /*
         * Only required when from kernel mode. See
         * mce_check_crashing_cpu() for details.
@@ -1954,7 +1956,7 @@ DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
 }
 #else
 /* 32bit unified entry point */
-DEFINE_IDTENTRY_MCE(exc_machine_check)
+DEFINE_IDTENTRY_RAW(exc_machine_check)
 {
        unsigned long dr7;
 
index 06c818967bb63765aa10be6b7ce7033f5c055282..15247b96c6eaaa8ddccd6ac695b3cc3feaa01f24 100644 (file)
@@ -101,6 +101,12 @@ void kernel_fpu_begin(void)
                copy_fpregs_to_fpstate(&current->thread.fpu);
        }
        __cpu_invalidate_fpregs_state();
+
+       if (boot_cpu_has(X86_FEATURE_XMM))
+               ldmxcsr(MXCSR_DEFAULT);
+
+       if (boot_cpu_has(X86_FEATURE_FPU))
+               asm volatile ("fninit");
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 
index 8748321c448674b0dc16a09d1f09ce7850bae15a..b8aee71840ae507a0b93f3187350c75799dc8f94 100644 (file)
@@ -29,6 +29,8 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable_areas.h>
 
+#include <xen/xen.h>
+
 /* This is a multiple of PAGE_SIZE. */
 #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
 
@@ -543,6 +545,28 @@ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
        return bytecount;
 }
 
+static bool allow_16bit_segments(void)
+{
+       if (!IS_ENABLED(CONFIG_X86_16BIT))
+               return false;
+
+#ifdef CONFIG_XEN_PV
+       /*
+        * Xen PV does not implement ESPFIX64, which means that 16-bit
+        * segments will not work correctly.  Until either Xen PV implements
+        * ESPFIX64 and can signal this fact to the guest or unless someone
+        * provides compelling evidence that allowing broken 16-bit segments
+        * is worthwhile, disallow 16-bit segments under Xen PV.
+        */
+       if (xen_pv_domain()) {
+               pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
+               return false;
+       }
+#endif
+
+       return true;
+}
+
 static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 {
        struct mm_struct *mm = current->mm;
@@ -574,7 +598,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
-               if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+               if (!ldt_info.seg_32bit && !allow_16bit_segments()) {
                        error = -EINVAL;
                        goto out;
                }
index f58679e487f6caf9ddbf750d72c48944a84bfca9..b038695f36c5e44e046b9897a3d515d5bc5f86c7 100644 (file)
@@ -869,6 +869,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
        instrumentation_begin();
        trace_hardirqs_off_finish();
 
+       /*
+        * If something gets miswired and we end up here for a user mode
+        * #DB, we will malfunction.
+        */
+       WARN_ON_ONCE(user_mode(regs));
+
        /*
         * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
         * watchpoint at the same time then that will still be handled.
@@ -887,6 +893,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 static __always_inline void exc_debug_user(struct pt_regs *regs,
                                           unsigned long dr6)
 {
+       /*
+        * If something gets miswired and we end up here for a kernel mode
+        * #DB, we will malfunction.
+        */
+       WARN_ON_ONCE(!user_mode(regs));
+
        idtentry_enter_user(regs);
        instrumentation_begin();
 
@@ -917,7 +929,7 @@ DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
 }
 #else
 /* 32 bit does not have separate entry points. */
-DEFINE_IDTENTRY_DEBUG(exc_debug)
+DEFINE_IDTENTRY_RAW(exc_debug)
 {
        unsigned long dr6, dr7;
 
index ff2d0e9ca3bc0dc45b485e8c406be95bd7581f48..cfe83d4ae625216bec42947b24b73976ffb7505d 100644 (file)
@@ -7,7 +7,7 @@
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS                              \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
 
 #define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                \
 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
index 76817d13c86ed886e1b694e898de744d9f028653..6d6a0ae7800c60d75c5aae424cc9bc5707d88f5e 100644 (file)
@@ -4449,7 +4449,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                        nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
                        rsvd_bits(maxphyaddr, 51);
                rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
-                       nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+                       gbpages_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
                rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
index d1af20b050a870e5b7b07666c8bc3d8ddbb3b51a..d4a4cec034d00bf7ee4b7afb1703c40a0b7c7b54 100644 (file)
@@ -4109,7 +4109,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
         * CR0_GUEST_HOST_MASK is already set in the original vmcs01
         * (KVM doesn't change it);
         */
-       vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
        vmx_set_cr0(vcpu, vmcs12->host_cr0);
 
        /* Same as above - no reason to call set_cr4_guest_host_mask().  */
@@ -4259,7 +4259,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
         */
        vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
 
-       vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
        vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
 
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
@@ -6176,6 +6176,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                        goto error_guest_mode;
        }
 
+       vmx->nested.has_preemption_timer_deadline = false;
        if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
                vmx->nested.has_preemption_timer_deadline = true;
                vmx->nested.preemption_timer_deadline =
index cb22f33bf1d8041d83738ac86b1e3afd1ef6c6ab..13745f2a5ecdf53ebd72560ceac358cc87dd7bf5 100644 (file)
@@ -133,9 +133,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #define KVM_VM_CR0_ALWAYS_ON                           \
        (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
         X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS                                     \
-       (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
 
 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -4034,9 +4031,9 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
-       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
-       if (enable_ept)
-               vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+       if (!enable_ept)
+               vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
        if (is_guest_mode(&vmx->vcpu))
                vmx->vcpu.arch.cr4_guest_owned_bits &=
                        ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
@@ -4333,8 +4330,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
        /* 22.2.1, 20.8.1 */
        vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-       vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
-       vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+       vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
        set_cr4_guest_host_mask(vmx);
 
index 3b92db412335d38ecc4367409e9c6c98bb9af55b..88c593f83b28501a036310521eb14291eed59bed 100644 (file)
@@ -975,6 +975,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
+               if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+                       return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
@@ -2693,6 +2695,9 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
        if (data & 0x30)
                return 1;
 
+       if (!lapic_in_kernel(vcpu))
+               return 1;
+
        vcpu->arch.apf.msr_en_val = data;
 
        if (!kvm_pv_async_pf_enabled(vcpu)) {
index acc49fa6a0971da82d2d78cf962ca8bcb979d965..0d68948c82ad6f13a637d2e88ad6f86cfa31d91f 100644 (file)
@@ -598,6 +598,26 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
 }
 
 #ifdef CONFIG_X86_64
+void noist_exc_debug(struct pt_regs *regs);
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
+{
+       /* On Xen PV, NMI doesn't use IST.  The C part is the same as native. */
+       exc_nmi(regs);
+}
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
+{
+       /*
+        * There's no IST on Xen PV, but we still need to dispatch
+        * to the correct handler.
+        */
+       if (user_mode(regs))
+               noist_exc_debug(regs);
+       else
+               exc_debug(regs);
+}
+
 struct trap_array_entry {
        void (*orig)(void);
        void (*xen)(void);
@@ -609,18 +629,18 @@ struct trap_array_entry {
        .xen            = xen_asm_##func,               \
        .ist_okay       = ist_ok }
 
-#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) {      \
+#define TRAP_ENTRY_REDIR(func, ist_ok) {               \
        .orig           = asm_##func,                   \
-       .xen            = xen_asm_##xenfunc,            \
+       .xen            = xen_asm_xenpv_##func,         \
        .ist_okay       = ist_ok }
 
 static struct trap_array_entry trap_array[] = {
-       TRAP_ENTRY_REDIR(exc_debug, exc_xendebug,       true  ),
+       TRAP_ENTRY_REDIR(exc_debug,                     true  ),
        TRAP_ENTRY(exc_double_fault,                    true  ),
 #ifdef CONFIG_X86_MCE
        TRAP_ENTRY(exc_machine_check,                   true  ),
 #endif
-       TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi,           true  ),
+       TRAP_ENTRY_REDIR(exc_nmi,                       true  ),
        TRAP_ENTRY(exc_int3,                            false ),
        TRAP_ENTRY(exc_overflow,                        false ),
 #ifdef CONFIG_IA32_EMULATION
index 5d252aaeade8b3a327bc5b80addec37222ed66b7..aab1d99b2b480b3f8ef2a337a95e8764b389aa65 100644 (file)
@@ -29,10 +29,9 @@ _ASM_NOKPROBE(xen_\name)
 .endm
 
 xen_pv_trap asm_exc_divide_error
-xen_pv_trap asm_exc_debug
-xen_pv_trap asm_exc_xendebug
+xen_pv_trap asm_xenpv_exc_debug
 xen_pv_trap asm_exc_int3
-xen_pv_trap asm_exc_xennmi
+xen_pv_trap asm_xenpv_exc_nmi
 xen_pv_trap asm_exc_overflow
 xen_pv_trap asm_exc_bounds
 xen_pv_trap asm_exc_invalid_op
@@ -161,10 +160,22 @@ SYM_FUNC_END(xen_syscall32_target)
 
 /* 32-bit compat sysenter target */
 SYM_FUNC_START(xen_sysenter_target)
-       mov 0*8(%rsp), %rcx
-       mov 1*8(%rsp), %r11
-       mov 5*8(%rsp), %rsp
-       jmp entry_SYSENTER_compat
+       /*
+        * NB: Xen is polite and clears TF from EFLAGS for us.  This means
+        * that we don't need to guard against single step exceptions here.
+        */
+       popq %rcx
+       popq %r11
+
+       /*
+        * Neither Xen nor the kernel really knows what the old SS and
+        * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
+        * report those values even though Xen will guess its own values.
+        */
+       movq $__USER32_DS, 4*8(%rsp)
+       movq $__USER32_CS, 1*8(%rsp)
+
+       jmp entry_SYSENTER_compat_after_hwframe
 SYM_FUNC_END(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
index 9bae79f70301339a20f08b5921d65d0ff6f76605..99fcd63ce597f6e781ec995afcacfef952463b2a 100644 (file)
@@ -362,9 +362,7 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
        struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
        unsigned i;
 
-       for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
-            i < XCHAL_NUM_PERF_COUNTERS;
-            i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
+       for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
                uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
                struct perf_event *event = ev->event[i];
                struct hw_perf_event *hwc = &event->hw;
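
for_each_set_bit() visits only the bits that are set, lowest first, exactly like the open-coded find_first_bit()/find_next_bit() loop it replaces here. A small userspace sketch of that visiting order for a single word (the kernel macro additionally handles bitmaps spanning several unsigned longs):

#include <stdio.h>

/* Visit the set bits of a word, lowest first. */
static void visit_set_bits(unsigned long mask)
{
        while (mask) {
                unsigned int bit = __builtin_ctzl(mask); /* index of lowest set bit */
                printf("bit %u is set\n", bit);
                mask &= mask - 1;                        /* clear that bit */
        }
}

int main(void)
{
        visit_set_bits(0x15UL); /* bits 0, 2 and 4 */
        return 0;
}
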
index d9204dc2656e718a3826e703819418d409036e25..be2c78f7169500e73fb6aded1803b6cd69ed9e71 100644 (file)
@@ -724,7 +724,8 @@ c_start(struct seq_file *f, loff_t *pos)
 static void *
 c_next(struct seq_file *f, void *v, loff_t *pos)
 {
-       return NULL;
+       ++*pos;
+       return c_start(f, pos);
 }
 
 static void
index 4092555828b13a25305f250863ad2e4d42016727..24cf6972eacea605f34c2386b308680bb724be35 100644 (file)
@@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void)
 }
 EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
 
-unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
 {
        BUG();
 }
 EXPORT_SYMBOL(__sync_fetch_and_and_4);
 
-unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
 {
        BUG();
 }
index 4707e90b8ee55fda718dcdb7ea06d6746cc4a0b6..9ffd7e2895547676ed801c099306f5a728918840 100644 (file)
@@ -24,7 +24,8 @@ void blk_flush_integrity(void)
        flush_workqueue(kintegrityd_wq);
 }
 
-void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip)
+static void __bio_integrity_free(struct bio_set *bs,
+                                struct bio_integrity_payload *bip)
 {
        if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
                if (bip->bip_vec)
index 15df3a36e9fa43f9d1daa8b03938fd4d0e7cede0..e0b2bc131bf546039be96f8cdfeabd8e5a218e6f 100644 (file)
@@ -125,6 +125,9 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
+       QUEUE_FLAG_NAME(PCI_P2PDMA),
+       QUEUE_FLAG_NAME(ZONE_RESETALL),
+       QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
 };
 #undef QUEUE_FLAG_NAME
 
index a9aa6d1e44cf32bf61e7e7fa068662776f762c09..4e0d173beaa3526bc3f07a2bc13870522d0b359c 100644 (file)
@@ -828,10 +828,10 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
                               void *priv, bool reserved)
 {
        /*
-        * If we find a request that is inflight and the queue matches,
+        * If we find a request that isn't idle and the queue matches,
         * we know the queue is busy. Return false to stop the iteration.
         */
-       if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
+       if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
                bool *busy = priv;
 
                *busy = true;
index c2ef41b3147ba95bbcf57bc678286d7464654e98..35abcb1ec051d566048fb8761389c052d79dfcb0 100644 (file)
@@ -374,8 +374,7 @@ void blk_ksm_destroy(struct blk_keyslot_manager *ksm)
        if (!ksm)
                return;
        kvfree(ksm->slot_hashtable);
-       memzero_explicit(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
-       kvfree(ksm->slots);
+       kvfree_sensitive(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
        memzero_explicit(ksm, sizeof(*ksm));
 }
 EXPORT_SYMBOL_GPL(blk_ksm_destroy);
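
kvfree_sensitive() folds the explicit zeroing and the free into one call. A rough userspace analogue of the same idea, with free_sensitive() as a hypothetical helper and a compiler barrier standing in for what memzero_explicit() guarantees:

#include <stdlib.h>
#include <string.h>

/* Zero, then free -- the memzero_explicit() + kvfree() pairing the patch
 * collapses into kvfree_sensitive().  The empty asm with a "memory" clobber
 * keeps the compiler from discarding the memset as a dead store. */
static void free_sensitive(void *p, size_t len)
{
        if (!p)
                return;
        memset(p, 0, len);
        __asm__ __volatile__("" : : "r" (p) : "memory");
        free(p);
}

int main(void)
{
        char *key = malloc(32);

        if (key)
                free_sensitive(key, 32);
        return 0;
}
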
index b1cd3535c52560ab39966664fcf8b7b6d8b57036..28fc323e3fe3042bb169e29ed9a1024ef7c4c0c8 100644 (file)
@@ -128,21 +128,15 @@ EXPORT_SYMBOL_GPL(af_alg_release);
 void af_alg_release_parent(struct sock *sk)
 {
        struct alg_sock *ask = alg_sk(sk);
-       unsigned int nokey = ask->nokey_refcnt;
-       bool last = nokey && !ask->refcnt;
+       unsigned int nokey = atomic_read(&ask->nokey_refcnt);
 
        sk = ask->parent;
        ask = alg_sk(sk);
 
-       local_bh_disable();
-       bh_lock_sock(sk);
-       ask->nokey_refcnt -= nokey;
-       if (!last)
-               last = !--ask->refcnt;
-       bh_unlock_sock(sk);
-       local_bh_enable();
+       if (nokey)
+               atomic_dec(&ask->nokey_refcnt);
 
-       if (last)
+       if (atomic_dec_and_test(&ask->refcnt))
                sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(af_alg_release_parent);
@@ -187,7 +181,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = -EBUSY;
        lock_sock(sk);
-       if (ask->refcnt | ask->nokey_refcnt)
+       if (atomic_read(&ask->refcnt))
                goto unlock;
 
        swap(ask->type, type);
@@ -236,7 +230,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
        int err = -EBUSY;
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
                goto unlock;
 
        type = ask->type;
@@ -301,12 +295,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
        if (err)
                goto unlock;
 
-       if (nokey || !ask->refcnt++)
+       if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
                sock_hold(sk);
-       ask->nokey_refcnt += nokey;
+       if (nokey) {
+               atomic_inc(&ask->nokey_refcnt);
+               atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
+       }
        alg_sk(sk2)->parent = sk;
        alg_sk(sk2)->type = type;
-       alg_sk(sk2)->nokey_refcnt = nokey;
 
        newsock->ops = type->ops;
        newsock->state = SS_CONNECTED;
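
The af_alg rework replaces the bh-locked counter pair with plain atomics, and the release side becomes the familiar atomic_dec_and_test() shape seen in af_alg_release_parent() above. A self-contained C11 sketch of that reference-drop idiom, using a hypothetical obj type rather than the af_alg structures:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refcnt;      /* the creator starts it at 1 */
        /* ... payload ... */
};

static void obj_get(struct obj *o)
{
        atomic_fetch_add_explicit(&o->refcnt, 1, memory_order_relaxed);
}

static void obj_put(struct obj *o)
{
        /* Last reference frees the object -- the shape of
         * "if (atomic_dec_and_test(&ask->refcnt)) sock_put(sk);" above. */
        if (atomic_fetch_sub_explicit(&o->refcnt, 1, memory_order_acq_rel) == 1)
                free(o);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refcnt, 1);
        obj_get(o);     /* second user */
        obj_put(o);     /* drops to 1, object survives */
        obj_put(o);     /* drops to 0, freed */
        return 0;
}
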
index eb1910b6d434c8bfab5c3a1119b6ad2de97a08e2..0ae000a61c7f5bf735934d87d8ec61c4537afe5f 100644 (file)
@@ -384,7 +384,7 @@ static int aead_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -396,11 +396,8 @@ static int aead_check_key(struct socket *sock)
        if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
index da1ffa4f7f8dafc04806cf5f5fe5fb5f58eba191..e71727c25a7db7c82155e2a605969af7dd92dcc6 100644 (file)
@@ -301,7 +301,7 @@ static int hash_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -313,11 +313,8 @@ static int hash_check_key(struct socket *sock)
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
index 4c3bdffe0c3a578bb49f7d51ab2ceb50655addca..ec5567c87a6df4f61792585d45d3c835b6d2a78e 100644 (file)
@@ -211,7 +211,7 @@ static int skcipher_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -223,11 +223,8 @@ static int skcipher_check_key(struct socket *sock)
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
index d7f43d4ea925a0dcaa1fbce77dbef3b81fc4bc62..e5fae4e838c067d2fb27996ee5446e6879638bcb 100644 (file)
@@ -119,6 +119,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
+       ret = -ENOMEM;
        key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
                      GFP_KERNEL);
        if (!key)
index 5fab7e350db87790a53e7fbcade726968cda5745..92b996a564d0f9e08e353460c67f49420941d49b 100644 (file)
@@ -228,6 +228,7 @@ static const struct acpi_device_id int3407_device_ids[] = {
        {"INT3407", 0},
        {"INT3532", 0},
        {"INTC1047", 0},
+       {"INTC1050", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
index 873e039ad4b70529c73f16a5d1fe6ae90e269908..62873388b24f7d94100d6f6a713cc550d90a8150 100644 (file)
@@ -25,8 +25,8 @@ static int acpi_fan_remove(struct platform_device *pdev);
 
 static const struct acpi_device_id fan_device_ids[] = {
        {"PNP0C0B", 0},
-       {"INT1044", 0},
        {"INT3404", 0},
+       {"INTC1044", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
index 43cff01a5a675d47cd9bcb60ffcf1440ac8fc871..ce7e9f223b20b821e2760f955f6df5bfec8f853e 100644 (file)
@@ -1033,25 +1033,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
             test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
-               sockfd_put(sock);
-               return -EBUSY;
+               err = -EBUSY;
+               goto put_socket;
+       }
+
+       nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
+       if (!nsock) {
+               err = -ENOMEM;
+               goto put_socket;
        }
 
        socks = krealloc(config->socks, (config->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks) {
-               sockfd_put(sock);
-               return -ENOMEM;
+               kfree(nsock);
+               err = -ENOMEM;
+               goto put_socket;
        }
 
        config->socks = socks;
 
-       nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
-       if (!nsock) {
-               sockfd_put(sock);
-               return -ENOMEM;
-       }
-
        nsock->fallback_index = -1;
        nsock->dead = false;
        mutex_init(&nsock->tx_lock);
@@ -1063,6 +1064,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        atomic_inc(&config->live_connections);
 
        return 0;
+
+put_socket:
+       sockfd_put(sock);
+       return err;
 }
 
 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
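
The nbd_add_socket() rework funnels every failure path through a single put_socket: label so sockfd_put() runs exactly once, instead of repeating the cleanup at each error return. The same goto-unwind shape in a self-contained sketch with made-up resource names:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Acquire two resources; every later failure unwinds through one label,
 * so the first resource is released exactly once on all error paths. */
static int setup_pair(char **out_a, char **out_b)
{
        char *a, *b;
        int err;

        a = malloc(32);
        if (!a)
                return -ENOMEM;

        b = strdup("config");
        if (!b) {
                err = -ENOMEM;
                goto put_a;
        }

        *out_a = a;
        *out_b = b;
        return 0;

put_a:
        free(a);
        return err;
}

int main(void)
{
        char *a, *b;

        if (setup_pair(&a, &b) == 0) {
                free(b);
                free(a);
        }
        return 0;
}
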
index 9d21bf0f155eed65fa150d71e6b92104fd701960..980df853ee4970ba4c4291bb9bd4d0c382c97307 100644 (file)
@@ -878,6 +878,7 @@ out_put_disk:
        put_disk(vblk->disk);
 out_free_vq:
        vdev->config->del_vqs(vdev);
+       kfree(vblk->vqs);
 out_free_vblk:
        kfree(vblk);
 out_free_index:
index 35333b65acd1a66c3156d2a928cbe87cb6bca74c..7c617edff4ca2968ef24fc2ae457233fa433e511 100644 (file)
@@ -210,7 +210,7 @@ static int st33zp24_i2c_request_resources(struct i2c_client *client)
 
 /*
  * st33zp24_i2c_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
  * @param: id, the i2c_device_id struct.
  * @return: 0 in case of success.
  *      -1 in other case.
index 26e09de50f1e010d63da4237fbba7043af31b8bb..a75dafd3944517dda0eacf52175062dbd5bf9ea4 100644 (file)
@@ -329,7 +329,7 @@ static int st33zp24_spi_request_resources(struct spi_device *dev)
 
 /*
  * st33zp24_spi_probe initialize the TPM device
- * @param: dev, the spi_device drescription (TPM SPI description).
+ * @param: dev, the spi_device description (TPM SPI description).
  * @return: 0 in case of success.
  *      or a negative value describing the error.
  */
@@ -378,7 +378,7 @@ static int st33zp24_spi_probe(struct spi_device *dev)
 
 /*
  * st33zp24_spi_remove remove the TPM device
- * @param: client, the spi_device drescription (TPM SPI description).
+ * @param: client, the spi_device description (TPM SPI description).
  * @return: 0 in case of success.
  */
 static int st33zp24_spi_remove(struct spi_device *dev)
index 37bb13f516be668f2ef44f1e51d2d269dbd4ab0f..4ec10ab5e5766e1d025a7e9998930a11a2f9d713 100644 (file)
@@ -502,7 +502,7 @@ static const struct tpm_class_ops st33zp24_tpm = {
 
 /*
  * st33zp24_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
  * @param: id, the i2c_device_id struct.
  * @return: 0 in case of success.
  *      -1 in other case.
index 87f4493402021b6af277881cbdb928cddbdf10db..1784530b8387bb46bec8694a49ddb5385833be80 100644 (file)
@@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
                goto out;
        }
 
-       /* atomic tpm command send and result receive. We only hold the ops
-        * lock during this period so that the tpm can be unregistered even if
-        * the char dev is held open.
-        */
-       if (tpm_try_get_ops(priv->chip)) {
-               ret = -EPIPE;
-               goto out;
-       }
-
        priv->response_length = 0;
        priv->response_read = false;
        *off = 0;
@@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
        if (file->f_flags & O_NONBLOCK) {
                priv->command_enqueued = true;
                queue_work(tpm_dev_wq, &priv->async_work);
-               tpm_put_ops(priv->chip);
                mutex_unlock(&priv->buffer_mutex);
                return size;
        }
 
+       /* atomic tpm command send and result receive. We only hold the ops
+        * lock during this period so that the tpm can be unregistered even if
+        * the char dev is held open.
+        */
+       if (tpm_try_get_ops(priv->chip)) {
+               ret = -EPIPE;
+               goto out;
+       }
+
        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);
index 09fe45246b8cc0d65760eecff54af9bdbd2997ef..994385bf37c0c044181f0ec2743779d80e09b38b 100644 (file)
@@ -683,13 +683,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
        if (rc)
                goto init_irq_cleanup;
 
-       if (!strcmp(id->compat, "IBM,vtpm20")) {
-               chip->flags |= TPM_CHIP_FLAG_TPM2;
-               rc = tpm2_get_cc_attrs_tbl(chip);
-               if (rc)
-                       goto init_irq_cleanup;
-       }
-
        if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
                                ibmvtpm->rtce_buf != NULL,
                                HZ)) {
@@ -697,6 +690,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto init_irq_cleanup;
        }
 
+       if (!strcmp(id->compat, "IBM,vtpm20")) {
+               chip->flags |= TPM_CHIP_FLAG_TPM2;
+               rc = tpm2_get_cc_attrs_tbl(chip);
+               if (rc)
+                       goto init_irq_cleanup;
+       }
+
        return tpm_chip_register(chip);
 init_irq_cleanup:
        do {
index e7df342a317d6cf4be4f83e690af28b7d6be9c64..0b214963539de2ee6977f85344f9b12d3c5dae57 100644 (file)
@@ -235,6 +235,13 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
        return tpm_tis_init(&pnp_dev->dev, &tpm_info);
 }
 
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). This commit added IFX0102 device ID, which is also used by
+ * tpm_infineon but ignored to add quirks to probe which driver ought to be
+ * used.
+ */
+
 static struct pnp_device_id tpm_pnp_tbl[] = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
index 2435216bd10aaac1856f48a30c507a532bf19deb..65ab1b027949c7cb59db9a19df6b6d998cc5f2d8 100644 (file)
@@ -1085,7 +1085,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 
        return 0;
 out_err:
-       if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+       if (chip->ops->clk_enable != NULL)
                chip->ops->clk_enable(chip, false);
 
        tpm_tis_remove(chip);
index d967559355296e1164ab72c39a08bd9709bfc770..3856f6ebcb34f712ff983cc3e15c9fc804f6c870 100644 (file)
@@ -53,8 +53,6 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
 
        if ((phy->iobuf[3] & 0x01) == 0) {
                // handle SPI wait states
-               phy->iobuf[0] = 0;
-
                for (i = 0; i < TPM_RETRY; i++) {
                        spi_xfer->len = 1;
                        spi_message_init(&m);
@@ -104,6 +102,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
                if (ret < 0)
                        goto exit;
 
+               /* Flow control transfers are receive only */
+               spi_xfer.tx_buf = NULL;
                ret = phy->flow_control(phy, &spi_xfer);
                if (ret < 0)
                        goto exit;
@@ -113,9 +113,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
                spi_xfer.delay.value = 5;
                spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
 
-               if (in) {
-                       spi_xfer.tx_buf = NULL;
-               } else if (out) {
+               if (out) {
+                       spi_xfer.tx_buf = phy->iobuf;
                        spi_xfer.rx_buf = NULL;
                        memcpy(phy->iobuf, out, transfer_len);
                        out += transfer_len;
@@ -288,6 +287,7 @@ static struct spi_driver tpm_tis_spi_driver = {
                .pm = &tpm_tis_pm,
                .of_match_table = of_match_ptr(of_tis_spi_match),
                .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
        .probe = tpm_tis_spi_driver_probe,
        .remove = tpm_tis_spi_remove,
index 69934c0c3dd85f49d69a4f79b5d03eb31a640b27..326f91b2dda9fdc4e474d6156c5d7d538f8fa224 100644 (file)
@@ -50,6 +50,7 @@ source "drivers/clk/versatile/Kconfig"
 config CLK_HSDK
        bool "PLL Driver for HSDK platform"
        depends on OF || COMPILE_TEST
+       depends on IOMEM
        help
          This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs
          control.
index 99afc949925f0fe53efc80187b4501d1005b2ea7..177368cac6dd6afa25993bc206dd42ac65fbcae0 100644 (file)
@@ -131,6 +131,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = {
        { 0 }
 };
 
+static const struct clk_div_table ast2600_emmc_extclk_div_table[] = {
+       { 0x0, 2 },
+       { 0x1, 4 },
+       { 0x2, 6 },
+       { 0x3, 8 },
+       { 0x4, 10 },
+       { 0x5, 12 },
+       { 0x6, 14 },
+       { 0x7, 16 },
+       { 0 }
+};
+
 static const struct clk_div_table ast2600_mac_div_table[] = {
        { 0x0, 4 },
        { 0x1, 4 },
@@ -390,6 +402,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev,
        return hw;
 }
 
+static const char *const emmc_extclk_parent_names[] = {
+       "emmc_extclk_hpll_in",
+       "mpll",
+};
+
 static const char * const vclk_parent_names[] = {
        "dpll",
        "d1pll",
@@ -459,16 +476,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
                return PTR_ERR(hw);
        aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw;
 
-       /* EMMC ext clock divider */
-       hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0,
-                       scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0,
-                       &aspeed_g6_clk_lock);
+       /* EMMC ext clock */
+       hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll",
+                                         0, 1, 2);
        if (IS_ERR(hw))
                return PTR_ERR(hw);
-       hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0,
-                       scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0,
-                       ast2600_div_table,
-                       &aspeed_g6_clk_lock);
+
+       hw = clk_hw_register_mux(dev, "emmc_extclk_mux",
+                                emmc_extclk_parent_names,
+                                ARRAY_SIZE(emmc_extclk_parent_names), 0,
+                                scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1,
+                                0, &aspeed_g6_clk_lock);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux",
+                                 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1,
+                                 15, 0, &aspeed_g6_clk_lock);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       hw = clk_hw_register_divider_table(dev, "emmc_extclk",
+                                          "emmc_extclk_gate", 0,
+                                          scu_g6_base +
+                                               ASPEED_G6_CLK_SELECTION1, 12,
+                                          3, 0, ast2600_emmc_extclk_div_table,
+                                          &aspeed_g6_clk_lock);
        if (IS_ERR(hw))
                return PTR_ERR(hw);
        aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;
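
The new ast2600_emmc_extclk_div_table follows a simple pattern: register field values 0x0 through 0x7 select even divisors 2 through 16, i.e. divisor = 2 * (field + 1). A throwaway check of that mapping:

#include <stdio.h>

int main(void)
{
        /* Mirrors the { field, divisor } pairs in the new table. */
        for (unsigned int field = 0; field <= 7; field++)
                printf("field 0x%x -> divide by %u\n", field, 2 * (field + 1));
        return 0;
}
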
index ded07b0bd0d5e6ea2615d8728687295577e44faa..557d6213783c74c925d3739455b73d06a2015041 100644 (file)
@@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON
 
 config ARMADA_AP_CPU_CLK
        bool
+       select ARMADA_AP_CP_HELPER
 
 config ARMADA_CP110_SYSCON
        bool
index ecf7b7db2d050ee4fd6be01d7d41a0eebfcbe299..6c3e84180146137665e4be0b7c032acbd8a9b8ce 100644 (file)
@@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                .set_next_event_virt = erratum_set_next_event_tval_virt,
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+       {
+               .match_type = ate_match_local_cap_id,
+               .id = (void *)ARM64_WORKAROUND_1418040,
+               .desc = "ARM erratum 1418040",
+               .disable_compat_vdso = true,
+       },
+#endif
 };
 
 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
@@ -566,6 +574,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
        if (wa->read_cntvct_el0) {
                clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
                vdso_default = VDSO_CLOCKMODE_NONE;
+       } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
+               vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
+               clocksource_counter.vdso_clock_mode = vdso_default;
        }
 }
 
index 01ce125f8e8d8a864b82657f095923da4cfa2715..412629601ad3b42e130bb042329ec4e8e925a566 100644 (file)
@@ -54,37 +54,11 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
                             dentry->d_name.name, ret > 0 ? name : "");
 }
 
-static const struct dentry_operations dma_buf_dentry_ops = {
-       .d_dname = dmabuffs_dname,
-};
-
-static struct vfsmount *dma_buf_mnt;
-
-static int dma_buf_fs_init_context(struct fs_context *fc)
-{
-       struct pseudo_fs_context *ctx;
-
-       ctx = init_pseudo(fc, DMA_BUF_MAGIC);
-       if (!ctx)
-               return -ENOMEM;
-       ctx->dops = &dma_buf_dentry_ops;
-       return 0;
-}
-
-static struct file_system_type dma_buf_fs_type = {
-       .name = "dmabuf",
-       .init_fs_context = dma_buf_fs_init_context,
-       .kill_sb = kill_anon_super,
-};
-
-static int dma_buf_release(struct inode *inode, struct file *file)
+static void dma_buf_release(struct dentry *dentry)
 {
        struct dma_buf *dmabuf;
 
-       if (!is_dma_buf_file(file))
-               return -EINVAL;
-
-       dmabuf = file->private_data;
+       dmabuf = dentry->d_fsdata;
 
        BUG_ON(dmabuf->vmapping_counter);
 
@@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        module_put(dmabuf->owner);
        kfree(dmabuf->name);
        kfree(dmabuf);
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+       .d_dname = dmabuffs_dname,
+       .d_release = dma_buf_release,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+       struct pseudo_fs_context *ctx;
+
+       ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+       if (!ctx)
+               return -ENOMEM;
+       ctx->dops = &dma_buf_dentry_ops;
        return 0;
 }
 
+static struct file_system_type dma_buf_fs_type = {
+       .name = "dmabuf",
+       .init_fs_context = dma_buf_fs_init_context,
+       .kill_sb = kill_anon_super,
+};
+
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
        struct dma_buf *dmabuf;
@@ -412,7 +409,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 }
 
 static const struct file_operations dma_buf_fops = {
-       .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
index b175229a4b01d7992c2356d5134a754dd6bf98fe..604f803579312b0059d064ddf0164725549105b5 100644 (file)
@@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
        } else if (dmatest_run) {
                if (!is_threaded_test_pending(info)) {
                        pr_info("No channels configured, continue with any\n");
+                       if (!is_threaded_test_run(info))
+                               stop_threaded_test(info);
                        add_threaded_test(info);
                }
                start_threaded_tests(info);
index 21cb2a58dbd29a0c6486b683565933071a02eacf..a1b56f52db2f2437faa615c661f87d2673da583c 100644 (file)
@@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 {
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 
-       if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
-               return;
-
        dw->initialize_chan(dwc);
 
        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
        channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
-       set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
 }
 
 /*----------------------------------------------------------------------*/
@@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan)
 
 void do_dw_dma_off(struct dw_dma *dw)
 {
-       unsigned int i;
-
        dma_writel(dw, CFG, 0);
 
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw)
 
        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
                cpu_relax();
-
-       for (i = 0; i < dw->dma.chancnt; i++)
-               clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
 }
 
 void do_dw_dma_on(struct dw_dma *dw)
@@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        /* Clear custom channel configuration */
        memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
 
-       clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
-
        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
        channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
index 5697c3622699bd64093541a971193bed495cc932..930ae268c497c845f1f89b5e8f77f0e542088cc7 100644 (file)
@@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
        /*
         * TCD parameters are stored in struct fsl_edma_hw_tcd in little
         * endian format. However, we need to load the TCD registers in
-        * big- or little-endian obeying the eDMA engine model endian.
+        * big- or little-endian to match the eDMA engine model's endianness;
+        * this conversion is handled by the model-specific edma_write helpers
         */
        edma_writew(edma, 0,  &regs->tcd[ch].csr);
-       edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
-       edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
 
-       edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
-       edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
+       edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
+       edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
 
-       edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
-       edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
+       edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
+       edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
 
-       edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
-       edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
-       edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
+       edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
+       edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
 
-       edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+       edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
+       edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
+       edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+
+       edma_writel(edma, (s32)tcd->dlast_sga,
                        &regs->tcd[ch].dlast_sga);
 
-       edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
+       edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
 }
 
 static inline
@@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
 {
        struct virt_dma_desc *vdesc;
 
+       lockdep_assert_held(&fsl_chan->vchan.lock);
+
        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
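Context for the fsl_edma_set_tcd_regs() hunk above: the TCD image stays little-endian in memory, and the byte-order decision now lives entirely in the model-specific edma_write helpers, so the call site no longer converts each field itself. A minimal, self-contained sketch of that pattern (hypothetical names, not the driver's actual code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical engine descriptor; a real driver derives this from the model. */
struct edma_engine {
	int big_endian;
};

static uint32_t swap32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

/*
 * The helper owns the endianness handling: callers pass the value as-is and
 * never sprinkle le32_to_cpu()/cpu_to_be32() around the register writes.
 */
static void edma_writel(const struct edma_engine *edma, uint32_t val,
			uint32_t *reg)
{
	*reg = edma->big_endian ? swap32(val) : val;
}

int main(void)
{
	struct edma_engine be = { .big_endian = 1 };
	uint32_t reg = 0;

	edma_writel(&be, 0x11223344u, &reg);
	printf("register image: 0x%08x\n", reg);	/* 0x44332211 */
	return 0;
}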
index 67e422590c9aedcfbbeedfa4a9ecf8a557a6dbba..ec1169741de130891502b18950b41c48a86c09b7 100644 (file)
@@ -33,7 +33,7 @@
 #define EDMA_TCD_ATTR_DSIZE_16BIT      BIT(0)
 #define EDMA_TCD_ATTR_DSIZE_32BIT      BIT(1)
 #define EDMA_TCD_ATTR_DSIZE_64BIT      (BIT(0) | BIT(1))
-#define EDMA_TCD_ATTR_DSIZE_32BYTE     (BIT(3) | BIT(0))
+#define EDMA_TCD_ATTR_DSIZE_32BYTE     (BIT(2) | BIT(0))
 #define EDMA_TCD_ATTR_SSIZE_8BIT       0
 #define EDMA_TCD_ATTR_SSIZE_16BIT      (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
 #define EDMA_TCD_ATTR_SSIZE_32BIT      (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
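On the EDMA_TCD_ATTR_DSIZE_32BYTE fix above: assuming the usual eDMA TCD ATTR encoding, the SSIZE/DSIZE fields hold the access size as a power-of-two exponent (log2 of the byte count), so an 8-bit access is 0, a 32-bit access is 2, and a 32-byte burst is 5, i.e. BIT(2) | BIT(0), which is what the fix establishes. A tiny illustrative helper under that assumption:

#include <assert.h>
#include <stdio.h>

/*
 * Illustration only: compute a TCD ATTR size field as log2 of the access
 * width in bytes (8-bit -> 0, 16-bit -> 1, 32-bit -> 2, 64-bit -> 3,
 * 32-byte -> 5).
 */
static unsigned int edma_attr_size(unsigned int bytes)
{
	unsigned int exp = 0;

	assert(bytes && !(bytes & (bytes - 1)));	/* power of two only */
	while ((1u << exp) < bytes)
		exp++;
	return exp;
}

int main(void)
{
	printf("32-bit access -> %u\n", edma_attr_size(4));	/* 2 */
	printf("32-byte burst -> %u\n", edma_attr_size(32));	/* 5 */
	return 0;
}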
index eff7ebd8cf356172f8c5d4b15215d192b8821dff..90bb72af306cd4d617551430fce7a4f88c8eeb92 100644 (file)
@@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
                        fsl_chan = &fsl_edma->chans[ch];
 
                        spin_lock(&fsl_chan->vchan.lock);
+
+                       if (!fsl_chan->edesc) {
+                               /* terminate_all called before */
+                               spin_unlock(&fsl_chan->vchan.lock);
+                               continue;
+                       }
+
                        if (!fsl_chan->edesc->iscyclic) {
                                list_del(&fsl_chan->edesc->vdesc.node);
                                vchan_cookie_complete(&fsl_chan->edesc->vdesc);
index ff49847e37a86dd4f28dea8ef1c388187aa8be35..cb376cf6a2d2c3316967dc74dc8e09e84ef6fb5e 100644 (file)
@@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
        struct idxd_device *idxd;
        struct idxd_wq *wq;
        struct device *dev;
+       int rc = 0;
 
        wq = inode_wq(inode);
        idxd = wq->idxd;
@@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 
        dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
 
-       if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
-               return -EBUSY;
-
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
+       mutex_lock(&wq->wq_lock);
+
+       if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
+               rc = -EBUSY;
+               goto failed;
+       }
+
        ctx->wq = wq;
        filp->private_data = ctx;
        idxd_wq_get(wq);
+       mutex_unlock(&wq->wq_lock);
        return 0;
+
+ failed:
+       mutex_unlock(&wq->wq_lock);
+       kfree(ctx);
+       return rc;
 }
 
 static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
        filep->private_data = NULL;
 
        kfree(ctx);
+       mutex_lock(&wq->wq_lock);
        idxd_wq_put(wq);
+       mutex_unlock(&wq->wq_lock);
        return 0;
 }
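The idxd_cdev_open() change above moves the dedicated-queue busy check, the reference bump and the failure unwind under wq_lock, so two racing opens cannot both pass the check. A small userspace sketch of the same locking shape, with hypothetical types and names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a work queue that allows one user when dedicated. */
struct wq {
	pthread_mutex_t lock;
	int refcount;
	int dedicated;
};

struct ctx {
	struct wq *wq;
};

static int wq_open(struct wq *wq, struct ctx **out)
{
	struct ctx *ctx = calloc(1, sizeof(*ctx));
	int rc = 0;

	if (!ctx)
		return -ENOMEM;

	pthread_mutex_lock(&wq->lock);
	if (wq->dedicated && wq->refcount > 0) {
		rc = -EBUSY;		/* already claimed by another opener */
		goto failed;
	}
	ctx->wq = wq;
	wq->refcount++;			/* reference taken under the lock */
	pthread_mutex_unlock(&wq->lock);

	*out = ctx;
	return 0;

failed:
	pthread_mutex_unlock(&wq->lock);
	free(ctx);
	return rc;
}

int main(void)
{
	struct wq wq = { .lock = PTHREAD_MUTEX_INITIALIZER, .dedicated = 1 };
	struct ctx *a, *b;

	printf("first open:  %d\n", wq_open(&wq, &a));	/* 0 */
	printf("second open: %d\n", wq_open(&wq, &b));	/* -EBUSY */
	return 0;
}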
 
index 8d79a8787104d48b0463540e4c281be7f99f0cff..8d2718c585dc6511bedb8a25469e5374ae08abb0 100644 (file)
@@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
        devm_iounmap(dev, wq->dportal);
 }
 
+void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       int i, wq_offset;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
+       wq->type = IDXD_WQT_NONE;
+       wq->size = 0;
+       wq->group = NULL;
+       wq->threshold = 0;
+       wq->priority = 0;
+       clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+       memset(wq->name, 0, WQ_NAME_SIZE);
+
+       for (i = 0; i < 8; i++) {
+               wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+               iowrite32(0, idxd->reg_base + wq_offset);
+               dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+                       wq->id, i, wq_offset,
+                       ioread32(idxd->reg_base + wq_offset));
+       }
+}
+
 /* Device control bits */
 static inline bool idxd_is_enabled(struct idxd_device *idxd)
 {
index b8f8a363b4a71c57213be6d5362228cab8d617b3..908c8d0ef3ab6f41c159f0ab22b6fd8f180da467 100644 (file)
@@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq);
 int idxd_wq_disable(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
+void idxd_wq_disable_cleanup(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
index 6510791b9921b4dabc49a02e8a5423ce8b605a96..8a35f58da689092b8555aac1426b88a0ae02a637 100644 (file)
@@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 
        iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
        if (!err)
-               return IRQ_HANDLED;
+               goto out;
 
        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        if (gensts.state == IDXD_DEVICE_STATE_HALT) {
@@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
                spin_unlock_bh(&idxd->dev_lock);
        }
 
+ out:
        idxd_unmask_msix_vector(idxd, irq_entry->id);
        return IRQ_HANDLED;
 }
index 052dae5d6dddbd6ff25d6a6b151b462b23aee91f..2e2c5082f3220cb4616ac6f218deb0e61a4e1080 100644 (file)
@@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev)
                idxd_unregister_dma_device(idxd);
                spin_lock_irqsave(&idxd->dev_lock, flags);
                rc = idxd_device_disable(idxd);
+               for (i = 0; i < idxd->max_wqs; i++) {
+                       struct idxd_wq *wq = &idxd->wqs[i];
+
+                       idxd_wq_disable_cleanup(wq);
+               }
                spin_unlock_irqrestore(&idxd->dev_lock, flags);
                module_put(THIS_MODULE);
                if (rc < 0)
index 91774039ae5d6345c14faf7ffb4685f2f8429f33..270992c4fe47506020706e1db2f3649ea7fce183 100644 (file)
@@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
        sdma_channel_synchronize(chan);
 
-       if (sdmac->event_id0 >= 0)
-               sdma_event_disable(sdmac, sdmac->event_id0);
+       sdma_event_disable(sdmac, sdmac->event_id0);
        if (sdmac->event_id1)
                sdma_event_disable(sdmac, sdmac->event_id1);
 
@@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan,
        memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
 
        /* Set ENBLn earlier to make sure dma request triggered after that */
-       if (sdmac->event_id0 >= 0) {
-               if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
-                       return -EINVAL;
-               sdma_event_enable(sdmac, sdmac->event_id0);
-       }
+       if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+               return -EINVAL;
+       sdma_event_enable(sdmac, sdmac->event_id0);
 
        if (sdmac->event_id1) {
                if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
index 8ad0ad861c86119ba09824abe2deeb560c44fa83..fd782aee02d92d27626e6a87494e78bc6fe956a5 100644 (file)
 
 #include "../dmaengine.h"
 
+int completion_timeout = 200;
+module_param(completion_timeout, int, 0644);
+MODULE_PARM_DESC(completion_timeout,
+               "set ioat completion timeout [msec] (default 200 [msec])");
+int idle_timeout = 2000;
+module_param(idle_timeout, int, 0644);
+MODULE_PARM_DESC(idle_timeout,
+               "set ioat idle timeout [msec] (default 2000 [msec])");
+
+#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
+#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
+
 static char *chanerr_str[] = {
        "DMA Transfer Source Address Error",
        "DMA Transfer Destination Address Error",
index e6b622e1ba92eee88d3e54bed6af3c8ca8f2ee84..f7f31fdf14cf916578f9c617890506a77f343a73 100644 (file)
@@ -104,8 +104,6 @@ struct ioatdma_chan {
        #define IOAT_RUN 5
        #define IOAT_CHAN_ACTIVE 6
        struct timer_list timer;
-       #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
-       #define IDLE_TIMEOUT msecs_to_jiffies(2000)
        #define RESET_DELAY msecs_to_jiffies(100)
        struct ioatdma_device *ioat_dma;
        dma_addr_t completion_dma;
index e15bd15a9ef6ac572da0086c33bee79ba6251600..e12b754e6398d8cecf2ebaf6cd1444e737341694 100644 (file)
@@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
                        mcf_chan = &mcf_edma->chans[ch];
 
                        spin_lock(&mcf_chan->vchan.lock);
+
+                       if (!mcf_chan->edesc) {
+                               /* terminate_all called before */
+                               spin_unlock(&mcf_chan->vchan.lock);
+                               continue;
+                       }
+
                        if (!mcf_chan->edesc->iscyclic) {
                                list_del(&mcf_chan->edesc->vdesc.node);
                                vchan_cookie_complete(&mcf_chan->edesc->vdesc);
index b218a013c2600f449dac819c2ab11ab6ef74ce5e..8f7ceb698226ca7d29a91e39821e57c27c7863f0 100644 (file)
@@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
                desc->residue = usb_dmac_get_current_residue(chan, desc,
                                                        desc->sg_index - 1);
                desc->done_cookie = desc->vd.tx.cookie;
+               desc->vd.tx_result.result = DMA_TRANS_NOERROR;
+               desc->vd.tx_result.residue = desc->residue;
                vchan_cookie_complete(&desc->vd);
 
                /* Restart the next transfer if this driver has a next desc */
index db58d7e4f9fec992b84a9db283d7894e502a952b..c5fa2ef74abc7f0071c868d1ea776422641e7a58 100644 (file)
@@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
 
        ret = pm_runtime_get_sync(tdc2dev(tdc));
        if (ret < 0) {
+               pm_runtime_put_noidle(tdc2dev(tdc));
                free_irq(tdc->irq, tdc);
                return ret;
        }
@@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
 
        ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(&pdev->dev);
                goto rpm_disable;
+       }
 
        ret = tegra_adma_init(tdma);
        if (ret)
index 0b8f3dd6b146313c42e87c9d434ff04da8c8614b..77e8e67d995b3bd2eb4afe02a74e3e6842fdd4dd 100644 (file)
@@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
        ud = platform_get_drvdata(pdev);
        if (!ud) {
                pr_debug("UDMA has not been probed\n");
+               put_device(&pdev->dev);
                return ERR_PTR(-EPROBE_DEFER);
        }
 
index c91e2dc1bb7262eda458467a84ac8cd72fa046e5..6c879a734360489b905bb08b33e7d14a7c63d9db 100644 (file)
@@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        dev_err(ud->ddev.dev,
                                "Descriptor pool allocation failed\n");
                        uc->use_dma_pool = false;
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err_cleanup;
                }
        }
 
@@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 
                ret = udma_get_chan_pair(uc);
                if (ret)
-                       return ret;
+                       goto err_cleanup;
 
                ret = udma_alloc_tx_resources(uc);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       udma_put_rchan(uc);
+                       goto err_cleanup;
+               }
 
                ret = udma_alloc_rx_resources(uc);
                if (ret) {
                        udma_free_tx_resources(uc);
-                       return ret;
+                       goto err_cleanup;
                }
 
                uc->config.src_thread = ud->psil_base + uc->tchan->id;
@@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        uc->id);
 
                ret = udma_alloc_tx_resources(uc);
-               if (ret) {
-                       uc->config.remote_thread_id = -1;
-                       return ret;
-               }
+               if (ret)
+                       goto err_cleanup;
 
                uc->config.src_thread = ud->psil_base + uc->tchan->id;
                uc->config.dst_thread = uc->config.remote_thread_id;
@@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        uc->id);
 
                ret = udma_alloc_rx_resources(uc);
-               if (ret) {
-                       uc->config.remote_thread_id = -1;
-                       return ret;
-               }
+               if (ret)
+                       goto err_cleanup;
 
                uc->config.src_thread = uc->config.remote_thread_id;
                uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
@@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                /* Can not happen */
                dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
                        __func__, uc->id, uc->config.dir);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_cleanup;
+
        }
 
        /* check if the channel configuration was successful */
@@ -1847,7 +1848,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 
        if (udma_is_chan_running(uc)) {
                dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
-               udma_stop(uc);
+               udma_reset_chan(uc, false);
                if (udma_is_chan_running(uc)) {
                        dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
                        ret = -EBUSY;
@@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 
        udma_reset_rings(uc);
 
-       INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
-                                 udma_check_tx_completion);
        return 0;
 
 err_irq_free:
@@ -1919,7 +1918,7 @@ err_psi_free:
 err_res_free:
        udma_free_tx_resources(uc);
        udma_free_rx_resources(uc);
-
+err_cleanup:
        udma_reset_uchan(uc);
 
        if (uc->use_dma_pool) {
@@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan)
        }
 
        cancel_delayed_work_sync(&uc->tx_drain.work);
-       destroy_delayed_work_on_stack(&uc->tx_drain.work);
 
        if (uc->irq_num_ring > 0) {
                free_irq(uc->irq_num_ring, uc);
@@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev)
                return ret;
        }
 
-       ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+       ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
        if (!ret && ud->atype > 2) {
                dev_err(dev, "Invalid atype: %u\n", ud->atype);
                return -EINVAL;
@@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev)
                tasklet_init(&uc->vc.task, udma_vchan_complete,
                             (unsigned long)&uc->vc);
                init_completion(&uc->teardown_completed);
+               INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
        }
 
        ret = dma_async_device_register(&ud->ddev);
index 873841af8d5758b34c2369269985dee7df290322..3d6ba425dbb9f2f60f32900b0d107fb5e1797bfc 100644 (file)
@@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
 
        cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
                             GFP_KERNEL);
-       if (!cpu_groups)
+       if (!cpu_groups) {
+               free_cpumask_var(tmp);
                return -ENOMEM;
+       }
 
        cpumask_copy(tmp, cpu_online_mask);
 
@@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
                        topology_core_cpumask(cpumask_any(tmp));
 
                if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+                       free_cpumask_var(tmp);
                        free_cpu_groups(num_groups, &cpu_groups);
                        return -ENOMEM;
                }
@@ -196,13 +199,12 @@ static int hotplug_tests(void)
        if (!page_buf)
                goto out_free_cpu_groups;
 
-       err = 0;
        /*
         * Of course the last CPU cannot be powered down and cpu_down() should
         * refuse doing that.
         */
        pr_info("Trying to turn off and on again all CPUs\n");
-       err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+       err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
 
        /*
         * Take down CPUs by cpu group this time. When the last CPU is turned
index 5640efe5e75049a3c3b44f759126dd5bab740181..5bda38e0780f24badccd5167c0db701b2e02640e 100644 (file)
@@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
                ret = pm_runtime_get_sync(chip->parent);
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to resume: %d\n", ret);
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
                }
 
@@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to drop cache: %d\n",
                                ret);
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
                }
 
                ret = regmap_read(arizona->regmap, reg, &val);
-               if (ret < 0)
+               if (ret < 0) {
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
+               }
 
                pm_runtime_mark_last_busy(chip->parent);
                pm_runtime_put_autosuspend(chip->parent);
@@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
                ret = pm_runtime_get_sync(chip->parent);
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to resume: %d\n", ret);
+                       pm_runtime_put(chip->parent);
                        return ret;
                }
        }
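Several fixes in this series (the gpio-arizona hunks above, tegra210-adma earlier, and exynos mic further down) address the same runtime-PM pitfall: pm_runtime_get_sync() increments the device usage count even when the resume fails, so an error path that simply returns leaks a reference and can keep the device from ever suspending again. A schematic kernel-style fragment of the corrected shape, not tied to any particular driver here:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Schematic only: 'dev' is whichever struct device the driver resumes. */
static int example_use_device(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count was bumped even on failure; drop it */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... talk to the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}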
index 1fca8dd7824fdf592efd4f7d01e57503284e815d..a3b9bdedbe443503778ece0a4f85f4a47187dd6c 100644 (file)
@@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, pca953x_id);
 
+#ifdef CONFIG_GPIO_PCA953X_IRQ
+
+#include <linux/dmi.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+
+static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
+       {
+               /*
+                * On the Intel Galileo Gen 2 board, the IRQ pin of one of
+                * the I²C GPIO expanders, which has a GpioInt() resource,
+                * is provided as an absolute number instead of being
+                * relative. Since the first controller (gpio-sch.c) and the
+                * second (gpio-dwapb.c) are at fixed bases, we may safely
+                * refer to the number in the global space to get an IRQ out
+                * of it.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+               },
+       },
+       {}
+};
+
+#ifdef CONFIG_ACPI
+static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
+{
+       struct acpi_resource_gpio *agpio;
+       int *pin = data;
+
+       if (acpi_gpio_get_irq_resource(ares, &agpio))
+               *pin = agpio->pin_table[0];
+       return 1;
+}
+
+static int pca953x_acpi_find_pin(struct device *dev)
+{
+       struct acpi_device *adev = ACPI_COMPANION(dev);
+       int pin = -ENOENT, ret;
+       LIST_HEAD(r);
+
+       ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
+       acpi_dev_free_resource_list(&r);
+       if (ret < 0)
+               return ret;
+
+       return pin;
+}
+#else
+static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
+#endif
+
+static int pca953x_acpi_get_irq(struct device *dev)
+{
+       int pin, ret;
+
+       pin = pca953x_acpi_find_pin(dev);
+       if (pin < 0)
+               return pin;
+
+       dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
+
+       if (!gpio_is_valid(pin))
+               return -EINVAL;
+
+       ret = gpio_request(pin, "pca953x interrupt");
+       if (ret)
+               return ret;
+
+       ret = gpio_to_irq(pin);
+
+       /* When pin is used as an IRQ, no need to keep it requested */
+       gpio_free(pin);
+
+       return ret;
+}
+#endif
+
 static const struct acpi_device_id pca953x_acpi_ids[] = {
        { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
        { }
@@ -322,6 +400,7 @@ static const struct regmap_config pca953x_ai_i2c_regmap = {
        .writeable_reg = pca953x_writeable_register,
        .volatile_reg = pca953x_volatile_register,
 
+       .disable_locking = true,
        .cache_type = REGCACHE_RBTREE,
        .max_register = 0x7f,
 };
@@ -623,8 +702,6 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
        DECLARE_BITMAP(reg_direction, MAX_LINE);
        int level;
 
-       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
-
        if (chip->driver_data & PCA_PCAL) {
                /* Enable latch on interrupt-enabled inputs */
                pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
@@ -635,7 +712,11 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
                pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask);
        }
 
+       /* Switch direction to input if needed */
+       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+
        bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+       bitmap_complement(reg_direction, reg_direction, gc->ngpio);
        bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
 
        /* Look for any newly setup interrupt */
@@ -734,14 +815,16 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
        struct gpio_chip *gc = &chip->gpio_chip;
        DECLARE_BITMAP(pending, MAX_LINE);
        int level;
+       bool ret;
 
-       if (!pca953x_irq_pending(chip, pending))
-               return IRQ_NONE;
+       mutex_lock(&chip->i2c_lock);
+       ret = pca953x_irq_pending(chip, pending);
+       mutex_unlock(&chip->i2c_lock);
 
        for_each_set_bit(level, pending, gc->ngpio)
                handle_nested_irq(irq_find_mapping(gc->irq.domain, level));
 
-       return IRQ_HANDLED;
+       return IRQ_RETVAL(ret);
 }
 
 static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
@@ -752,6 +835,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
        DECLARE_BITMAP(irq_stat, MAX_LINE);
        int ret;
 
+       if (dmi_first_match(pca953x_dmi_acpi_irq_info)) {
+               ret = pca953x_acpi_get_irq(&client->dev);
+               if (ret > 0)
+                       client->irq = ret;
+       }
+
        if (!client->irq)
                return 0;
 
index 58f9d8c3a17ab9692f3cf3694e53aad6d72553ee..44f927641b892d0cf3aa80eb16a946dec801903e 100644 (file)
@@ -204,6 +204,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
                                (mode_info->atom_context->bios + data_offset);
                        switch (crev) {
                        case 11:
+                       case 12:
                                mem_channel_number = igp_info->v11.umachannelnumber;
                                /* channel width is 64 */
                                if (vram_width)
index 47207188c5692ad068217793a957471e13c3a035..4fb4c3b696876869d3128333b47e6d937e117e68 100644 (file)
@@ -37,7 +37,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 
        memset(&ti, 0, sizeof(struct amdgpu_task_info));
 
-       if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+       if (amdgpu_gpu_recovery &&
+           amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                return;
index 16596a9ccabefc60218563effd3fd90577aa2073..02e6f8c4dde084b9b9c24bf1b7dc5b35a8821507 100644 (file)
@@ -2784,7 +2784,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+       return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
@@ -2819,7 +2819,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+       return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
index 7301fdcfb8bce2ded20f147dbd80111af2349858..ef3269c43d4f49ab3c8dd652f519ee6dbe147342 100644 (file)
@@ -372,6 +372,52 @@ static int psp_tmr_load(struct psp_context *psp)
        return ret;
 }
 
+static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
+                                       struct psp_gfx_cmd_resp *cmd)
+{
+       if (amdgpu_sriov_vf(psp->adev))
+               cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
+       else
+               cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
+}
+
+static int psp_tmr_unload(struct psp_context *psp)
+{
+       int ret;
+       struct psp_gfx_cmd_resp *cmd;
+
+       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       psp_prep_tmr_unload_cmd_buf(psp, cmd);
+       DRM_INFO("free PSP TMR buffer\n");
+
+       ret = psp_cmd_submit_buf(psp, NULL, cmd,
+                                psp->fence_buf_mc_addr);
+
+       kfree(cmd);
+
+       return ret;
+}
+
+static int psp_tmr_terminate(struct psp_context *psp)
+{
+       int ret;
+       void *tmr_buf;
+       void **pptr;
+
+       ret = psp_tmr_unload(psp);
+       if (ret)
+               return ret;
+
+       /* free TMR memory buffer */
+       pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+       amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+
+       return 0;
+}
+
 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                uint64_t asd_mc, uint32_t size)
 {
@@ -1779,8 +1825,6 @@ static int psp_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
-       void *tmr_buf;
-       void **pptr;
 
        if (psp->adev->psp.ta_fw) {
                psp_ras_terminate(psp);
@@ -1790,10 +1834,9 @@ static int psp_hw_fini(void *handle)
 
        psp_asd_unload(psp);
 
+       psp_tmr_terminate(psp);
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
-       pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-       amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
        amdgpu_bo_free_kernel(&psp->fw_pri_bo,
                              &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
        amdgpu_bo_free_kernel(&psp->fence_buf_bo,
@@ -1840,6 +1883,18 @@ static int psp_suspend(void *handle)
                }
        }
 
+       ret = psp_asd_unload(psp);
+       if (ret) {
+               DRM_ERROR("Failed to unload asd\n");
+               return ret;
+       }
+
+       ret = psp_tmr_terminate(psp);
+       if (ret) {
+               DRM_ERROR("Failed to terminate tmr\n");
+               return ret;
+       }
+
        ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
        if (ret) {
                DRM_ERROR("PSP ring stop failed\n");
index 10ac8076d4f244a80e91dd05733b426ab0ebf31f..db5e0bb0d9356ea5640415078e7e91828bd342bf 100644 (file)
@@ -1358,7 +1358,7 @@ static int dm_late_init(void *handle)
        struct dmcu *dmcu = NULL;
        bool ret;
 
-       if (!adev->dm.fw_dmcu)
+       if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
                return detect_mst_link_for_all_connectors(adev->ddev);
 
        dmcu = adev->dm.dc->res_pool->dmcu;
index 6f93a6ca4cf0c8e72c33dbbf1eebfa0a52225472..d016f50e187c8d764c44ba3b5b2d59dda80ee019 100644 (file)
@@ -2538,10 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
        copy_stream_update_to_stream(dc, context, stream, stream_update);
 
-       if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
-               DC_ERROR("Mode validation failed for stream update!\n");
-               dc_release_state(context);
-               return;
+       if (update_type > UPDATE_TYPE_FAST) {
+               if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+                       DC_ERROR("Mode validation failed for stream update!\n");
+                       dc_release_state(context);
+                       return;
+               }
        }
 
        commit_planes_for_stream(
index 2fb97554134f5aeaf8592e888cafff9cca236458..c2e0fbbccf56a33bf6b5c6bd88ab8e4fe611fb33 100644 (file)
@@ -522,9 +522,11 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
 
-       ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
-       if (ret)
-               goto err4;
+       if (adev->psp.ras.ras) {
+               ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+               if (ret)
+                       goto err4;
+       }
 
        return 0;
 
@@ -560,7 +562,8 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        struct amdgpu_device *adev = hwmgr->adev;
 
-       smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+       if (adev->psp.ras.ras)
+               smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
 
        if (priv) {
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
index 619f81435c1b2273abe2d61e7ee6d58dc0ce73a5..58b89ec11b0eb36e08583c03be99a5c75e1f6d80 100644 (file)
@@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
                                struct device *subdrv_dev, void **dma_priv)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
-       int ret;
+       int ret = 0;
 
        if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
                DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
@@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
        if (ret)
                clear_dma_max_seg_size(subdrv_dev);
 
-       return 0;
+       return ret;
 }
 
 /*
index fcee33a43aca3ee45a2c9d2584b551dd507c1cd1..03be314271811001c58683d02160233fba716b4c 100644 (file)
@@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev)
 
        g2d->irq = platform_get_irq(pdev, 0);
        if (g2d->irq < 0) {
-               dev_err(dev, "failed to get irq\n");
                ret = g2d->irq;
                goto err_put_clk;
        }
index a86abc173605e5d3840da55b0cafb6cedf903287..3821ea76a7039df6238a79d4f997fc5071b26a71 100644 (file)
@@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
                goto unlock;
 
        ret = pm_runtime_get_sync(mic->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(mic->dev);
                goto unlock;
+       }
 
        mic_set_path(mic, 1);
 
index a6fd0c29e5b89cc5c657cb2c81c8b8a165f10ca9..544b9993c99ed5fc2ca68c3bc53fd63748c3a6ea 100644 (file)
@@ -307,8 +307,6 @@ static int hibmc_load(struct drm_device *dev)
        /* reset all the states of crtc/plane/encoder/connector */
        drm_mode_config_reset(dev);
 
-       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
-
        return 0;
 
 err:
@@ -355,6 +353,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
                          ret);
                goto err_unload;
        }
+
+       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
        return 0;
 
 err_unload:
index 9ea1a397d1b54e813822a42eb5cf05a85b7fe876..26996e1839e2232637c331111666c6422e3f810e 100644 (file)
@@ -3822,6 +3822,17 @@ skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
        return true;
 }
 
+unsigned int
+intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
+{
+       int x = 0, y = 0;
+
+       intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+                                         plane_state->color_plane[0].offset, 0);
+
+       return y;
+}
+
 static int skl_check_main_surface(struct intel_plane_state *plane_state)
 {
        struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
index efb4da205ea292e7a7cecb819249bc1694e66223..3a06f72c985965818cc2068f3e77538de33c0621 100644 (file)
@@ -608,6 +608,7 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
                                   u32 pixel_format, u64 modifier,
                                   unsigned int rotation);
 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
 
 struct intel_display_error_state *
 intel_display_capture_error_state(struct drm_i915_private *dev_priv);
index 1c26673acb2dd8bd603c427a1eddae38b86761e6..a65d9d8b79a7674e34e84088572af5ec0fd745ea 100644 (file)
 #include "intel_fbc.h"
 #include "intel_frontbuffer.h"
 
-/*
- * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
- * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
- * origin so the x and y offsets can actually fit the registers. As a
- * consequence, the fence doesn't really start exactly at the display plane
- * address we program because it starts at the real start of the buffer, so we
- * have to take this into consideration here.
- */
-static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
-{
-       return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
-}
-
 /*
  * For SKL+, the plane source size used by the hardware is based on the value we
  * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
@@ -141,7 +128,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
                        fbc_ctl2 |= FBC_CTL_CPU_FENCE;
                intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
                intel_de_write(dev_priv, FBC_FENCE_OFF,
-                              params->crtc.fence_y_offset);
+                              params->fence_y_offset);
        }
 
        /* enable it... */
@@ -175,7 +162,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
        if (params->fence_id >= 0) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
                intel_de_write(dev_priv, DPFC_FENCE_YOFF,
-                              params->crtc.fence_y_offset);
+                              params->fence_y_offset);
        } else {
                intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
        }
@@ -243,7 +230,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                        intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
                                       SNB_CPU_FENCE_ENABLE | params->fence_id);
                        intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
-                                      params->crtc.fence_y_offset);
+                                      params->fence_y_offset);
                }
        } else {
                if (IS_GEN(dev_priv, 6)) {
@@ -253,7 +240,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
        }
 
        intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
-                      params->crtc.fence_y_offset);
+                      params->fence_y_offset);
        /* enable it... */
        intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -320,7 +307,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
                               SNB_CPU_FENCE_ENABLE | params->fence_id);
                intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
-                              params->crtc.fence_y_offset);
+                              params->fence_y_offset);
        } else if (dev_priv->ggtt.num_fences) {
                intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
                intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
@@ -631,8 +618,8 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv,
 /*
  * For some reason, the hardware tracking starts looking at whatever we
  * programmed as the display plane base address register. It does not look at
- * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
- * variables instead of just looking at the pipe/plane size.
+ * the X and Y offset registers. That's why we include the src x/y offsets
+ * instead of just looking at the plane size.
  */
 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 {
@@ -705,7 +692,6 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
        cache->plane.adjusted_x = plane_state->color_plane[0].x;
        cache->plane.adjusted_y = plane_state->color_plane[0].y;
-       cache->plane.y = plane_state->uapi.src.y1 >> 16;
 
        cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
 
@@ -713,6 +699,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        cache->fb.stride = fb->pitches[0];
        cache->fb.modifier = fb->modifier;
 
+       cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+
        drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
                    !plane_state->vma->fence);
 
@@ -883,10 +871,10 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
        memset(params, 0, sizeof(*params));
 
        params->fence_id = cache->fence_id;
+       params->fence_y_offset = cache->fence_y_offset;
 
        params->crtc.pipe = crtc->pipe;
        params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
-       params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
 
        params->fb.format = cache->fb.format;
        params->fb.stride = cache->fb.stride;
index e4aece20bc8089b0cff67256c12f0e7153ea2294..52db2bde44a3ab279b433847511c49afa901381c 100644 (file)
@@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring)
 {
        int err;
 
-       err = i915_active_acquire(&ring->vma->active);
+       err = intel_ring_pin(ring);
        if (err)
                return err;
 
-       err = intel_ring_pin(ring);
+       err = i915_active_acquire(&ring->vma->active);
        if (err)
-               goto err_active;
+               goto err_pin;
 
        return 0;
 
-err_active:
-       i915_active_release(&ring->vma->active);
+err_pin:
+       intel_ring_unpin(ring);
        return err;
 }
 
 static void __ring_retire(struct intel_ring *ring)
 {
-       intel_ring_unpin(ring);
        i915_active_release(&ring->vma->active);
+       intel_ring_unpin(ring);
 }
 
 __i915_active_call
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
new file mode 100644 (file)
index 0000000..e7e96d7
--- /dev/null
@@ -0,0 +1,46 @@
+ASM sources for auto-generated shaders
+======================================
+
+The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
+pre-compiled batch chunks that will clear any residual render cache during
+context switch.
+
+They are generated from their respective platform ASM files, which live in
+the i915/gt/shaders/clear_kernel directory.
+
+The generated .c files should never be modified directly. Instead, any
+modification needs to be made to the respective ASM files, and the build
+instructions below need to be followed.
+
+Building
+========
+
+Environment
+-----------
+
+IGT GPU Tools scripts and Mesa's i965 instruction assembler tool are used
+for building.
+
+Please make sure your Mesa tool is compiled with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run this script from the IGT source root directory.
+
+The instructions below assume:
+    *  IGT GPU Tools source code is located in your home directory (~) as ~/igt
+    *  Mesa source code is located in your home directory (~) as ~/mesa
+       and built under the ~/mesa/build directory
+    *  Linux kernel source code is under your home directory (~) as ~/linux
+
+Instructions
+------------
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
+       ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g ivb \
+      -m ~/mesa/build/src/intel/tools/i965_asm
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
+    ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g hsw \
+      -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
new file mode 100644 (file)
index 0000000..5fdf384
--- /dev/null
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ *     1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ *     2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ *        512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ *  Size : (SliceCount * SubSliceCount  * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ *         Expected to be initialized to 0 by driver/another kernel
+ *  Layout:
+ *          RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ *          Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1)          g1.2<1>UD       g1.2<0,1,0>UD   0x00000001UD    { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1)   null<1>UD       g1.3<0,1,0>UD   0x00000000UD    { align1 1N };
+(+f0.0) jmpi(1) 352D                                            { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ *     IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ *     HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8)          g3<1>UD         0x00000000UD                    { align1 1Q };
+shr(1)          g3<1>D          sr0<0,1,0>D     12D             { align1 1N };
+and(1)          g3<1>D          g3<0,1,0>D      1D              { align1 1N }; /* g3 has HSID */
+shr(1)          g3.1<1>D        sr0<0,1,0>D     13D             { align1 1N };
+and(1)          g3.1<1>D        g3.1<0,1,0>D    3D              { align1 1N }; /* g3.1 has sliceID */
+mul(1)          g3.5<1>D        g3.1<0,1,0>D    g1.10<0,1,0>UW  { align1 1N };
+add(1)          g3<1>D          g3<0,1,0>D      g3.5<0,1,0>D    { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1)          g3.2<1>D        sr0<0,1,0>D     8D              { align1 1N };
+and(1)          g3.2<1>D        g3.2<0,1,0>D    15D             { align1 1N }; /* g3.2 = EUID */
+mul(1)          g3.4<1>D        g3<0,1,0>D      16D             { align1 1N };
+add(1)          g3.2<1>D        g3.2<0,1,0>D    g3.4<0,1,0>D    { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address )  in instrumentation surf */
+
+mov(8)          g5<1>UD         0x00000000UD                    { align1 1Q };
+and(1)          g3.3<1>D        sr0<0,1,0>D     7D              { align1 1N };
+mul(1)          g3.3<1>D        g3.3<0,1,0>D    4D              { align1 1N };
+
+mov(8)          g4<1>UD         g0<8,8,1>UD                     { align1 1Q }; /* Initialize message header with g0 */
+mov(1)          g4<1>UD         g3.3<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.1<1>UD       g3.2<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.2<1>UD       0x00000003UD                    { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1)          g4.3<1>UD       g4.3<0,1,0>UW   0xffffffffUD    { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x02190001
+
+                            render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1)          g5<1>D          g5<0,1,0>D      1D              { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x040a8001
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+
+/* Delay thread for specified parameter */
+add.nz.f0.0(1)  g1.2<1>UD       g1.2<0,1,0>UD   -1D             { align1 1N };
+(+f0.0) jmpi(1) -32D                                            { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/* Initialize looping parameters */
+mov(1)          a0<1>D          0D                              { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1)          a0.4<1>W        127W                            { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8)          g2<1>UD         g0<8,8,1>UD                     { align1 1Q };
+mov(8)          g127<1>UD       g0<8,8,1>UD                     { align1 1Q };
+mov(2)          g2<1>UD         g1<2,2,1>UW                     { align1 1N };
+mov(1)          g2.2<1>UD       0x000f000fUD                    { align1 1N }; /* Block size (16x16) */
+and(1)          g2.3<1>UD       g2.3<0,1,0>UW   0xffffffefUD    { align1 1N };
+mov(16)         g3<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g4<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g5<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g6<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g7<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g8<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g9<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g10<1>UD        0x00000000UD                    { align1 1H };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1)          g2<1>UD         g1<0,1,0>UW     0x0010UW        { align1 1N };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1)  a0.4<1>W        a0.4<0,1,0>W    -1W             { align1 1N };
+mov(16)         g[a0]<1>UW      f0.1<0,1,0>UW                   { align1 1H };
+add(1)          a0<1>D          a0<0,1,0>D      32D             { align1 1N };
+(+f0.0) jmpi(1) -64D                                            { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8)        null<1>UD       g127<8,8,1>F    0x82000010
+                            thread_spawner MsgDesc: mlen 1 rlen 0           { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
new file mode 100644 (file)
index 0000000..97c7ac9
--- /dev/null
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ *     1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ *     2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ *        512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ *  Size : (SliceCount * SubSliceCount  * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ *         Expected to be initialized to 0 by driver/another kernel
+ *  Layout :
+ *           RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ *           Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1)          g1.2<1>UD       g1.2<0,1,0>UD   0x00000001UD    { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1)   null<1>UD       g1.3<0,1,0>UD   0x00000000UD    { align1 1N };
+(+f0.0) jmpi(1) 44D                                             { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ *     IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ *     HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8)          g3<1>UD         0x00000000UD                    { align1 1Q };
+shr(1)          g3<1>D          sr0<0,1,0>D     12D             { align1 1N };
+and(1)          g3<1>D          g3<0,1,0>D      1D              { align1 1N }; /* g3 has HSID */
+shr(1)          g3.1<1>D        sr0<0,1,0>D     13D             { align1 1N };
+and(1)          g3.1<1>D        g3.1<0,1,0>D    3D              { align1 1N }; /* g3.1 has sliceID */
+mul(1)          g3.5<1>D        g3.1<0,1,0>D    g1.10<0,1,0>UW  { align1 1N };
+add(1)          g3<1>D          g3<0,1,0>D      g3.5<0,1,0>D    { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1)          g3.2<1>D        sr0<0,1,0>D     8D              { align1 1N };
+and(1)          g3.2<1>D        g3.2<0,1,0>D    15D             { align1 1N }; /* g3.2 = EUID */
+mul(1)          g3.4<1>D        g3<0,1,0>D      16D             { align1 1N };
+add(1)          g3.2<1>D        g3.2<0,1,0>D    g3.4<0,1,0>D    { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address )  in instrumentation surf */
+
+mov(8)          g5<1>UD         0x00000000UD                    { align1 1Q };
+and(1)          g3.3<1>D        sr0<0,1,0>D     7D              { align1 1N };
+mul(1)          g3.3<1>D        g3.3<0,1,0>D    4D              { align1 1N };
+
+mov(8)          g4<1>UD         g0<8,8,1>UD                     { align1 1Q }; /* Initialize message header with g0 */
+mov(1)          g4<1>UD         g3.3<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.1<1>UD       g3.2<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.2<1>UD       0x00000003UD                    { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1)          g4.3<1>UD       g4.3<0,1,0>UW   0xffffffffUD    { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x02190001
+                            render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1)          g5<1>D          g5<0,1,0>D      1D              { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x040a8001
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+/* Delay thread for specified parameter */
+add.nz.f0.0(1)  g1.2<1>UD       g1.2<0,1,0>UD   -1D             { align1 1N };
+(+f0.0) jmpi(1) -4D                                             { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/* Initialize looping parameters */
+mov(1)          a0<1>D          0D                              { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1)          a0.4<1>W        127W                            { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8)          g2<1>UD         g0<8,8,1>UD                     { align1 1Q };
+mov(8)          g127<1>UD       g0<8,8,1>UD                     { align1 1Q };
+mov(2)          g2<1>UD         g1<2,2,1>UW                     { align1 1N };
+mov(1)          g2.2<1>UD       0x000f000fUD                    { align1 1N }; /* Block size (16x16) */
+and(1)          g2.3<1>UD       g2.3<0,1,0>UW   0xffffffefUD    { align1 1N };
+mov(16)         g3<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g4<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g5<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g6<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g7<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g8<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g9<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g10<1>UD        0x00000000UD                    { align1 1H };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1)          g2<1>UD         g1<0,1,0>UW     0x0010UW        { align1 1N };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1)  a0.4<1>W        a0.4<0,1,0>W    -1W             { align1 1N };
+mov(16)         g[a0]<1>UW      f0.1<0,1,0>UW                   { align1 1H };
+add(1)          a0<1>D          a0<0,1,0>D      32D             { align1 1N };
+(+f0.0) jmpi(1) -8D                                             { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8)        null<1>UD       g127<8,8,1>F    0x82000010
+                            thread_spawner MsgDesc: mlen 1 rlen 0           { align1 1Q EOT };
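The comment block at the top of the new ivb.asm documents how the instrumentation buffer is indexed: row = (SliceID * SubSlicePerSliceCount + HSID) * 16 + EUID, column = ThreadSlotID in dwords. The stand-alone C sketch below replays that arithmetic from the sr0 field layout described in the shader comments; it is a debugging aid derived from those comments, not driver code, and the sample sr0 value is made up.

        #include <stdint.h>
        #include <stdio.h>

        /* Field positions follow the HSW sr0 layout documented in ivb.asm:
         * [14:13] SliceID, [12] HSID, [11:8] EUID, [2:0] ThreadSlotID. */
        static uint32_t instr_row(uint32_t sr0, uint32_t subslices_per_slice)
        {
                uint32_t hsid    = (sr0 >> 12) & 0x1;  /* half-slice ID          */
                uint32_t sliceid = (sr0 >> 13) & 0x3;  /* slice ID               */
                uint32_t euid    = (sr0 >> 8)  & 0xf;  /* EU within sub-slice    */

                /* RowN = (SliceID * SubSlicePerSliceCount + HSID) * 16 + EUID */
                return (sliceid * subslices_per_slice + hsid) * 16 + euid;
        }

        static uint32_t instr_col_bytes(uint32_t sr0)
        {
                return (sr0 & 0x7) * 4;  /* ThreadSlotID, one dword per thread */
        }

        int main(void)
        {
                uint32_t sr0 = (1u << 13) | (3u << 8) | 5u; /* slice 1, EU 3, slot 5 */

                printf("row %u, byte offset %u\n",
                       instr_row(sr0, 2), instr_col_bytes(sr0));
                return 0;
        }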
index ec47d41145541a3fac610de10fc2e05cfcf0eb90..62e6a14ad58ef78caaa0875c59d70d0059a07368 100644 (file)
@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
        vreg = vgpu_vreg(param->vgpu, offset);
 
        if (preg != vreg) {
-               node = kmalloc(sizeof(*node), GFP_KERNEL);
+               node = kmalloc(sizeof(*node), GFP_ATOMIC);
                if (!node)
                        return -ENOMEM;
 
index 3e88e3b5c43ad47707bb0c160cdd45f138744009..fadd2adb803072ed6309883d3c5b282c07151309 100644 (file)
@@ -1726,13 +1726,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
        write_vreg(vgpu, offset, p_data, bytes);
 
-       if (data & _MASKED_BIT_ENABLE(1)) {
+       if (IS_MASKED_BITS_ENABLED(data, 1)) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
 
        if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
-           data & _MASKED_BIT_ENABLE(2)) {
+           IS_MASKED_BITS_ENABLED(data, 2)) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
@@ -1741,14 +1741,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
         * pvinfo, if not, we will treat this guest as non-gvtg-aware
         * guest, and stop emulating its cfg space, mmio, gtt, etc.
         */
-       if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
-                       (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
-                       && !vgpu->pv_notified) {
+       if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+           IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+           !vgpu->pv_notified) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
-       if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
-                       || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+       if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+           IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
                enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
 
                gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1809,7 +1809,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
-       if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+       if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
                data |= RESET_CTL_READY_TO_RESET;
        else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
                data &= ~RESET_CTL_READY_TO_RESET;
@@ -1827,7 +1827,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
        (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
        write_vreg(vgpu, offset, p_data, bytes);
 
-       if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+       if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+           IS_MASKED_BITS_ENABLED(data, 0x8))
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
        return 0;
@@ -3055,6 +3056,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
        MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
        MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+       MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
 
        MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
        MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3131,8 +3133,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                 NULL, NULL);
 
-       MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
-       MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+       MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+       MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
 
        return 0;
 }
index 970704b18f2394c4ed04f8f38789b7f19452b1a3..3b25e7fe32f6fad5d8fa786c38b5e5ba28565932 100644 (file)
@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
                                       struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a)  \
-       (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
-       ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+       IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
 
 #endif
index 5b66e14c5b7b2b9a2d24071dce3504b61aa95b1a..b88e033cbed4395f6a43922d568325e54fb2446d 100644 (file)
 #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
                ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
 
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+               (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+               ((_val) & _MASKED_BIT_DISABLE(_b))
+
 #define FORCEWAKE_RENDER_GEN9_REG 0xa278
 #define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
 #define FORCEWAKE_BLITTER_GEN9_REG 0xa188
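For reference, the two helpers added here test i915 "masked" registers, where the upper 16 bits act as a per-bit write-enable for the lower 16. The sketch below is a minimal user-space model of that convention; the _MASKED_BIT_* stand-ins are reproduced from memory of i915 and should be read as illustrative, not authoritative.

        #include <stdint.h>
        #include <stdio.h>

        /* Illustrative stand-ins for the i915 masked-register helpers. */
        #define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))  /* mask bit + value bit */
        #define MASKED_BIT_DISABLE(a)  ((a) << 16)          /* mask bit only        */

        #define IS_MASKED_BITS_ENABLED(val, b) \
                (((val) & MASKED_BIT_ENABLE(b)) == MASKED_BIT_ENABLE(b))
        #define IS_MASKED_BITS_DISABLED(val, b) \
                ((val) & MASKED_BIT_DISABLE(b))

        int main(void)
        {
                uint32_t bit = 1u << 2;

                /* A disable write sets only the mask half; a bare
                 * "val & MASKED_BIT_ENABLE(bit)" would still be non-zero here,
                 * while the new helper correctly reports "not enabled". */
                uint32_t val = MASKED_BIT_DISABLE(bit);

                printf("enabled=%d disabled=%d\n",
                       IS_MASKED_BITS_ENABLED(val, bit),
                       !!IS_MASKED_BITS_DISABLED(val, bit)); /* enabled=0 disabled=1 */
                return 0;
        }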
index bca036ac662129414d38168f17fd85db68b684e8..e7532e7d74e91bbbddfe0b76ad630a1b1a35d430 100644 (file)
@@ -230,7 +230,7 @@ static int per_file_stats(int id, void *ptr, void *data)
        struct file_stats *stats = data;
        struct i915_vma *vma;
 
-       if (!kref_get_unless_zero(&obj->base.refcount))
+       if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
                return 0;
 
        stats->count++;
index adb9bf34cf97a3bb21690c8ec73bbeb382a0f7b3..f79f118bf19278ea994eaa88364166f9d8c71f65 100644 (file)
@@ -410,8 +410,6 @@ struct intel_fbc {
                        int adjusted_x;
                        int adjusted_y;
 
-                       int y;
-
                        u16 pixel_blend_mode;
                } plane;
 
@@ -420,6 +418,8 @@ struct intel_fbc {
                        unsigned int stride;
                        u64 modifier;
                } fb;
+
+               unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                s8 fence_id;
        } state_cache;
@@ -435,7 +435,6 @@ struct intel_fbc {
                struct {
                        enum pipe pipe;
                        enum i9xx_plane_id i9xx_plane;
-                       unsigned int fence_y_offset;
                } crtc;
 
                struct {
@@ -444,6 +443,7 @@ struct intel_fbc {
                } fb;
 
                int cfb_size;
+               unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                s8 fence_id;
                bool plane_visible;
index fc14ebf9a0b709ce029d5409016954156184e537..1f9cd33b35cb33afa3cd90d02f8b433398af8f52 100644 (file)
@@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
 {
+       struct i915_vma *pos = ERR_PTR(-E2BIG);
        struct i915_vma *vma;
        struct rb_node *rb, **p;
 
@@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj,
        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
-               struct i915_vma *pos;
                long cmp;
 
                rb = *p;
@@ -196,16 +196,12 @@ vma_create(struct drm_i915_gem_object *obj,
                 * and dispose of ours.
                 */
                cmp = i915_vma_compare(pos, vm, view);
-               if (cmp == 0) {
-                       spin_unlock(&obj->vma.lock);
-                       i915_vma_free(vma);
-                       return pos;
-               }
-
                if (cmp < 0)
                        p = &rb->rb_right;
-               else
+               else if (cmp > 0)
                        p = &rb->rb_left;
+               else
+                       goto err_unlock;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma.tree);
@@ -228,8 +224,9 @@ vma_create(struct drm_i915_gem_object *obj,
 err_unlock:
        spin_unlock(&obj->vma.lock);
 err_vma:
+       i915_vm_put(vm);
        i915_vma_free(vma);
-       return ERR_PTR(-E2BIG);
+       return pos;
 }
 
 static struct i915_vma *
index c420f5a3d33b9fc452a49e3bdacafe7210e43d3d..aa74aac3cbccddedd4b4ac2abd267ea9c9cbff5a 100644 (file)
@@ -6,12 +6,12 @@ config DRM_MEDIATEK
        depends on COMMON_CLK
        depends on HAVE_ARM_SMCCC
        depends on OF
+       depends on MTK_MMSYS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
        select DRM_MIPI_DSI
        select DRM_PANEL
        select MEMORY
-       select MTK_MMSYS
        select MTK_SMI
        select VIDEOMODE_HELPERS
        help
index fe46c4bac64d77a1cdeb0e52509d45f6d8384db8..7cd8f415fd029e368b2348cbe8915dc995ae6515 100644 (file)
@@ -193,7 +193,6 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
        int ret;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
                if (ret) {
@@ -213,7 +212,6 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 {
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
                clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
 }
@@ -258,7 +256,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
        int ret;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        if (WARN_ON(!crtc->state))
                return -EINVAL;
 
@@ -299,7 +296,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
                goto err_mutex_unprepare;
        }
 
-       DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
        for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
                mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
                                      mtk_crtc->ddp_comp[i]->id,
@@ -349,7 +345,6 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
        struct drm_crtc *crtc = &mtk_crtc->base;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
                if (i == 1)
@@ -831,7 +826,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
        mtk_crtc->cmdq_client =
-                       cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+                       cmdq_mbox_create(mtk_crtc->mmsys_dev,
+                                        drm_crtc_index(&mtk_crtc->base),
                                         2000);
        if (IS_ERR(mtk_crtc->cmdq_client)) {
                dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
index 6bd369434d9d63c07d1d3073616918f5840b0eca..040a8f393fe24da31e6bcee0f6fe69c6668d6318 100644 (file)
@@ -444,7 +444,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
        if (!private)
                return -ENOMEM;
 
-       private->data = of_device_get_match_data(dev);
        private->mmsys_dev = dev->parent;
        if (!private->mmsys_dev) {
                dev_err(dev, "Failed to get MMSYS device\n");
@@ -514,7 +513,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
                                goto err_node;
                        }
 
-                       ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+                       ret = mtk_ddp_comp_init(dev->parent, node, comp,
+                                               comp_id, NULL);
                        if (ret) {
                                of_node_put(node);
                                goto err_node;
@@ -571,7 +571,6 @@ static int mtk_drm_sys_suspend(struct device *dev)
        int ret;
 
        ret = drm_mode_config_helper_suspend(drm);
-       DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
 
        return ret;
 }
@@ -583,7 +582,6 @@ static int mtk_drm_sys_resume(struct device *dev)
        int ret;
 
        ret = drm_mode_config_helper_resume(drm);
-       DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
 
        return ret;
 }
index c2bd683a87c82857c74e508833d56256b731160b..92141a19681b901afc4c2e55d583fdcac449c62c 100644 (file)
@@ -164,6 +164,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
                                                   true, true);
 }
 
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+       state->pending.enable = false;
+       wmb(); /* Make sure the above parameter is set before update */
+       state->pending.dirty = true;
+}
+
 static void mtk_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
 {
@@ -178,6 +188,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        if (!crtc || WARN_ON(!fb))
                return;
 
+       if (!plane->state->visible) {
+               mtk_plane_atomic_disable(plane, old_state);
+               return;
+       }
+
        gem = fb->obj[0];
        mtk_gem = to_mtk_gem_obj(gem);
        addr = mtk_gem->dma_addr;
@@ -200,16 +215,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        state->pending.dirty = true;
 }
 
-static void mtk_plane_atomic_disable(struct drm_plane *plane,
-                                    struct drm_plane_state *old_state)
-{
-       struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
-       state->pending.enable = false;
-       wmb(); /* Make sure the above parameter is set before update */
-       state->pending.dirty = true;
-}
-
 static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
        .prepare_fb = drm_gem_fb_prepare_fb,
        .atomic_check = mtk_plane_atomic_check,
index 270bf22c98feb06248672d0d89de28d2a7419c48..02ac55c13a80bbcf84d8488d93842660bb3d67ec 100644 (file)
@@ -316,10 +316,7 @@ static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
 
 static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
 {
-       u32 tmp_reg1;
-
-       tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
-       return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
+       return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
 }
 
 static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
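The mtk_dsi_clk_hs_state() change returns the masked bit directly instead of comparing the mask result against 1, which is only equivalent when the flag happens to sit in bit 0. A tiny sketch of the difference; BIT(2) is purely illustrative, since the real position of LC_HS_TX_EN is not visible in this hunk.

        #include <stdbool.h>
        #include <stdio.h>

        #define BIT(n)  (1u << (n))
        #define FLAG    BIT(2)          /* illustrative flag position */

        int main(void)
        {
                unsigned int reg = FLAG;        /* flag is set in the register */

                bool compare_to_one = ((reg & FLAG) == 1);  /* false: 0x4 != 1 */
                bool direct_mask    = reg & FLAG;           /* true: non-zero  */

                printf("compare-to-1=%d direct-mask=%d\n",
                       compare_to_one, direct_mask);
                return 0;
        }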
index 5feb760617cbdf86b1104b084b76d037c0119ca5..1eebe310470afa70a9d87453237686d9351cb7dd 100644 (file)
@@ -1630,8 +1630,6 @@ static int mtk_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        mtk_hdmi_audio_enable(hdmi);
 
        return 0;
@@ -1641,8 +1639,6 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        mtk_hdmi_audio_disable(hdmi);
 }
 
@@ -1651,8 +1647,6 @@ mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s(%d)\n", __func__, enable);
-
        if (enable)
                mtk_hdmi_hw_aud_mute(hdmi);
        else
@@ -1665,8 +1659,6 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
 
        return 0;
@@ -1766,7 +1758,6 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
                goto err_bridge_remove;
        }
 
-       dev_dbg(dev, "mediatek hdmi probe success\n");
        return 0;
 
 err_bridge_remove:
@@ -1789,7 +1780,7 @@ static int mtk_hdmi_suspend(struct device *dev)
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
        mtk_hdmi_clk_disable_audio(hdmi);
-       dev_dbg(dev, "hdmi suspend success!\n");
+
        return 0;
 }
 
@@ -1804,7 +1795,6 @@ static int mtk_hdmi_resume(struct device *dev)
                return ret;
        }
 
-       dev_dbg(dev, "hdmi resume success!\n");
        return 0;
 }
 #endif
index b55f5167520586d0f4fcd81ba7147c1b06a8f51a..827b93786facb137fc62b3b3c1dc14fa66fa2bba 100644 (file)
 #define RGS_HDMITX_5T1_EDG             (0xf << 4)
 #define RGS_HDMITX_PLUG_TST            BIT(0)
 
-static const u8 PREDIV[3][4] = {
-       {0x0, 0x0, 0x0, 0x0},   /* 27Mhz */
-       {0x1, 0x1, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x1, 0x1, 0x1}    /* 148Mhz */
-};
-
-static const u8 TXDIV[3][4] = {
-       {0x3, 0x3, 0x3, 0x2},   /* 27Mhz */
-       {0x2, 0x1, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x0, 0x0, 0x0}    /* 148Mhz */
-};
-
-static const u8 FBKSEL[3][4] = {
-       {0x1, 0x1, 0x1, 0x1},   /* 27Mhz */
-       {0x1, 0x0, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x0, 0x1, 0x1}    /* 148Mhz */
-};
-
-static const u8 FBKDIV[3][4] = {
-       {19, 24, 29, 19},       /* 27Mhz */
-       {19, 24, 14, 19},       /* 74Mhz */
-       {19, 24, 14, 19}        /* 148Mhz */
-};
-
-static const u8 DIVEN[3][4] = {
-       {0x2, 0x1, 0x1, 0x2},   /* 27Mhz */
-       {0x2, 0x2, 0x2, 0x2},   /* 74Mhz */
-       {0x2, 0x2, 0x2, 0x2}    /* 148Mhz */
-};
-
-static const u8 HTPLLBP[3][4] = {
-       {0xc, 0xc, 0x8, 0xc},   /* 27Mhz */
-       {0xc, 0xf, 0xf, 0xc},   /* 74Mhz */
-       {0xc, 0xf, 0xf, 0xc}    /* 148Mhz */
-};
-
-static const u8 HTPLLBC[3][4] = {
-       {0x2, 0x3, 0x3, 0x2},   /* 27Mhz */
-       {0x2, 0x3, 0x3, 0x2},   /* 74Mhz */
-       {0x2, 0x3, 0x3, 0x2}    /* 148Mhz */
-};
-
-static const u8 HTPLLBR[3][4] = {
-       {0x1, 0x1, 0x0, 0x1},   /* 27Mhz */
-       {0x1, 0x2, 0x2, 0x1},   /* 74Mhz */
-       {0x1, 0x2, 0x2, 0x1}    /* 148Mhz */
-};
-
 static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
 {
        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
 
-       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
@@ -178,8 +128,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
 {
        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
 
-       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
        usleep_range(100, 150);
index 8ea00546cd4e29187ee29ed9889505a258532a3e..049c4bfe2a3aefd0abde8a8a6d0fa6fd374259b1 100644 (file)
 #define VIU_OSD_FIFO_DEPTH_VAL(val)      ((val & 0x7f) << 12)
 #define VIU_OSD_WORDS_PER_BURST(words)   (((words & 0x4) >> 1) << 22)
 #define VIU_OSD_FIFO_LIMITS(size)        ((size & 0xf) << 24)
+#define VIU_OSD_BURST_LENGTH_24          (0x0 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_32          (0x0 << 31 | 0x1 << 10)
+#define VIU_OSD_BURST_LENGTH_48          (0x0 << 31 | 0x2 << 10)
+#define VIU_OSD_BURST_LENGTH_64          (0x0 << 31 | 0x3 << 10)
+#define VIU_OSD_BURST_LENGTH_96          (0x1 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_128         (0x1 << 31 | 0x1 << 10)
 
 #define VD1_IF0_GEN_REG 0x1a50
 #define VD1_IF0_CANVAS0 0x1a51
index 304f8ff1339cb6c35acb5e239de5f08b4481ed5b..aede0c67a57f09c8effef544582e8659b0ec087e 100644 (file)
@@ -411,13 +411,6 @@ void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
                            priv->io_base + _REG(VIU_MISC_CTRL1));
 }
 
-static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
-{
-       uint32_t val = (((length & 0x80) % 24) / 12);
-
-       return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
-}
-
 void meson_viu_init(struct meson_drm *priv)
 {
        uint32_t reg;
@@ -444,9 +437,9 @@ void meson_viu_init(struct meson_drm *priv)
                VIU_OSD_FIFO_LIMITS(2);      /* fifo_lim: 2*16=32 */
 
        if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
-               reg |= meson_viu_osd_burst_length_reg(32);
+               reg |= VIU_OSD_BURST_LENGTH_32;
        else
-               reg |= meson_viu_osd_burst_length_reg(64);
+               reg |= VIU_OSD_BURST_LENGTH_64;
 
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
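The meson_viu.c change swaps a computed register value for the explicit VIU_OSD_BURST_LENGTH_* encodings added in the header above. Re-evaluating the deleted helper in isolation shows why: for the burst lengths actually requested (32 and 64), its expression collapses to zero, which is the 24-burst encoding. The snippet below only replays that arithmetic and is not driver code.

        #include <stdint.h>
        #include <stdio.h>

        /* Same expression as the removed meson_viu_osd_burst_length_reg(). */
        static uint32_t old_burst_reg(uint32_t length)
        {
                uint32_t val = (((length & 0x80) % 24) / 12);

                return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
        }

        int main(void)
        {
                /* 32 & 0x80 == 0 and 64 & 0x80 == 0, so val is 0 in both cases
                 * and the function returns 0, the VIU_OSD_BURST_LENGTH_24 bits. */
                printf("burst 32 -> 0x%08x\n", old_burst_reg(32));
                printf("burst 64 -> 0x%08x\n", old_burst_reg(64));
                return 0;
        }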
index 60f6472a3e58251b94c9607092e947b08d04db4f..6021f8d9efd1fbb0230ee921da3a9cdac3095ef1 100644 (file)
@@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
        struct msm_gem_address_space *aspace;
 
        aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               SZ_16M + 0xfff * SZ_64K);
+               0xfff * SZ_64K);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
index 096be97ce9f961583d407afaa83320bfeadc74fb..21e77d67151f58041f69ed776a5e31ee521ab020 100644 (file)
@@ -1121,7 +1121,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
                return -ENODEV;
 
        mmu = msm_iommu_new(gmu->dev, domain);
-       gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+       gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
        if (IS_ERR(gmu->aspace)) {
                iommu_domain_free(domain);
                return PTR_ERR(gmu->aspace);
index a1589e040c57e7c166ba203c1f0d1563596612ba..7768557cdfb2805aa8a7865603649c9bf8ba0742 100644 (file)
@@ -893,8 +893,8 @@ static const struct adreno_gpu_funcs funcs = {
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
-               .create_address_space = adreno_iommu_create_address_space,
 #endif
+               .create_address_space = adreno_iommu_create_address_space,
        },
        .get_timestamp = a6xx_get_timestamp,
 };
index 89673c7ed47354611ba4753db49f16dc9385067b..5db06b5909438fac5abe2f0c0195aadb152275ca 100644 (file)
@@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
        struct msm_gem_address_space *aspace;
 
        aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               0xfffffff);
+               0xffffffff - SZ_16M);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
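This hunk, like the neighbouring a2xx, gmu, dpu and mdp ones, rewrites the last argument of msm_gem_address_space_create() from what reads as an end address into a size. The arithmetic below is only an inference from the values in the hunks, not a statement of the API contract.

        #include <stdint.h>
        #include <stdio.h>

        #define SZ_16M 0x01000000ULL

        int main(void)
        {
                uint64_t start = SZ_16M;

                /* Old argument, read as a size: the space stops well short of
                 * the 32-bit GPU VA range. */
                uint64_t old_arg = 0xfffffffULL;
                printf("old: start 0x%llx, end 0x%llx\n",
                       (unsigned long long)start,
                       (unsigned long long)(start + old_arg));

                /* New argument: an explicit size, so start + size lands on
                 * 0xffffffff, the top of the 32-bit range. */
                uint64_t new_size = 0xffffffffULL - SZ_16M;
                printf("new: start 0x%llx, end 0x%llx\n",
                       (unsigned long long)start,
                       (unsigned long long)(start + new_size));
                return 0;
        }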
index 63976dcd2ac87fc18293331b1a9ed82fdb3c1e40..0946a86b37b285c365a729b3b47eb5dc393fbcdf 100644 (file)
@@ -521,7 +521,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
                        struct dpu_kms *dpu_kms,
                        struct drm_display_mode *mode)
 {
-       struct msm_display_topology topology;
+       struct msm_display_topology topology = {0};
        int i, intf_count = 0;
 
        for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
@@ -537,7 +537,8 @@ static struct msm_display_topology dpu_encoder_get_topology(
         * 1 LM, 1 INTF
         * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
         *
-        * Adding color blocks only to primary interface
+        * Adding color blocks only to primary interface if available in
+        * sufficient number
         */
        if (intf_count == 2)
                topology.num_lm = 2;
@@ -546,8 +547,11 @@ static struct msm_display_topology dpu_encoder_get_topology(
        else
                topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
 
-       if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
-               topology.num_dspp = topology.num_lm;
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+               if (dpu_kms->catalog->dspp &&
+                       (dpu_kms->catalog->dspp_count >= topology.num_lm))
+                       topology.num_dspp = topology.num_lm;
+       }
 
        topology.num_enc = 0;
        topology.num_intf = intf_count;
@@ -2136,7 +2140,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
 
        dpu_enc = to_dpu_encoder_virt(enc);
 
-       mutex_init(&dpu_enc->enc_lock);
        ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
        if (ret)
                goto fail;
@@ -2151,7 +2154,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
                                0);
 
 
-       mutex_init(&dpu_enc->rc_lock);
        INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
                        dpu_encoder_off_work);
        dpu_enc->idle_timeout = IDLE_TIMEOUT;
@@ -2183,7 +2185,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
        dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
        if (!dpu_enc)
-               return ERR_PTR(ENOMEM);
+               return ERR_PTR(-ENOMEM);
 
        rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
                        drm_enc_mode, NULL);
@@ -2196,6 +2198,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
        spin_lock_init(&dpu_enc->enc_spinlock);
        dpu_enc->enabled = false;
+       mutex_init(&dpu_enc->enc_lock);
+       mutex_init(&dpu_enc->rc_lock);
 
        return &dpu_enc->base;
 }
index b8615d4fe8a3f70efd9492f5dea0ed3b139e459d..680527e28d09b07c4c456ff169fe321244b60491 100644 (file)
@@ -780,7 +780,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 
        mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
        aspace = msm_gem_address_space_create(mmu, "dpu1",
-               0x1000, 0xfffffff);
+               0x1000, 0x100000000 - 0x1000);
 
        if (IS_ERR(aspace)) {
                mmu->funcs->destroy(mmu);
index 08897184b1d97d42815235c2b0c30f83f665a6f6..fc6a3f8134c7ff4d73b0bd8a6b51cbea1665ad52 100644 (file)
@@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                        config->iommu);
 
                aspace  = msm_gem_address_space_create(mmu,
-                       "mdp4", 0x1000, 0xffffffff);
+                       "mdp4", 0x1000, 0x100000000 - 0x1000);
 
                if (IS_ERR(aspace)) {
                        if (!IS_ERR(mmu))
index 19ec48695ffb4f4529552f1dd4e49c14cb6f770a..e193865ce9a26e557ee158651d5b9ae342b980bf 100644 (file)
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
 
                aspace = msm_gem_address_space_create(mmu, "mdp5",
-                       0x1000, 0xffffffff);
+                       0x1000, 0x100000000 - 0x1000);
 
                if (IS_ERR(aspace)) {
                        if (!IS_ERR(mmu))
index 001fbf537440a98e2addcc70d9b1e1d6291ba28b..a1d94be7883a061c26dc1773ecea30a548554431 100644 (file)
@@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
        queue->flags = flags;
 
        if (priv->gpu) {
-               if (prio >= priv->gpu->nr_rings)
+               if (prio >= priv->gpu->nr_rings) {
+                       kfree(queue);
                        return -EINVAL;
+               }
 
                queue->prio = prio;
        }
index d472942102f50db0328e21ae8cb44eca2d986434..519f99868e357b6c6600c08ae57075daddb145bc 100644 (file)
@@ -601,6 +601,9 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
                                (0x0100 << nv_crtc->index),
        };
 
+       if (!nv_encoder->audio)
+               return;
+
        nv_encoder->audio = false;
        nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 
index e5c230d9ae24ed2ccca95c4d25c6765ef26beca0..cc99938375087d5c21e4a0bd0bb77bac92315821 100644 (file)
@@ -550,7 +550,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
-               if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+               if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
index ba9f9359c30e1a486d07e128f8b9bc6c2426a43d..6586d9d3987402e1daa9d3e371c8b95601d1a8b2 100644 (file)
@@ -562,6 +562,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
                .end = notifier->notifier.interval_tree.last + 1,
                .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
                .hmm_pfns = hmm_pfns,
+               .dev_private_owner = drm->dev,
        };
        struct mm_struct *mm = notifier->notifier.mm;
        int ret;
index c8ab1b5741a3e3c0e93a9943c0b3811b8c646ea6..db7769cb33ebadfa10078c05f019c35cd4680338 100644 (file)
@@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                if (retries)
                        udelay(400);
 
-               /* transaction request, wait up to 1ms for it to complete */
+               /* transaction request, wait up to 2ms for it to complete */
                nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
 
-               timeout = 1000;
+               timeout = 2000;
                do {
                        ctrl = nvkm_rd32(device, 0x00e4e4 + base);
                        udelay(1);
index 7ef60895f43a7808229a8d72e27b2b54d67f3d48..edb6148cbca042c544939adb2cae2588e90a3e0e 100644 (file)
@@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                if (retries)
                        udelay(400);
 
-               /* transaction request, wait up to 1ms for it to complete */
+               /* transaction request, wait up to 2ms for it to complete */
                nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
 
-               timeout = 1000;
+               timeout = 2000;
                do {
                        ctrl = nvkm_rd32(device, 0x00d954 + base);
                        udelay(1);
index 134aa2b01f9071d14f3658019d74aebaed0285e9..f434efdeca44dd03187cd76dfe5e8104304a313f 100644 (file)
@@ -5563,6 +5563,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
+       rdev->pm.dpm.num_ps = 0;
        for (i = 0; i < state_array->ucNumEntries; i++) {
                u8 *idx;
                power_state = (union pplib_power_state *)power_state_offset;
@@ -5572,10 +5573,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
                if (!rdev->pm.power_state[i].clock_info)
                        return -EINVAL;
                ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
-               if (ps == NULL) {
-                       kfree(rdev->pm.dpm.ps);
+               if (ps == NULL)
                        return -ENOMEM;
-               }
                rdev->pm.dpm.ps[i].ps_priv = ps;
                ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
                                              non_clock_info,
@@ -5597,8 +5596,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
                        k++;
                }
                power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+               rdev->pm.dpm.num_ps = i + 1;
        }
-       rdev->pm.dpm.num_ps = state_array->ucNumEntries;
 
        /* fill in the vce power states */
        for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
index ce07ddc3e058aad7e77f335778a5de100e476b4b..557cbe5ab35f023a295aab16e817e174c61f6d74 100644 (file)
@@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
        unsigned long reg;
 
-       if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
-                              reg & SUN4I_HDMI_HPD_HIGH,
-                              0, 500000)) {
+       reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+       if (reg & SUN4I_HDMI_HPD_HIGH) {
                cec_phys_addr_invalidate(hdmi->cec_adap);
                return connector_status_disconnected;
        }
index 9147ee9d5f7d06773a3cf4ae3de613f4f8e0c676..d69f4efa3719807303d45ddba728cd009ce27341 100644 (file)
@@ -1368,7 +1368,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
         * Write dump contents to the page. No need to synchronize; panic should
         * be single-threaded.
         */
-       kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
+       kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
                             &bytes_written);
        if (bytes_written)
                hyperv_report_panic_msg(panic_pa, bytes_written);
index 0db8ef4fd6e18b7464f3c8e4b6c0de741f8e0998..a270b975e90bb4de3a7ab7c1a72349e1ef93a793 100644 (file)
@@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
 
        res = setup_attrs(resource);
        if (res)
-               goto exit_free;
+               goto exit_free_capability;
 
        resource->hwmon_dev = hwmon_device_register(&device->dev);
        if (IS_ERR(resource->hwmon_dev)) {
@@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device)
 
 exit_remove:
        remove_attrs(resource);
+exit_free_capability:
+       free_capabilities(resource);
 exit_free:
        kfree(resource);
 exit:
index 1a9772fb1f7354b9d6024b0b43f80d6243721ad0..94698cae0497112be80d086625538ddb3b1daf68 100644 (file)
@@ -64,7 +64,7 @@ static const struct pvt_sensor_info pvt_info[] = {
  *     48380,
  * where T = [-48380, 147438] mC and N = [0, 1023].
  */
-static const struct pvt_poly poly_temp_to_N = {
+static const struct pvt_poly __maybe_unused poly_temp_to_N = {
        .total_divider = 10000,
        .terms = {
                {4, 18322, 10000, 10000},
@@ -96,7 +96,7 @@ static const struct pvt_poly poly_N_to_temp = {
  * N = (18658e-3*V - 11572) / 10,
  * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
  */
-static const struct pvt_poly poly_volt_to_N = {
+static const struct pvt_poly __maybe_unused poly_volt_to_N = {
        .total_divider = 10,
        .terms = {
                {1, 18658, 1000, 1},
@@ -300,12 +300,12 @@ static irqreturn_t pvt_soft_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
 {
        return 0644;
 }
 
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
 {
        return 0444;
 }
@@ -462,12 +462,12 @@ static irqreturn_t pvt_hard_isr(int irq, void *data)
 
 #define pvt_soft_isr NULL
 
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
 {
        return 0;
 }
 
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
 {
        return 0;
 }
index 743752a2467a27c7f706a523c20c888ce79b52ed..64122eb38060d0f7362e9b1cb1398d399c302617 100644 (file)
@@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = {
  * Map device tree / platform data register bit map to chip bit map.
  * Applies to alert register and over-temperature register.
  */
-#define MAX6697_MAP_BITS(reg)  ((((reg) & 0x7e) >> 1) | \
+#define MAX6697_ALERT_MAP_BITS(reg)    ((((reg) & 0x7e) >> 1) | \
                                 (((reg) & 0x01) << 6) | ((reg) & 0x80))
+#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
 
 #define MAX6697_REG_STAT(n)            (0x44 + (n))
 
@@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data,
                return ret;
 
        ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
-                                       MAX6697_MAP_BITS(pdata->alert_mask));
+                               MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
        if (ret < 0)
                return ret;
 
        ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
-                               MAX6697_MAP_BITS(pdata->over_temperature_mask));
+                       MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
        if (ret < 0)
                return ret;
 
index a337195b1c395e76c5a53a30a78f29315afcc8f2..ea516cec1d354eef5fbb00a4abf1d34a3515552e 100644 (file)
@@ -71,7 +71,7 @@ config SENSORS_IR35221
          Infineon IR35221 controller.
 
          This driver can also be built as a module. If so, the module will
-         be called ir35521.
+         be called ir35221.
 
 config SENSORS_IR38064
        tristate "Infineon IR38064"
index a420877ba5335c4bf546dd020fdf7623083fa143..2191575a448b5796600f23e1ef112fda3070261e 100644 (file)
@@ -1869,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
        struct pmbus_sensor *sensor;
 
        sensor = pmbus_add_sensor(data, "fan", "target", index, page,
-                                 PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
+                                 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
                                  false, false, true);
 
        if (!sensor)
@@ -1880,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
                return 0;
 
        sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
-                                 PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
+                                 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM,
                                  false, false, true);
 
        if (!sensor)
                return -ENOMEM;
 
        sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
-                                 PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
+                                 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
                                  true, false, false);
 
        if (!sensor)
@@ -1929,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
                                continue;
 
                        if (pmbus_add_sensor(data, "fan", "input", index,
-                                            page, pmbus_fan_registers[f], 0xff,
+                                            page, 0xff, pmbus_fan_registers[f],
                                             PSC_FAN, true, true, true) == NULL)
                                return -ENOMEM;
 
index ef39c83aaf337a89ba6c1aa48dcb3f82cca374c2..bae1dc08ec9a966075835d8ae9f84fe8027e979e 100644 (file)
@@ -113,11 +113,18 @@ config I2C_STUB
 
 config I2C_SLAVE
        bool "I2C slave support"
+       help
+         This enables Linux to act as an I2C slave device. Note that your I2C
+         bus master driver also needs to support this functionality. Please
+         read Documentation/i2c/slave-interface.rst for further details.
 
 if I2C_SLAVE
 
 config I2C_SLAVE_EEPROM
        tristate "I2C eeprom slave driver"
+       help
+         This backend makes Linux behave like an I2C EEPROM. Please read
+         Documentation/i2c/slave-eeprom-backend.rst for further details.
 
 endif
 
index 7f10312d1b88f55ff605fb454d580c36cb5ecb7e..388978775be042b53f8fc285ef76efebc513aeb4 100644 (file)
@@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
                        DEB2("BUS ERROR - SDA Stuck low\n");
                        pca_reset(adap);
                        goto out;
-               case 0x90: /* Bus error - SCL stuck low */
+               case 0x78: /* Bus error - SCL stuck low (PCA9665) */
+               case 0x90: /* Bus error - SCL stuck low (PCA9564) */
                        DEB2("BUS ERROR - SCL Stuck low\n");
                        pca_reset(adap);
                        goto out;
index c2efaaaac252756b603deb7e742f02743337dfd0..a71bc58fc03c0c70403e1a9d774971277a6c4d8c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
@@ -191,6 +192,17 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
        return ret;
 }
 
+static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = {
+       {
+               .ident = "Qtechnology QT5222",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"),
+               },
+       },
+       { } /* terminate list */
+};
+
 static int dw_i2c_plat_probe(struct platform_device *pdev)
 {
        struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -267,7 +279,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 
        adap = &dev->adapter;
        adap->owner = THIS_MODULE;
-       adap->class = I2C_CLASS_DEPRECATED;
+       adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
+                                       I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
        ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
        adap->dev.of_node = pdev->dev.of_node;
        adap->nr = -1;
index bb810dee8fb5e42ff1e737d598d6d236839b78c9..73f139690e4e547d1db40746c0ae857c9b756e98 100644 (file)
@@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
        {0,}
 };
+MODULE_DEVICE_TABLE(pci, pch_pcidev_id);
 
 static irqreturn_t pch_i2c_handler(int irq, void *pData);
 
index 2fd717d8dd30eca66aa03305a3f01ce5d24a8774..71d7bae2cbcadccb453de10248a31dbe18dbd756 100644 (file)
@@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
                if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
                        mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
                                              &datalen, 1);
-                       if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
+                       if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
                                dev_err(priv->dev, "Incorrect smbus block read message len\n");
-                               return -E2BIG;
+                               return -EPROTO;
                        }
                } else {
                        datalen = priv->xfer.data_len;
index a2ed09a3c714a9905d346e80f9d91f69248af9e7..8c930bf1df894b711ea0d43739642f6d5d6f5278 100644 (file)
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
        return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        void *data;
        struct ib_sa_mad *mad;
        int len;
+       unsigned long flags;
+       unsigned long delay;
+       gfp_t gfp_flag;
+       int ret;
+
+       INIT_LIST_HEAD(&query->list);
+       query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
        mad = query->mad_buf->mad;
        len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);
 
-       return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+       gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+               GFP_NOWAIT;
 
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-       unsigned long flags;
-       unsigned long delay;
-       int ret;
+       spin_lock_irqsave(&ib_nl_request_lock, flags);
+       ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
 
-       INIT_LIST_HEAD(&query->list);
-       query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+       if (ret)
+               goto out;
 
-       /* Put the request on the list first.*/
-       spin_lock_irqsave(&ib_nl_request_lock, flags);
+       /* Put the request on the list.*/
        delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
        query->timeout = delay + jiffies;
        list_add_tail(&query->list, &ib_nl_request_list);
        /* Start the timeout if this is the only request */
        if (ib_nl_request_list.next == &query->list)
                queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-       spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
-       ret = ib_nl_send_msg(query, gfp_mask);
-       if (ret) {
-               ret = -EIO;
-               /* Remove the request */
-               spin_lock_irqsave(&ib_nl_request_lock, flags);
-               list_del(&query->list);
-               spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-       }
+out:
+       spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
        return ret;
 }
index 5eed4360695f53fcde3da7f557187b47d67950ae..cb7ad12888219774925200f22723ff381332aa81 100644 (file)
@@ -830,6 +830,29 @@ wq_error:
        return -ENOMEM;
 }
 
+/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+       int pidx;
+       struct hfi1_pportdata *ppd;
+
+       for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+               ppd = dd->pport + pidx;
+
+               if (ppd->hfi1_wq) {
+                       destroy_workqueue(ppd->hfi1_wq);
+                       ppd->hfi1_wq = NULL;
+               }
+               if (ppd->link_wq) {
+                       destroy_workqueue(ppd->link_wq);
+                       ppd->link_wq = NULL;
+               }
+       }
+}
+
 /**
  * enable_general_intr() - Enable the IRQs that will be handled by the
  * general interrupt handler.
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);
-
-               if (ppd->hfi1_wq) {
-                       destroy_workqueue(ppd->hfi1_wq);
-                       ppd->hfi1_wq = NULL;
-               }
-               if (ppd->link_wq) {
-                       destroy_workqueue(ppd->link_wq);
-                       ppd->link_wq = NULL;
-               }
+               if (ppd->hfi1_wq)
+                       flush_workqueue(ppd->hfi1_wq);
+               if (ppd->link_wq)
+                       flush_workqueue(ppd->link_wq);
        }
        sdma_exit(dd);
 }
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
         * clear dma engines, etc.
         */
        shutdown_device(dd);
+       destroy_workqueues(dd);
 
        stop_timers(dd);
 
index 0c2ae9f7b3e8a91b2030168feb05e81ea8c7b077..be62284e42d9aff47007b68f9b3aacaafa0aed96 100644 (file)
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
        /* Constraining 10KB packets to 8KB packets */
        if (mtu == (enum ib_mtu)OPA_MTU_10240)
-               mtu = OPA_MTU_8192;
+               mtu = (enum ib_mtu)OPA_MTU_8192;
        return opa_mtu_enum_to_int((enum opa_mtu)mtu);
 }
 
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-       struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+       struct hfi1_devdata *dd = ppd->dd;
+
+       if (dd->flags & HFI1_SHUTDOWN)
+               return true;
 
        return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
                               priv->s_sde ?
index 243b4ba0b6f6b625d7f9bd44354f501d663d0faa..facff133139a9597299c0b151001ab4de0cb718f 100644 (file)
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-       struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+       struct hfi1_devdata *dd = ppd->dd;
+
+       if ((dd->flags & HFI1_SHUTDOWN))
+               return true;
 
        return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
                                   priv->s_sde ?
index 343a8b8361e78e478ac7eef4ae8ccffc0d815898..6f99ed03d88e74c25c62f50ee54325b943f0789c 100644 (file)
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                           mdev_port_num);
        if (err)
                goto out;
-       ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+       ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
        eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
 
        props->active_width     = IB_WIDTH_4X;
index f939c9b769f044e058486bfe38268915e67e73cd..e050eade97a1e53f541a4ca76be43dd0462fab97 100644 (file)
@@ -2668,6 +2668,10 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
                return (create_flags) ? -EINVAL : 0;
 
+       process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+                           mlx5_get_flow_namespace(dev->mdev,
+                                                   MLX5_FLOW_NAMESPACE_BYPASS),
+                           qp);
        process_create_flag(dev, &create_flags,
                            IB_QP_CREATE_INTEGRITY_EN,
                            MLX5_CAP_GEN(mdev, sho), qp);
@@ -3001,11 +3005,12 @@ destroy_qp:
                mlx5_ib_destroy_dct(qp);
        } else {
                /*
-                * The two lines below are temp solution till QP allocation
+                * These lines below are temp solution till QP allocation
                 * will be moved to be under IB/core responsiblity.
                 */
                qp->ibqp.send_cq = attr->send_cq;
                qp->ibqp.recv_cq = attr->recv_cq;
+               qp->ibqp.pd = pd;
                destroy_qp_common(dev, qp, udata);
        }
 
index a0b8cc643c5cfcf6342f8b8f170f2ee42f8196f4..ed60c9e4643e84b16cee9be592b440e845cba130 100644 (file)
@@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
        static int dev_id = 1;
        int rv;
 
+       sdev->vendor_part_id = dev_id++;
+
        rv = ib_register_device(base_dev, name);
        if (rv) {
                pr_warn("siw: device registration error %d\n", rv);
                return rv;
        }
-       sdev->vendor_part_id = dev_id++;
 
        siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
 
index 3f9354baac4b3e4a2c09176ea59c54f99bd9436c..6291fb5fa015a47ab25ed2dcc133acf8358c8d17 100644 (file)
@@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
        u8 hover_info = packet[ETP_HOVER_INFO_OFFSET];
        bool contact_valid, hover_event;
 
+       pm_wakeup_event(&data->client->dev, 0);
+
        hover_event = hover_info & 0x40;
        for (i = 0; i < ETP_MAX_FINGERS; i++) {
                contact_valid = tp_info & (1U << (3 + i));
@@ -974,6 +976,8 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
        u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1];
        int x, y;
 
+       pm_wakeup_event(&data->client->dev, 0);
+
        if (!data->tp_input) {
                dev_warn_once(&data->client->dev,
                              "received a trackpoint report while no trackpoint device has been created. Please report upstream.\n");
@@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
 static irqreturn_t elan_isr(int irq, void *dev_id)
 {
        struct elan_tp_data *data = dev_id;
-       struct device *dev = &data->client->dev;
        int error;
        u8 report[ETP_MAX_REPORT_LEN];
 
@@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
        if (error)
                goto out;
 
-       pm_wakeup_event(dev, 0);
-
        switch (report[ETP_REPORT_ID_OFFSET]) {
        case ETP_REPORT_ID:
                elan_report_absolute(data, report);
@@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
                elan_report_trackpoint(data, report);
                break;
        default:
-               dev_err(dev, "invalid report id data (%x)\n",
+               dev_err(&data->client->dev, "invalid report id data (%x)\n",
                        report[ETP_REPORT_ID_OFFSET]);
        }
 
index 758dae8d650066006189ef429c589ddedf87bf27..4b81b2d0fe067af3a97da105b529d573ace25c1a 100644 (file)
@@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0093", /* T480 */
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
+       "LEN0099", /* X1 Extreme 1st */
        "LEN009b", /* T580 */
        "LEN200f", /* T450s */
        "LEN2044", /* L470  */
index 7b08ff8ddf35708391904f62a0917b5d62f5444c..7d7f737027264eb8cf33c45e815c155de3149ea6 100644 (file)
@@ -425,6 +425,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
                },
        },
+       {
+               /* Lenovo XiaoXin Air 12 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+               },
+       },
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
index 233cb1085bbdd0f93469e2b405bb109f189d39ca..5477a5718202ac425c64ac9f3cdfc64f7ff89036 100644 (file)
@@ -1325,7 +1325,6 @@ static int elants_i2c_probe(struct i2c_client *client,
                             0, MT_TOOL_PALM, 0, 0);
        input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
        input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
-       input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
 
        touchscreen_parse_properties(ts->input, true, &ts->prop);
 
index 6dc49ed8377a5c12bfb135c89021b9f3b48747ce..b0f308cb7f7c2fc2af592b63b38ef5762f9c9a83 100644 (file)
@@ -305,6 +305,7 @@ config ROCKCHIP_IOMMU
 
 config SUN50I_IOMMU
        bool "Allwinner H6 IOMMU Support"
+       depends on HAS_DMA
        depends on ARCH_SUNXI || COMPILE_TEST
        select ARM_DMA_USE_IOMMU
        select IOMMU_API
index f892992c8744dfe28d3216d41d68bbcfd553bfcd..57309716fd180adfbb609db7e195e7a036c3acd6 100644 (file)
@@ -102,7 +102,7 @@ extern int __init add_special_device(u8 type, u8 id, u16 *devid,
 #ifdef CONFIG_DMI
 void amd_iommu_apply_ivrs_quirks(void);
 #else
-static void amd_iommu_apply_ivrs_quirks(void) { }
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
 #endif
 
 #endif
index cf01d0215a3974c3c2a912eef8183911e5e82b64..be4318044f96c19d1055a07231f87dc911f8ebe5 100644 (file)
@@ -12,7 +12,7 @@ struct qcom_smmu {
        struct arm_smmu_device smmu;
 };
 
-static const struct of_device_id qcom_smmu_client_of_match[] = {
+static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
        { .compatible = "qcom,adreno" },
        { .compatible = "qcom,mdp4" },
        { .compatible = "qcom,mdss" },
index d43120eb1dc56ef9d83338339d71f53c235db906..b6858adc4f173db6f2a2693e143b939ced819d88 100644 (file)
@@ -295,10 +295,10 @@ void iommu_release_device(struct device *dev)
                return;
 
        iommu_device_unlink(dev->iommu->iommu_dev, dev);
-       iommu_group_remove_device(dev);
 
        ops->release_device(dev);
 
+       iommu_group_remove_device(dev);
        module_put(ops->owner);
        dev_iommu_free(dev);
 }
index fce605e96aa2451506d3c70584ea0b6716e5f4a7..3b1bf2fb94f59524ed55053aa915c8985b98a111 100644 (file)
@@ -313,9 +313,9 @@ static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
                    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(0));
 
-       ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG,
-                                reg, !reg,
-                                1, 2000);
+       ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
+                                       reg, !reg,
+                                       1, 2000);
        if (ret)
                dev_warn(iommu->dev, "TLB Flush timed out!\n");
 
@@ -556,7 +556,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
 {
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        phys_addr_t pt_phys;
-       dma_addr_t pte_dma;
        u32 *pte_addr;
        u32 dte;
 
@@ -566,7 +565,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
 
        pt_phys = sun50i_dte_get_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);
-       pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE;
 
        if (!sun50i_pte_is_page_valid(*pte_addr))
                return 0;
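
The hunk above switches the TLB-flush wait from readl_poll_timeout() to readl_poll_timeout_atomic(), presumably because the flush can be reached from a context that must not sleep; the atomic variant busy-waits with udelay() between reads instead of sleeping. A minimal sketch of the helper, with a hypothetical register offset and busy bit (not the sun50i layout), could look like this:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define DEMO_STATUS_REG   0x10        /* hypothetical status register offset */
    #define DEMO_STATUS_BUSY  BIT(0)      /* hypothetical "flush in progress" bit */

    /* Poll until the busy bit clears: re-read every 1 us, give up after 2000 us. */
    static int demo_wait_idle(void __iomem *base)
    {
            u32 reg;

            return readl_poll_timeout_atomic(base + DEMO_STATUS_REG, reg,
                                             !(reg & DEMO_STATUS_BUSY), 1, 2000);
    }

The helper returns 0 once the condition holds, or -ETIMEDOUT after the timeout expires, which matches the dev_warn() path in the hunk above.
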
index 29fead208cad5ad70eaed75345f036cf00861f67..216b3b8392b5e69ddd72cf84ee99089212f7bbe8 100644 (file)
@@ -563,7 +563,7 @@ config LOONGSON_PCH_PIC
          Support for the Loongson PCH PIC Controller.
 
 config LOONGSON_PCH_MSI
-       bool "Loongson PCH PIC Controller"
+       bool "Loongson PCH MSI Controller"
        depends on MACH_LOONGSON64 || COMPILE_TEST
        depends on PCI
        default MACH_LOONGSON64
index cd685f521c77ac1041ad0a1576e8b26e4c875740..beac4caefad9a761736e96e0d2b00de38bd35f4b 100644 (file)
@@ -3797,10 +3797,10 @@ static void its_wait_vpt_parse_complete(void)
        if (!gic_rdists->has_vpend_valid_dirty)
                return;
 
-       WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
-                                               val,
-                                               !(val & GICR_VPENDBASER_Dirty),
-                                               10, 500));
+       WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
+                                                      val,
+                                                      !(val & GICR_VPENDBASER_Dirty),
+                                                      10, 500));
 }
 
 static void its_vpe_schedule(struct its_vpe *vpe)
@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
        u64 val;
 
        if (info->req_db) {
+               unsigned long flags;
+
                /*
                 * vPE is going to block: make the vPE non-resident with
                 * PendingLast clear and DB set. The GIC guarantees that if
                 * we read-back PendingLast clear, then a doorbell will be
                 * delivered when an interrupt comes.
+                *
+                * Note the locking to deal with the concurrent update of
+                * pending_last from the doorbell interrupt handler that can
+                * run concurrently.
                 */
+               raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
                val = its_clear_vpend_valid(vlpi_base,
                                            GICR_VPENDBASER_PendingLast,
                                            GICR_VPENDBASER_4_1_DB);
                vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+               raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
        } else {
                /*
                 * We're not blocking, so just make the vPE non-resident
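
The added raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair above guards pending_last against the doorbell interrupt handler mentioned in the new comment. A rough, self-contained sketch of that pattern follows; the structure and field names here are made up for illustration, not the GIC driver's:

    #include <linux/spinlock.h>

    struct demo_vpe {
            raw_spinlock_t lock;          /* protects pending_last */
            bool pending_last;
    };

    /*
     * Safe to call from process context even if an IRQ handler also takes
     * demo_vpe::lock: the _irqsave variant masks local interrupts while
     * the lock is held, so the handler cannot preempt the critical section.
     */
    static void demo_set_pending_last(struct demo_vpe *vpe, bool val)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&vpe->lock, flags);
            vpe->pending_last = val;
            raw_spin_unlock_irqrestore(&vpe->lock, flags);
    }
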
index 00de05abd3c3ad4dc2ffbd69d6c2bf57b3912c29..c17fabd6741e262388996eac7d8fddbff07c5d88 100644 (file)
@@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
-       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-       unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
-       u32 val, mask, bit;
-       unsigned long flags;
+       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+       unsigned int cpu;
 
        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       gic_lock_irqsave(flags);
-       mask = 0xff << shift;
-       bit = gic_cpu_map[cpu] << shift;
-       val = readl_relaxed(reg) & ~mask;
-       writel_relaxed(val | bit, reg);
-       gic_unlock_irqrestore(flags);
-
+       writeb_relaxed(gic_cpu_map[cpu], reg);
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK_DONE;
index a6f97fa6ff69d5f3157450c0e99c9f6c9533ebfa..8017f6d32d52b7453a28635a57ba3068ef184a6a 100644 (file)
@@ -99,7 +99,7 @@ static int __init riscv_intc_init(struct device_node *node,
 
        hartid = riscv_of_parent_hartid(node);
        if (hartid < 0) {
-               pr_warn("unable to fine hart id for %pOF\n", node);
+               pr_warn("unable to find hart id for %pOF\n", node);
                return 0;
        }
 
index f60c025121215bf6fa39f31c32858e34764dc1dd..85e0daabad49cf4b445120bbf96d504af8b51532 100644 (file)
@@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md)
 {
-       /* nudge anyone waiting on suspend queue */
-       if (unlikely(wq_has_sleeper(&md->wait)))
-               wake_up(&md->wait);
-
        /*
         * dm_put() must be at the end of this function. See the comment above
         */
index 30505d70f42386bfa963c039c4b9983152f66019..5358894bb9fdc155348013b7f0fa6f90f7bdfbd2 100644 (file)
@@ -2266,6 +2266,12 @@ invalid_optional:
        }
 
        if (WC_MODE_PMEM(wc)) {
+               if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
+                       r = -EOPNOTSUPP;
+                       ti->error = "Asynchronous persistent memory not supported as pmem cache";
+                       goto bad;
+               }
+
                r = persistent_memory_claim(wc);
                if (r) {
                        ti->error = "Unable to map persistent memory for cache";
index 5cf6f5f552e047498fd2517424adb2ab86709b8b..b298fefb022eb9062025b753d53e71fc42d73b74 100644 (file)
@@ -2217,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
 {
        struct list_head *list;
        struct dm_zone *zone;
-       int i = 0;
+       int i;
+
+       /* Schedule reclaim to ensure free zones are available */
+       if (!(flags & DMZ_ALLOC_RECLAIM)) {
+               for (i = 0; i < zmd->nr_devs; i++)
+                       dmz_schedule_reclaim(zmd->dev[i].reclaim);
+       }
 
+       i = 0;
 again:
        if (flags & DMZ_ALLOC_CACHE)
                list = &zmd->unmap_cache_list;
index dd1eebf6e50f11a3b3dd6b11f55471293341ff84..9c0ecc9568a420ba02784160a0d951226cab6b18 100644 (file)
@@ -456,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
                nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
                nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
        }
+       if (nr_unmap <= 1)
+               return 0;
        return nr_unmap * 100 / nr_zones;
 }
 
@@ -501,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work)
 {
        struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
        struct dmz_metadata *zmd = zrc->metadata;
-       unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
+       unsigned int p_unmap;
        int ret;
 
        if (dmz_dev_is_dying(zmd))
@@ -527,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work)
                zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
        }
 
-       nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
-       nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
-
        DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
                dmz_metadata_label(zmd), zrc->dev_idx,
                zrc->kc_throttle.throttle,
index cf915009c306a8226945c501c53618235bf4e73c..42aa5139df7c791c01c008b2e32baa4c114ad983 100644 (file)
@@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
                dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *zone;
-       int i, ret;
-
-       /*
-        * Write may trigger a zone allocation. So make sure the
-        * allocation can succeed.
-        */
-       if (bio_op(bio) == REQ_OP_WRITE)
-               for (i = 0; i < dmz->nr_ddevs; i++)
-                       dmz_schedule_reclaim(dmz->dev[i].reclaim);
+       int ret;
 
        dmz_lock_metadata(zmd);
 
index e6807792fec831673781289992ddf90cef0f675e..52449afd58ebb76ab6e14c9a6fa18e486639c199 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/blkpg.h>
 #include <linux/bio.h>
@@ -654,28 +655,6 @@ static void free_tio(struct dm_target_io *tio)
        bio_put(&tio->clone);
 }
 
-static bool md_in_flight_bios(struct mapped_device *md)
-{
-       int cpu;
-       struct hd_struct *part = &dm_disk(md)->part0;
-       long sum = 0;
-
-       for_each_possible_cpu(cpu) {
-               sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
-               sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
-       }
-
-       return sum != 0;
-}
-
-static bool md_in_flight(struct mapped_device *md)
-{
-       if (queue_is_mq(md->queue))
-               return blk_mq_queue_inflight(md->queue);
-       else
-               return md_in_flight_bios(md);
-}
-
 u64 dm_start_time_ns_from_clone(struct bio *bio)
 {
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
@@ -1465,9 +1444,6 @@ static int __send_empty_flush(struct clone_info *ci)
        BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
-
-       bio_disassociate_blkg(ci->bio);
-
        return 0;
 }
 
@@ -1655,6 +1631,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                ci.bio = &flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
+               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else if (op_is_zone_mgmt(bio_op(bio))) {
                ci.bio = bio;
@@ -1729,6 +1706,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
                ci.bio = &flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
+               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else {
                struct dm_target_io *tio;
@@ -2470,15 +2448,29 @@ void dm_put(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_put);
 
-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+static bool md_in_flight_bios(struct mapped_device *md)
+{
+       int cpu;
+       struct hd_struct *part = &dm_disk(md)->part0;
+       long sum = 0;
+
+       for_each_possible_cpu(cpu) {
+               sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+               sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+       }
+
+       return sum != 0;
+}
+
+static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
 {
        int r = 0;
        DEFINE_WAIT(wait);
 
-       while (1) {
+       while (true) {
                prepare_to_wait(&md->wait, &wait, task_state);
 
-               if (!md_in_flight(md))
+               if (!md_in_flight_bios(md))
                        break;
 
                if (signal_pending_state(task_state, current)) {
@@ -2493,6 +2485,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
        return r;
 }
 
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+{
+       int r = 0;
+
+       if (!queue_is_mq(md->queue))
+               return dm_wait_for_bios_completion(md, task_state);
+
+       while (true) {
+               if (!blk_mq_queue_inflight(md->queue))
+                       break;
+
+               if (signal_pending_state(task_state, current)) {
+                       r = -EINTR;
+                       break;
+               }
+
+               msleep(5);
+       }
+
+       return r;
+}
+
 /*
  * Process the deferred bios
  */
@@ -2926,17 +2940,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
                       unsigned cookie)
 {
+       int r;
+       unsigned noio_flag;
        char udev_cookie[DM_COOKIE_LENGTH];
        char *envp[] = { udev_cookie, NULL };
 
+       noio_flag = memalloc_noio_save();
+
        if (!cookie)
-               return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+               r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
        else {
                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
                         DM_COOKIE_ENV_VAR_NAME, cookie);
-               return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
-                                         action, envp);
+               r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+                                      action, envp);
        }
+
+       memalloc_noio_restore(noio_flag);
+
+       return r;
 }
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
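
The dm_kobject_uevent() change above brackets the uevent emission with memalloc_noio_save()/memalloc_noio_restore(), which makes any allocation performed inside that window behave as GFP_NOIO, presumably so the uevent path does not issue I/O back into the device being reconfigured. A small sketch of the bracketing pattern, with a caller-supplied callback standing in for the real work:

    #include <linux/sched/mm.h>

    /*
     * Run fn(arg) with implicit GFP_NOIO semantics: allocations made while
     * the NOIO flag is saved will not recurse into the I/O path.
     */
    static int demo_call_noio(int (*fn)(void *), void *arg)
    {
            unsigned int noio_flag;
            int r;

            noio_flag = memalloc_noio_save();
            r = fn(arg);
            memalloc_noio_restore(noio_flag);

            return r;
    }
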
index 68aea22f2b89786973da2a929074ccfa274f3907..5216487db4fbeabcb32899b4104ec33dc01dd360 100644 (file)
@@ -1324,13 +1324,13 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
                        return 0; /* fw doesn't need any host buffers */
 
                /* spin till we get enough memory */
-               while(host_page_buffer_sz > 0) {
-
-                       if((ioc->HostPageBuffer = pci_alloc_consistent(
-                           ioc->pcidev,
-                           host_page_buffer_sz,
-                           &ioc->HostPageBuffer_dma)) != NULL) {
-
+               while (host_page_buffer_sz > 0) {
+                       ioc->HostPageBuffer =
+                               dma_alloc_coherent(&ioc->pcidev->dev,
+                                               host_page_buffer_sz,
+                                               &ioc->HostPageBuffer_dma,
+                                               GFP_KERNEL);
+                       if (ioc->HostPageBuffer) {
                                dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                                    "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
                                    ioc->name, ioc->HostPageBuffer,
@@ -2741,8 +2741,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                sz = ioc->alloc_sz;
                dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free  @ %p, sz=%d bytes\n",
                    ioc->name, ioc->alloc, ioc->alloc_sz));
-               pci_free_consistent(ioc->pcidev, sz,
-                               ioc->alloc, ioc->alloc_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                               ioc->alloc_dma);
                ioc->reply_frames = NULL;
                ioc->req_frames = NULL;
                ioc->alloc = NULL;
@@ -2751,8 +2751,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 
        if (ioc->sense_buf_pool != NULL) {
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               pci_free_consistent(ioc->pcidev, sz,
-                               ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                               ioc->sense_buf_pool_dma);
                ioc->sense_buf_pool = NULL;
                ioc->alloc_total -= sz;
        }
@@ -2802,7 +2802,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                        "HostPageBuffer free  @ %p, sz=%d bytes\n",
                        ioc->name, ioc->HostPageBuffer,
                        ioc->HostPageBuffer_sz));
-               pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+               dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
                    ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
                ioc->HostPageBuffer = NULL;
                ioc->HostPageBuffer_sz = 0;
@@ -4497,7 +4497,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                                ioc->name, sz, sz, num_chain));
 
                total_size += sz;
-               mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+               mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
+                               &alloc_dma, GFP_KERNEL);
                if (mem == NULL) {
                        printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
                                ioc->name);
@@ -4574,8 +4575,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                spin_unlock_irqrestore(&ioc->FreeQlock, flags);
 
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               ioc->sense_buf_pool =
-                       pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+               ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
+                               &ioc->sense_buf_pool_dma, GFP_KERNEL);
                if (ioc->sense_buf_pool == NULL) {
                        printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
                                ioc->name);
@@ -4613,18 +4614,16 @@ out_fail:
 
        if (ioc->alloc != NULL) {
                sz = ioc->alloc_sz;
-               pci_free_consistent(ioc->pcidev,
-                               sz,
-                               ioc->alloc, ioc->alloc_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                               ioc->alloc_dma);
                ioc->reply_frames = NULL;
                ioc->req_frames = NULL;
                ioc->alloc_total -= sz;
        }
        if (ioc->sense_buf_pool != NULL) {
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               pci_free_consistent(ioc->pcidev,
-                               sz,
-                               ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                               ioc->sense_buf_pool_dma);
                ioc->sense_buf_pool = NULL;
        }
 
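
The mptbase hunks above replace the legacy pci_alloc_consistent()/pci_free_consistent() wrappers with the generic DMA API. A minimal sketch of the replacement calls, using a placeholder PCI device and buffer size rather than the driver's own structures:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/pci.h>

    /* replaces pci_alloc_consistent(pdev, sz, &dma) */
    static void *demo_alloc_coherent(struct pci_dev *pdev, size_t sz,
                                     dma_addr_t *dma)
    {
            return dma_alloc_coherent(&pdev->dev, sz, dma, GFP_KERNEL);
    }

    /* replaces pci_free_consistent(pdev, sz, cpu_addr, dma) */
    static void demo_free_coherent(struct pci_dev *pdev, size_t sz,
                                   void *cpu_addr, dma_addr_t dma)
    {
            dma_free_coherent(&pdev->dev, sz, cpu_addr, dma);
    }

Unlike the old wrapper, dma_alloc_coherent() takes an explicit gfp mask, which is why the hunks above pass GFP_KERNEL at each call site.
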
index 7eb38d7482c6d97d6ef16024a13759ac7af1c635..08a3b1c05acb9741c422553c446df5eda9e76fa9 100644 (file)
@@ -1146,9 +1146,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
 
        mmc->caps |= MMC_CAP_CMD23;
        if (host->dram_access_quirk) {
+               /* Limit segments to 1 due to low available sram memory */
+               mmc->max_segs = 1;
                /* Limit to the available sram memory */
-               mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size;
-               mmc->max_blk_count = mmc->max_segs;
+               mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
+                                    mmc->max_blk_size;
        } else {
                mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
                mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
index 5e20c099fe03a8d538dd3305fecbfd1136fc42cd..df43f42855e2e87bdfbe254a2640a49564bc5a83 100644 (file)
@@ -689,7 +689,7 @@ MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
 static struct platform_driver owl_mmc_driver = {
        .driver = {
                .name   = "owl_mmc",
-               .of_match_table = of_match_ptr(owl_mmc_of_match),
+               .of_match_table = owl_mmc_of_match,
        },
        .probe          = owl_mmc_probe,
        .remove         = owl_mmc_remove,
index b277dd7fbdb5d56e7520d36491e3981378b5a42b..c0d58e9fcc33365a746bc005f601021e5cb69989 100644 (file)
@@ -618,8 +618,9 @@ static int msm_init_cm_dll(struct sdhci_host *host)
        config &= ~CORE_CLK_PWRSAVE;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
 
-       config = msm_host->dll_config;
-       writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+       if (msm_host->dll_config)
+               writel_relaxed(msm_host->dll_config,
+                               host->ioaddr + msm_offset->core_dll_config);
 
        if (msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
index 76d832a88e0c4fdd3d90337dc5f90038eed1c15f..7d930569a7dfb740522316297ae8c90c86316d3a 100644 (file)
@@ -1273,8 +1273,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
                return -EROFS;
        if (!len)
                return 0;
-       if (!mtd->oops_panic_write)
-               mtd->oops_panic_write = true;
+       if (!master->oops_panic_write)
+               master->oops_panic_write = true;
 
        return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
                                    retlen, buf);
index 0a5cb77966cc7e0d615968fd27969079ddcf9ca6..f5a53aac3c5ff3fe4cbe287f8f8ef23d7ef0e115 100644 (file)
@@ -1761,7 +1761,7 @@ static void ns_switch_state(struct nandsim *ns)
 
                NS_DBG("switch_state: operation is unknown, try to find it\n");
 
-               if (!ns_find_operation(ns, 0))
+               if (ns_find_operation(ns, 0))
                        return;
 
                if ((ns->state & ACTION_MASK) &&
index 94bfba9943265b378e6f16b0988222205914fedd..29255476afdb6054c3e22dbb4270c1435647bc0c 100644 (file)
@@ -224,7 +224,7 @@ static int xway_nand_remove(struct platform_device *pdev)
        struct nand_chip *chip = &data->chip;
        int ret;
 
-       ret = mtd_device_unregister(mtd);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
        WARN_ON(ret);
        nand_cleanup(chip);
 
index 47d65b77caf77415b730f0ce3ca3b62332e0b2e0..7c17b0f705ec37d626a5298b4b5c8150e5dc7bc3 100644 (file)
@@ -1268,6 +1268,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
                        return -ENOMEM;
        }
 
+       /* set the real number of ports */
+       dev->ds->num_ports = dev->port_cnt;
+
        return 0;
 }
 
index 9a51b8a4de5d1432a58537dcda4654932e7cc2dc..8d15c301602461106e4c41ab83961e8c6b792169 100644 (file)
@@ -1588,6 +1588,9 @@ static int ksz9477_switch_init(struct ksz_device *dev)
                        return -ENOMEM;
        }
 
+       /* set the real number of ports */
+       dev->ds->num_ports = dev->port_cnt;
+
        return 0;
 }
 
index 7d050fab08892123c1a255b92a0acea680c1383f..7951f52d860d36812067a96923175ed31b6ca846 100644 (file)
@@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
 static const struct of_device_id ksz9477_dt_ids[] = {
        { .compatible = "microchip,ksz9477" },
        { .compatible = "microchip,ksz9897" },
+       { .compatible = "microchip,ksz9893" },
        { .compatible = "microchip,ksz9567" },
        {},
 };
index 3c8e8047ea1ed0ca6cca95fbfa29b1c9953f9db0..d775b23025c16a8d74a18364d9f2d92f5a113d25 100644 (file)
@@ -1700,7 +1700,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
        for (i = 0; i < 4; ++i)
                aq_hw_write_reg(aq_hw,
                                HW_ATL_RPF_L3_SRCA_ADR(location + i),
-                               ipv6_src[i]);
+                               ipv6_src[3 - i]);
 }
 
 void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
@@ -1711,7 +1711,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
        for (i = 0; i < 4; ++i)
                aq_hw_write_reg(aq_hw,
                                HW_ATL_RPF_L3_DSTA_ADR(location + i),
-                               ipv6_dest[i]);
+                               ipv6_dest[3 - i]);
 }
 
 u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
index 06220792daf152fdfb0bb97b89f6f81704193c2f..7430ff025134129d0c1fefe8d7655c05acb72b7e 100644 (file)
  */
 
  /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4)
 /* Bitmask for bitfield l3_da0[1F:0] */
 #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
 /* Inverted bitmask for bitfield l3_da0[1F:0] */
index 3a9a51f7063ae5b084aad84d5e17ac47d7aab466..392e32c7122a69d0e743e8a7828e1e1285392c25 100644 (file)
@@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
                }
        }
 
+       bp->pf.active_vfs = 0;
        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
 }
@@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
 
        bnxt_free_vf_resources(bp);
 
-       bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
index 52582e8ed90e537a199043488f56b2a14af26fe5..f1f0976e7669a8e3261ab1c2a544f035dddd7c2c 100644 (file)
@@ -2821,11 +2821,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct macb *bp = netdev_priv(netdev);
 
-       wol->supported = 0;
-       wol->wolopts = 0;
-
-       if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+       if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
                phylink_ethtool_get_wol(bp->phylink, wol);
+               wol->supported |= WAKE_MAGIC;
+
+               if (bp->wol & MACB_WOL_ENABLED)
+                       wol->wolopts |= WAKE_MAGIC;
+       }
 }
 
 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2833,9 +2835,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct macb *bp = netdev_priv(netdev);
        int ret;
 
+       /* Pass the order to phylink layer */
        ret = phylink_ethtool_set_wol(bp->phylink, wol);
-       if (!ret)
-               return 0;
+       /* Don't manage WoL on MAC if handled by the PHY
+        * or if there's a failure in talking to the PHY
+        */
+       if (!ret || ret != -EOPNOTSUPP)
+               return ret;
 
        if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
            (wol->wolopts & ~WAKE_MAGIC))
@@ -4422,7 +4428,7 @@ static int macb_probe(struct platform_device *pdev)
        bp->wol = 0;
        if (of_get_property(np, "magic-packet", NULL))
                bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
-       device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+       device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
        spin_lock_init(&bp->lock);
 
@@ -4598,10 +4604,10 @@ static int __maybe_unused macb_suspend(struct device *dev)
                        bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
        }
 
-       netif_carrier_off(netdev);
        if (bp->ptp_info)
                bp->ptp_info->ptp_remove(netdev);
-       pm_runtime_force_suspend(dev);
+       if (!device_may_wakeup(dev))
+               pm_runtime_force_suspend(dev);
 
        return 0;
 }
@@ -4616,7 +4622,8 @@ static int __maybe_unused macb_resume(struct device *dev)
        if (!netif_running(netdev))
                return 0;
 
-       pm_runtime_force_resume(dev);
+       if (!device_may_wakeup(dev))
+               pm_runtime_force_resume(dev);
 
        if (bp->wol & MACB_WOL_ENABLED) {
                macb_writel(bp, IDR, MACB_BIT(WOL));
@@ -4654,7 +4661,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
        struct net_device *netdev = dev_get_drvdata(dev);
        struct macb *bp = netdev_priv(netdev);
 
-       if (!(device_may_wakeup(&bp->dev->dev))) {
+       if (!(device_may_wakeup(dev))) {
                clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
@@ -4670,7 +4677,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
        struct net_device *netdev = dev_get_drvdata(dev);
        struct macb *bp = netdev_priv(netdev);
 
-       if (!(device_may_wakeup(&bp->dev->dev))) {
+       if (!(device_may_wakeup(dev))) {
                clk_prepare_enable(bp->pclk);
                clk_prepare_enable(bp->hclk);
                clk_prepare_enable(bp->tx_clk);
index 7a7f61a8cdf409f33774ec5311998d8a597d8949..d02d346629b36395e3a3370976c10873ff43f2c5 100644 (file)
@@ -1112,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
                struct in_addr *addr;
 
                addr = (struct in_addr *)ipmask;
-               if (ntohl(addr->s_addr) == 0xffffffff)
+               if (addr->s_addr == htonl(0xffffffff))
                        return true;
        } else if (family == AF_INET6) {
                struct in6_addr *addr6;
 
                addr6 = (struct in6_addr *)ipmask;
-               if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
-                   ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
-                   ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
-                   ntohl(addr6->s6_addr32[3]) == 0xffffffff)
+               if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[1] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[2] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[3] == htonl(0xffffffff))
                        return true;
        }
        return false;
index 1aa6dc10dc0be6c31a6498f1a4b98a9788bf5c4c..ad522f822cc22b623b5966957bb2d0e1e092cf4b 100644 (file)
@@ -3493,7 +3493,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
        drv_fw = &fw_info->fw_hdr;
 
        /* Read the header of the firmware on the card */
-       ret = -t4_read_flash(adap, FLASH_FW_START,
+       ret = t4_read_flash(adap, FLASH_FW_START,
                            sizeof(*card_fw) / sizeof(uint32_t),
                            (uint32_t *)card_fw, 1);
        if (ret == 0) {
@@ -3522,8 +3522,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
                   should_install_fs_fw(adap, card_fw_usable,
                                        be32_to_cpu(fs_fw->fw_ver),
                                        be32_to_cpu(card_fw->fw_ver))) {
-               ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
-                                    fw_size, 0);
+               ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
+                                   fw_size, 0);
                if (ret != 0) {
                        dev_err(adap->pdev_dev,
                                "failed to install firmware: %d\n", ret);
@@ -3554,7 +3554,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
                        FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
                        FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
                        FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
-               ret = EINVAL;
+               ret = -EINVAL;
                goto bye;
        }
 
index 96831f49925c088ce6bacd32f47515471bb67fbc..22105d09bc895330633cfa3137da7857caff535d 100644 (file)
@@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
        /* disable interrupts */
        enetc_wr_reg(v->rbier, 0);
 
-       for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+       for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
 
        napi_schedule_irqoff(&v->napi);
@@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
        /* enable interrupts */
        enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
 
-       for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+       for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
                             ENETC_TBIER_TXTIE);
 
index a6cdd5b61921bb3472f3e8b72faefffddcc2e9cf..d8d76da51c5e9a9bcdd8d71abee3b6029eff9788 100644 (file)
@@ -525,11 +525,6 @@ struct fec_enet_private {
        unsigned int total_tx_ring_size;
        unsigned int total_rx_ring_size;
 
-       unsigned long work_tx;
-       unsigned long work_rx;
-       unsigned long work_ts;
-       unsigned long work_mdio;
-
        struct  platform_device *pdev;
 
        int     dev_id;
index 2d0d313ee7c5a193a805f858b9fcd83f98a4ebea..3982285ed020e23281ef639a1bcd4261beb57a3a 100644 (file)
@@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 
 #define DRIVER_NAME    "fec"
 
-#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
-
 /* Pause frame feild and FIFO threshold */
 #define FEC_ENET_FCE   (1 << 5)
 #define FEC_ENET_RSEM_V        0x84
@@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
        fep = netdev_priv(ndev);
 
-       queue_id = FEC_ENET_GET_QUQUE(queue_id);
-
        txq = fep->tx_queue[queue_id];
        /* get next bdp of dirty_tx */
        nq = netdev_get_tx_queue(ndev, queue_id);
@@ -1340,17 +1336,14 @@ skb_done:
                writel(0, txq->bd.reg_desc_active);
 }
 
-static void
-fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       u16 queue_id;
-       /* First process class A queue, then Class B and Best Effort queue */
-       for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
-               clear_bit(queue_id, &fep->work_tx);
-               fec_enet_tx_queue(ndev, queue_id);
-       }
-       return;
+       int i;
+
+       /* Make sure that AVB queues are processed first. */
+       for (i = fep->num_tx_queues - 1; i >= 0; i--)
+               fec_enet_tx_queue(ndev, i);
 }
 
 static int
@@ -1426,7 +1419,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 #ifdef CONFIG_M532x
        flush_cache_all();
 #endif
-       queue_id = FEC_ENET_GET_QUQUE(queue_id);
        rxq = fep->rx_queue[queue_id];
 
        /* First, grab all of the stats for the incoming packet.
@@ -1550,6 +1542,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                                               htons(ETH_P_8021Q),
                                               vlan_tag);
 
+               skb_record_rx_queue(skb, queue_id);
                napi_gro_receive(&fep->napi, skb);
 
                if (is_copybreak) {
@@ -1595,48 +1588,30 @@ rx_processing_done:
        return pkt_received;
 }
 
-static int
-fec_enet_rx(struct net_device *ndev, int budget)
+static int fec_enet_rx(struct net_device *ndev, int budget)
 {
-       int     pkt_received = 0;
-       u16     queue_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int i, done = 0;
 
-       for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-               int ret;
-
-               ret = fec_enet_rx_queue(ndev,
-                                       budget - pkt_received, queue_id);
+       /* Make sure that AVB queues are processed first. */
+       for (i = fep->num_rx_queues - 1; i >= 0; i--)
+               done += fec_enet_rx_queue(ndev, budget - done, i);
 
-               if (ret < budget - pkt_received)
-                       clear_bit(queue_id, &fep->work_rx);
-
-               pkt_received += ret;
-       }
-       return pkt_received;
+       return done;
 }
 
-static bool
-fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
 {
-       if (int_events == 0)
-               return false;
+       uint int_events;
+
+       int_events = readl(fep->hwp + FEC_IEVENT);
 
-       if (int_events & FEC_ENET_RXF_0)
-               fep->work_rx |= (1 << 2);
-       if (int_events & FEC_ENET_RXF_1)
-               fep->work_rx |= (1 << 0);
-       if (int_events & FEC_ENET_RXF_2)
-               fep->work_rx |= (1 << 1);
+       /* Don't clear MDIO events, we poll for those */
+       int_events &= ~FEC_ENET_MII;
 
-       if (int_events & FEC_ENET_TXF_0)
-               fep->work_tx |= (1 << 2);
-       if (int_events & FEC_ENET_TXF_1)
-               fep->work_tx |= (1 << 0);
-       if (int_events & FEC_ENET_TXF_2)
-               fep->work_tx |= (1 << 1);
+       writel(int_events, fep->hwp + FEC_IEVENT);
 
-       return true;
+       return int_events != 0;
 }
 
 static irqreturn_t
@@ -1644,18 +1619,9 @@ fec_enet_interrupt(int irq, void *dev_id)
 {
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
-       uint int_events;
        irqreturn_t ret = IRQ_NONE;
 
-       int_events = readl(fep->hwp + FEC_IEVENT);
-
-       /* Don't clear MDIO events, we poll for those */
-       int_events &= ~FEC_ENET_MII;
-
-       writel(int_events, fep->hwp + FEC_IEVENT);
-       fec_enet_collect_events(fep, int_events);
-
-       if ((fep->work_tx || fep->work_rx) && fep->link) {
+       if (fec_enet_collect_events(fep) && fep->link) {
                ret = IRQ_HANDLED;
 
                if (napi_schedule_prep(&fep->napi)) {
@@ -1672,17 +1638,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int pkts;
+       int done = 0;
 
-       pkts = fec_enet_rx(ndev, budget);
-
-       fec_enet_tx(ndev);
+       do {
+               done += fec_enet_rx(ndev, budget - done);
+               fec_enet_tx(ndev);
+       } while ((done < budget) && fec_enet_collect_events(fep));
 
-       if (pkts < budget) {
-               napi_complete_done(napi, pkts);
+       if (done < budget) {
+               napi_complete_done(napi, done);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        }
-       return pkts;
+
+       return done;
 }
 
 /* ------------------------------------------------------------------------- */
index b14f2abc242501353915ee84784f7a90941d8366..c38f3bbe7d97cd51804c9ba4c22c7285f3ef7620 100644 (file)
@@ -4127,9 +4127,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
        hns3_put_ring_config(priv);
 
-       hns3_dbg_uninit(handle);
-
 out_netdev_free:
+       hns3_dbg_uninit(handle);
        free_netdev(netdev);
 }
 
index 6b1545f982aad5b7fd7059096426b4c63a395ae4..2622e04e8eedaf7130450516687b69ad28b93f22 100644 (file)
@@ -180,18 +180,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
 {
        struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
        unsigned char *packet = skb->data;
+       u32 len = skb_headlen(skb);
        u32 i;
 
-       for (i = 0; i < skb->len; i++)
+       len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+       for (i = 0; i < len; i++)
                if (packet[i] != (unsigned char)(i & 0xff))
                        break;
 
        /* The packet is correctly received */
-       if (i == skb->len)
+       if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
                tqp_vector->rx_group.total_packets++;
        else
                print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
-                              skb->data, skb->len, true);
+                              skb->data, len, true);
 
        dev_kfree_skb_any(skb);
 }
index 96bfad52630d3d29fd2c13c62d6f727a1210ef22..d6bfdc6520dff7f5b71ea345e669533a7d566abf 100644 (file)
@@ -9859,7 +9859,7 @@ retry:
        set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
        hdev->reset_type = HNAE3_FLR_RESET;
        ret = hclge_reset_prepare(hdev);
-       if (ret) {
+       if (ret || hdev->reset_pending) {
                dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
                        ret);
                if (hdev->reset_pending ||
index 1b9578d0bd800b91d622f9b4620d54d92f7eec1a..a10b022d19515987e3742ee345436929c17d1620 100644 (file)
@@ -1793,6 +1793,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
        if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
                hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
                ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "failed to assert VF reset, ret = %d\n", ret);
+                       return ret;
+               }
                hdev->rst_stats.vf_func_rst_cnt++;
        }
 
index 0245da02efbb0952b326e436a053fbcbd1a0f765..b735bc537508fcda0526238f7de5082d798598ff 100644 (file)
@@ -814,6 +814,8 @@ err_aeqs_init:
 err_init_msix:
 err_pfhwdev_alloc:
        hinic_free_hwif(hwif);
+       if (err > 0)
+               err = -EIO;
        return ERR_PTR(err);
 }
 
index c33eb114705572afc3adf27b17b340eeebf10314..e0f5a81d8620d7091ccd5b82b269ea899e9ed89a 100644 (file)
@@ -370,48 +370,89 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
                                MSG_NOT_RESP, timeout);
 }
 
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
-                                 struct hinic_recv_msg *recv_msg)
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
 {
-       struct hinic_hwif *hwif = pf_to_mgmt->hwif;
-       struct pci_dev *pdev = hwif->pdev;
-       u8 *buf_out = recv_msg->buf_out;
+       struct hinic_mgmt_msg_handle_work *mgmt_work =
+               container_of(work, struct hinic_mgmt_msg_handle_work, work);
+       struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
+       struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+       u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
        struct hinic_mgmt_cb *mgmt_cb;
        unsigned long cb_state;
        u16 out_size = 0;
 
-       if (recv_msg->mod >= HINIC_MOD_MAX) {
+       memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+       if (mgmt_work->mod >= HINIC_MOD_MAX) {
                dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
-                       recv_msg->mod);
+                       mgmt_work->mod);
+               kfree(mgmt_work->msg);
+               kfree(mgmt_work);
                return;
        }
 
-       mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
+       mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];
 
        cb_state = cmpxchg(&mgmt_cb->state,
                           HINIC_MGMT_CB_ENABLED,
                           HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
 
        if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
-               mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
-                           recv_msg->msg, recv_msg->msg_len,
+               mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
+                           mgmt_work->msg, mgmt_work->msg_len,
                            buf_out, &out_size);
        else
                dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
-                       recv_msg->mod, recv_msg->cmd);
+                       mgmt_work->mod, mgmt_work->cmd);
 
        mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
 
-       if (!recv_msg->async_mgmt_to_pf)
+       if (!mgmt_work->async_mgmt_to_pf)
                /* MGMT sent sync msg, send the response */
-               msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
+               msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
                                  buf_out, out_size, MGMT_RESP,
-                                 recv_msg->msg_id);
+                                 mgmt_work->msg_id);
+
+       kfree(mgmt_work->msg);
+       kfree(mgmt_work);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+                                 struct hinic_recv_msg *recv_msg)
+{
+       struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
+       struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+
+       mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+       if (!mgmt_work) {
+               dev_err(&pdev->dev, "Allocate mgmt work memory failed\n");
+               return;
+       }
+
+       if (recv_msg->msg_len) {
+               mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+               if (!mgmt_work->msg) {
+                       dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n");
+                       kfree(mgmt_work);
+                       return;
+               }
+       }
+
+       mgmt_work->pf_to_mgmt = pf_to_mgmt;
+       mgmt_work->msg_len = recv_msg->msg_len;
+       memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+       mgmt_work->msg_id = recv_msg->msg_id;
+       mgmt_work->mod = recv_msg->mod;
+       mgmt_work->cmd = recv_msg->cmd;
+       mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+       INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+       queue_work(pf_to_mgmt->workq, &mgmt_work->work);
 }
 
 /**
@@ -546,6 +587,12 @@ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
        if (!pf_to_mgmt->sync_msg_buf)
                return -ENOMEM;
 
+       pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
+                                               MAX_PF_MGMT_BUF_SIZE,
+                                               GFP_KERNEL);
+       if (!pf_to_mgmt->mgmt_ack_buf)
+               return -ENOMEM;
+
        return 0;
 }
 
@@ -571,6 +618,11 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
                return 0;
 
        sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+       pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
+       if (!pf_to_mgmt->workq) {
+               dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
+               return -ENOMEM;
+       }
        pf_to_mgmt->sync_msg_id = 0;
 
        err = alloc_msg_buf(pf_to_mgmt);
@@ -605,4 +657,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
 
        hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
        hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+       destroy_workqueue(pf_to_mgmt->workq);
 }
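
The hinic changes above stop handling management messages directly in the event path and instead copy each message into a work item that a dedicated single-threaded workqueue processes later. A compact sketch of that defer-to-workqueue pattern; all names here are hypothetical and the allocation flag is only an assumption for an atomic caller:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/workqueue.h>

    struct demo_msg_work {
            struct work_struct work;
            u16 len;
            u8 data[];                    /* private copy of the received message */
    };

    static void demo_msg_work_fn(struct work_struct *work)
    {
            struct demo_msg_work *m = container_of(work, struct demo_msg_work, work);

            /* ... process m->data / m->len in process context ... */
            kfree(m);
    }

    static int demo_defer_msg(struct workqueue_struct *wq, const u8 *data, u16 len)
    {
            struct demo_msg_work *m = kzalloc(sizeof(*m) + len, GFP_ATOMIC);

            if (!m)
                    return -ENOMEM;

            memcpy(m->data, data, len);
            m->len = len;
            INIT_WORK(&m->work, demo_msg_work_fn);
            queue_work(wq, &m->work);

            return 0;
    }

Copying the payload before queuing is what lets the original receive buffer be reused immediately, which is the point of the mgmt_work->msg allocation in the hunk above.
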
index c2b142c08b0e2f9c119095f544f44dfe421d313c..a824fbda59dbe9f0f55bd92d0b024ff0eb872dbc 100644 (file)
@@ -119,6 +119,7 @@ struct hinic_pf_to_mgmt {
        struct semaphore                sync_msg_lock;
        u16                             sync_msg_id;
        u8                              *sync_msg_buf;
+       void                            *mgmt_ack_buf;
 
        struct hinic_recv_msg           recv_resp_msg_from_mgmt;
        struct hinic_recv_msg           recv_msg_from_mgmt;
@@ -126,6 +127,21 @@ struct hinic_pf_to_mgmt {
        struct hinic_api_cmd_chain      *cmd_chain[HINIC_API_CMD_MAX];
 
        struct hinic_mgmt_cb            mgmt_cb[HINIC_MOD_MAX];
+
+       struct workqueue_struct         *workq;
+};
+
+struct hinic_mgmt_msg_handle_work {
+       struct work_struct work;
+       struct hinic_pf_to_mgmt *pf_to_mgmt;
+
+       void                    *msg;
+       u16                     msg_len;
+
+       enum hinic_mod_type     mod;
+       u8                      cmd;
+       u16                     msg_id;
+       int                     async_mgmt_to_pf;
 };
 
 void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
index c639e3a293024fe48d685e129699de993c7cd098..7d5d9d34f4e470ceadc201f44caea0b6509fc1b4 100644 (file)
@@ -3959,7 +3959,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
        /* When at 2.5G, the link partner can send frames with shortened
         * preambles.
         */
-       if (state->speed == SPEED_2500)
+       if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
                new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
 
        if (pp->phy_interface != state->interface) {
index 241f007169797b8a21e4496610b700698bd1d8c1..fe54764caea9c7d6253de215fb198c2ab5333b40 100644 (file)
@@ -203,7 +203,7 @@ io_error:
 
 static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 {
-       u16 v;
+       u16 v = 0;
        __gm_phy_read(hw, port, reg, &v);
        return v;
 }
index 7be6b2d36b60382041321aa741fff70e2fd9e524..9976de8b90478d5935e59e2a6ac4c6ce2b725570 100644 (file)
@@ -29,6 +29,7 @@ struct mlx5e_dcbx {
        bool                       manual_buffer;
        u32                        cable_len;
        u32                        xoff;
+       u16                        port_buff_cell_sz;
 };
 
 #define MLX5E_MAX_DSCP (64)
index 2a8950b3056f95445bc335835c0d694fa7d01011..3cf3e35053f7768de5010527139d1de02200499c 100644 (file)
@@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
        [MLX5E_400GAUI_8]                       = 400000,
 };
 
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+       struct mlx5e_port_eth_proto eproto;
+       int err;
+
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+               return true;
+
+       err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+       if (err)
+               return false;
+
+       return !!eproto.cap;
+}
+
 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
                                     const u32 **arr, u32 *size,
                                     bool force_legacy)
 {
-       bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
 
        *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
                      ARRAY_SIZE(mlx5e_link_speed);
@@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        bool ext;
        int err;
 
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = mlx5e_ptys_ext_supported(mdev);
        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
                goto out;
@@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        int err;
        int i;
 
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = mlx5e_ptys_ext_supported(mdev);
        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
                return err;
index a2ddd446dd59e66911bb3f55ee0941c855c66c7c..7a7defe6079262678e1ff234069a87487a48d53b 100644 (file)
@@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
                               bool force_legacy);
-
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
index ae99fac08b53260f38ba34a869d38e87614b3ec4..673f1c82d38155b4c71fa7cbae95f8c5bdbafae8 100644 (file)
@@ -34,6 +34,7 @@
 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
                            struct mlx5e_port_buffer *port_buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
        u32 total_used = 0;
@@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
                port_buffer->buffer[i].epsb =
                        MLX5_GET(bufferx_reg, buffer, epsb);
                port_buffer->buffer[i].size =
-                       MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
                port_buffer->buffer[i].xon =
-                       MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
                port_buffer->buffer[i].xoff =
-                       MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
                total_used += port_buffer->buffer[i].size;
 
                mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
@@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
        }
 
        port_buffer->port_buffer_size =
-               MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+               MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
        port_buffer->spare_buffer_size =
                port_buffer->port_buffer_size - total_used;
 
@@ -88,9 +89,9 @@ out:
 static int port_set_buffer(struct mlx5e_priv *priv,
                           struct mlx5e_port_buffer *port_buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
-       void *buffer;
        void *in;
        int err;
        int i;
@@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv,
                goto out;
 
        for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
-               buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
-
-               MLX5_SET(bufferx_reg, buffer, size,
-                        port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
-               MLX5_SET(bufferx_reg, buffer, lossy,
-                        port_buffer->buffer[i].lossy);
-               MLX5_SET(bufferx_reg, buffer, xoff_threshold,
-                        port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
-               MLX5_SET(bufferx_reg, buffer, xon_threshold,
-                        port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+               void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+               u64 size = port_buffer->buffer[i].size;
+               u64 xoff = port_buffer->buffer[i].xoff;
+               u64 xon = port_buffer->buffer[i].xon;
+
+               do_div(size, port_buff_cell_sz);
+               do_div(xoff, port_buff_cell_sz);
+               do_div(xon, port_buff_cell_sz);
+               MLX5_SET(bufferx_reg, buffer, size, size);
+               MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
+               MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
+               MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
        }
 
        err = mlx5e_port_set_pbmc(mdev, in);
@@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-                                u32 xoff, unsigned int max_mtu)
+                                u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
 {
        int i;
 
@@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
                }
 
                if (port_buffer->buffer[i].size <
-                   (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+                   (xoff + max_mtu + port_buff_cell_sz)) {
                        pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
                               i, port_buffer->buffer[i].size);
                        return -ENOMEM;
@@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     @pfc_en: <input> current pfc configuration
  *     @buffer: <input> current prio to buffer mapping
  *     @xoff:   <input> xoff value
+ *     @port_buff_cell_sz: <input> port buffer cell size
  *     @port_buffer: <output> port receive buffer configuration
  *     @change: <output>
  *
@@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     sets change to true if buffer configuration was modified.
  */
 static int update_buffer_lossy(unsigned int max_mtu,
-                              u8 pfc_en, u8 *buffer, u32 xoff,
+                              u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
                               struct mlx5e_port_buffer *port_buffer,
                               bool *change)
 {
@@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu,
        }
 
        if (changed) {
-               err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
 
@@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                                    u32 *buffer_size,
                                    u8 *prio2buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5e_port_buffer port_buffer;
        u32 xoff = calculate_xoff(priv, mtu);
        bool update_prio2buffer = false;
@@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
@@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
                                          &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
                                          xoff, &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                        return -EINVAL;
 
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
@@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        /* Need to update buffer configuration if xoff value is changed */
        if (!update_buffer && xoff != priv->dcbx.xoff) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
index 34f55b81a0debf8d11a80c3a877f665f902d2e7e..80af7a5ac6046acb9073faa49cc89225c63d305c 100644 (file)
@@ -36,7 +36,6 @@
 #include "port.h"
 
 #define MLX5E_MAX_BUFFER 8
-#define MLX5E_BUFFER_CELL_SHIFT 7
 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
 
 #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
index baa162432e75e31c83d5d2f130782c93c4aab669..c3d167fa944c7960d54d65de10e8c94b76395d0a 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/rculist.h>
 #include <linux/rtnetlink.h>
 #include <linux/workqueue.h>
-#include <linux/rwlock.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <net/netevent.h>
index 430025550fad2ba7c222dc4b0ae5c81ec8211d2d..aad1c29b23db102e0f858327e7769c3cc445f0ac 100644 (file)
@@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
        struct mlx5_ct_entry *entry = ptr;
 
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+       kfree(entry);
 }
 
 static void
index bc102d094bbd127212af43d3793ab3c08794c5f1..d20243d6a0326000643b6411d185354bf7bb0655 100644 (file)
@@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
        return 0;
 }
 
+#define MLX5E_BUFFER_CELL_SHIFT 7
+
+static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+       u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+
+       if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+               return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+       if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+                                MLX5_REG_SBCAM, 0, 0))
+               return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+       return MLX5_GET(sbcam_reg, out, cap_cell_size);
+}
+
 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 {
        struct mlx5e_dcbx *dcbx = &priv->dcbx;
@@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
        if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
                priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
 
+       priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
        priv->dcbx.manual_buffer = false;
        priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
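
The hunks above replace the driver's fixed 128-byte cell assumption (1 << MLX5E_BUFFER_CELL_SHIFT) with a cell size queried from the SBCAM register at DCB init time, falling back to 128 bytes when the register is not supported. A minimal stand-alone sketch of the byte/cell conversions this enables, in plain user-space C with hypothetical helper names (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define FALLBACK_CELL_SZ 128u   /* 1 << 7, the old hard-coded cell size */

/* Bytes occupied by a buffer reported as a number of cells. */
static uint32_t cells_to_bytes(uint32_t cells, uint16_t cell_sz)
{
        return cells * cell_sz;
}

/* Cells to program for a buffer requested in bytes (truncating, like do_div). */
static uint32_t bytes_to_cells(uint64_t bytes, uint16_t cell_sz)
{
        return (uint32_t)(bytes / cell_sz);
}

int main(void)
{
        uint16_t cell_sz = FALLBACK_CELL_SZ;    /* or the value read from SBCAM */
        uint32_t cells = bytes_to_cells(9216, cell_sz);

        printf("9216 bytes -> %u cells -> %u bytes\n",
               cells, cells_to_bytes(cells, cell_sz));
        return 0;
}

With a 128-byte cell this is exactly the old shift; with any other cell size the multiply/divide form is required, which is why the shift macro moves out of port_buffer.h and only survives as the fallback in en_dcbnl.c.
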
 
index ec5658bbe3c57e169fc57d28811702b33394527f..c2464c349117e45bf6f50fc5d5e54c782dc0b4ac 100644 (file)
@@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
                                        struct ptys2ethtool_config **arr,
                                        u32 *size)
 {
-       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = mlx5e_ptys_ext_supported(mdev);
 
        *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
        *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
@@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
                               struct ethtool_link_ksettings *link_ksettings)
 {
        unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
-       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = mlx5e_ptys_ext_supported(mdev);
 
        ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
@@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                           __func__, err);
                goto err_query_regs;
        }
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
        eth_proto_cap    = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_capability);
        eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
@@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        autoneg = link_ksettings->base.autoneg;
        speed = link_ksettings->base.speed;
 
-       ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext_supported = mlx5e_ptys_ext_supported(mdev);
        ext = ext_requested(autoneg, adver, ext_supported);
        if (!ext_supported && ext)
                return -EOPNOTSUPP;
index a836a02a2116609eebaa64279bb0b659d42ada8d..081f15074cac4229cfcbf7da96e47b1208f03941 100644 (file)
@@ -3104,9 +3104,6 @@ int mlx5e_open(struct net_device *netdev)
                mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
        mutex_unlock(&priv->state_lock);
 
-       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-               udp_tunnel_get_rx_info(netdev);
-
        return err;
 }
 
@@ -5121,6 +5118,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
        if (err)
                goto err_destroy_flow_steering;
 
+#ifdef CONFIG_MLX5_EN_ARFS
+       priv->netdev->rx_cpu_rmap =  mlx5_eq_table_get_rmap(priv->mdev);
+#endif
+
        return 0;
 
 err_destroy_flow_steering:
@@ -5202,6 +5203,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        rtnl_lock();
        if (netif_running(netdev))
                mlx5e_open(netdev);
+       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+               udp_tunnel_get_rx_info(netdev);
        netif_device_attach(netdev);
        rtnl_unlock();
 }
@@ -5216,6 +5219,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
        rtnl_lock();
        if (netif_running(priv->netdev))
                mlx5e_close(priv->netdev);
+       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+               udp_tunnel_drop_rx_info(priv->netdev);
        netif_device_detach(priv->netdev);
        rtnl_unlock();
 
@@ -5288,10 +5293,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
        /* netdev init */
        netif_carrier_off(netdev);
 
-#ifdef CONFIG_MLX5_EN_ARFS
-       netdev->rx_cpu_rmap =  mlx5_eq_table_get_rmap(mdev);
-#endif
-
        return 0;
 
 err_free_cpumask:
index 7fc84f58e28a4f9d96408aeb59dcbaed4cc84b60..cc8412151ca09ad3a100c54a3e637f90e3e46af6 100644 (file)
@@ -4670,9 +4670,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
                                           struct mlx5e_rep_priv *rpriv)
 {
        /* Offloaded flow rule is allowed to duplicate on non-uplink representor
-        * sharing tc block with other slaves of a lag device.
+        * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
+        * function is called from NIC mode.
         */
-       return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+       return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }
 
 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
@@ -4686,13 +4687,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 
        rcu_read_lock();
        flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
-       rcu_read_unlock();
        if (flow) {
                /* Same flow rule offloaded to non-uplink representor sharing tc block,
                 * just return 0.
                 */
                if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
-                       goto out;
+                       goto rcu_unlock;
 
                NL_SET_ERR_MSG_MOD(extack,
                                   "flow cookie already exists, ignoring");
@@ -4700,8 +4700,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
                                 "flow cookie %lx already exists, ignoring\n",
                                 f->cookie);
                err = -EEXIST;
-               goto out;
+               goto rcu_unlock;
        }
+rcu_unlock:
+       rcu_read_unlock();
+       if (flow)
+               goto out;
 
        trace_mlx5e_configure_flower(f);
        err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
index 5dc335e621c577898a52d9ee0380275c38bada7f..b68976b378b81a53d778af04ee50b754f7a29f2d 100644 (file)
@@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
        }
 
        /* Create ingress allow rule */
-       memset(spec, 0, sizeof(*spec));
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
index 9f829e68fc7344ccff938e4629513adb99784083..e4186e84b3ffbbbec4aa339a05dbd9d0b871309f 100644 (file)
@@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
        return 0;
 }
 
-static int mlx5_eeprom_page(int offset)
+static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
+                               u8 *module_id)
+{
+       u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
+       u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+       int err, status;
+       u8 *ptr;
+
+       MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
+       MLX5_SET(mcia_reg, in, module, module_num);
+       MLX5_SET(mcia_reg, in, device_address, 0);
+       MLX5_SET(mcia_reg, in, page_number, 0);
+       MLX5_SET(mcia_reg, in, size, 1);
+       MLX5_SET(mcia_reg, in, l, 0);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_MCIA, 0, 0);
+       if (err)
+               return err;
+
+       status = MLX5_GET(mcia_reg, out, status);
+       if (status) {
+               mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+                             status);
+               return -EIO;
+       }
+       ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+       *module_id = ptr[0];
+
+       return 0;
+}
+
+static int mlx5_qsfp_eeprom_page(u16 offset)
 {
        if (offset < MLX5_EEPROM_PAGE_LENGTH)
                /* Addresses between 0-255 - page 00 */
@@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset)
                    MLX5_EEPROM_HIGH_PAGE_LENGTH);
 }
 
-static int mlx5_eeprom_high_page_offset(int page_num)
+static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
 {
        if (!page_num) /* Page 0 always start from low page */
                return 0;
@@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num)
        return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
 }
 
+static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+       *i2c_addr = MLX5_I2C_ADDR_LOW;
+       *page_num = mlx5_qsfp_eeprom_page(*offset);
+       *offset -=  mlx5_qsfp_eeprom_high_page_offset(*page_num);
+}
+
+static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+       *i2c_addr = MLX5_I2C_ADDR_LOW;
+       *page_num = 0;
+
+       if (*offset < MLX5_EEPROM_PAGE_LENGTH)
+               return;
+
+       *i2c_addr = MLX5_I2C_ADDR_HIGH;
+       *offset -= MLX5_EEPROM_PAGE_LENGTH;
+}
+
 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                             u16 offset, u16 size, u8 *data)
 {
-       int module_num, page_num, status, err;
+       int module_num, status, err, page_num = 0;
+       u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
        u32 out[MLX5_ST_SZ_DW(mcia_reg)];
-       u32 in[MLX5_ST_SZ_DW(mcia_reg)];
-       u16 i2c_addr;
-       void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+       u16 i2c_addr = 0;
+       u8 module_id;
+       void *ptr;
 
        err = mlx5_query_module_num(dev, &module_num);
        if (err)
                return err;
 
-       memset(in, 0, sizeof(in));
-       size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
-       /* Get the page number related to the given offset */
-       page_num = mlx5_eeprom_page(offset);
+       err = mlx5_query_module_id(dev, module_num, &module_id);
+       if (err)
+               return err;
 
-       /* Set the right offset according to the page number,
-        * For page_num > 0, relative offset is always >= 128 (high page).
-        */
-       offset -= mlx5_eeprom_high_page_offset(page_num);
+       switch (module_id) {
+       case MLX5_MODULE_ID_SFP:
+               mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       case MLX5_MODULE_ID_QSFP:
+       case MLX5_MODULE_ID_QSFP_PLUS:
+       case MLX5_MODULE_ID_QSFP28:
+               mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       default:
+               mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+               return -EINVAL;
+       }
 
        if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
                /* Cross pages read, read until offset 256 in low page */
                size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
-       i2c_addr = MLX5_I2C_ADDR_LOW;
+       size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
 
        MLX5_SET(mcia_reg, in, l, 0);
        MLX5_SET(mcia_reg, in, module, module_num);
@@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                return -EIO;
        }
 
+       ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
        memcpy(data, ptr, size);
 
        return size;
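
The rework above first reads the module ID, then picks the I2C address, page number and in-page offset differently for SFP and QSFP-style modules before issuing the MCIA access. A stand-alone sketch of that offset mapping, assuming the conventional SFF layout the driver's constants encode (256-byte low page, 128-byte high pages, 0x50/0x51 I2C addresses); the helper names and literal values here are illustrative, not the driver's:

#include <stdio.h>

#define PAGE_LEN      256   /* assumed MLX5_EEPROM_PAGE_LENGTH */
#define HIGH_PAGE_LEN 128   /* assumed MLX5_EEPROM_HIGH_PAGE_LENGTH */
#define I2C_ADDR_LOW  0x50  /* assumed MLX5_I2C_ADDR_LOW */
#define I2C_ADDR_HIGH 0x51  /* assumed MLX5_I2C_ADDR_HIGH */

struct eeprom_params { unsigned int i2c_addr; int page; unsigned int offset; };

/* QSFP-style: one I2C address, offsets >= 256 map to numbered high pages. */
static struct eeprom_params qsfp_map(unsigned int offset)
{
        struct eeprom_params p = { I2C_ADDR_LOW, 0, offset };

        if (offset >= PAGE_LEN) {
                p.page = 1 + (offset - PAGE_LEN) / HIGH_PAGE_LEN;
                p.offset = offset - p.page * HIGH_PAGE_LEN;
        }
        return p;
}

/* SFP-style: page 0 only, offsets >= 256 sit behind the second I2C address. */
static struct eeprom_params sfp_map(unsigned int offset)
{
        struct eeprom_params p = { I2C_ADDR_LOW, 0, offset };

        if (offset >= PAGE_LEN) {
                p.i2c_addr = I2C_ADDR_HIGH;
                p.offset = offset - PAGE_LEN;
        }
        return p;
}

int main(void)
{
        struct eeprom_params q = qsfp_map(300), s = sfp_map(300);

        printf("QSFP offset 300 -> addr 0x%x page %d off %u\n", q.i2c_addr, q.page, q.offset);
        printf("SFP  offset 300 -> addr 0x%x page %d off %u\n", s.i2c_addr, s.page, s.offset);
        return 0;
}
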
index fd0e97de44e7a4063da82b5c3e80ff4c7a231ae9..c04ec1a92826024654a325f3eb58c96e817a87c1 100644 (file)
@@ -1414,23 +1414,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
        u16 num_pages;
        int err;
 
-       mutex_init(&mlxsw_pci->cmd.lock);
-       init_waitqueue_head(&mlxsw_pci->cmd.wait);
-
        mlxsw_pci->core = mlxsw_core;
 
        mbox = mlxsw_cmd_mbox_alloc();
        if (!mbox)
                return -ENOMEM;
 
-       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
-       if (err)
-               goto mbox_put;
-
-       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-       if (err)
-               goto err_out_mbox_alloc;
-
        err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
        if (err)
                goto err_sw_reset;
@@ -1537,9 +1526,6 @@ err_query_fw:
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
 err_alloc_irq:
 err_sw_reset:
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-err_out_mbox_alloc:
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
 mbox_put:
        mlxsw_cmd_mbox_free(mbox);
        return err;
@@ -1553,8 +1539,6 @@ static void mlxsw_pci_fini(void *bus_priv)
        mlxsw_pci_aqs_fini(mlxsw_pci);
        mlxsw_pci_fw_area_fini(mlxsw_pci);
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
 }
 
 static struct mlxsw_pci_queue *
@@ -1776,6 +1760,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
        .features               = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
 };
 
+static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
+{
+       int err;
+
+       mutex_init(&mlxsw_pci->cmd.lock);
+       init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+       if (err)
+               goto err_in_mbox_alloc;
+
+       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+       if (err)
+               goto err_out_mbox_alloc;
+
+       return 0;
+
+err_out_mbox_alloc:
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+err_in_mbox_alloc:
+       mutex_destroy(&mlxsw_pci->cmd.lock);
+       return err;
+}
+
+static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
+{
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+       mutex_destroy(&mlxsw_pci->cmd.lock);
+}
+
 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        const char *driver_name = pdev->driver->name;
@@ -1831,6 +1846,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mlxsw_pci->pdev = pdev;
        pci_set_drvdata(pdev, mlxsw_pci);
 
+       err = mlxsw_pci_cmd_init(mlxsw_pci);
+       if (err)
+               goto err_pci_cmd_init;
+
        mlxsw_pci->bus_info.device_kind = driver_name;
        mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
        mlxsw_pci->bus_info.dev = &pdev->dev;
@@ -1848,6 +1867,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 
 err_bus_device_register:
+       mlxsw_pci_cmd_fini(mlxsw_pci);
+err_pci_cmd_init:
        iounmap(mlxsw_pci->hw_addr);
 err_ioremap:
 err_pci_resource_len_check:
@@ -1865,6 +1886,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
        struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
 
        mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+       mlxsw_pci_cmd_fini(mlxsw_pci);
        iounmap(mlxsw_pci->hw_addr);
        pci_release_regions(mlxsw_pci->pdev);
        pci_disable_device(mlxsw_pci->pdev);
index 770de0222e7bd06eed95a3bbb6c558b5f72c210b..019ed503aadf57f81bfc1c491ad62f62dc41efd6 100644 (file)
@@ -6262,7 +6262,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
        }
 
        fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
-       if (WARN_ON(!fib_work))
+       if (!fib_work)
                return NOTIFY_BAD;
 
        fib_work->mlxsw_sp = router->mlxsw_sp;
index f7e3ce3de04dde321c98d37d1932113dd0cdba03..e03ea9b18f95d5ca97644cbbdc8c1bea2bbcc9fb 100644 (file)
@@ -468,12 +468,18 @@ static void ionic_get_ringparam(struct net_device *netdev,
        ring->rx_pending = lif->nrxq_descs;
 }
 
+static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
+{
+       struct ethtool_ringparam *ring = arg;
+
+       lif->ntxq_descs = ring->tx_pending;
+       lif->nrxq_descs = ring->rx_pending;
+}
+
 static int ionic_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
-       bool running;
-       int err;
 
        if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
                netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -491,22 +497,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
            ring->rx_pending == lif->nrxq_descs)
                return 0;
 
-       err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
-       if (err)
-               return err;
-
-       running = test_bit(IONIC_LIF_F_UP, lif->state);
-       if (running)
-               ionic_stop(netdev);
-
-       lif->ntxq_descs = ring->tx_pending;
-       lif->nrxq_descs = ring->rx_pending;
-
-       if (running)
-               ionic_open(netdev);
-       clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
-       return 0;
+       return ionic_reset_queues(lif, ionic_set_ringsize, ring);
 }
 
 static void ionic_get_channels(struct net_device *netdev,
@@ -521,12 +512,17 @@ static void ionic_get_channels(struct net_device *netdev,
        ch->combined_count = lif->nxqs;
 }
 
+static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
+{
+       struct ethtool_channels *ch = arg;
+
+       lif->nxqs = ch->combined_count;
+}
+
 static int ionic_set_channels(struct net_device *netdev,
                              struct ethtool_channels *ch)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
-       bool running;
-       int err;
 
        if (!ch->combined_count || ch->other_count ||
            ch->rx_count || ch->tx_count)
@@ -535,21 +531,7 @@ static int ionic_set_channels(struct net_device *netdev,
        if (ch->combined_count == lif->nxqs)
                return 0;
 
-       err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
-       if (err)
-               return err;
-
-       running = test_bit(IONIC_LIF_F_UP, lif->state);
-       if (running)
-               ionic_stop(netdev);
-
-       lif->nxqs = ch->combined_count;
-
-       if (running)
-               ionic_open(netdev);
-       clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
-       return 0;
+       return ionic_reset_queues(lif, ionic_set_queuecount, ch);
 }
 
 static u32 ionic_get_priv_flags(struct net_device *netdev)
index aaa00edd9d5b088152375b85e69179cd33bce5cf..f49486b6d04d263d02af3f20620314d2faeaca60 100644 (file)
@@ -1313,7 +1313,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
                return err;
 
        netdev->mtu = new_mtu;
-       err = ionic_reset_queues(lif);
+       err = ionic_reset_queues(lif, NULL, NULL);
 
        return err;
 }
@@ -1325,7 +1325,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
        netdev_info(lif->netdev, "Tx Timeout recovery\n");
 
        rtnl_lock();
-       ionic_reset_queues(lif);
+       ionic_reset_queues(lif, NULL, NULL);
        rtnl_unlock();
 }
 
@@ -1673,6 +1673,14 @@ int ionic_open(struct net_device *netdev)
        if (err)
                goto err_out;
 
+       err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
+       if (err)
+               goto err_txrx_deinit;
+
+       err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
+       if (err)
+               goto err_txrx_deinit;
+
        /* don't start the queues until we have link */
        if (netif_carrier_ok(netdev)) {
                err = ionic_start_queues(lif);
@@ -1980,7 +1988,7 @@ static const struct net_device_ops ionic_netdev_ops = {
        .ndo_get_vf_stats       = ionic_get_vf_stats,
 };
 
-int ionic_reset_queues(struct ionic_lif *lif)
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
 {
        bool running;
        int err = 0;
@@ -1993,12 +2001,19 @@ int ionic_reset_queues(struct ionic_lif *lif)
        if (running) {
                netif_device_detach(lif->netdev);
                err = ionic_stop(lif->netdev);
+               if (err)
+                       goto reset_out;
        }
-       if (!err && running) {
-               ionic_open(lif->netdev);
+
+       if (cb)
+               cb(lif, arg);
+
+       if (running) {
+               err = ionic_open(lif->netdev);
                netif_device_attach(lif->netdev);
        }
 
+reset_out:
        clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
 
        return err;
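
ionic_reset_queues() now takes a callback that runs while the queues are stopped, so the ethtool handlers above can apply ring-size or queue-count changes at the only point where doing so is safe, and the stop/start bookkeeping lives in one place. A stripped-down, stand-alone sketch of that stop/apply/restart pattern (generic names, error handling reduced to the essentials):

#include <stdbool.h>
#include <stdio.h>

struct lif { int ntxq_descs; bool up; };

typedef void (*reset_cb)(struct lif *lif, void *arg);

static int lif_stop(struct lif *lif) { lif->up = false; return 0; }
static int lif_open(struct lif *lif) { lif->up = true;  return 0; }

/* Stop the queues if running, apply the caller's change, then restart. */
static int reset_queues(struct lif *lif, reset_cb cb, void *arg)
{
        bool running = lif->up;
        int err = 0;

        if (running) {
                err = lif_stop(lif);
                if (err)
                        return err;
        }

        if (cb)
                cb(lif, arg);

        if (running)
                err = lif_open(lif);

        return err;
}

static void set_ring_size(struct lif *lif, void *arg)
{
        lif->ntxq_descs = *(int *)arg;
}

int main(void)
{
        struct lif lif = { .ntxq_descs = 1024, .up = true };
        int want = 4096;

        reset_queues(&lif, set_ring_size, &want);
        printf("descs=%d up=%d\n", lif.ntxq_descs, (int)lif.up);
        return 0;
}
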
index c3428034a17b2612b7eac23134ae6b5d19c76f27..ed126dd74e01fdcc43785318f63e0f5e480dafa2 100644 (file)
@@ -248,6 +248,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
        return (units * div) / mult;
 }
 
+typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
+
 void ionic_link_status_check_request(struct ionic_lif *lif);
 void ionic_get_stats64(struct net_device *netdev,
                       struct rtnl_link_stats64 *ns);
@@ -267,7 +269,7 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
 
 int ionic_open(struct net_device *netdev);
 int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif);
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
 
 static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
                                        struct ionic_txq_desc *desc, bool dbell)
index a49743d56b9c02141f13b29663a8aeb33c6d5114..6c2f9ff4a53e21c10b8dac1e404c0b93159e1813 100644 (file)
@@ -876,6 +876,8 @@ struct qed_dev {
        struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
        u8 engine_for_debug;
        bool disable_ilt_dump;
+       bool                            dbg_bin_dump;
+
        DECLARE_HASHTABLE(connections, 10);
        const struct firmware           *firmware;
 
index 81e8fbe4a05bb58fcf7f05571d02ecbfe43a3e88..3b9bbafafe68bfd733a81c5bc097887507eed65a 100644 (file)
@@ -7506,6 +7506,12 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
        if (p_hwfn->cdev->print_dbg_data)
                qed_dbg_print_feature(text_buf, text_size_bytes);
 
+       /* Just return the original binary buffer if requested */
+       if (p_hwfn->cdev->dbg_bin_dump) {
+               vfree(text_buf);
+               return DBG_STATUS_OK;
+       }
+
        /* Free the old dump_buf and point the dump_buf to the newly allocated
         * and formatted text buffer.
         */
@@ -7733,7 +7739,9 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
 #define REGDUMP_HEADER_SIZE_SHIFT              0
 #define REGDUMP_HEADER_SIZE_MASK               0xffffff
 #define REGDUMP_HEADER_FEATURE_SHIFT           24
-#define REGDUMP_HEADER_FEATURE_MASK            0x3f
+#define REGDUMP_HEADER_FEATURE_MASK            0x1f
+#define REGDUMP_HEADER_BIN_DUMP_SHIFT          29
+#define REGDUMP_HEADER_BIN_DUMP_MASK           0x1
 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT       30
 #define REGDUMP_HEADER_OMIT_ENGINE_MASK                0x1
 #define REGDUMP_HEADER_ENGINE_SHIFT            31
@@ -7771,6 +7779,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
                          feature, feature_size);
 
        SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+       SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
        SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
        SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
 
@@ -7794,6 +7803,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                omit_engine = 1;
 
        mutex_lock(&qed_dbg_lock);
+       cdev->dbg_bin_dump = true;
 
        org_engine = qed_get_debug_engine(cdev);
        for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7931,6 +7941,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
        }
 
+       /* Re-populate nvm attribute info */
+       qed_mcp_nvm_info_free(p_hwfn);
+       qed_mcp_nvm_info_populate(p_hwfn);
+
        /* nvm cfg1 */
        rc = qed_dbg_nvm_image(cdev,
                               (u8 *)buffer + offset +
@@ -7993,6 +8007,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
        }
 
+       cdev->dbg_bin_dump = false;
        mutex_unlock(&qed_dbg_lock);
 
        return 0;
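
The regdump header written by qed_calc_regdump_header() is a packed u32: bits 0-23 carry the feature dump size, bits 24-28 the feature id (the feature mask shrinks from 0x3f to 0x1f to free a bit), the new bit 29 flags a raw binary dump, bit 30 is omit-engine and bit 31 the engine. A stand-alone sketch of that packing using the same shift/mask values, with generic helpers in place of the driver's SET_FIELD macro:

#include <stdint.h>
#include <stdio.h>

#define SIZE_SHIFT        0
#define SIZE_MASK         0xffffffu
#define FEATURE_SHIFT     24
#define FEATURE_MASK      0x1fu
#define BIN_DUMP_SHIFT    29
#define BIN_DUMP_MASK     0x1u
#define OMIT_ENGINE_SHIFT 30
#define OMIT_ENGINE_MASK  0x1u
#define ENGINE_SHIFT      31
#define ENGINE_MASK       0x1u

static uint32_t set_field(uint32_t hdr, uint32_t val, int shift, uint32_t mask)
{
        return (hdr & ~(mask << shift)) | ((val & mask) << shift);
}

static uint32_t get_field(uint32_t hdr, int shift, uint32_t mask)
{
        return (hdr >> shift) & mask;
}

int main(void)
{
        uint32_t hdr = 0;

        hdr = set_field(hdr, 0x1234, SIZE_SHIFT, SIZE_MASK);         /* feature size */
        hdr = set_field(hdr, 7, FEATURE_SHIFT, FEATURE_MASK);        /* feature id   */
        hdr = set_field(hdr, 1, BIN_DUMP_SHIFT, BIN_DUMP_MASK);      /* raw binary   */
        hdr = set_field(hdr, 1, OMIT_ENGINE_SHIFT, OMIT_ENGINE_MASK);
        hdr = set_field(hdr, 0, ENGINE_SHIFT, ENGINE_MASK);

        printf("hdr=0x%08x size=0x%x feature=%u bin=%u\n", hdr,
               get_field(hdr, SIZE_SHIFT, SIZE_MASK),
               get_field(hdr, FEATURE_SHIFT, FEATURE_MASK),
               get_field(hdr, BIN_DUMP_SHIFT, BIN_DUMP_MASK));
        return 0;
}
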
index 3aa51374e727f3fc2ceb0d5f90d38a925587e522..9c26fde663b385dac3750ad62810090b8aa1c717 100644 (file)
@@ -4472,12 +4472,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        return 0;
 }
 
-static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
-{
-       kfree(p_hwfn->nvm_info.image_att);
-       p_hwfn->nvm_info.image_att = NULL;
-}
-
 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
@@ -4562,7 +4556,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
        return rc;
 err3:
        if (IS_LEAD_HWFN(p_hwfn))
-               qed_nvm_info_free(p_hwfn);
+               qed_mcp_nvm_info_free(p_hwfn);
 err2:
        if (IS_LEAD_HWFN(p_hwfn))
                qed_iov_free_hw_info(p_hwfn->cdev);
@@ -4623,7 +4617,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
                if (rc) {
                        if (IS_PF(cdev)) {
                                qed_init_free(p_hwfn);
-                               qed_nvm_info_free(p_hwfn);
+                               qed_mcp_nvm_info_free(p_hwfn);
                                qed_mcp_free(p_hwfn);
                                qed_hw_hwfn_free(p_hwfn);
                        }
@@ -4657,7 +4651,7 @@ void qed_hw_remove(struct qed_dev *cdev)
 
        qed_iov_free_hw_info(cdev);
 
-       qed_nvm_info_free(p_hwfn);
+       qed_mcp_nvm_info_free(p_hwfn);
 }
 
 static void qed_chain_free_next_ptr(struct qed_dev *cdev,
index 9624616806e70c552c02af8da694d8ad0485337c..0fd4520d06661cd825741a09919ea116e7f1a70c 100644 (file)
@@ -3280,6 +3280,13 @@ err0:
        return rc;
 }
 
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->nvm_info.image_att);
+       p_hwfn->nvm_info.image_att = NULL;
+       p_hwfn->nvm_info.valid = false;
+}
+
 int
 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                          enum qed_nvm_images image_id,
index 5750b4c5ef63720579940308ced8cdecea42bbce..12a705ed4bacc66fee865a92952b1e6ade2135dd 100644 (file)
@@ -1220,6 +1220,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  */
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
 
+/**
+ * @brief Delete nvm info shadow in the given hardware function
+ *
+ * @param p_hwfn
+ */
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
+
 /**
  * @brief Get the engine affinity configuration.
  *
index 40efe60eff8d9e96f943a6b79459ede156479bed..fcdecddb2812298a8ceb228331bf2170f4a934df 100644 (file)
@@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev)
        return 0;
 }
 
-static int rmnet_register_real_device(struct net_device *real_dev)
+static int rmnet_register_real_device(struct net_device *real_dev,
+                                     struct netlink_ext_ack *extack)
 {
        struct rmnet_port *port;
        int rc, entry;
 
        ASSERT_RTNL();
 
-       if (rmnet_is_real_dev_registered(real_dev))
+       if (rmnet_is_real_dev_registered(real_dev)) {
+               port = rmnet_get_port_rtnl(real_dev);
+               if (port->rmnet_mode != RMNET_EPMODE_VND) {
+                       NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+                       return -EINVAL;
+               }
+
                return 0;
+       }
 
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
@@ -133,7 +141,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 
        mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
 
-       err = rmnet_register_real_device(real_dev);
+       err = rmnet_register_real_device(real_dev, extack);
        if (err)
                goto err0;
 
@@ -422,7 +430,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
        }
 
        if (port->rmnet_mode != RMNET_EPMODE_VND) {
-               NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+               NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached");
                return -EINVAL;
        }
 
@@ -433,7 +441,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
                return -EBUSY;
        }
 
-       err = rmnet_register_real_device(slave_dev);
+       err = rmnet_register_real_device(slave_dev, extack);
        if (err)
                return -EBUSY;
 
index 55226b264e3c45391d1b7b83b1e06d7ac4a37a27..ac7e5a04c8ac9f4857c9a306c2da0f142967ad27 100644 (file)
@@ -500,6 +500,13 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
        int ret;
 
        state = gsi_channel_state(channel);
+
+       /* Channel could have entered STOPPED state since last call
+        * if it timed out.  If so, we're done.
+        */
+       if (state == GSI_CHANNEL_STATE_STOPPED)
+               return 0;
+
        if (state != GSI_CHANNEL_STATE_STARTED &&
            state != GSI_CHANNEL_STATE_STOP_IN_PROC)
                return -EINVAL;
@@ -789,20 +796,11 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 {
        struct gsi_channel *channel = &gsi->channel[channel_id];
-       enum gsi_channel_state state;
        u32 retries;
        int ret;
 
        gsi_channel_freeze(channel);
 
-       /* Channel could have entered STOPPED state since last call if the
-        * STOP command timed out.  We won't stop a channel if stopping it
-        * was successful previously (so we still want the freeze above).
-        */
-       state = gsi_channel_state(channel);
-       if (state == GSI_CHANNEL_STATE_STOPPED)
-               return 0;
-
        /* RX channels might require a little time to enter STOPPED state */
        retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
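
Moving the STOPPED check from gsi_channel_stop() into gsi_channel_stop_command() means a stop that is retried after a timeout now succeeds as soon as the channel has reached STOPPED, instead of failing the state check on the second attempt. A stand-alone sketch of that retry pattern, with a deliberately simplified state model and illustrative names:

#include <errno.h>
#include <stdio.h>

enum ch_state { CH_STARTED, CH_STOP_IN_PROC, CH_STOPPED };

/* One stop attempt: succeed if already STOPPED, otherwise ask for a retry. */
static int channel_stop_command(enum ch_state *state)
{
        if (*state == CH_STOPPED)
                return 0;       /* stopped since the last call (timeout case) */

        if (*state != CH_STARTED && *state != CH_STOP_IN_PROC)
                return -EINVAL;

        /* Model hardware that needs one extra poll before it settles. */
        *state = (*state == CH_STARTED) ? CH_STOP_IN_PROC : CH_STOPPED;
        return *state == CH_STOPPED ? 0 : -EAGAIN;
}

static int channel_stop(enum ch_state *state, int retries)
{
        int ret;

        do {
                ret = channel_stop_command(state);
        } while (ret == -EAGAIN && retries-- > 0);

        return ret;
}

int main(void)
{
        enum ch_state state = CH_STARTED;

        printf("stop -> %d, final state=%d\n", channel_stop(&state, 3), (int)state);
        return 0;
}
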
 
index c9ab865e729062cfbd34228d81dd6cd3e8f4566d..d92dd3f09b735ad0545fc544f5333f058f7629fe 100644 (file)
@@ -586,6 +586,21 @@ u32 ipa_cmd_tag_process_count(void)
        return 4;
 }
 
+void ipa_cmd_tag_process(struct ipa *ipa)
+{
+       u32 count = ipa_cmd_tag_process_count();
+       struct gsi_trans *trans;
+
+       trans = ipa_cmd_trans_alloc(ipa, count);
+       if (trans) {
+               ipa_cmd_tag_process_add(trans);
+               gsi_trans_commit_wait(trans);
+       } else {
+               dev_err(&ipa->pdev->dev,
+                       "error allocating %u entry tag transaction\n", count);
+       }
+}
+
 static struct ipa_cmd_info *
 ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
 {
index e440aa69c8b5bc20e0cf340c9ea4b6890bce7111..1a646e0264a05ca985bfebd929c459b677c0bfc8 100644 (file)
@@ -171,6 +171,14 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans);
  */
 u32 ipa_cmd_tag_process_count(void);
 
+/**
+ * ipa_cmd_tag_process() - Perform a tag process
+ * @ipa:       IPA pointer
+ *
+ * Allocate a transaction for tag process commands, add them to it, then
+ * commit the transaction and wait for it to complete.
+ */
+void ipa_cmd_tag_process(struct ipa *ipa);
+
 /**
  * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
  * @ipa:       IPA pointer
index 52d4b84e0dac6568afff2c1a8677c879ab8936ce..de2768d71ab56b6dc38ad6c0a1549b9620fc1abb 100644 (file)
@@ -44,7 +44,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
                .endpoint = {
                        .seq_type       = IPA_SEQ_INVALID,
                        .config = {
-                               .checksum       = true,
                                .aggregation    = true,
                                .status_enable  = true,
                                .rx = {
index 9f50d0d11704c075a03c0d5512b2b1074852a30e..9e58e495d3731c1bc274d2e8e1b1e5ffccdebcc7 100644 (file)
@@ -1450,6 +1450,8 @@ void ipa_endpoint_suspend(struct ipa *ipa)
        if (ipa->modem_netdev)
                ipa_modem_suspend(ipa->modem_netdev);
 
+       ipa_cmd_tag_process(ipa);
+
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
 }
index dc4a5c2196aeeb8b431ab3840db67afd5292722c..d323adb03383f6e9b17dc64c4e7657f960ede86b 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/types.h>
 
+#include "ipa_gsi.h"
 #include "gsi_trans.h"
 #include "ipa.h"
 #include "ipa_endpoint.h"
index 3cf18600c68e8d676bbbe29930c8d2e6f77c5519..0a40f3dc55fca4736cc5c50feb6fafd1d29e4d04 100644 (file)
@@ -8,7 +8,9 @@
 
 #include <linux/types.h>
 
+struct gsi;
 struct gsi_trans;
+struct ipa_gsi_endpoint_data;
 
 /**
  * ipa_gsi_trans_complete() - GSI transaction completion callback
index 03a1d0e559644bb57654b9e9ee49596dcdb9b99c..73413371e3d3eaaacc516aca860ea9592d74998b 100644 (file)
@@ -119,7 +119,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
                        sizeof_field(struct ipa_driver_init_complete_rsp,
                                     rsp),
                .tlv_type       = 0x02,
-               .elem_size      = offsetof(struct ipa_driver_init_complete_rsp,
+               .offset         = offsetof(struct ipa_driver_init_complete_rsp,
                                           rsp),
                .ei_array       = qmi_response_type_v01_ei,
        },
@@ -137,7 +137,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = {
                        sizeof_field(struct ipa_init_complete_ind,
                                     status),
                .tlv_type       = 0x02,
-               .elem_size      = offsetof(struct ipa_init_complete_ind,
+               .offset         = offsetof(struct ipa_init_complete_ind,
                                           status),
                .ei_array       = qmi_response_type_v01_ei,
        },
@@ -218,7 +218,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
                        sizeof_field(struct ipa_init_modem_driver_req,
                                     platform_type_valid),
                .tlv_type       = 0x10,
-               .elem_size      = offsetof(struct ipa_init_modem_driver_req,
+               .offset         = offsetof(struct ipa_init_modem_driver_req,
                                           platform_type_valid),
        },
        {
index e56547bfdac9a909a159bfdb5aa67014d159476b..9159846b8b9388644bcf8f136231726d8cf297f2 100644 (file)
@@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
                return err;
 
        netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macsec_netdev_addr_lock_key,
-                                      dev->lower_level);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &macsec_netdev_addr_lock_key);
 
        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err < 0)
index 6a6cc9f75307566ec108e26aad9599633d7b6577..4942f6112e51f838057974c2e386ba593f939cba 100644 (file)
@@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
        netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macvlan_netdev_addr_lock_key,
-                                      dev->lower_level);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &macvlan_netdev_addr_lock_key);
 }
 
 static int macvlan_init(struct net_device *dev)
index 858b012074bd1e733ccaf6255dd733a6eef432b1..7adeb91bd368dceab1357c4ec007572a7784a091 100644 (file)
@@ -62,6 +62,7 @@
 #include <net/rtnetlink.h>
 #include <net/sock.h>
 #include <net/xdp.h>
+#include <net/ip_tunnels.h>
 #include <linux/seq_file.h>
 #include <linux/uio.h>
 #include <linux/skb_array.h>
@@ -1351,6 +1352,7 @@ static void tun_net_init(struct net_device *dev)
        switch (tun->flags & TUN_TYPE_MASK) {
        case IFF_TUN:
                dev->netdev_ops = &tun_netdev_ops;
+               dev->header_ops = &ip_tunnel_header_ops;
 
                /* Point-to-Point TUN Device */
                dev->hard_header_len = 0;
index 31b1d4b959f68807d2e1d2505a46b9a58f953f54..07c42c0719f5b1bf472d2a4bc5471ca180e5543d 100644 (file)
@@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
        {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
        {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
index 3cf4dc3433f91e6566a26b7cf811b7861cbd7d6a..bb4ccbda031abb7e261ddf90ef16f8c679d5cfd6 100644 (file)
@@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* Init all registers */
        ret = smsc95xx_reset(dev);
+       if (ret)
+               goto free_pdata;
 
        /* detect device revision as different features may be available */
        ret = smsc95xx_read_reg(dev, ID_REV, &val);
        if (ret < 0)
-               return ret;
+               goto free_pdata;
+
        val >>= 16;
        pdata->chip_id = val;
        pdata->mdix_ctrl = get_mdix_status(dev->net);
@@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
 
        return 0;
+
+free_pdata:
+       kfree(pdata);
+       return ret;
 }
 
 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
index e30d91a38cfb637b376eab0e963ab0ade09fbbb5..284832314f3109b9da0c8f30a74809fa20ed856e 100644 (file)
@@ -303,7 +303,6 @@ static void lapbeth_setup(struct net_device *dev)
        dev->netdev_ops      = &lapbeth_netdev_ops;
        dev->needs_free_netdev = true;
        dev->type            = ARPHRD_X25;
-       dev->hard_header_len = 3;
        dev->mtu             = 1000;
        dev->addr_len        = 0;
 }
@@ -324,6 +323,14 @@ static int lapbeth_new_device(struct net_device *dev)
        if (!ndev)
                goto out;
 
+       /* When transmitting data:
+        * first this driver removes a pseudo header of 1 byte,
+        * then the lapb module prepends an LAPB header of at most 3 bytes,
+        * then this driver prepends a length field of 2 bytes,
+        * then the underlying Ethernet device prepends its own header.
+        */
+       ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+
        lapbeth = netdev_priv(ndev);
        lapbeth->axdev = ndev;
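
The comment added above spells out where each byte of the new hard_header_len comes from; for the usual case of an Ethernet lower device (14-byte hard header, ETH_HLEN) the sum works out to -1 + 3 + 2 + 14 = 18 bytes of headroom reserved on the lapbeth device. A quick stand-alone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        int pseudo_hdr_removed = 1;  /* 1-byte pseudo header stripped by this driver */
        int lapb_hdr = 3;            /* LAPB header prepended by the lapb module     */
        int len_field = 2;           /* length field prepended by this driver        */
        int eth_hlen = 14;           /* typical Ethernet hard_header_len (ETH_HLEN)  */

        printf("lapbeth hard_header_len = %d\n",
               -pseudo_hdr_removed + lapb_hdr + len_field + eth_hlen);  /* 18 */
        return 0;
}
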
 
index a8f151b1b5fab52d061efdb3e49975076c93d3ac..c9f65e96ccb04f12b7ce788b1a122458f9663afc 100644 (file)
@@ -262,6 +262,7 @@ static void wg_setup(struct net_device *dev)
                             max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
 
        dev->netdev_ops = &netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->hard_header_len = 0;
        dev->addr_len = 0;
        dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
index c58df439dbbe09bf50656707a7d6cd86171886a7..dfb674e030764ac83fc53d8aab0af6b7a04e8ad9 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/skbuff.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <net/ip_tunnels.h>
 
 struct wg_device;
 struct wg_peer;
@@ -65,25 +66,9 @@ struct packet_cb {
 #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
 #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
 
-/* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
-{
-       if (skb_network_header(skb) >= skb->head &&
-           (skb_network_header(skb) + sizeof(struct iphdr)) <=
-                   skb_tail_pointer(skb) &&
-           ip_hdr(skb)->version == 4)
-               return htons(ETH_P_IP);
-       if (skb_network_header(skb) >= skb->head &&
-           (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
-                   skb_tail_pointer(skb) &&
-           ipv6_hdr(skb)->version == 6)
-               return htons(ETH_P_IPV6);
-       return 0;
-}
-
 static inline bool wg_check_packet_protocol(struct sk_buff *skb)
 {
-       __be16 real_protocol = wg_examine_packet_protocol(skb);
+       __be16 real_protocol = ip_tunnel_parse_protocol(skb);
        return real_protocol && skb->protocol == real_protocol;
 }
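
The open-coded wg_examine_packet_protocol() removed above is replaced by the shared ip_tunnel_parse_protocol() helper, which is expected to do the same job: look at the IP version field of the decrypted inner packet and return ETH_P_IP, ETH_P_IPV6 or 0 for anything else. A stand-alone sketch of that check on a raw buffer (byte-order conversion and the skb bounds handling of the original are omitted; illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

/* Return the L3 protocol implied by the first nibble, or 0 if unknown. */
static uint16_t parse_inner_protocol(const uint8_t *pkt, size_t len)
{
        if (len < 1)
                return 0;

        switch (pkt[0] >> 4) {          /* IP version field */
        case 4:  return ETH_P_IP;
        case 6:  return ETH_P_IPV6;
        default: return 0;
        }
}

int main(void)
{
        uint8_t v4_hdr[20] = { 0x45 };  /* version 4, IHL 5 */
        uint8_t v6_hdr[40] = { 0x60 };  /* version 6        */

        printf("%#x %#x\n", parse_inner_protocol(v4_hdr, sizeof(v4_hdr)),
               parse_inner_protocol(v6_hdr, sizeof(v6_hdr)));
        return 0;
}
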
 
index 9b2ab6fc91cdd9bb1cb60e235cab33be5438dd6f..2c9551ea6dc739432d210d2517c6c719fc627bf7 100644 (file)
@@ -387,7 +387,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
         */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = ~0; /* All levels */
-       skb->protocol = wg_examine_packet_protocol(skb);
+       skb->protocol = ip_tunnel_parse_protocol(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
                len = ntohs(ip_hdr(skb)->tot_len);
                if (unlikely(len < sizeof(struct iphdr)))
index 89b85970912dbe076ae37cbc2ce56bce570f0457..4cef69bd3c1bd8638c8181ca632d158712a091c0 100644 (file)
@@ -95,7 +95,7 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
        struct encrypted_key_payload *epayload;
        struct device *dev = &nvdimm->dev;
 
-       keyref = lookup_user_key(id, 0, 0);
+       keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
        if (IS_ERR(keyref))
                return NULL;
 
index 28f4388c13373b395fe95f50930451e6bb090f4f..8410d03b940d7454e32df216e3e27f904c9c28e1 100644 (file)
@@ -1116,10 +1116,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
                dev_warn(ctrl->device,
                        "Identify Descriptors failed (%d)\n", status);
                 /*
-                 * Don't treat an error as fatal, as we potentially already
-                 * have a NGUID or EUI-64.
+                 * Don't treat non-retryable errors as fatal, as we potentially
+                 * already have a NGUID or EUI-64.  If we failed with DNR set,
+                 * we want to silently ignore the error as we can still
+                 * identify the device, but if the status does not have DNR
+                 * set, we want to propagate the error back specifically for
+                 * the disk revalidation flow to make sure we don't abandon
+                 * the device just because of a temporary retryable error
+                 * (such as path or transport errors).
                  */
-               if (status > 0 && !(status & NVME_SC_DNR))
+               if (status > 0 && (status & NVME_SC_DNR))
                        status = 0;
                goto free_data;
        }
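
With the corrected condition, the Identify Descriptors failure is only swallowed when the controller marks it as non-retryable (DNR set); retryable path or transport errors keep their status so the revalidation path does not drop the namespace over a transient failure. A stand-alone sketch of that decision; the DNR bit value is assumed here for illustration, the driver itself uses NVME_SC_DNR:

#include <stdio.h>

#define SC_DNR 0x4000   /* assumed "do not retry" status bit, for illustration */

/* Status the caller should see after an Identify Descriptors failure. */
static int filter_identify_status(int status)
{
        /* DNR set: the failure is permanent but not fatal, a NGUID/EUI-64 may
         * already be known, so silently ignore it. */
        if (status > 0 && (status & SC_DNR))
                return 0;

        /* No DNR: likely a temporary path/transport error, propagate it so
         * revalidation does not abandon the device. */
        return status;
}

int main(void)
{
        printf("DNR error -> %d\n", filter_identify_status(0x2 | SC_DNR));
        printf("retryable -> %d\n", filter_identify_status(0x2));
        return 0;
}
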
index 18d084ed497ef3496bff54de7d6671dfbfe71606..66509472fe06ae6b9cab02e0ffc77c7b2d6ba5a3 100644 (file)
@@ -672,10 +672,11 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
        }
 
        if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
-               struct backing_dev_info *info =
-                                       ns->head->disk->queue->backing_dev_info;
+               struct gendisk *disk = ns->head->disk;
 
-               info->capabilities |= BDI_CAP_STABLE_WRITES;
+               if (disk)
+                       disk->queue->backing_dev_info->capabilities |=
+                                       BDI_CAP_STABLE_WRITES;
        }
 }
 
index 0ff7c55173da0e0a3dd4b16bab38ed68063d3153..615174a9d1e06a6ffbd01f1486619b98446c4262 100644 (file)
@@ -800,6 +800,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
        pm_runtime_put(vg->dev);
 }
 
+static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg,
+                                     unsigned int offset)
+{
+       void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+
+       /*
+        * Before making any direction modifications, check whether the GPIO is
+        * set up for direct IRQ. On Bay Trail, setting such a GPIO to output does
+        * not make sense, so let's at least inform the caller before they shoot
+        * themselves in the foot.
+        */
+       if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
+               dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+}
+
 static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
                                  struct pinctrl_gpio_range *range,
                                  unsigned int offset,
@@ -807,7 +822,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
 {
        struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        unsigned long flags;
        u32 value;
 
@@ -817,14 +831,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
        value &= ~BYT_DIR_MASK;
        if (input)
                value |= BYT_OUTPUT_EN;
-       else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
-               /*
-                * Before making any direction modifications, do a check if gpio
-                * is set for direct IRQ.  On baytrail, setting GPIO to output
-                * does not make sense, so let's at least inform the caller before
-                * they shoot themselves in the foot.
-                */
-               dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+       else
+               byt_gpio_direct_irq_check(vg, offset);
 
        writel(value, val_reg);
 
@@ -1165,19 +1173,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 
 static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
 {
-       return pinctrl_gpio_direction_input(chip->base + offset);
+       struct intel_pinctrl *vg = gpiochip_get_data(chip);
+       void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       unsigned long flags;
+       u32 reg;
+
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
+       reg = readl(val_reg);
+       reg &= ~BYT_DIR_MASK;
+       reg |= BYT_OUTPUT_EN;
+       writel(reg, val_reg);
+
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return 0;
 }
 
+/*
+ * Note: despite the temptation, this MUST NOT be converted into a call to
+ * pinctrl_gpio_direction_output() + byt_gpio_set(). That does not work; this
+ * MUST be done as a single BYT_VAL_REG register write.
+ * See the commit message of the commit adding this comment for details.
+ */
 static int byt_gpio_direction_output(struct gpio_chip *chip,
                                     unsigned int offset, int value)
 {
-       int ret = pinctrl_gpio_direction_output(chip->base + offset);
+       struct intel_pinctrl *vg = gpiochip_get_data(chip);
+       void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       unsigned long flags;
+       u32 reg;
 
-       if (ret)
-               return ret;
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
+       byt_gpio_direct_irq_check(vg, offset);
 
-       byt_gpio_set(chip, offset, value);
+       reg = readl(val_reg);
+       reg &= ~BYT_DIR_MASK;
+       if (value)
+               reg |= BYT_LEVEL;
+       else
+               reg &= ~BYT_LEVEL;
 
+       writel(reg, val_reg);
+
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
index 3e5760f1a715355dfe44eca5d31b69d973ce566d..d4a192df5fabdc5f04370ae41a6eb00e5ecf5590 100644 (file)
@@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = {
        {
                .name = "uart0",
                .pins = uart0_pins,
-               .npins = 9,
+               .npins = 5,
        },
        {
                .name = "uart1",
index 877aade194979dc6c22078cb710783bd1deda95e..8f4acdc06b1347f3d651d84b5977473994a754e5 100644 (file)
@@ -441,6 +441,7 @@ static int asus_wmi_battery_add(struct power_supply *battery)
         * battery is named BATT.
         */
        if (strcmp(battery->desc->name, "BAT0") != 0 &&
+           strcmp(battery->desc->name, "BAT1") != 0 &&
            strcmp(battery->desc->name, "BATT") != 0)
                return -ENODEV;
 
index 1409a5bb55820100a7362b1ff87fbbe5357eed62..4f6f7f0761fc1c940b3e165c08abd7257d3555db 100644 (file)
@@ -13,6 +13,9 @@
 #define INTEL_RAPL_PRIO_DEVID_0        0x3451
 #define INTEL_CFG_MBOX_DEVID_0 0x3459
 
+#define INTEL_RAPL_PRIO_DEVID_1 0x3251
+#define INTEL_CFG_MBOX_DEVID_1  0x3259
+
 /*
  * Validate maximum commands in a single request.
  * This is enough to handle command to every core in one ioctl, or all
index d84e2174cbdebe71d9bd48b547ac4cf9c846e3de..95f01e7a87d573a5bcd1f2cc12550d5d0e972f6d 100644 (file)
@@ -147,6 +147,7 @@ static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
 
 static const struct pci_device_id isst_if_mbox_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)},
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
index 3584859fcc421aae634ead7c71705951c1edb6bb..aa17fd7817f8fdd4e075a15c7486d1d1bafbfc2f 100644 (file)
@@ -72,6 +72,7 @@ static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume)
 
 static const struct pci_device_id isst_if_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)},
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, isst_if_ids);
index ff7f0a4f247563f4ebf3c9cce86da52eee1e2a37..0f6fceda5fc0b676be614b5f635111cfa59dbaa4 100644 (file)
@@ -885,11 +885,19 @@ static ssize_t dispatch_proc_write(struct file *file,
 
        if (!ibm || !ibm->write)
                return -EINVAL;
+       if (count > PAGE_SIZE - 1)
+               return -EINVAL;
+
+       kernbuf = kmalloc(count + 1, GFP_KERNEL);
+       if (!kernbuf)
+               return -ENOMEM;
 
-       kernbuf = strndup_user(userbuf, PAGE_SIZE);
-       if (IS_ERR(kernbuf))
-               return PTR_ERR(kernbuf);
+       if (copy_from_user(kernbuf, userbuf, count)) {
+               kfree(kernbuf);
+               return -EFAULT;
+       }
 
+       kernbuf[count] = 0;
        ret = ibm->write(kernbuf);
        if (ret == 0)
                ret = count;
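The hunk above replaces strndup_user() with an explicit bounded copy. Below is a small userspace sketch of the same pattern, with memcpy() standing in for copy_from_user() and LIMIT standing in for PAGE_SIZE; all names are illustrative only, not the driver's API.

/*
 * Userspace sketch of the bounded-copy pattern: reject oversized writes,
 * allocate count + 1 bytes, copy, then NUL-terminate.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LIMIT 4096

static char *copy_command(const char *userbuf, size_t count)
{
        char *kernbuf;

        if (count > LIMIT - 1)
                return NULL;            /* the driver returns -EINVAL here */

        kernbuf = malloc(count + 1);
        if (!kernbuf)
                return NULL;            /* -ENOMEM in the driver */

        memcpy(kernbuf, userbuf, count);
        kernbuf[count] = '\0';          /* always terminated */
        return kernbuf;
}

int main(void)
{
        char *cmd = copy_command("enable", strlen("enable"));

        if (cmd) {
                printf("command: %s\n", cmd);
                free(cmd);
        }
        return 0;
}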
index a646fc81c872f6dbf0a3c9c0f4303cd0c66a8314..13b26a1c79886bacd8742d1404588096bd224170 100644 (file)
@@ -8,6 +8,7 @@
  *            Eric Farman <farman@linux.ibm.com>
  */
 
+#include <linux/slab.h>
 #include <linux/vfio.h>
 #include "vfio_ccw_private.h"
 
index 773c45af9387096dcfbd6a1878d9ba8d88b7bf40..278d15ff1c5ae26d948741c788852718de7672e2 100644 (file)
@@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
        lockdep_assert_held(&lport->disc.disc_mutex);
 
        rdata = fc_rport_lookup(lport, port_id);
-       if (rdata)
+       if (rdata) {
+               kref_put(&rdata->kref, fc_rport_destroy);
                return rdata;
+       }
 
        if (lport->rport_priv_size > 0)
                rport_priv_size = lport->rport_priv_size;
@@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 
        fc_rport_state_enter(rdata, RPORT_ST_DELETE);
 
-       kref_get(&rdata->kref);
-       if (rdata->event == RPORT_EV_NONE &&
-           !queue_work(rport_event_queue, &rdata->event_work))
-               kref_put(&rdata->kref, fc_rport_destroy);
+       if (rdata->event == RPORT_EV_NONE) {
+               kref_get(&rdata->kref);
+               if (!queue_work(rport_event_queue, &rdata->event_work))
+                       kref_put(&rdata->kref, fc_rport_destroy);
+       }
 
        rdata->event = event;
 }
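The libfc change above takes a reference only when work is actually going to be queued, and gives it back if queueing fails. A toy userspace model of that rule follows; the struct and helpers are invented for the sketch and are not libfc API.

/*
 * Toy refcount model: a reference is owned by the queued work item, and
 * is dropped again immediately if queueing does not happen.
 */
#include <stdio.h>
#include <stdbool.h>

struct rport { int refs; };

static void get(struct rport *r) { r->refs++; }
static void put(struct rport *r)
{
        if (--r->refs == 0)
                printf("rport destroyed\n");
}

static bool queue_work_stub(bool already_queued)
{
        return !already_queued;         /* false means "was already pending" */
}

static void schedule_event(struct rport *r, bool already_queued)
{
        get(r);                         /* reference owned by the work item */
        if (!queue_work_stub(already_queued))
                put(r);                 /* nothing queued: give it back */
}

int main(void)
{
        struct rport r = { .refs = 1 };

        schedule_event(&r, false);
        printf("refs after queueing: %d\n", r.refs);      /* 2 */
        schedule_event(&r, true);
        printf("refs after failed queue: %d\n", r.refs);  /* still 2 */
        put(&r);                        /* work completion drops its ref */
        put(&r);                        /* original owner drops its ref */
        return 0;
}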
index e5a64d4f255cacc3d1795e76af47a4d8c4dfe48a..49c8a1818baf89e58c7543f6157cb307cfcf5bd3 100644 (file)
@@ -2629,7 +2629,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
                        "iscsi_q_%d", shost->host_no);
                ihost->workq = alloc_workqueue("%s",
                        WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
-                       2, ihost->workq_name);
+                       1, ihost->workq_name);
                if (!ihost->workq)
                        goto free_host;
        }
index 62e552838565fa32fd36d7e8226218845a2e516f..983e568ff2317ec52d4a02530afd5d0b308daf49 100644 (file)
@@ -3145,19 +3145,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
        if (!ioc->is_warpdrive) {
                ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
                        __func__);
-               goto out;
+               return 0;
        }
        /* pci_access_mutex lock acquired by sysfs show path */
        mutex_lock(&ioc->pci_access_mutex);
-       if (ioc->pci_error_recovery || ioc->remove_host) {
-               mutex_unlock(&ioc->pci_access_mutex);
-               return 0;
-       }
+       if (ioc->pci_error_recovery || ioc->remove_host)
+               goto out;
 
        /* allocate up to GPIOVal 36 entries */
        sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
        io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
        if (!io_unit_pg3) {
+               rc = -ENOMEM;
                ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
                        __func__, sz);
                goto out;
@@ -3167,6 +3166,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
            0) {
                ioc_err(ioc, "%s: failed reading iounit_pg3\n",
                        __func__);
+               rc = -EINVAL;
                goto out;
        }
 
@@ -3174,12 +3174,14 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
                ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
                        __func__, ioc_status);
+               rc = -EINVAL;
                goto out;
        }
 
        if (io_unit_pg3->GPIOCount < 25) {
                ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
                        __func__, io_unit_pg3->GPIOCount);
+               rc = -EINVAL;
                goto out;
        }
 
index 4576d3ae993729af6622055b850bc181cd795756..2436a17f5cd91581a42d480ae6286687a81053b9 100644 (file)
@@ -5944,7 +5944,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
                        break;
                }
 
-               if (NVME_TARGET(vha->hw, fcport)) {
+               if (found && NVME_TARGET(vha->hw, fcport)) {
                        if (fcport->disc_state == DSC_DELETE_PEND) {
                                qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
                                vha->fcport_count--;
index eed31021e7885c28f92bbaa97a6fda71b53527cf..ba84244c1b4f65e7a34b9f9b78252985996d8839 100644 (file)
@@ -239,6 +239,7 @@ static struct {
        {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
                BLIST_INQUIRY_36},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
index 42f0550d6b11ff81b2306fc135dff8f716516131..6f41e4b5a2b85fcc392f3172c0031d71af7883b6 100644 (file)
@@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
        {"LSI", "INF-01-00",            "rdac", },
        {"ENGENIO", "INF-01-00",        "rdac", },
        {"LENOVO", "DE_Series",         "rdac", },
+       {"FUJITSU", "ETERNUS_AHB",      "rdac", },
        {NULL, NULL,                    NULL },
 };
 
index f4cc08eb47ba886e52e8fe6b0a38bf514ef3adc9..7ae5024e78243b412dd37b3aee8d86fb1621c001 100644 (file)
@@ -4760,7 +4760,7 @@ static __init int iscsi_transport_init(void)
 
        iscsi_eh_timer_workq = alloc_workqueue("%s",
                        WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
-                       2, "iscsi_eh");
+                       1, "iscsi_eh");
        if (!iscsi_eh_timer_workq) {
                err = -ENOMEM;
                goto release_nls;
index f8661062ef9547a322c4b6f2ed2fa7d0eb8b2d7a..f3d5b1bbd5aa7eff36c5d91cd9d2df6e41a77f8d 100644 (file)
@@ -339,7 +339,7 @@ store_spi_transport_##field(struct device *dev,                     \
        struct spi_transport_attrs *tp                                  \
                = (struct spi_transport_attrs *)&starget->starget_data; \
                                                                        \
-       if (i->f->set_##field)                                          \
+       if (!i->f->set_##field)                                         \
                return -EINVAL;                                         \
        val = simple_strtoul(buf, NULL, 0);                             \
        if (val > tp->max_##field)                                      \
index 58190c94561fccf7a014783b38ee3c074f801e9a..91c6affe139c998bb0d51d0d1216acf1ca2c8101 100644 (file)
@@ -1109,6 +1109,8 @@ static int dspi_suspend(struct device *dev)
        struct spi_controller *ctlr = dev_get_drvdata(dev);
        struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
+       if (dspi->irq)
+               disable_irq(dspi->irq);
        spi_controller_suspend(ctlr);
        clk_disable_unprepare(dspi->clk);
 
@@ -1129,6 +1131,8 @@ static int dspi_resume(struct device *dev)
        if (ret)
                return ret;
        spi_controller_resume(ctlr);
+       if (dspi->irq)
+               enable_irq(dspi->irq);
 
        return 0;
 }
@@ -1385,22 +1389,22 @@ static int dspi_probe(struct platform_device *pdev)
                goto poll_mode;
        }
 
-       ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
-                              IRQF_SHARED, pdev->name, dspi);
+       init_completion(&dspi->xfer_done);
+
+       ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+                                  IRQF_SHARED, pdev->name, dspi);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
                goto out_clk_put;
        }
 
-       init_completion(&dspi->xfer_done);
-
 poll_mode:
 
        if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
                ret = dspi_request_dma(dspi, res->start);
                if (ret < 0) {
                        dev_err(&pdev->dev, "can't get dma channels\n");
-                       goto out_clk_put;
+                       goto out_free_irq;
                }
        }
 
@@ -1415,11 +1419,14 @@ poll_mode:
        ret = spi_register_controller(ctlr);
        if (ret != 0) {
                dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
-               goto out_clk_put;
+               goto out_free_irq;
        }
 
        return ret;
 
+out_free_irq:
+       if (dspi->irq)
+               free_irq(dspi->irq, dspi);
 out_clk_put:
        clk_disable_unprepare(dspi->clk);
 out_ctlr_put:
@@ -1434,18 +1441,8 @@ static int dspi_remove(struct platform_device *pdev)
        struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
        /* Disconnect from the SPI framework */
-       dspi_release_dma(dspi);
-       clk_disable_unprepare(dspi->clk);
        spi_unregister_controller(dspi->ctlr);
 
-       return 0;
-}
-
-static void dspi_shutdown(struct platform_device *pdev)
-{
-       struct spi_controller *ctlr = platform_get_drvdata(pdev);
-       struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
-
        /* Disable RX and TX */
        regmap_update_bits(dspi->regmap, SPI_MCR,
                           SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
@@ -1455,8 +1452,16 @@ static void dspi_shutdown(struct platform_device *pdev)
        regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
 
        dspi_release_dma(dspi);
+       if (dspi->irq)
+               free_irq(dspi->irq, dspi);
        clk_disable_unprepare(dspi->clk);
-       spi_unregister_controller(dspi->ctlr);
+
+       return 0;
+}
+
+static void dspi_shutdown(struct platform_device *pdev)
+{
+       dspi_remove(pdev);
 }
 
 static struct platform_driver fsl_dspi_driver = {
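The reordering in the probe hunk above makes sure the completion is initialised before the interrupt handler that signals it can be registered. A userspace model of that ordering follows, with pthreads standing in for the DSPI interrupt machinery; every name below is illustrative.

/*
 * The "completion" must exist and be initialised before the handler that
 * may signal it is installed; here the thread plays the interrupt.
 */
#include <pthread.h>
#include <stdio.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static struct completion xfer_done;

static void init_completion(struct completion *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
}

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

/* "Interrupt handler": may run as soon as it is registered. */
static void *fake_irq(void *arg)
{
        complete(&xfer_done);
        return NULL;
}

int main(void)
{
        pthread_t irq;

        init_completion(&xfer_done);            /* before "request_irq" */
        pthread_create(&irq, NULL, fake_irq, NULL);
        wait_for_completion(&xfer_done);
        pthread_join(irq, NULL);
        printf("transfer completed\n");
        return 0;
}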
index 6721910e5f2aaaf44682e9825f9ef3871eed6060..0040362b7162279f9ba66fae3a4acdc1db2f3eca 100644 (file)
@@ -1485,6 +1485,11 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
        { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
        { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
        { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
+       /* TGL-H */
+       { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
        /* APL */
        { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
index 9e124020519fc2072d83a767b0068be92fd1ed94..6c0e1b053126e0b7490f71dca943ca6f64609721 100644 (file)
@@ -123,12 +123,12 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
 {
        int i;
 
-       for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
-               if (power > cpufreq_cdev->em->table[i].power)
+       for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+               if (power >= cpufreq_cdev->em->table[i].power)
                        break;
        }
 
-       return cpufreq_cdev->em->table[i + 1].frequency;
+       return cpufreq_cdev->em->table[i].frequency;
 }
 
 /**
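The cpufreq-cooling fix above changes the lookup to scan from the highest performance level down and return the first entry whose power fits the request. A self-contained sketch of that lookup over an invented table follows; the values are made up, whereas the real driver reads them from the energy model.

/*
 * Descending scan: return the frequency of the highest level whose power
 * is <= the requested power budget.
 */
#include <stdio.h>

struct em_entry { unsigned int power; unsigned int frequency; };

static const struct em_entry table[] = {
        { 100,  500000 },       /* level 0: lowest power/frequency */
        { 250, 1000000 },
        { 400, 1500000 },
        { 600, 2000000 },       /* level 3 == max_level */
};
#define MAX_LEVEL 3

static unsigned int power_to_freq(unsigned int power)
{
        int i;

        for (i = MAX_LEVEL; i >= 0; i--) {
                if (power >= table[i].power)
                        break;
        }
        /* i == -1 would mean "below the lowest level"; clamp for the demo */
        return table[i < 0 ? 0 : i].frequency;
}

int main(void)
{
        printf("%u\n", power_to_freq(300));     /* prints 1000000 */
        return 0;
}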
index e761c9b422179d9ccaea3d23a1ad60e006651aa2..1b84ea674edb740d140ae547f3da685266e64b68 100644 (file)
@@ -649,7 +649,7 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
 static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
 {
        struct device_node *np;
-       int ret;
+       int ret = 0;
 
        data->policy = cpufreq_cpu_get(0);
        if (!data->policy) {
@@ -664,11 +664,12 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
                if (IS_ERR(data->cdev)) {
                        ret = PTR_ERR(data->cdev);
                        cpufreq_cpu_put(data->policy);
-                       return ret;
                }
        }
 
-       return 0;
+       of_node_put(np);
+
+       return ret;
 }
 
 static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data)
index 0b3a6265584359c25f3666b00b37100f5e706d60..12448ccd27f16806f715dcd211ba0b426b47396d 100644 (file)
@@ -216,11 +216,16 @@ static int int3400_thermal_run_osc(acpi_handle handle,
        acpi_status status;
        int result = 0;
        struct acpi_osc_context context = {
-               .uuid_str = int3400_thermal_uuids[uuid],
+               .uuid_str = NULL,
                .rev = 1,
                .cap.length = 8,
        };
 
+       if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID)
+               return -EINVAL;
+
+       context.uuid_str = int3400_thermal_uuids[uuid];
+
        buf[OSC_QUERY_DWORD] = 0;
        buf[OSC_SUPPORT_DWORD] = enable;
 
index f86cbb125e2ff31281baeac3bda7c022450240b9..ec1d58c4ceaae1e7155941b40696f94f4fe854e8 100644 (file)
@@ -74,7 +74,7 @@ static void int3403_notify(acpi_handle handle,
                                                   THERMAL_TRIP_CHANGED);
                break;
        default:
-               dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
+               dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
                break;
        }
 }
index 76e30603d4d58831310cbf6f53f2f714f3ced4cd..42c9cd0e5f7754c5c7087224c9cc6227d967e3fe 100644 (file)
@@ -211,6 +211,9 @@ enum {
 /* The total number of temperature sensors in the MT8183 */
 #define MT8183_NUM_SENSORS     6
 
+/* The number of banks in the MT8183 */
+#define MT8183_NUM_ZONES               1
+
 /* The number of sensing points per bank */
 #define MT8183_NUM_SENSORS_PER_ZONE     6
 
@@ -497,7 +500,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
  */
 static const struct mtk_thermal_data mt8183_thermal_data = {
        .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
-       .num_banks = MT8183_NUM_SENSORS_PER_ZONE,
+       .num_banks = MT8183_NUM_ZONES,
        .num_sensors = MT8183_NUM_SENSORS,
        .vts_index = mt8183_vts_index,
        .cali_val = MT8183_CALIBRATION,
@@ -591,8 +594,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
        u32 raw;
 
        for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
-               raw = readl(mt->thermal_base +
-                           conf->msr[conf->bank_data[bank->id].sensors[i]]);
+               raw = readl(mt->thermal_base + conf->msr[i]);
 
                temp = raw_to_mcelsius(mt,
                                       conf->bank_data[bank->id].sensors[i],
@@ -733,8 +735,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
 
        for (i = 0; i < conf->bank_data[num].num_sensors; i++)
                writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
-                      mt->thermal_base +
-                      conf->adcpnp[conf->bank_data[num].sensors[i]]);
+                      mt->thermal_base + conf->adcpnp[i]);
 
        writel((1 << conf->bank_data[num].num_sensors) - 1,
               controller_base + TEMP_MONCTL0);
index 8d3e94d2a9ed4404233f7f62ea54b840f448f5bd..39c4462e38f622031e729059263f58b857558503 100644 (file)
@@ -382,7 +382,7 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
  *
  * Return: IRQ_HANDLED
  */
-irqreturn_t tsens_critical_irq_thread(int irq, void *data)
+static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
 {
        struct tsens_priv *priv = data;
        struct tsens_irq_data d;
@@ -452,7 +452,7 @@ irqreturn_t tsens_critical_irq_thread(int irq, void *data)
  *
  * Return: IRQ_HANDLED
  */
-irqreturn_t tsens_irq_thread(int irq, void *data)
+static irqreturn_t tsens_irq_thread(int irq, void *data)
 {
        struct tsens_priv *priv = data;
        struct tsens_irq_data d;
@@ -520,7 +520,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-int tsens_set_trips(void *_sensor, int low, int high)
+static int tsens_set_trips(void *_sensor, int low, int high)
 {
        struct tsens_sensor *s = _sensor;
        struct tsens_priv *priv = s->priv;
@@ -557,7 +557,7 @@ int tsens_set_trips(void *_sensor, int low, int high)
        return 0;
 }
 
-int tsens_enable_irq(struct tsens_priv *priv)
+static int tsens_enable_irq(struct tsens_priv *priv)
 {
        int ret;
        int val = tsens_version(priv) > VER_1_X ? 7 : 1;
@@ -570,7 +570,7 @@ int tsens_enable_irq(struct tsens_priv *priv)
        return ret;
 }
 
-void tsens_disable_irq(struct tsens_priv *priv)
+static void tsens_disable_irq(struct tsens_priv *priv)
 {
        regmap_field_write(priv->rf[INT_EN], 0);
 }
index 58fe7c1ef00b12706d9c8035f7a23a5cafca09a3..c48c5e9b8f203dd106839e69e7d0f1db948a64bf 100644 (file)
@@ -167,7 +167,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
 {
        struct rcar_gen3_thermal_tsc *tsc = devdata;
        int mcelsius, val;
-       u32 reg;
+       int reg;
 
        /* Read register and convert to milli Celsius */
        reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
index a340374e8c51ae104b416f0e387aafdb4e6a3cd3..4cde70dcf655659f6a36135ddf8a2e502d2b6f90 100644 (file)
@@ -348,8 +348,8 @@ static int sprd_thm_probe(struct platform_device *pdev)
 
        thm->var_data = pdata;
        thm->base = devm_platform_ioremap_resource(pdev, 0);
-       if (!thm->base)
-               return -ENOMEM;
+       if (IS_ERR(thm->base))
+               return PTR_ERR(thm->base);
 
        thm->nr_sensors = of_get_child_count(np);
        if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
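The sprd fix above matters because devm_platform_ioremap_resource() reports failure through an encoded error pointer, not NULL. Below is a minimal userspace model of the ERR_PTR()/IS_ERR() convention, assuming the usual Linux encoding of errno values in the top 4095 pointer values; the ioremap helper is a stand-in, not the kernel API.

/*
 * Error-pointer sketch: the helper never returns NULL on failure, so the
 * caller must test with IS_ERR() and extract the errno with PTR_ERR().
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for devm_platform_ioremap_resource(): fails with -ENOMEM here. */
static void *fake_ioremap(int fail)
{
        static int mmio_window;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&mmio_window;
}

int main(void)
{
        void *base = fake_ioremap(1);

        if (IS_ERR(base)) {
                printf("ioremap failed: %ld\n", PTR_ERR(base));
                return 1;
        }
        return 0;
}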
index a04f74d2e854d7d35fd72c11461c7631e58ccc50..4df47d02b34b40581649310a40e8a91e098a83ee 100644 (file)
@@ -1215,7 +1215,12 @@ static int cpm_uart_init_port(struct device_node *np,
 
                pinfo->gpios[i] = NULL;
 
-               gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+               gpiod = devm_gpiod_get_index_optional(dev, NULL, i, GPIOD_ASIS);
+
+               if (IS_ERR(gpiod)) {
+                       ret = PTR_ERR(gpiod);
+                       goto out_irq;
+               }
 
                if (gpiod) {
                        if (i == GPIO_RTS || i == GPIO_DTR)
@@ -1237,6 +1242,8 @@ static int cpm_uart_init_port(struct device_node *np,
 
        return cpm_uart_request_port(&pinfo->port);
 
+out_irq:
+       irq_dispose_mapping(pinfo->port.irq);
 out_pram:
        cpm_uart_unmap_pram(pinfo, pram);
 out_mem:
index b4f835e7de23410a5f2fb0a659e844b1f637b6a8..b784323a6a7b03ed7350733668b0f4980caaee19 100644 (file)
@@ -1698,21 +1698,21 @@ static int mxs_auart_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
-               goto out_disable_clks;
+               goto out_iounmap;
        }
 
        s->port.irq = irq;
        ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0,
                               dev_name(&pdev->dev), s);
        if (ret)
-               goto out_disable_clks;
+               goto out_iounmap;
 
        platform_set_drvdata(pdev, s);
 
        ret = mxs_auart_init_gpios(s, &pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize GPIOs.\n");
-               goto out_disable_clks;
+               goto out_iounmap;
        }
 
        /*
@@ -1720,7 +1720,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
         */
        ret = mxs_auart_request_gpio_irq(s);
        if (ret)
-               goto out_disable_clks;
+               goto out_iounmap;
 
        auart_port[s->port.line] = s;
 
@@ -1746,6 +1746,9 @@ out_free_qpio_irq:
        mxs_auart_free_gpio_irq(s);
        auart_port[pdev->id] = NULL;
 
+out_iounmap:
+       iounmap(s->port.membase);
+
 out_disable_clks:
        if (is_asm9260_auart(s)) {
                clk_disable_unprepare(s->clk);
@@ -1761,6 +1764,7 @@ static int mxs_auart_remove(struct platform_device *pdev)
        uart_remove_one_port(&auart_driver, &s->port);
        auart_port[pdev->id] = NULL;
        mxs_auart_free_gpio_irq(s);
+       iounmap(s->port.membase);
        if (is_asm9260_auart(s)) {
                clk_disable_unprepare(s->clk);
                clk_disable_unprepare(s->clk_ahb);
index 57840cf903881c4e20a5fd22509ee039fa1bb5e4..5f3daabdc916e3eba27bb982b14dde7a4e7fb243 100644 (file)
@@ -41,8 +41,6 @@ static struct lock_class_key port_lock_key;
 
 #define HIGH_BITS_OFFSET       ((sizeof(long)-sizeof(int))*8)
 
-#define SYSRQ_TIMEOUT  (HZ * 5)
-
 static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
                                        struct ktermios *old_termios);
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -1916,6 +1914,12 @@ static inline bool uart_console_enabled(struct uart_port *port)
        return uart_console(port) && (port->cons->flags & CON_ENABLED);
 }
 
+static void __uart_port_spin_lock_init(struct uart_port *port)
+{
+       spin_lock_init(&port->lock);
+       lockdep_set_class(&port->lock, &port_lock_key);
+}
+
 /*
  * Ensure that the serial console lock is initialised early.
  * If this port is a console, then the spinlock is already initialised.
@@ -1925,8 +1929,7 @@ static inline void uart_port_spin_lock_init(struct uart_port *port)
        if (uart_console(port))
                return;
 
-       spin_lock_init(&port->lock);
-       lockdep_set_class(&port->lock, &port_lock_key);
+       __uart_port_spin_lock_init(port);
 }
 
 #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
@@ -2372,6 +2375,13 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
                /* Power up port for set_mctrl() */
                uart_change_pm(state, UART_PM_STATE_ON);
 
+               /*
+                * If this driver supports a console and it hasn't been
+                * successfully registered yet, initialise the spin lock for it.
+                */
+               if (port->cons && !(port->cons->flags & CON_ENABLED))
+                       __uart_port_spin_lock_init(port);
+
                /*
                 * Ensure that the modem control lines are de-activated.
                 * keep the DTR setting that is set in uart_set_options()
@@ -3163,7 +3173,7 @@ static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
  *     Returns false if @ch is out of enabling sequence and should be
  *     handled some other way, true if @ch was consumed.
  */
-static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
+bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
 {
        int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
 
@@ -3186,99 +3196,9 @@ static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
        port->sysrq = 0;
        return true;
 }
-#else
-static inline bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
-{
-       return false;
-}
+EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq);
 #endif
 
-int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
-{
-       if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
-               return 0;
-
-       if (!port->has_sysrq || !port->sysrq)
-               return 0;
-
-       if (ch && time_before(jiffies, port->sysrq)) {
-               if (sysrq_mask()) {
-                       handle_sysrq(ch);
-                       port->sysrq = 0;
-                       return 1;
-               }
-               if (uart_try_toggle_sysrq(port, ch))
-                       return 1;
-       }
-       port->sysrq = 0;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(uart_handle_sysrq_char);
-
-int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
-{
-       if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
-               return 0;
-
-       if (!port->has_sysrq || !port->sysrq)
-               return 0;
-
-       if (ch && time_before(jiffies, port->sysrq)) {
-               if (sysrq_mask()) {
-                       port->sysrq_ch = ch;
-                       port->sysrq = 0;
-                       return 1;
-               }
-               if (uart_try_toggle_sysrq(port, ch))
-                       return 1;
-       }
-       port->sysrq = 0;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char);
-
-void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags)
-__releases(&port->lock)
-{
-       if (port->has_sysrq) {
-               int sysrq_ch = port->sysrq_ch;
-
-               port->sysrq_ch = 0;
-               spin_unlock_irqrestore(&port->lock, flags);
-               if (sysrq_ch)
-                       handle_sysrq(sysrq_ch);
-       } else {
-               spin_unlock_irqrestore(&port->lock, flags);
-       }
-}
-EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq);
-
-/*
- * We do the SysRQ and SAK checking like this...
- */
-int uart_handle_break(struct uart_port *port)
-{
-       struct uart_state *state = port->state;
-
-       if (port->handle_break)
-               port->handle_break(port);
-
-       if (port->has_sysrq && uart_console(port)) {
-               if (!port->sysrq) {
-                       port->sysrq = jiffies + SYSRQ_TIMEOUT;
-                       return 1;
-               }
-               port->sysrq = 0;
-       }
-
-       if (port->flags & UPF_SAK)
-               do_SAK(state->port.tty);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(uart_handle_break);
-
 EXPORT_SYMBOL(uart_write_wakeup);
 EXPORT_SYMBOL(uart_register_driver);
 EXPORT_SYMBOL(uart_unregister_driver);
@@ -3289,8 +3209,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
 
 /**
  * uart_get_rs485_mode() - retrieve rs485 properties for given uart
- * @dev: uart device
- * @rs485conf: output parameter
+ * @port: uart device's target port
  *
  * This function implements the device tree binding described in
  * Documentation/devicetree/bindings/serial/rs485.txt.
index e1179e74a2b89d892f4c8e0edceb3378c6755e5a..204bb68ce3ca91a1d9338658154bd5465109e4d3 100644 (file)
@@ -3301,6 +3301,9 @@ static int sci_probe_single(struct platform_device *dev,
                sciport->port.flags |= UPF_HARD_FLOW;
        }
 
+       if (sci_uart_driver.cons->index == sciport->port.line)
+               spin_lock_init(&sciport->port.lock);
+
        ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
        if (ret) {
                sci_cleanup_single(sciport);
index b9d672af8b655d60428a43788c2cd56d763d34d4..672cfa075e28f33c4b2eb64ce3c1337b9da9c2ad 100644 (file)
@@ -1465,7 +1465,6 @@ static int cdns_uart_probe(struct platform_device *pdev)
                cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
                cdns_uart_uart_driver.cons = &cdns_uart_console;
-               cdns_uart_console.index = id;
 #endif
 
                rc = uart_register_driver(&cdns_uart_uart_driver);
index 040d2a43e8e350924f259a3aaa8468b65edba15f..786fbb7d8be063f82d65a1fcaef8a627ea10c0b6 100644 (file)
@@ -69,11 +69,27 @@ struct xenbus_map_node {
        unsigned int   nr_handles;
 };
 
+struct map_ring_valloc {
+       struct xenbus_map_node *node;
+
+       /* Why do we need two arrays? See comment of __xenbus_map_ring */
+       union {
+               unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+               pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+       };
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+       unsigned int idx;       /* HVM only. */
+};
+
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-       int (*map)(struct xenbus_device *dev,
+       int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
                   void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -440,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
  * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address.  Returns 0 on success, and -errno on
  * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
@@ -449,12 +464,25 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
 {
        int err;
+       struct map_ring_valloc *info;
+
+       *vaddr = NULL;
+
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+               return -EINVAL;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
 
-       err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
-       /* Some hypervisors are buggy and can return 1. */
-       if (err > 0)
-               err = GNTST_general_error;
+       info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+       if (!info->node)
+               err = -ENOMEM;
+       else
+               err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
 
+       kfree(info->node);
+       kfree(info);
        return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
@@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
                             grant_ref_t *gnt_refs,
                             unsigned int nr_grefs,
                             grant_handle_t *handles,
-                            phys_addr_t *addrs,
+                            struct map_ring_valloc *info,
                             unsigned int flags,
                             bool *leaked)
 {
-       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
-       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i, j;
-       int err = GNTST_okay;
 
        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        for (i = 0; i < nr_grefs; i++) {
-               memset(&map[i], 0, sizeof(map[i]));
-               gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
-                                 dev->otherend_id);
+               gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+                                 gnt_refs[i], dev->otherend_id);
                handles[i] = INVALID_GRANT_HANDLE;
        }
 
-       gnttab_batch_map(map, i);
+       gnttab_batch_map(info->map, i);
 
        for (i = 0; i < nr_grefs; i++) {
-               if (map[i].status != GNTST_okay) {
-                       err = map[i].status;
-                       xenbus_dev_fatal(dev, map[i].status,
+               if (info->map[i].status != GNTST_okay) {
+                       xenbus_dev_fatal(dev, info->map[i].status,
                                         "mapping in shared page %d from domain %d",
                                         gnt_refs[i], dev->otherend_id);
                        goto fail;
                } else
-                       handles[i] = map[i].handle;
+                       handles[i] = info->map[i].handle;
        }
 
-       return GNTST_okay;
+       return 0;
 
  fail:
        for (i = j = 0; i < nr_grefs; i++) {
                if (handles[i] != INVALID_GRANT_HANDLE) {
-                       memset(&unmap[j], 0, sizeof(unmap[j]));
-                       gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+                       gnttab_set_unmap_op(&info->unmap[j],
+                                           info->phys_addrs[i],
                                            GNTMAP_host_map, handles[i]);
                        j++;
                }
        }
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
                BUG();
 
        *leaked = false;
        for (i = 0; i < j; i++) {
-               if (unmap[i].status != GNTST_okay) {
+               if (info->unmap[i].status != GNTST_okay) {
                        *leaked = true;
                        break;
                }
        }
 
-       return err;
+       return -ENOENT;
 }
 
 /**
@@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
        return err;
 }
 
-struct map_ring_valloc_hvm
-{
-       unsigned int idx;
-
-       /* Why do we need two arrays? See comment of __xenbus_map_ring */
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-       unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
                                            unsigned int goffset,
                                            unsigned int len,
                                            void *data)
 {
-       struct map_ring_valloc_hvm *info = data;
+       struct map_ring_valloc *info = data;
        unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
 
        info->phys_addrs[info->idx] = vaddr;
@@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
        info->idx++;
 }
 
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-                                     grant_ref_t *gnt_ref,
-                                     unsigned int nr_grefs,
-                                     void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+                              struct map_ring_valloc *info,
+                              grant_ref_t *gnt_ref,
+                              unsigned int nr_grefs,
+                              void **vaddr)
 {
-       struct xenbus_map_node *node;
+       struct xenbus_map_node *node = info->node;
        int err;
        void *addr;
        bool leaked = false;
-       struct map_ring_valloc_hvm info = {
-               .idx = 0,
-       };
        unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
 
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       *vaddr = NULL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
-               return -ENOMEM;
-
        err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
        if (err)
                goto out_err;
 
        gnttab_foreach_grant(node->hvm.pages, nr_grefs,
                             xenbus_map_ring_setup_grant_hvm,
-                            &info);
+                            info);
 
        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
-                               info.phys_addrs, GNTMAP_host_map, &leaked);
+                               info, GNTMAP_host_map, &leaked);
        node->nr_handles = nr_grefs;
 
        if (err)
@@ -641,11 +644,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        spin_unlock(&xenbus_valloc_lock);
 
        *vaddr = addr;
+       info->node = NULL;
+
        return 0;
 
  out_xenbus_unmap_ring:
        if (!leaked)
-               xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+               xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
        else
                pr_alert("leaking %p size %u page(s)",
                         addr, nr_pages);
@@ -653,7 +658,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        if (!leaked)
                free_xenballooned_pages(nr_pages, node->hvm.pages);
  out_err:
-       kfree(node);
        return err;
 }
 
@@ -676,40 +680,28 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
 #ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                    grant_ref_t *gnt_refs,
-                                    unsigned int nr_grefs,
-                                    void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+                             struct map_ring_valloc *info,
+                             grant_ref_t *gnt_refs,
+                             unsigned int nr_grefs,
+                             void **vaddr)
 {
-       struct xenbus_map_node *node;
+       struct xenbus_map_node *node = info->node;
        struct vm_struct *area;
-       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int err = GNTST_okay;
        int i;
        bool leaked;
 
-       *vaddr = NULL;
-
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
+       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
+       if (!area)
                return -ENOMEM;
 
-       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
-       if (!area) {
-               kfree(node);
-               return -ENOMEM;
-       }
-
        for (i = 0; i < nr_grefs; i++)
-               phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+               info->phys_addrs[i] =
+                       arbitrary_virt_to_machine(info->ptes[i]).maddr;
 
        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-                               phys_addrs,
-                               GNTMAP_host_map | GNTMAP_contains_pte,
+                               info, GNTMAP_host_map | GNTMAP_contains_pte,
                                &leaked);
        if (err)
                goto failed;
@@ -722,6 +714,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
        spin_unlock(&xenbus_valloc_lock);
 
        *vaddr = area->addr;
+       info->node = NULL;
+
        return 0;
 
 failed:
@@ -730,11 +724,10 @@ failed:
        else
                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
 
-       kfree(node);
        return err;
 }
 
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
 {
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -798,12 +791,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 }
 
 static const struct xenbus_ring_ops ring_ops_pv = {
-       .map = xenbus_map_ring_valloc_pv,
-       .unmap = xenbus_unmap_ring_vfree_pv,
+       .map = xenbus_map_ring_pv,
+       .unmap = xenbus_unmap_ring_pv,
 };
 #endif
 
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
 {
        unsigned int idx;
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -814,19 +807,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
                                              unsigned int len,
                                              void *data)
 {
-       struct unmap_ring_vfree_hvm *info = data;
+       struct unmap_ring_hvm *info = data;
 
        info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
 
        info->idx++;
 }
 
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
 {
        int rv;
        struct xenbus_map_node *node;
        void *addr;
-       struct unmap_ring_vfree_hvm info = {
+       struct unmap_ring_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages;
@@ -887,8 +880,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
 static const struct xenbus_ring_ops ring_ops_hvm = {
-       .map = xenbus_map_ring_valloc_hvm,
-       .unmap = xenbus_unmap_ring_vfree_hvm,
+       .map = xenbus_map_ring_hvm,
+       .unmap = xenbus_unmap_ring_hvm,
 };
 
 void __init xenbus_ring_ops_init(void)
index c264839b2fd0b8e2bc73419e64acf03b0d620148..24fd163c6323e5b28616ecff2d255941c50aa40f 100644 (file)
@@ -71,7 +71,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
                swap(vnode, vnode2);
 
        if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
-               op->error = -EINTR;
+               op->error = -ERESTARTSYS;
                op->flags |= AFS_OPERATION_STOP;
                _leave(" = f [I 0]");
                return false;
@@ -80,7 +80,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
 
        if (vnode2) {
                if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
-                       op->error = -EINTR;
+                       op->error = -ERESTARTSYS;
                        op->flags |= AFS_OPERATION_STOP;
                        mutex_unlock(&vnode->io_lock);
                        op->flags &= ~AFS_OPERATION_LOCK_0;
index 7437806332d9b5f35a851f621f73cb8d79100e43..a121c247d95a3d63757cfd9e30557c41faa542e2 100644 (file)
@@ -449,6 +449,7 @@ static int afs_store_data(struct address_space *mapping,
        op->store.first_offset = offset;
        op->store.last_to = to;
        op->mtime = vnode->vfs_inode.i_mtime;
+       op->flags |= AFS_OPERATION_UNINTR;
        op->ops = &afs_store_data_operation;
 
 try_next_key:
index b04c528b19d3425373286ec6e3c78e4eeee3c9cd..74c886f7c51cbe25334d38a9c57fcb2695f05bd4 100644 (file)
@@ -53,7 +53,7 @@ static int autofs_write(struct autofs_sb_info *sbi,
 
        mutex_lock(&sbi->pipe_mutex);
        while (bytes) {
-               wr = __kernel_write(file, data, bytes, &file->f_pos);
+               wr = kernel_write(file, data, bytes, &file->f_pos);
                if (wr <= 0)
                        break;
                data += wr;
index 3a7648bff42cacb25832ca56c7a49b28eb615b92..82ab6e5a386daff83c7c25213cab01ea593615f7 100644 (file)
@@ -1196,7 +1196,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
                switch (tm->op) {
                case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
                        BUG_ON(tm->slot < n);
-                       /* Fallthrough */
+                       fallthrough;
                case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
                case MOD_LOG_KEY_REMOVE:
                        btrfs_set_node_key(eb, &tm->key, tm->slot);
index 5615320fa659f31835f9415d0208a15b80404b0b..741c7e19c32f2cb3116f9507ed3cc307b1fb5af3 100644 (file)
@@ -619,6 +619,7 @@ void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
        list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
                                 bg_list) {
                list_del_init(&block_group->bg_list);
+               btrfs_put_block_group(block_group);
                btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
        }
        spin_unlock(&fs_info->unused_bgs_lock);
index 7c6f0bbb54a5bdb310fd6faae3088cf9a324efe3..b1a148058773e486c55b899e2360627bb2d15db1 100644 (file)
@@ -2593,10 +2593,12 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
                    !extent_buffer_uptodate(tree_root->node)) {
                        handle_error = true;
 
-                       if (IS_ERR(tree_root->node))
+                       if (IS_ERR(tree_root->node)) {
                                ret = PTR_ERR(tree_root->node);
-                       else if (!extent_buffer_uptodate(tree_root->node))
+                               tree_root->node = NULL;
+                       } else if (!extent_buffer_uptodate(tree_root->node)) {
                                ret = -EUCLEAN;
+                       }
 
                        btrfs_warn(fs_info, "failed to read tree root");
                        continue;
index 68c96057ad2d8c687e13c55dd547b9ffccf64985..608f93438b294e465d71a2c4e919f446bce4c714 100644 (file)
@@ -5058,25 +5058,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
        int refs;
-       /* the ref bit is tricky.  We have to make sure it is set
-        * if we have the buffer dirty.   Otherwise the
-        * code to free a buffer can end up dropping a dirty
-        * page
+       /*
+        * The TREE_REF bit is first set when the extent_buffer is added
+        * to the radix tree. It is also reset, if unset, when a new reference
+        * is created by find_extent_buffer.
         *
-        * Once the ref bit is set, it won't go away while the
-        * buffer is dirty or in writeback, and it also won't
-        * go away while we have the reference count on the
-        * eb bumped.
+        * It is only cleared in two cases: freeing the last non-tree
+        * reference to the extent_buffer when its STALE bit is set or
+        * calling releasepage when the tree reference is the only reference.
         *
-        * We can't just set the ref bit without bumping the
-        * ref on the eb because free_extent_buffer might
-        * see the ref bit and try to clear it.  If this happens
-        * free_extent_buffer might end up dropping our original
-        * ref by mistake and freeing the page before we are able
-        * to add one more ref.
+        * In both cases, care is taken to ensure that the extent_buffer's
+        * pages are not under io. However, releasepage can be concurrently
+        * called with creating new references, which is prone to race
+        * conditions between the calls to check_buffer_tree_ref in those
+        * codepaths and clearing TREE_REF in try_release_extent_buffer.
         *
-        * So bump the ref count first, then set the bit.  If someone
-        * beat us to it, drop the ref we added.
+        * The actual lifetime of the extent_buffer in the radix tree is
+        * adequately protected by the refcount, but the TREE_REF bit and
+        * its corresponding reference are not. To protect against this
+        * class of races, we call check_buffer_tree_ref from the codepaths
+        * which trigger io after they set eb->io_pages. Note that once io is
+        * initiated, TREE_REF can no longer be cleared, so that is the
+        * moment at which any such race is best fixed.
         */
        refs = atomic_read(&eb->refs);
        if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -5527,6 +5530,11 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
        clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
        eb->read_mirror = 0;
        atomic_set(&eb->io_pages, num_reads);
+       /*
+        * It is possible for releasepage to clear the TREE_REF bit before we
+        * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+        */
+       check_buffer_tree_ref(eb);
        for (i = 0; i < num_pages; i++) {
                page = eb->pages[i];
 
index 2520605afc256ebe2d0e415051370100928f7b34..b0d2c976587e523c070a6d41df05fefeeead226b 100644 (file)
@@ -3509,6 +3509,7 @@ const struct file_operations btrfs_file_operations = {
        .read_iter      = generic_file_read_iter,
        .splice_read    = generic_file_splice_read,
        .write_iter     = btrfs_file_write_iter,
+       .splice_write   = iter_file_splice_write,
        .mmap           = btrfs_file_mmap,
        .open           = btrfs_file_open,
        .release        = btrfs_release_file,
index 18d384f4af54e1d9c606bfeec8fff79688669a7d..43c803c16b4824d695f84c8fca0b1835ecc82511 100644 (file)
@@ -1690,12 +1690,8 @@ out_check:
                        ret = fallback_to_cow(inode, locked_page, cow_start,
                                              found_key.offset - 1,
                                              page_started, nr_written);
-                       if (ret) {
-                               if (nocow)
-                                       btrfs_dec_nocow_writers(fs_info,
-                                                               disk_bytenr);
+                       if (ret)
                                goto error;
-                       }
                        cow_start = (u64)-1;
                }
 
@@ -1711,9 +1707,6 @@ out_check:
                                          ram_bytes, BTRFS_COMPRESS_NONE,
                                          BTRFS_ORDERED_PREALLOC);
                        if (IS_ERR(em)) {
-                               if (nocow)
-                                       btrfs_dec_nocow_writers(fs_info,
-                                                               disk_bytenr);
                                ret = PTR_ERR(em);
                                goto error;
                        }
index 7887317033c98218a45bf710157fa37459d09b91..af92525dbb1680f7def6fd76925faccd33633fd3 100644 (file)
@@ -509,7 +509,7 @@ static int process_leaf(struct btrfs_root *root,
                switch (key.type) {
                case BTRFS_EXTENT_ITEM_KEY:
                        *num_bytes = key.offset;
-                       /* fall through */
+                       fallthrough;
                case BTRFS_METADATA_ITEM_KEY:
                        *bytenr = key.objectid;
                        ret = process_extent_item(fs_info, path, &key, i,
index 41ee8863376963ea298311b8f887a19b528677e8..c7bd3fdd77928411e44c383ab7cd56db8d373f90 100644 (file)
@@ -879,8 +879,8 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
                return false;
        }
        global_rsv->reserved -= ticket->bytes;
+       remove_ticket(space_info, ticket);
        ticket->bytes = 0;
-       list_del_init(&ticket->list);
        wake_up(&ticket->wait);
        space_info->tickets_id++;
        if (global_rsv->reserved < global_rsv->size)
index bc73fd670702cf1b170aa080c40d9287169834f1..c3826ae883f0e3bde0f9b197c2d19acf2eda32f8 100644 (file)
@@ -523,7 +523,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_compress_force:
                case Opt_compress_force_type:
                        compress_force = true;
-                       /* Fallthrough */
+                       fallthrough;
                case Opt_compress:
                case Opt_compress_type:
                        saved_compress_type = btrfs_test_opt(info,
@@ -622,7 +622,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        btrfs_set_opt(info->mount_opt, NOSSD);
                        btrfs_clear_and_info(info, SSD,
                                             "not using ssd optimizations");
-                       /* Fallthrough */
+                       fallthrough;
                case Opt_nossd_spread:
                        btrfs_clear_and_info(info, SSD_SPREAD,
                                             "not using spread ssd allocation scheme");
@@ -793,7 +793,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_recovery:
                        btrfs_warn(info,
                                   "'recovery' is deprecated, use 'usebackuproot' instead");
-                       /* fall through */
+                       fallthrough;
                case Opt_usebackuproot:
                        btrfs_info(info,
                                   "trying to use backup root at mount time");
index f067b5934c46b28bae8a24966672452aad17124f..75af2334b2e37d93b5f16765458ca7f6ca869c73 100644 (file)
@@ -408,7 +408,7 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
                return BTRFS_MAP_WRITE;
        default:
                WARN_ON_ONCE(1);
-               /* fall through */
+               fallthrough;
        case REQ_OP_READ:
                return BTRFS_MAP_READ;
        }
index e7726f5f1241c23a92c1e486045478c274ba9c6c..3080cda9e82457c40d50c0fe3afc1c9aac8fa2e4 100644 (file)
@@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
        }
 
        data = kmap(page);
-       ret = __kernel_write(file, data, len, &pos);
+       ret = kernel_write(file, data, len, &pos);
        kunmap(page);
        fput(file);
        if (ret != len)
index fc98b97b396a44c38ba459a2eeabd93bc8eac6bb..53588d7517b4d0842e2d65575f25c94625524693 100644 (file)
@@ -399,6 +399,10 @@ skip_rdma:
                        if (ses->sign)
                                seq_puts(m, " signed");
 
+                       seq_printf(m, "\n\tUser: %d Cred User: %d",
+                                  from_kuid(&init_user_ns, ses->linux_uid),
+                                  from_kuid(&init_user_ns, ses->cred_uid));
+
                        if (ses->chan_count > 1) {
                                seq_printf(m, "\n\n\tExtra Channels: %zu\n",
                                           ses->chan_count-1);
@@ -406,7 +410,7 @@ skip_rdma:
                                        cifs_dump_channel(m, j, &ses->chans[j]);
                        }
 
-                       seq_puts(m, "\n\tShares:");
+                       seq_puts(m, "\n\n\tShares:");
                        j = 0;
 
                        seq_printf(m, "\n\t%d) IPC: ", j);
index c7a311d28d3d7d9d1c3657bae29781748bbce38d..99b3180c613adf09d7beb09954894db7bdd01acd 100644 (file)
@@ -156,5 +156,5 @@ extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.27"
+#define CIFS_VERSION   "2.28"
 #endif                         /* _CIFSFS_H */
index 5fac34f192afd8f96ba125c0036905d7c962e1be..a61abde09ffe1f612b5d7bd23c8ef9773117c178 100644 (file)
@@ -5306,9 +5306,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
        vol_info->nocase = master_tcon->nocase;
        vol_info->nohandlecache = master_tcon->nohandlecache;
        vol_info->local_lease = master_tcon->local_lease;
+       vol_info->no_lease = master_tcon->no_lease;
+       vol_info->resilient = master_tcon->use_resilient;
+       vol_info->persistent = master_tcon->use_persistent;
+       vol_info->handle_timeout = master_tcon->handle_timeout;
        vol_info->no_linux_ext = !master_tcon->unix_ext;
+       vol_info->linux_ext = master_tcon->posix_extensions;
        vol_info->sectype = master_tcon->ses->sectype;
        vol_info->sign = master_tcon->ses->sign;
+       vol_info->seal = master_tcon->seal;
 
        rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
        if (rc) {
@@ -5334,10 +5340,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
                goto out;
        }
 
-       /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
-       if (tcon->posix_extensions)
-               cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-
        if (cap_unix(ses))
                reset_cifs_unix_caps(0, tcon, NULL, vol_info);
 
index 9b0f8f33f832ceb941750fa0fb31f98aa884b937..be46fab4c96d8b794bd193b2dbb0a1c800bc88fc 100644 (file)
@@ -1149,20 +1149,20 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 
 /*
  * Set the byte-range lock (posix style). Returns:
- * 1) 0, if we set the lock and don't need to request to the server;
- * 2) 1, if we need to request to the server;
- * 3) <0, if the error occurs while setting the lock.
+ * 1) <0, if the error occurs while setting the lock;
+ * 2) 0, if we set the lock and don't need to request to the server;
+ * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
+ * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
  */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
-       int rc = 1;
+       int rc = FILE_LOCK_DEFERRED + 1;
 
        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;
 
-try_again:
        cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
@@ -1171,13 +1171,6 @@ try_again:
 
        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
-       if (rc == FILE_LOCK_DEFERRED) {
-               rc = wait_event_interruptible(flock->fl_wait,
-                                       list_empty(&flock->fl_blocked_member));
-               if (!rc)
-                       goto try_again;
-               locks_delete_block(flock);
-       }
        return rc;
 }
 
@@ -1652,7 +1645,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                int posix_lock_type;
 
                rc = cifs_posix_lock_set(file, flock);
-               if (!rc || rc < 0)
+               if (rc <= FILE_LOCK_DEFERRED)
                        return rc;
 
                if (type & server->vals->shared_lock_type)
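Illustrative sketch, not part of the patch (LOCK_DEFERRED and the helper names are stand-ins): the hunks above move cifs_posix_lock_set() to a convention of <0 for error, 0 for "handled locally", FILE_LOCK_DEFERRED for "caller must wait", and FILE_LOCK_DEFERRED + 1 for "ask the server", and the caller now returns early for anything <= FILE_LOCK_DEFERRED. Dispatching on that kind of return code, in standalone C:

/* Standalone sketch of the multi-valued return convention above. */
#include <stdio.h>

#define LOCK_DEFERRED 1                 /* stand-in for FILE_LOCK_DEFERRED */

static int try_local_lock(int conflict)
{
        if (conflict)
                return LOCK_DEFERRED;   /* someone else holds it: wait */
        return 0;                       /* handled locally */
}

static const char *dispatch(int rc)
{
        if (rc < 0)
                return "error";
        if (rc == 0)
                return "handled locally";
        if (rc <= LOCK_DEFERRED)
                return "deferred: caller must wait";
        return "forward the request to the server";
}

int main(void)
{
        printf("%s\n", dispatch(try_local_lock(0)));
        printf("%s\n", dispatch(try_local_lock(1)));
        printf("%s\n", dispatch(LOCK_DEFERRED + 1));
        return 0;
}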
index ce95801e9b6644dcd8d6e8ae79bcc3308b5a4491..49c3ea8aa84588d08c8e37671153fc0a40bc1519 100644 (file)
@@ -2044,6 +2044,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
        FILE_UNIX_BASIC_INFO *info_buf_target;
        unsigned int xid;
        int rc, tmprc;
+       bool new_target = d_really_is_negative(target_dentry);
 
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;
@@ -2120,8 +2121,13 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
         */
 
 unlink_target:
-       /* Try unlinking the target dentry if it's not negative */
-       if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
+       /*
+        * If the target dentry was created during the rename, try
+        * unlinking it if it's not negative
+        */
+       if (new_target &&
+           d_really_is_positive(target_dentry) &&
+           (rc == -EACCES || rc == -EEXIST)) {
                if (d_is_dir(target_dentry))
                        tmprc = cifs_rmdir(target_dir, target_dentry);
                else
index 4a73e63c4d439cb67466a5391c4da7e8238e0f92..dcde44ff6cf9f3192e1fbb6fc63ecca8d142b5ef 100644 (file)
@@ -169,6 +169,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        unsigned int xid;
        struct cifsFileInfo *pSMBFile = filep->private_data;
        struct cifs_tcon *tcon;
+       struct tcon_link *tlink;
        struct cifs_sb_info *cifs_sb;
        __u64   ExtAttrBits = 0;
        __u64   caps;
@@ -307,13 +308,19 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                break;
                        }
                        cifs_sb = CIFS_SB(inode->i_sb);
-                       tcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+                       tlink = cifs_sb_tlink(cifs_sb);
+                       if (IS_ERR(tlink)) {
+                               rc = PTR_ERR(tlink);
+                               break;
+                       }
+                       tcon = tlink_tcon(tlink);
                        if (tcon && tcon->ses->server->ops->notify) {
                                rc = tcon->ses->server->ops->notify(xid,
                                                filep, (void __user *)arg);
                                cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
                        } else
                                rc = -EOPNOTSUPP;
+                       cifs_put_tlink(tlink);
                        break;
                default:
                        cifs_dbg(FYI, "unsupported ioctl\n");
index 6a39451973f8b9c4b4c778f35819aba8dd7c8421..157992864ce7e55b50805242ab1240f7d2db5171 100644 (file)
@@ -354,9 +354,13 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_sync_hdr *shdr)
                  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
                break;
        case SMB2_CHANGE_NOTIFY:
+               *off = le16_to_cpu(
+                 ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
+               *len = le32_to_cpu(
+                 ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
+               break;
        default:
-               /* BB FIXME for unimplemented cases above */
-               cifs_dbg(VFS, "no length check for command\n");
+               cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
                break;
        }
 
index d9fdafa5eb601123a00a6b5c7c3eb6c42a84fbff..32f90dc82c840fad48bf5a37b0d6c6a780bc7db7 100644 (file)
@@ -2148,7 +2148,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
 
        tcon = cifs_sb_master_tcon(cifs_sb);
        oparms.tcon = tcon;
-       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
        oparms.disposition = FILE_OPEN;
        oparms.create_options = cifs_create_options(cifs_sb, 0);
        oparms.fid = &fid;
index d11e31064679b12a6e315679cc2ff8210cf4b569..84433d0653f92a896eba0a063eecdec1b3321b6d 100644 (file)
@@ -523,7 +523,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
 {
-       int rc;
+       long rc;
        int *credits;
        int optype;
        long int t;
index de43534aa2997aa87994a9447f95578de9931258..91ece649285d2824ff99ee5b996adeb8ff973a93 100644 (file)
@@ -309,7 +309,7 @@ const struct file_operations exfat_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate        = exfat_iterate,
-       .fsync          = generic_file_fsync,
+       .fsync          = exfat_file_fsync,
 };
 
 int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
@@ -425,10 +425,12 @@ static void exfat_init_name_entry(struct exfat_dentry *ep,
        ep->dentry.name.flags = 0x0;
 
        for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) {
-               ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
-               if (*uniname == 0x0)
-                       break;
-               uniname++;
+               if (*uniname != 0x0) {
+                       ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
+                       uniname++;
+               } else {
+                       ep->dentry.name.unicode_0_14[i] = 0x0;
+               }
        }
 }
 
index 595f3117f4924893a07eba48b55ef3d7d6edbee6..7579cd3bbadba87a8bfd56c0699c8186902a6763 100644 (file)
@@ -420,6 +420,7 @@ void exfat_truncate(struct inode *inode, loff_t size);
 int exfat_setattr(struct dentry *dentry, struct iattr *attr);
 int exfat_getattr(const struct path *path, struct kstat *stat,
                unsigned int request_mask, unsigned int query_flags);
+int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
 
 /* namei.c */
 extern const struct dentry_operations exfat_dentry_ops;
index fce03f31878735e1fa389231abc1052f24447146..3b7fea465fd41e2859a94afac61dd9768a9f3119 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/cred.h>
 #include <linux/buffer_head.h>
+#include <linux/blkdev.h>
 
 #include "exfat_raw.h"
 #include "exfat_fs.h"
@@ -346,12 +347,28 @@ out:
        return error;
 }
 
+int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+{
+       struct inode *inode = filp->f_mapping->host;
+       int err;
+
+       err = __generic_file_fsync(filp, start, end, datasync);
+       if (err)
+               return err;
+
+       err = sync_blockdev(inode->i_sb->s_bdev);
+       if (err)
+               return err;
+
+       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+}
+
 const struct file_operations exfat_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
-       .fsync          = generic_file_fsync,
+       .fsync          = exfat_file_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
 };
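Illustrative sketch (write_durably() is an invented userspace helper, not exFAT code): the new exfat_file_fsync() above flushes in layers — the file's data and inode first, then the block device's dirty buffers, then an explicit device cache flush. A rough userspace analogue of "flush the layer above the file too" is fsync()ing the parent directory after fsync()ing a freshly created file:

/* Layered durability: file data first, then the directory entry. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_durably(const char *dir, const char *path, const char *msg)
{
        int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
        int dfd;

        if (fd < 0)
                return -1;
        if (write(fd, msg, strlen(msg)) < 0 || fsync(fd) < 0) {
                close(fd);
                return -1;
        }
        close(fd);

        dfd = open(dir, O_RDONLY | O_DIRECTORY);  /* flush the next layer up */
        if (dfd < 0)
                return -1;
        if (fsync(dfd) < 0) {
                close(dfd);
                return -1;
        }
        close(dfd);
        return 0;
}

int main(void)
{
        int rc = write_durably(".", "./durable.txt", "hello\n");

        printf("write_durably -> %d\n", rc);
        return rc ? 1 : 0;
}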
index 5b0f35329d63e0b9ab52162e4fbad129601c458d..2b9e21094a96dbb252f38e34db272e567d665665 100644 (file)
@@ -975,7 +975,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
                goto unlock;
        }
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
        exfat_chain_set(&clu_to_free, ei->start_clu,
                EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi), ei->flags);
 
@@ -1002,6 +1001,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
        num_entries++;
        brelse(bh);
 
+       exfat_set_vol_flags(sb, VOL_DIRTY);
        err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
        if (err) {
                exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
@@ -1077,10 +1077,14 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
 
                epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
                        &sector_old);
+               if (!epold)
+                       return -EIO;
                epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
                        &sector_new);
-               if (!epold || !epnew)
+               if (!epnew) {
+                       brelse(old_bh);
                        return -EIO;
+               }
 
                memcpy(epnew, epold, DENTRY_SIZE);
                exfat_update_bh(sb, new_bh, sync);
@@ -1161,10 +1165,14 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
 
        epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
                &sector_mov);
+       if (!epmov)
+               return -EIO;
        epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
                &sector_new);
-       if (!epmov || !epnew)
+       if (!epnew) {
+               brelse(mov_bh);
                return -EIO;
+       }
 
        memcpy(epnew, epmov, DENTRY_SIZE);
        exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
index e650e65536f848c544b731f273b5f8839298e850..253a92460d5222f4f3aa75e52fc0b3eab084bc06 100644 (file)
@@ -693,10 +693,20 @@ static void exfat_free(struct fs_context *fc)
        }
 }
 
+static int exfat_reconfigure(struct fs_context *fc)
+{
+       fc->sb_flags |= SB_NODIRATIME;
+
+       /* volume flag will be updated in exfat_sync_fs */
+       sync_filesystem(fc->root->d_sb);
+       return 0;
+}
+
 static const struct fs_context_operations exfat_context_ops = {
        .parse_param    = exfat_parse_param,
        .get_tree       = exfat_get_tree,
        .free           = exfat_free,
+       .reconfigure    = exfat_reconfigure,
 };
 
 static int exfat_init_fs_context(struct fs_context *fc)
index 65603502fed6f4dc6106b02bbbc7735d3fd2678d..656647f9575a7cef916beda0081a03f9d0ecdc86 100644 (file)
@@ -230,7 +230,7 @@ struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
                d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(mnt);
        d_instantiate(path.dentry, inode);
-       file = alloc_file(&path, flags | FMODE_NONOTIFY, fops);
+       file = alloc_file(&path, flags, fops);
        if (IS_ERR(file)) {
                ihold(inode);
                path_put(&path);
index 72c9560f4467e0ae96da63545876f46971d288d1..68cd700a2719eb7617dc3f2a0dad1d7605eb2438 100644 (file)
@@ -468,21 +468,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 }
 
 
-/**
- * __gfs2_readpage - readpage
- * @file: The file to read a page for
- * @page: The page to read
- *
- * This is the core of gfs2's readpage. It's used by the internal file
- * reading code as in that case we already hold the glock. Also it's
- * called by gfs2_readpage() once the required lock has been granted.
- */
-
 static int __gfs2_readpage(void *file, struct page *page)
 {
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
-
        int error;
 
        if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
@@ -505,36 +494,11 @@ static int __gfs2_readpage(void *file, struct page *page)
  * gfs2_readpage - read a page of a file
  * @file: The file to read
  * @page: The page of the file
- *
- * This deals with the locking required. We have to unlock and
- * relock the page in order to get the locking in the right
- * order.
  */
 
 static int gfs2_readpage(struct file *file, struct page *page)
 {
-       struct address_space *mapping = page->mapping;
-       struct gfs2_inode *ip = GFS2_I(mapping->host);
-       struct gfs2_holder gh;
-       int error;
-
-       unlock_page(page);
-       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-       error = gfs2_glock_nq(&gh);
-       if (unlikely(error))
-               goto out;
-       error = AOP_TRUNCATED_PAGE;
-       lock_page(page);
-       if (page->mapping == mapping && !PageUptodate(page))
-               error = __gfs2_readpage(file, page);
-       else
-               unlock_page(page);
-       gfs2_glock_dq(&gh);
-out:
-       gfs2_holder_uninit(&gh);
-       if (error && error != AOP_TRUNCATED_PAGE)
-               lock_page(page);
-       return error;
+       return __gfs2_readpage(file, page);
 }
 
 /**
@@ -598,16 +562,9 @@ static void gfs2_readahead(struct readahead_control *rac)
 {
        struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_holder gh;
 
-       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-       if (gfs2_glock_nq(&gh))
-               goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                mpage_readahead(rac, gfs2_block_map);
-       gfs2_glock_dq(&gh);
-out_uninit:
-       gfs2_holder_uninit(&gh);
 }
 
 /**
index fe305e4bfd37345048aa6859ed75b74c1699987a..bebde537ac8cf26d3329b817dccfb356f6e4e2c3 100644 (file)
@@ -558,8 +558,29 @@ out_uninit:
        return block_page_mkwrite_return(ret);
 }
 
+static vm_fault_t gfs2_fault(struct vm_fault *vmf)
+{
+       struct inode *inode = file_inode(vmf->vma->vm_file);
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_holder gh;
+       vm_fault_t ret;
+       int err;
+
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       err = gfs2_glock_nq(&gh);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
+               goto out_uninit;
+       }
+       ret = filemap_fault(vmf);
+       gfs2_glock_dq(&gh);
+out_uninit:
+       gfs2_holder_uninit(&gh);
+       return ret;
+}
+
 static const struct vm_operations_struct gfs2_vm_ops = {
-       .fault = filemap_fault,
+       .fault = gfs2_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = gfs2_page_mkwrite,
 };
@@ -824,6 +845,9 @@ out_uninit:
 
 static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+       struct gfs2_inode *ip;
+       struct gfs2_holder gh;
+       size_t written = 0;
        ssize_t ret;
 
        if (iocb->ki_flags & IOCB_DIRECT) {
@@ -832,7 +856,31 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                        return ret;
                iocb->ki_flags &= ~IOCB_DIRECT;
        }
-       return generic_file_read_iter(iocb, to);
+       iocb->ki_flags |= IOCB_NOIO;
+       ret = generic_file_read_iter(iocb, to);
+       iocb->ki_flags &= ~IOCB_NOIO;
+       if (ret >= 0) {
+               if (!iov_iter_count(to))
+                       return ret;
+               written = ret;
+       } else {
+               if (ret != -EAGAIN)
+                       return ret;
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return ret;
+       }
+       ip = GFS2_I(iocb->ki_filp->f_mapping->host);
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       ret = gfs2_glock_nq(&gh);
+       if (ret)
+               goto out_uninit;
+       ret = generic_file_read_iter(iocb, to);
+       if (ret > 0)
+               written += ret;
+       gfs2_glock_dq(&gh);
+out_uninit:
+       gfs2_holder_uninit(&gh);
+       return written ? written : ret;
 }
 
 /**
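Illustrative sketch (read_cached() and read_slow() are invented): gfs2_file_read_iter() above now first attempts an IOCB_NOIO read that can only be satisfied from the page cache, and only when that makes no progress does it take the glock and issue the full read, returning whatever was read in total. The same fast-path/slow-path shape in standalone C:

/* Optimistic cache-only read, falling back to the expensive path. */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Fast path: copy only what is already "cached"; never blocks. */
static ssize_t read_cached(const char *cache, char *buf, size_t len)
{
        size_t n = strnlen(cache, len);

        memcpy(buf, cache, n);
        return (ssize_t)n;              /* 0 means: nothing cached */
}

/* Slow path: stands in for "take the lock, do real I/O". */
static ssize_t read_slow(char *buf, size_t len)
{
        return snprintf(buf, len, "slow-path data");
}

static ssize_t read_best_effort(const char *cache, char *buf, size_t len)
{
        ssize_t done = read_cached(cache, buf, len);  /* optimistic try */

        if (done > 0)
                return done;            /* the cache satisfied the read */
        return read_slow(buf, len);     /* pay for the lock/I-O only now */
}

int main(void)
{
        char buf[64];
        ssize_t n = read_best_effort("", buf, sizeof(buf));

        printf("read %zd bytes: %.*s\n", n, (int)n, buf);
        return 0;
}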
index 2299dcc417eae983d9c681553623182759ce11ac..8545024a1401f74bfe70e1e97154a29682331715 100644 (file)
@@ -1899,7 +1899,10 @@ bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
 
 static void flush_delete_work(struct gfs2_glock *gl)
 {
-       flush_delayed_work(&gl->gl_delete);
+       if (cancel_delayed_work(&gl->gl_delete)) {
+               queue_delayed_work(gfs2_delete_workqueue,
+                                  &gl->gl_delete, 0);
+       }
        gfs2_glock_queue_work(gl, 0);
 }
 
index c84887769b5adee510796a6d0ab2de658f6797be..de1d5f1d9ff8561a5bfc400e4759b70635c1455f 100644 (file)
@@ -531,8 +531,7 @@ static int freeze_go_sync(struct gfs2_glock *gl)
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-       if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
-           test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+       if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
@@ -545,8 +544,11 @@ static int freeze_go_sync(struct gfs2_glock *gl)
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
-               gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
-                              GFS2_LFC_FREEZE_GO_SYNC);
+               if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+                       gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+                                      GFS2_LFC_FREEZE_GO_SYNC);
+               else /* read-only mounts */
+                       atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }
        return 0;
 }
index 03ab11fab96268d858acffee4c5d48295869d652..ca2ec02436ec7fdd5cb004367618831b2ba9bb85 100644 (file)
@@ -399,7 +399,6 @@ enum {
        GIF_QD_LOCKED           = 1,
        GIF_ALLOC_FAILED        = 2,
        GIF_SW_PAGED            = 3,
-       GIF_ORDERED             = 4,
        GIF_FREE_VFS_INODE      = 5,
        GIF_GLOP_PENDING        = 6,
        GIF_DEFERRED_DELETE     = 7,
index 370c3a4b31acad07128d2e31d13c596a5086df17..6774865f5b5b5c5f8b468d85abc9cd4cb912afa4 100644 (file)
@@ -207,10 +207,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
        if (no_formal_ino && ip->i_no_formal_ino &&
            no_formal_ino != ip->i_no_formal_ino) {
+               error = -ESTALE;
                if (inode->i_state & I_NEW)
                        goto fail;
                iput(inode);
-               return ERR_PTR(-ESTALE);
+               return ERR_PTR(error);
        }
 
        if (inode->i_state & I_NEW)
index 3e4734431783296e316646ec58e43757a1fb6a77..a76e55bc28ebfdf5a5acd46cf613a758f9d05944 100644 (file)
@@ -613,6 +613,12 @@ static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
        return 0;
 }
 
+static void __ordered_del_inode(struct gfs2_inode *ip)
+{
+       if (!list_empty(&ip->i_ordered))
+               list_del_init(&ip->i_ordered);
+}
+
 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 {
        struct gfs2_inode *ip;
@@ -623,8 +629,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
                if (ip->i_inode.i_mapping->nrpages == 0) {
-                       test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
-                       list_del(&ip->i_ordered);
+                       __ordered_del_inode(ip);
                        continue;
                }
                list_move(&ip->i_ordered, &written);
@@ -643,8 +648,7 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
-               list_del(&ip->i_ordered);
-               WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
+               __ordered_del_inode(ip);
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
@@ -659,8 +663,7 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
        spin_lock(&sdp->sd_ordered_lock);
-       if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
-               list_del(&ip->i_ordered);
+       __ordered_del_inode(ip);
        spin_unlock(&sdp->sd_ordered_lock);
 }
 
@@ -1002,6 +1005,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 
 out:
        if (gfs2_withdrawn(sdp)) {
+               /**
+                * If the tr_list is empty, we're withdrawing during a log
+                * flush that targets a transaction, but the transaction was
+                * never queued onto any of the ail lists. Here we add it to
+                * ail1 just so that ail_drain() will find and free it.
+                */
+               spin_lock(&sdp->sd_ail_lock);
+               if (tr && list_empty(&tr->tr_list))
+                       list_add(&tr->tr_list, &sdp->sd_ail1_list);
+               spin_unlock(&sdp->sd_ail_lock);
                ail_drain(sdp); /* frees all transactions */
                tr = NULL;
        }
index c1cd6ae176597cd5720b7381c3b23039e5f02b5b..8965c751a30396cb024b9d3f321bdf538bfeee55 100644 (file)
@@ -53,9 +53,9 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
        if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
                return;
 
-       if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
+       if (list_empty(&ip->i_ordered)) {
                spin_lock(&sdp->sd_ordered_lock);
-               if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
+               if (list_empty(&ip->i_ordered))
                        list_add(&ip->i_ordered, &sdp->sd_log_ordered);
                spin_unlock(&sdp->sd_ordered_lock);
        }
index 733470ca6be9d5812dd1b8ee5f7da6051f22aeda..c7393ee9cf68369f588a52f5bea3e62290b6921c 100644 (file)
@@ -39,6 +39,7 @@ static void gfs2_init_inode_once(void *foo)
        atomic_set(&ip->i_sizehint, 0);
        init_rwsem(&ip->i_rw_mutex);
        INIT_LIST_HEAD(&ip->i_trunc_list);
+       INIT_LIST_HEAD(&ip->i_ordered);
        ip->i_qadata = NULL;
        gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
        memset(&ip->i_res, 0, sizeof(ip->i_res));
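Illustrative sketch (a minimal re-implementation of the list helpers, not the kernel's <linux/list.h>): the gfs2 hunks above drop the GIF_ORDERED flag and instead treat an i_ordered node that points at itself as "not queued" — hence the new INIT_LIST_HEAD() in gfs2_init_inode_once(), list_empty() as the membership test, and list_del_init() on removal. The idea in standalone C:

/* An embedded list node initialised to itself answers "am I queued?". */
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

static void node_init(struct list_node *n)            /* INIT_LIST_HEAD */
{
        n->next = n->prev = n;
}

static int node_on_list(const struct list_node *n)    /* !list_empty */
{
        return n->next != n;
}

static void node_add(struct list_node *n, struct list_node *head) /* list_add */
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void node_del_init(struct list_node *n)        /* list_del_init */
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        node_init(n);
}

int main(void)
{
        struct list_node head, item;

        node_init(&head);
        node_init(&item);
        printf("queued? %d\n", node_on_list(&item));   /* 0 */
        node_add(&item, &head);
        printf("queued? %d\n", node_on_list(&item));   /* 1 */
        node_del_init(&item);
        printf("queued? %d\n", node_on_list(&item));   /* 0 again */
        return 0;
}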
index 094f5fe7c00906aee304e273a5169157967aab7a..6d18d2c91add2805035de4eebe16d7ae142d16c5 100644 (file)
@@ -1136,7 +1136,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
                goto fail_per_node;
        }
 
-       if (!sb_rdonly(sb)) {
+       if (sb_rdonly(sb)) {
+               struct gfs2_holder freeze_gh;
+
+               error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                          LM_FLAG_NOEXP | GL_EXACT,
+                                          &freeze_gh);
+               if (error) {
+                       fs_err(sdp, "can't make FS RO: %d\n", error);
+                       goto fail_per_node;
+               }
+               gfs2_glock_dq_uninit(&freeze_gh);
+       } else {
                error = gfs2_make_fs_rw(sdp);
                if (error) {
                        fs_err(sdp, "can't make FS RW: %d\n", error);
index 96c345f4927387c694bebf67f3e61e712bfcd349..390ea79d682c256bbb19be61debe690e97e7a835 100644 (file)
@@ -364,8 +364,8 @@ void gfs2_recover_func(struct work_struct *work)
                /* Acquire a shared hold on the freeze lock */
 
                error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-                                          LM_FLAG_NOEXP | LM_FLAG_PRIORITY,
-                                          &thaw_gh);
+                                          LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+                                          GL_EXACT, &thaw_gh);
                if (error)
                        goto fail_gunlock_ji;
 
index 32d8d26126a1617ce85513425fcfe2e5d1035b1f..47d0ae158b6990a2a502fbe589699195ff00873b 100644 (file)
@@ -167,7 +167,8 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        if (error)
                return error;
 
-       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
+       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                  LM_FLAG_NOEXP | GL_EXACT,
                                   &freeze_gh);
        if (error)
                goto fail_threads;
@@ -203,7 +204,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        return 0;
 
 fail:
-       freeze_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_uninit(&freeze_gh);
 fail_threads:
        if (sdp->sd_quotad_process)
@@ -430,7 +430,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
        }
 
        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
-                                  GL_NOCACHE, &sdp->sd_freeze_gh);
+                                  LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
        if (error)
                goto out;
 
@@ -613,13 +613,15 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
            !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
                if (!log_write_allowed) {
                        error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-                                                  LM_ST_SHARED, GL_NOCACHE |
-                                                  LM_FLAG_TRY, &freeze_gh);
+                                                  LM_ST_SHARED, LM_FLAG_TRY |
+                                                  LM_FLAG_NOEXP | GL_EXACT,
+                                                  &freeze_gh);
                        if (error == GLR_TRYFAILED)
                                error = 0;
                } else {
                        error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-                                                  LM_ST_SHARED, GL_NOCACHE,
+                                                  LM_ST_SHARED,
+                                                  LM_FLAG_NOEXP | GL_EXACT,
                                                   &freeze_gh);
                        if (error && !gfs2_withdrawn(sdp))
                                return error;
@@ -761,8 +763,8 @@ void gfs2_freeze_func(struct work_struct *work)
        struct super_block *sb = sdp->sd_vfs;
 
        atomic_inc(&sb->s_active);
-       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
-                                  &freeze_gh);
+       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                  LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
        if (error) {
                fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
                gfs2_assert_withdraw(sdp, 0);
@@ -774,8 +776,6 @@ void gfs2_freeze_func(struct work_struct *work)
                                error);
                        gfs2_assert_withdraw(sdp, 0);
                }
-               if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
-                       freeze_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&freeze_gh);
        }
        deactivate_super(sb);
index e507737f044e08d34c29fc724c0ec10abda98113..9fd7e69696c332c9874aa3b3175e63c11fc1deda 100644 (file)
@@ -1096,6 +1096,8 @@ static inline void io_prep_async_work(struct io_kiocb *req,
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
 
+       io_req_init_async(req);
+
        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file)
                        io_wq_hash_work(&req->work, file_inode(req->file));
@@ -1104,7 +1106,6 @@ static inline void io_prep_async_work(struct io_kiocb *req,
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
 
-       io_req_init_async(req);
        io_req_work_grab_env(req, def);
 
        *link = io_prep_linked_timeout(req);
@@ -1274,6 +1275,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
        if (cqe) {
                clear_bit(0, &ctx->sq_check_overflow);
                clear_bit(0, &ctx->cq_check_overflow);
+               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
        }
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
        io_cqring_ev_posted(ctx);
@@ -1311,6 +1313,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
                if (list_empty(&ctx->cq_overflow_list)) {
                        set_bit(0, &ctx->sq_check_overflow);
                        set_bit(0, &ctx->cq_check_overflow);
+                       ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
                }
                req->flags |= REQ_F_OVERFLOW;
                refcount_inc(&req->refs);
@@ -3551,6 +3554,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (req->flags & REQ_F_NEED_CLEANUP)
                return 0;
 
+       io->msg.msg.msg_name = &io->msg.addr;
        io->msg.iov = io->msg.fast_iov;
        ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
                                        &io->msg.iov);
@@ -3732,6 +3736,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 
 static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
 {
+       io->msg.msg.msg_name = &io->msg.addr;
        io->msg.iov = io->msg.fast_iov;
 
 #ifdef CONFIG_COMPAT
@@ -4072,6 +4077,29 @@ struct io_poll_table {
        int error;
 };
 
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+{
+       struct task_struct *tsk = req->task;
+       struct io_ring_ctx *ctx = req->ctx;
+       int ret, notify = TWA_RESUME;
+
+       /*
+        * SQPOLL kernel thread doesn't need notification, just a wakeup.
+        * If we're not using an eventfd, then TWA_RESUME is always fine,
+        * as we won't have dependencies between request completions for
+        * other kernel wait conditions.
+        */
+       if (ctx->flags & IORING_SETUP_SQPOLL)
+               notify = 0;
+       else if (ctx->cq_ev_fd)
+               notify = TWA_SIGNAL;
+
+       ret = task_work_add(tsk, cb, notify);
+       if (!ret)
+               wake_up_process(tsk);
+       return ret;
+}
+
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
                           __poll_t mask, task_work_func_t func)
 {
@@ -4095,13 +4123,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
         * of executing it. We can't safely execute it anyway, as we may not
         * have the needed state needed for it anyway.
         */
-       ret = task_work_add(tsk, &req->task_work, true);
+       ret = io_req_task_work_add(req, &req->task_work);
        if (unlikely(ret)) {
                WRITE_ONCE(poll->canceled, true);
                tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, true);
+               task_work_add(tsk, &req->task_work, 0);
+               wake_up_process(tsk);
        }
-       wake_up_process(tsk);
        return 1;
 }
 
@@ -6057,9 +6085,9 @@ static int io_sq_thread(void *data)
                        }
 
                        /* Tell userspace we may need a wakeup call */
+                       spin_lock_irq(&ctx->completion_lock);
                        ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
-                       /* make sure to read SQ tail after writing flags */
-                       smp_mb();
+                       spin_unlock_irq(&ctx->completion_lock);
 
                        to_submit = io_sqring_entries(ctx);
                        if (!to_submit || ret == -EBUSY) {
@@ -6077,13 +6105,17 @@ static int io_sq_thread(void *data)
                                schedule();
                                finish_wait(&ctx->sqo_wait, &wait);
 
+                               spin_lock_irq(&ctx->completion_lock);
                                ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                               spin_unlock_irq(&ctx->completion_lock);
                                ret = 0;
                                continue;
                        }
                        finish_wait(&ctx->sqo_wait, &wait);
 
+                       spin_lock_irq(&ctx->completion_lock);
                        ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                       spin_unlock_irq(&ctx->completion_lock);
                }
 
                mutex_lock(&ctx->uring_lock);
@@ -6182,15 +6214,23 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        do {
                prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
+               /* make sure we run task_work before checking for signals */
                if (current->task_works)
                        task_work_run();
-               if (io_should_wake(&iowq, false))
-                       break;
-               schedule();
                if (signal_pending(current)) {
+                       if (current->jobctl & JOBCTL_TASK_WORK) {
+                               spin_lock_irq(&current->sighand->siglock);
+                               current->jobctl &= ~JOBCTL_TASK_WORK;
+                               recalc_sigpending();
+                               spin_unlock_irq(&current->sighand->siglock);
+                               continue;
+                       }
                        ret = -EINTR;
                        break;
                }
+               if (io_should_wake(&iowq, false))
+                       break;
+               schedule();
        } while (1);
        finish_wait(&ctx->wait, &iowq.wq);
 
@@ -6662,6 +6702,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                for (i = 0; i < nr_tables; i++)
                        kfree(ctx->file_data->table[i].files);
 
+               percpu_ref_exit(&ctx->file_data->refs);
                kfree(ctx->file_data->table);
                kfree(ctx->file_data);
                ctx->file_data = NULL;
@@ -6814,8 +6855,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                        }
                        table->files[index] = file;
                        err = io_sqe_file_register(ctx, file, i);
-                       if (err)
+                       if (err) {
+                               fput(file);
                                break;
+                       }
                }
                nr_args--;
                done++;
@@ -7311,9 +7354,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
        io_mem_free(ctx->sq_sqes);
 
        percpu_ref_exit(&ctx->refs);
-       if (ctx->account_mem)
-               io_unaccount_mem(ctx->user,
-                               ring_pages(ctx->sq_entries, ctx->cq_entries));
        free_uid(ctx->user);
        put_cred(ctx->creds);
        kfree(ctx->cancel_hash);
@@ -7398,6 +7438,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
        if (ctx->rings)
                io_cqring_overflow_flush(ctx, true);
        idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
+
+       /*
+        * Do this upfront, so we won't have a grace period where the ring
+        * is closed but resources aren't reaped yet. This can cause
+        * spurious failure in setting up a new ring.
+        */
+       if (ctx->account_mem)
+               io_unaccount_mem(ctx->user,
+                               ring_pages(ctx->sq_entries, ctx->cq_entries));
+
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
        queue_work(system_wq, &ctx->exit_work);
 }
@@ -7457,6 +7507,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                        if (list_empty(&ctx->cq_overflow_list)) {
                                clear_bit(0, &ctx->sq_check_overflow);
                                clear_bit(0, &ctx->cq_check_overflow);
+                               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
                        }
                        spin_unlock_irq(&ctx->completion_lock);
 
index bb3d2c32664ad983604c0dc4162d1a2547119508..cce2510b2ccaaf10793d382094e04f845b98a91d 100644 (file)
@@ -7912,9 +7912,14 @@ nfs4_state_start_net(struct net *net)
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        int ret;
 
-       ret = nfs4_state_create_net(net);
+       ret = get_nfsdfs(net);
        if (ret)
                return ret;
+       ret = nfs4_state_create_net(net);
+       if (ret) {
+               mntput(nn->nfsd_mnt);
+               return ret;
+       }
        locks_start_grace(net, &nn->nfsd4_manager);
        nfsd4_client_tracking_init(net);
        if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
@@ -7984,6 +7989,7 @@ nfs4_state_shutdown_net(struct net *net)
 
        nfsd4_client_tracking_exit(net);
        nfs4_state_destroy_net(net);
+       mntput(nn->nfsd_mnt);
 }
 
 void
index b68e96681522ec22e416776fb4ee95a6090a4207..cd05732f8eaa84672a85a867a5a8414a52bf65ac 100644 (file)
@@ -1335,6 +1335,7 @@ void nfsd_client_rmdir(struct dentry *dentry)
        WARN_ON_ONCE(ret);
        fsnotify_rmdir(dir, dentry);
        d_delete(dentry);
+       dput(dentry);
        inode_unlock(dir);
 }
 
@@ -1424,6 +1425,18 @@ static struct file_system_type nfsd_fs_type = {
 };
 MODULE_ALIAS_FS("nfsd");
 
+int get_nfsdfs(struct net *net)
+{
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+       struct vfsmount *mnt;
+
+       mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
+       if (IS_ERR(mnt))
+               return PTR_ERR(mnt);
+       nn->nfsd_mnt = mnt;
+       return 0;
+}
+
 #ifdef CONFIG_PROC_FS
 static int create_proc_exports_entry(void)
 {
@@ -1451,7 +1464,6 @@ unsigned int nfsd_net_id;
 static __net_init int nfsd_init_net(struct net *net)
 {
        int retval;
-       struct vfsmount *mnt;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        retval = nfsd_export_init(net);
@@ -1478,16 +1490,8 @@ static __net_init int nfsd_init_net(struct net *net)
        init_waitqueue_head(&nn->ntf_wq);
        seqlock_init(&nn->boot_lock);
 
-       mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
-       if (IS_ERR(mnt)) {
-               retval = PTR_ERR(mnt);
-               goto out_mount_err;
-       }
-       nn->nfsd_mnt = mnt;
        return 0;
 
-out_mount_err:
-       nfsd_reply_cache_shutdown(nn);
 out_drc_error:
        nfsd_idmap_shutdown(net);
 out_idmap_error:
@@ -1500,7 +1504,6 @@ static __net_exit void nfsd_exit_net(struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       mntput(nn->nfsd_mnt);
        nfsd_reply_cache_shutdown(nn);
        nfsd_idmap_shutdown(net);
        nfsd_export_shutdown(net);
index 36cdd81b6688a0c3563911606a3bd3fa45f70929..57c832d1b30fd8dd0aea1998f18b1c547cfa7eb1 100644 (file)
@@ -90,6 +90,8 @@ void          nfsd_destroy(struct net *net);
 
 bool           i_am_nfsd(void);
 
+int get_nfsdfs(struct net *);
+
 struct nfsdfs_client {
        struct kref cl_ref;
        void (*cl_release)(struct kref *kref);
@@ -100,6 +102,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn,
                struct nfsdfs_client *ncl, u32 id, const struct tree_descr *);
 void nfsd_client_rmdir(struct dentry *dentry);
 
+
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
 #ifdef CONFIG_NFSD_V2_ACL
 extern const struct svc_version nfsd_acl_version2;
index c3fbab1753ec8edd2fb0c593dc1aeaf6a23b6c61..d22a056da477a0b526db3f1672387d9eb7ec5979 100644 (file)
@@ -1226,6 +1226,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
                iap->ia_mode = 0;
        iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
 
+       if (!IS_POSIXACL(dirp))
+               iap->ia_mode &= ~current_umask();
+
        err = 0;
        host_err = 0;
        switch (type) {
@@ -1458,6 +1461,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
                goto out;
        }
 
+       if (!IS_POSIXACL(dirp))
+               iap->ia_mode &= ~current_umask();
+
        host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
        if (host_err < 0) {
                fh_drop_write(fhp);
index 42c5128c7d1c764f8eb79a9b68a7c80be2b85587..6c1166ccdaea5759bd1548cc252416eee9818028 100644 (file)
@@ -566,8 +566,9 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
                goto out;
 
        /* don't even try if the size is too large */
-       if (count > KMALLOC_MAX_SIZE)
-               return -ENOMEM;
+       error = -ENOMEM;
+       if (count >= KMALLOC_MAX_SIZE)
+               goto out;
 
        if (write) {
                kbuf = memdup_user_nul(ubuf, count);
@@ -576,7 +577,6 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
                        goto out;
                }
        } else {
-               error = -ENOMEM;
                kbuf = kzalloc(count, GFP_KERNEL);
                if (!kbuf)
                        goto out;
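Illustrative sketch: the proc_sys_call_handler() hunk above folds the oversized-count case into the function's single "goto out" error path instead of returning directly. The idiom, standalone:

/* Single-exit error handling: set the error code, jump to one cleanup label. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int handler(size_t count, size_t limit)
{
        char *kbuf = NULL;
        int error;

        error = -ENOMEM;
        if (count >= limit)             /* reject oversized requests up front */
                goto out;

        kbuf = calloc(1, count);
        if (!kbuf)
                goto out;

        error = 0;                      /* ... do the real work with kbuf ... */
out:
        free(kbuf);                     /* free(NULL) is a no-op */
        return error;
}

int main(void)
{
        printf("%d %d\n", handler(16, 1024), handler(4096, 1024));
        return 0;
}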
index bbfa9b12b15eb77fc90e2c66202d42c4cb3825cc..4fb797822567a6b3500f61820d7b76ed22942635 100644 (file)
@@ -419,28 +419,42 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
        return ret;
 }
 
-ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
-                  loff_t *pos)
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
 {
+       mm_segment_t old_fs = get_fs();
+       ssize_t ret;
+
+       if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
+               return -EINVAL;
+       if (!(file->f_mode & FMODE_CAN_READ))
+               return -EINVAL;
+
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       set_fs(KERNEL_DS);
        if (file->f_op->read)
-               return file->f_op->read(file, buf, count, pos);
+               ret = file->f_op->read(file, (void __user *)buf, count, pos);
        else if (file->f_op->read_iter)
-               return new_sync_read(file, buf, count, pos);
+               ret = new_sync_read(file, (void __user *)buf, count, pos);
        else
-               return -EINVAL;
+               ret = -EINVAL;
+       set_fs(old_fs);
+       if (ret > 0) {
+               fsnotify_access(file);
+               add_rchar(current, ret);
+       }
+       inc_syscr(current);
+       return ret;
 }
 
 ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
 {
-       mm_segment_t old_fs;
-       ssize_t result;
+       ssize_t ret;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       /* The cast to a user pointer is valid due to the set_fs() */
-       result = vfs_read(file, (void __user *)buf, count, pos);
-       set_fs(old_fs);
-       return result;
+       ret = rw_verify_area(READ, file, pos, count);
+       if (ret)
+               return ret;
+       return __kernel_read(file, buf, count, pos);
 }
 EXPORT_SYMBOL(kernel_read);
 
@@ -456,17 +470,22 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
                return -EFAULT;
 
        ret = rw_verify_area(READ, file, pos, count);
-       if (!ret) {
-               if (count > MAX_RW_COUNT)
-                       count =  MAX_RW_COUNT;
-               ret = __vfs_read(file, buf, count, pos);
-               if (ret > 0) {
-                       fsnotify_access(file);
-                       add_rchar(current, ret);
-               }
-               inc_syscr(current);
-       }
+       if (ret)
+               return ret;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
 
+       if (file->f_op->read)
+               ret = file->f_op->read(file, buf, count, pos);
+       else if (file->f_op->read_iter)
+               ret = new_sync_read(file, buf, count, pos);
+       else
+               ret = -EINVAL;
+       if (ret > 0) {
+               fsnotify_access(file);
+               add_rchar(current, ret);
+       }
+       inc_syscr(current);
        return ret;
 }
 
@@ -488,23 +507,15 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
        return ret;
 }
 
-static ssize_t __vfs_write(struct file *file, const char __user *p,
-                          size_t count, loff_t *pos)
-{
-       if (file->f_op->write)
-               return file->f_op->write(file, p, count, pos);
-       else if (file->f_op->write_iter)
-               return new_sync_write(file, p, count, pos);
-       else
-               return -EINVAL;
-}
-
+/* caller is responsible for file_start_write/file_end_write */
 ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
 {
        mm_segment_t old_fs;
        const char __user *p;
        ssize_t ret;
 
+       if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
+               return -EBADF;
        if (!(file->f_mode & FMODE_CAN_WRITE))
                return -EINVAL;
 
@@ -513,7 +524,12 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
        p = (__force const char __user *)buf;
        if (count > MAX_RW_COUNT)
                count =  MAX_RW_COUNT;
-       ret = __vfs_write(file, p, count, pos);
+       if (file->f_op->write)
+               ret = file->f_op->write(file, p, count, pos);
+       else if (file->f_op->write_iter)
+               ret = new_sync_write(file, p, count, pos);
+       else
+               ret = -EINVAL;
        set_fs(old_fs);
        if (ret > 0) {
                fsnotify_modify(file);
@@ -522,21 +538,20 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
        inc_syscw(current);
        return ret;
 }
-EXPORT_SYMBOL(__kernel_write);
 
 ssize_t kernel_write(struct file *file, const void *buf, size_t count,
                            loff_t *pos)
 {
-       mm_segment_t old_fs;
-       ssize_t res;
+       ssize_t ret;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       /* The cast to a user pointer is valid due to the set_fs() */
-       res = vfs_write(file, (__force const char __user *)buf, count, pos);
-       set_fs(old_fs);
+       ret = rw_verify_area(WRITE, file, pos, count);
+       if (ret)
+               return ret;
 
-       return res;
+       file_start_write(file);
+       ret =  __kernel_write(file, buf, count, pos);
+       file_end_write(file);
+       return ret;
 }
 EXPORT_SYMBOL(kernel_write);
 
@@ -552,19 +567,23 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
                return -EFAULT;
 
        ret = rw_verify_area(WRITE, file, pos, count);
-       if (!ret) {
-               if (count > MAX_RW_COUNT)
-                       count =  MAX_RW_COUNT;
-               file_start_write(file);
-               ret = __vfs_write(file, buf, count, pos);
-               if (ret > 0) {
-                       fsnotify_modify(file);
-                       add_wchar(current, ret);
-               }
-               inc_syscw(current);
-               file_end_write(file);
+       if (ret)
+               return ret;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       file_start_write(file);
+       if (file->f_op->write)
+               ret = file->f_op->write(file, buf, count, pos);
+       else if (file->f_op->write_iter)
+               ret = new_sync_write(file, buf, count, pos);
+       else
+               ret = -EINVAL;
+       if (ret > 0) {
+               fsnotify_modify(file);
+               add_wchar(current, ret);
        }
-
+       inc_syscw(current);
+       file_end_write(file);
        return ret;
 }
 
index b43f0e8f43f2e0d8f40b11245abd57b50b5035ab..9ed90368ab311cd2196760459430ff07a87040c4 100644 (file)
@@ -671,7 +671,8 @@ xlog_cil_push_work(
        /*
         * Wake up any background push waiters now this context is being pushed.
         */
-       wake_up_all(&ctx->push_wait);
+       if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+               wake_up_all(&cil->xc_push_wait);
 
        /*
         * Check if we've anything to push. If there is nothing, then we don't
@@ -743,13 +744,12 @@ xlog_cil_push_work(
 
        /*
         * initialise the new context and attach it to the CIL. Then attach
-        * the current context to the CIL committing lsit so it can be found
+        * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
-       init_waitqueue_head(&new_ctx->push_wait);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;
@@ -937,7 +937,7 @@ xlog_cil_push_background(
        if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(cil->xc_ctx->space_used < log->l_logsize);
-               xlog_wait(&cil->xc_ctx->push_wait, &cil->xc_push_lock);
+               xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }
 
@@ -1216,12 +1216,12 @@ xlog_cil_init(
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
+       init_waitqueue_head(&cil->xc_push_wait);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);
 
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
-       init_waitqueue_head(&ctx->push_wait);
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
index ec22c7a3867f19fc80c4b860b8f02fe2524cb687..75a62870b63af00d27be289fe54d6bcbc7541204 100644 (file)
@@ -240,7 +240,6 @@ struct xfs_cil_ctx {
        struct xfs_log_vec      *lv_chain;      /* logvecs being pushed */
        struct list_head        iclog_entry;
        struct list_head        committing;     /* ctx committing list */
-       wait_queue_head_t       push_wait;      /* background push throttle */
        struct work_struct      discard_endio_work;
 };
 
@@ -274,6 +273,7 @@ struct xfs_cil {
        wait_queue_head_t       xc_commit_wait;
        xfs_lsn_t               xc_current_sequence;
        struct work_struct      xc_push_work;
+       wait_queue_head_t       xc_push_wait;   /* background push throttle */
 } ____cacheline_aligned_in_smp;
 
 /*
index 56527c85d12222f58cb741c1f857cd644ddfa523..088c1ded271486a2549758943e84ad5275dd1376 100644 (file)
@@ -29,8 +29,8 @@ struct alg_sock {
 
        struct sock *parent;
 
-       unsigned int refcnt;
-       unsigned int nokey_refcnt;
+       atomic_t refcnt;
+       atomic_t nokey_refcnt;
 
        const struct af_alg_type *type;
        void *private;
index 4671fbf28842718fa74c9664dee74f8e129dbb7e..7f475d59a0974f17d7a9765c643b798a1dc6ee83 100644 (file)
@@ -18,8 +18,7 @@
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#if !defined(__ASSEMBLY__) && \
-       (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
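Illustrative check (MY_GENMASK_ULL is a simplified stand-in, not the kernel macro): the bits.h comment above states that GENMASK_ULL(39, 21) is 0x000000ffffe00000; this prints exactly that value.

/* Verify the contiguous-bitmask example given in the bits.h comment. */
#include <stdio.h>

#define MY_GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1ULL))

int main(void)
{
        printf("GENMASK_ULL(39, 21) = 0x%016llx\n",
               (unsigned long long)MY_GENMASK_ULL(39, 21));
        return 0;
}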
index 8fd900998b4e2e7cca1a44a47e3ceebb496331d5..57241417ff2f86229a01e6f568bfa7a57e2aaaf8 100644 (file)
@@ -590,6 +590,7 @@ struct request_queue {
        u64                     write_hints[BLK_MAX_WRITE_HINTS];
 };
 
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
 #define QUEUE_FLAG_STOPPED     0       /* queue is stopped */
 #define QUEUE_FLAG_DYING       1       /* queue being torn down */
 #define QUEUE_FLAG_NOMERGES     3      /* disable merge attempts */
index 4052d649f36d05d732bd69d8ca784ac2be3b1275..47d5b0c708c98bffcbad9a915725be64b7809139 100644 (file)
@@ -33,7 +33,7 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
                         union bpf_attr __user *uattr);
 int netns_bpf_prog_attach(const union bpf_attr *attr,
                          struct bpf_prog *prog);
-int netns_bpf_prog_detach(const union bpf_attr *attr);
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 int netns_bpf_link_create(const union bpf_attr *attr,
                          struct bpf_prog *prog);
 #else
@@ -49,7 +49,8 @@ static inline int netns_bpf_prog_attach(const union bpf_attr *attr,
        return -EOPNOTSUPP;
 }
 
-static inline int netns_bpf_prog_detach(const union bpf_attr *attr)
+static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
+                                       enum bpf_prog_type ptype)
 {
        return -EOPNOTSUPP;
 }
index 07052d44bca1c692cb6f360dd614c4c6ce854016..9750a1902ee503814ce83e6b890837b1c673bdc4 100644 (file)
@@ -1543,13 +1543,16 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
 #if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
+int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
+                        struct bpf_prog *old, u32 which);
 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 void sock_map_unhash(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
 #else
 static inline int sock_map_prog_update(struct bpf_map *map,
-                                      struct bpf_prog *prog, u32 which)
+                                      struct bpf_prog *prog,
+                                      struct bpf_prog *old, u32 which)
 {
        return -EOPNOTSUPP;
 }
@@ -1559,6 +1562,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
 {
        return -EINVAL;
 }
+
+static inline int sock_map_prog_detach(const union bpf_attr *attr,
+                                      enum bpf_prog_type ptype)
+{
+       return -EOPNOTSUPP;
+}
 #endif /* CONFIG_BPF_STREAM_PARSER */
 
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
index 5c1ea99b480fa7164766454be9871b7d247d0d40..8b81fbb4497cf156bb977043a84190e0a3ef1690 100644 (file)
@@ -82,6 +82,11 @@ static inline bool btf_type_is_int(const struct btf_type *t)
        return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
 }
 
+static inline bool btf_type_is_small_int(const struct btf_type *t)
+{
+       return btf_type_is_int(t) && t->size <= sizeof(u64);
+}
+
 static inline bool btf_type_is_enum(const struct btf_type *t)
 {
        return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
index 52661155f85fd935f930c9fe1c3990257c8302a1..fee0b5547cd0a0e32e3b0cc713e93d225c7bda60 100644 (file)
@@ -790,7 +790,9 @@ struct sock_cgroup_data {
        union {
 #ifdef __LITTLE_ENDIAN
                struct {
-                       u8      is_data;
+                       u8      is_data : 1;
+                       u8      no_refcnt : 1;
+                       u8      unused : 6;
                        u8      padding;
                        u16     prioidx;
                        u32     classid;
@@ -800,7 +802,9 @@ struct sock_cgroup_data {
                        u32     classid;
                        u16     prioidx;
                        u8      padding;
-                       u8      is_data;
+                       u8      unused : 6;
+                       u8      no_refcnt : 1;
+                       u8      is_data : 1;
                } __packed;
 #endif
                u64             val;
index 4598e4da6b1b72f2d36f5e9fd04acdea13195141..618838c48313cd82a9ce917378a5508396b0dcee 100644 (file)
@@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock;
 
 void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
@@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
         */
        v = READ_ONCE(skcd->val);
 
-       if (v & 1)
+       if (v & 3)
                return &cgrp_dfl_root.cgrp;
 
        return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
@@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 #else  /* CONFIG_CGROUP_DATA */
 
 static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
 static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
 
 #endif /* CONFIG_CGROUP_DATA */
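
The union above packs either a struct cgroup pointer or (prioidx, classid) data into a single u64, using the low bits of the pointer as flags; with the new no_refcnt bit, sock_cgroup_ptr() must fall back to the default root cgroup whenever either low bit is set, hence the widened `v & 3` test. A small userspace sketch of the same low-bit tagging idea, purely illustrative (the names and struct below are not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_IS_DATA   0x1UL  /* bit 0: value holds prioidx/classid, not a pointer */
#define TAG_NO_REFCNT 0x2UL  /* bit 1: pointer is borrowed, no reference held */

/* Heap and stack objects are at least 4-byte aligned, so the two low bits of
 * their addresses are free to carry flags, mirroring is_data/no_refcnt. */
static void *untag(uint64_t v)
{
	return (v & 3) ? NULL : (void *)(uintptr_t)v;  /* mirrors the v & 3 test */
}

int main(void)
{
	long cgroup_stub;                              /* stands in for a struct cgroup */
	uint64_t v = (uintptr_t)&cgroup_stub;

	assert(untag(v) == &cgroup_stub);              /* untagged pointer: usable */
	assert(untag(v | TAG_NO_REFCNT) == NULL);      /* tagged: caller falls back */
	assert(untag(TAG_IS_DATA | (42ULL << 32)) == NULL);
	puts("ok");
	return 0;
}
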
index 1c74464c80c65320a898a6f359790063eb15c3d7..0b1dc61f3955c545e5fecae15aebf321d61855f4 100644 (file)
@@ -11,7 +11,7 @@
                     + __GNUC_PATCHLEVEL__)
 
 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
-#if GCC_VERSION < 40800
+#if GCC_VERSION < 40900
 # error Sorry, your compiler is too old - please upgrade it.
 #endif
 
index c3bf7710f69acb8425228ef41568ec04a2709f9a..01dd58c74d808a67dc80b3a821f55ed387d170a8 100644 (file)
@@ -252,32 +252,8 @@ struct ftrace_likely_data {
  * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
  *                            non-scalar types unchanged.
  */
-#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
 /*
- * We build this out of a couple of helper macros in a vain attempt to
- * help you keep your lunch down while reading it.
- */
-#define __pick_scalar_type(x, type, otherwise)                                 \
-       __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
-
-/*
- * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
- * so we include the naked type here as well as the signed/unsigned variants.
- */
-#define __pick_integer_type(x, type, otherwise)                                        \
-       __pick_scalar_type(x, type,                                             \
-               __pick_scalar_type(x, unsigned type,                            \
-                       __pick_scalar_type(x, signed type, otherwise)))
-
-#define __unqual_scalar_typeof(x) typeof(                                      \
-       __pick_integer_type(x, char,                                            \
-               __pick_integer_type(x, short,                                   \
-                       __pick_integer_type(x, int,                             \
-                               __pick_integer_type(x, long,                    \
-                                       __pick_integer_type(x, long long, x))))))
-#else
-/*
- * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
  * is not type-compatible with 'signed char', and we define a separate case.
  */
 #define __scalar_type_to_expr_cases(type)                              \
@@ -293,7 +269,6 @@ struct ftrace_likely_data {
                         __scalar_type_to_expr_cases(long),             \
                         __scalar_type_to_expr_cases(long long),        \
                         default: (x)))
-#endif
 
 /* Is this type a native word size -- useful for atomic operations */
 #define __native_word(t) \
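
With the pre-GCC-4.9 fallback removed (see the compiler-gcc.h version bump above), __unqual_scalar_typeof() is always built on C11 _Generic: the controlling expression undergoes lvalue conversion, so a qualified scalar such as `volatile int` selects the unqualified case, while non-scalars fall through to `default:` unchanged. A rough userspace illustration of the same pattern, reduced to a handful of types and not the kernel macro verbatim:

#include <stdio.h>

/* Map a (possibly qualified) scalar expression to an unqualified value of the
 * same type, so typeof() of the result loses const/volatile. */
#define unqual_scalar_typeof(x) __typeof__(          \
	_Generic((x),                                \
		 int:           (int)0,              \
		 unsigned int:  (unsigned int)0,     \
		 long:          (long)0,             \
		 unsigned long: (unsigned long)0,    \
		 default:       (x)))

int main(void)
{
	volatile int reg = 5;

	/* 'tmp' is plain int, not volatile int, so the compiler may cache it. */
	unqual_scalar_typeof(reg) tmp = reg;

	tmp += 1;
	printf("%d\n", tmp);
	return 0;
}
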
index cdfa400f89b3d38ad176f0dea420ef9ef7617dc0..5184735a0fe8eb93dbd3dbbebdad8a92eb0affad 100644 (file)
@@ -85,4 +85,5 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
index 78f677cf45ab6937c0e12bde4f1fe21a3f68def2..a33ed3954ed46583a60331abe8f31369d8711536 100644 (file)
@@ -461,6 +461,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -571,6 +572,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
 {
        return 0;
 }
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       return false;
+}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
        return 0;
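
dma_need_sync() lets a driver ask, per mapping, whether dma_sync_single_for_{cpu,device}() calls are actually required, so fast paths (the AF_XDP buffer pool conversion further down is the user in this merge) can skip them when the device is coherent and no bounce buffering is involved. A hedged sketch of how a driver might cache the answer at setup time; the ring structure and function names are made up for illustration:

#include <linux/dma-mapping.h>

struct my_rx_ring {            /* hypothetical driver state */
	struct device *dev;
	void *buf;
	dma_addr_t dma;
	size_t len;
	bool need_sync;
};

static int my_rx_ring_map(struct my_rx_ring *ring)
{
	ring->dma = dma_map_single(ring->dev, ring->buf, ring->len,
				   DMA_FROM_DEVICE);
	if (dma_mapping_error(ring->dev, ring->dma))
		return -ENOMEM;

	/* One query at setup time instead of a sync attempt per packet. */
	ring->need_sync = dma_need_sync(ring->dev, ring->dma);
	return 0;
}

static void my_rx_complete(struct my_rx_ring *ring)
{
	if (ring->need_sync)
		dma_sync_single_for_cpu(ring->dev, ring->dma, ring->len,
					DMA_FROM_DEVICE);
}
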
index 2593777236037afc2247ae39c25e492bf42dc791..0b0144752d780aec77f01a8bac8f055752fe6a12 100644 (file)
@@ -884,12 +884,12 @@ void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
 bool bpf_helper_changes_pkt_data(void *func);
 
-static inline bool bpf_dump_raw_ok(void)
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
 {
        /* Reconstruction of call-sites is dependent on kallsyms,
         * thus make dump the same restriction.
         */
-       return kallsyms_show_value() == 1;
+       return kallsyms_show_value(cred);
 }
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
index 3f881a892ea7469278ef799f44254a8f809ffd3f..f5abba86107d86d4342ec6788c6573cc69ccbfbd 100644 (file)
@@ -315,6 +315,7 @@ enum rw_hint {
 #define IOCB_SYNC              (1 << 5)
 #define IOCB_WRITE             (1 << 6)
 #define IOCB_NOWAIT            (1 << 7)
+#define IOCB_NOIO              (1 << 9)
 
 struct kiocb {
        struct file             *ki_filp;
@@ -1917,7 +1918,6 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              struct iovec *fast_pointer,
                              struct iovec **ret_pointer);
 
-extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -3033,6 +3033,7 @@ extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, lo
 extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
                                    enum kernel_read_file_id);
 extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
 extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
 extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
 extern struct file * open_exec(const char *);
index fe15f831841b4da7f2b6f2f7a9722311a7c876a0..9f732499ea88e8bd934288df4d57ca425765e713 100644 (file)
@@ -3333,13 +3333,17 @@ struct ieee80211_multiple_bssid_configuration {
 #define WLAN_AKM_SUITE_TDLS                    SUITE(0x000FAC, 7)
 #define WLAN_AKM_SUITE_SAE                     SUITE(0x000FAC, 8)
 #define WLAN_AKM_SUITE_FT_OVER_SAE             SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_AP_PEER_KEY             SUITE(0x000FAC, 10)
 #define WLAN_AKM_SUITE_8021X_SUITE_B           SUITE(0x000FAC, 11)
 #define WLAN_AKM_SUITE_8021X_SUITE_B_192       SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FT_8021X_SHA384         SUITE(0x000FAC, 13)
 #define WLAN_AKM_SUITE_FILS_SHA256             SUITE(0x000FAC, 14)
 #define WLAN_AKM_SUITE_FILS_SHA384             SUITE(0x000FAC, 15)
 #define WLAN_AKM_SUITE_FT_FILS_SHA256          SUITE(0x000FAC, 16)
 #define WLAN_AKM_SUITE_FT_FILS_SHA384          SUITE(0x000FAC, 17)
 #define WLAN_AKM_SUITE_OWE                     SUITE(0x000FAC, 18)
+#define WLAN_AKM_SUITE_FT_PSK_SHA384           SUITE(0x000FAC, 19)
+#define WLAN_AKM_SUITE_PSK_SHA384              SUITE(0x000FAC, 20)
 
 #define WLAN_MAX_KEY_LEN               32
 
index b05e855f1ddd4f2ea1cb0282ad754a7a50f826fe..41a518336673b496faf7ce0ea2a65068fe6814f2 100644 (file)
@@ -25,6 +25,8 @@
 #define VLAN_ETH_DATA_LEN      1500    /* Max. octets in payload        */
 #define VLAN_ETH_FRAME_LEN     1518    /* Max. octets in frame sans FCS */
 
+#define VLAN_MAX_DEPTH 8               /* Max. number of nested VLAN tags parsed */
+
 /*
  *     struct vlan_hdr - vlan header
  *     @h_vlan_TCI: priority and VLAN ID
@@ -577,10 +579,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
                                         int *depth)
 {
-       unsigned int vlan_depth = skb->mac_len;
+       unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
 
        /* if type is 802.1Q/AD then the header should already be
         * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
@@ -595,13 +597,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
                        vlan_depth = ETH_HLEN;
                }
                do {
-                       struct vlan_hdr *vh;
+                       struct vlan_hdr vhdr, *vh;
 
-                       if (unlikely(!pskb_may_pull(skb,
-                                                   vlan_depth + VLAN_HLEN)))
+                       vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+                       if (unlikely(!vh || !--parse_depth))
                                return 0;
 
-                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
                        type = vh->h_vlan_encapsulated_proto;
                        vlan_depth += VLAN_HLEN;
                } while (eth_type_vlan(type));
@@ -620,11 +621,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
 {
        return __vlan_get_protocol(skb, skb->protocol, NULL);
 }
 
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+       if (!skip_vlan)
+               /* VLAN acceleration strips the VLAN header from the skb and
+                * moves it to skb->vlan_proto
+                */
+               return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+       return vlan_get_protocol(skb);
+}
+
 static inline void vlan_set_encap_proto(struct sk_buff *skb,
                                        struct vlan_hdr *vhdr)
 {
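
skb_protocol() replaces the tc_skb_protocol() helper removed from net/pkt_sched.h further down: with skip_vlan=false it returns the outer protocol as the stack sees it (honouring VLAN acceleration), and with skip_vlan=true it walks up to VLAN_MAX_DEPTH nested tags via skb_header_pointer(), which also copes with non-linear skbs. A minimal sketch of a caller, roughly what the converted ECN and classifier code does rather than an excerpt from this merge:

#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/dsfield.h>

/* Classify by L3 protocol whether or not the VLAN tag was stripped by
 * acceleration or is still inline in the frame. */
static u8 example_dscp(const struct sk_buff *skb)
{
	switch (skb_protocol(skb, true)) {	/* true: look through VLAN tags */
	case htons(ETH_P_IP):
		return ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	case htons(ETH_P_IPV6):
		return ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
	default:
		return 0;
	}
}
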
index 1ecb6b45812c9dfd70d9eb77a48baf5eafef74d9..520858d126808019889e80d1be2a32863900ca64 100644 (file)
@@ -67,8 +67,15 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN062B", 0 },
        { "ELAN062C", 0 },
        { "ELAN062D", 0 },
+       { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */
+       { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */
        { "ELAN0631", 0 },
        { "ELAN0632", 0 },
+       { "ELAN0633", 0 }, /* Lenovo S145 */
+       { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */
+       { "ELAN0635", 0 }, /* Lenovo V1415-IIL */
+       { "ELAN0636", 0 }, /* Lenovo V1415-Dali */
+       { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */
        { "ELAN1000", 0 },
        { }
 };
index 98338dc6b5d275acdc80b2026ee6ac7111cb9f7d..481273f0c72d4256979ee0ba2b02a4b9629f4497 100644 (file)
@@ -18,6 +18,7 @@
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
                         2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
+struct cred;
 struct module;
 
 static inline int is_kernel_inittext(unsigned long addr)
@@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname);
 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
 
 /* How and when do we show kallsyms values? */
-extern int kallsyms_show_value(void);
+extern bool kallsyms_show_value(const struct cred *cred);
 
 #else /* !CONFIG_KALLSYMS */
 
@@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
        return -ERANGE;
 }
 
-static inline int kallsyms_show_value(void)
+static inline bool kallsyms_show_value(const struct cred *cred)
 {
        return false;
 }
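
kallsyms_show_value() now takes the credentials saved when the consumer was opened, so the kptr_restrict decision is made against the opener rather than whoever triggers the read later; bpf_dump_raw_ok() in filter.h above is converted the same way. A hedged example of a seq_file handler passing the opener's credentials (the handler itself is illustrative):

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>

static void example_show_addr(struct seq_file *m, unsigned long addr)
{
	/* Decide on the opening task's credentials, not the current reader's. */
	if (kallsyms_show_value(m->file->f_cred))
		seq_printf(m, "%px\n", (void *)addr);
	else
		seq_puts(m, "0000000000000000\n");
}
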
index 529116b0cabe2879ff95ca027cd8848394795f00..477b8b7c908f8cdbae88139a9b239fbb8def95de 100644 (file)
@@ -176,6 +176,17 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
                           char *remcom_out_buffer,
                           struct pt_regs *regs);
 
+/**
+ *     kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML
+ *                                  packets.
+ *     @remcom_in_buffer: The buffer of the packet we have read.
+ *     @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ */
+
+extern void
+kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+                          char *remcom_out_buffer);
+
 /**
  *     kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
  *     @ignored: This parameter is only here to match the prototype.
@@ -314,6 +325,7 @@ extern int kgdb_hex2mem(char *buf, char *mem, int count);
 
 extern int kgdb_isremovedbreak(unsigned long addr);
 extern void kgdb_schedule_breakpoint(void);
+extern int kgdb_has_hit_break(unsigned long addr);
 
 extern int
 kgdb_handle_exception(int ex_vector, int signo, int err_code,
index 6791813cd439c085ea9703d30b176fc08147f9b0..af998f93d256073e03e8adef0fc3d69a8cc9ecd8 100644 (file)
@@ -150,7 +150,7 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
         size_t buffer_size)
 LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
 LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
-LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
 LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
         struct kernfs_node *kn)
 LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
@@ -360,7 +360,7 @@ LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
         unsigned long flags)
 LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
 LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
-        unsigned perm)
+        enum key_need_perm need_perm)
 LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
 #endif /* CONFIG_KEYS */
 
index 13c0e4556eda941f132b55b3f7f23e6ae58fb9b2..1e6ca716635a902cb1a7056951571e684e102fc6 100644 (file)
@@ -147,6 +147,7 @@ enum {
        MLX5_REG_MCDA            = 0x9063,
        MLX5_REG_MCAM            = 0x907f,
        MLX5_REG_MIRC            = 0x9162,
+       MLX5_REG_SBCAM           = 0xB01F,
        MLX5_REG_RESOURCE_DUMP   = 0xC000,
 };
 
index ca1887dd04231b254f3e91944158382b88104021..073b79eacc9918715eec8fe6a5c529397b828cfe 100644 (file)
@@ -9960,6 +9960,34 @@ struct mlx5_ifc_pptb_reg_bits {
        u8         untagged_buff[0x4];
 };
 
+struct mlx5_ifc_sbcam_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         feature_group[0x8];
+       u8         reserved_at_10[0x8];
+       u8         access_reg_group[0x8];
+
+       u8         reserved_at_20[0x20];
+
+       u8         sb_access_reg_cap_mask[4][0x20];
+
+       u8         reserved_at_c0[0x80];
+
+       u8         sb_feature_cap_mask[4][0x20];
+
+       u8         reserved_at_1c0[0x40];
+
+       u8         cap_total_buffer_size[0x20];
+
+       u8         cap_cell_size[0x10];
+       u8         cap_max_pg_buffers[0x8];
+       u8         cap_num_pool_supported[0x8];
+
+       u8         reserved_at_240[0x8];
+       u8         cap_sbsr_stat_size[0x8];
+       u8         cap_max_tclass_data[0x8];
+       u8         cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
 struct mlx5_ifc_pbmc_reg_bits {
        u8         reserved_at_0[0x8];
        u8         local_port[0x8];
index 8d764aab29de9b729ff8209a6206c3d323830197..e14cbe444afcceb5bf54e910eaf452dd009968f7 100644 (file)
@@ -318,7 +318,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_LED_MAX                0x0f
 #define INPUT_DEVICE_ID_SND_MAX                0x07
 #define INPUT_DEVICE_ID_FF_MAX         0x7f
-#define INPUT_DEVICE_ID_SW_MAX         0x0f
+#define INPUT_DEVICE_ID_SW_MAX         0x10
 #define INPUT_DEVICE_ID_PROP_MAX       0x1f
 
 #define INPUT_DEVICE_ID_MATCH_BUS      1
index c79d83304e5293330f38ebb73a630daa499c9a6c..34c1c4f45288f750fbdbc2683e3b2bdf61525883 100644 (file)
@@ -2169,12 +2169,11 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
  */
 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
 {
-       struct pci_dev *bridge = pci_upstream_bridge(dev);
-
-       while (bridge) {
-               if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
-                       return bridge;
-               bridge = pci_upstream_bridge(bridge);
+       while (dev) {
+               if (pci_is_pcie(dev) &&
+                   pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+                       return dev;
+               dev = pci_upstream_bridge(dev);
        }
 
        return NULL;
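
The rewritten pcie_find_root_port() starts the walk at the device itself rather than at its upstream bridge, so a Root Port now finds itself, and the added pci_is_pcie() test keeps the pci_pcie_type() call valid for conventional PCI bridges met on the way up. A brief, hedged usage sketch (the surrounding driver context is made up):

#include <linux/pci.h>

static void example_log_root_port(struct pci_dev *pdev)
{
	/* Also works now when 'pdev' itself is the Root Port. */
	struct pci_dev *root = pcie_find_root_port(pdev);

	if (root)
		pci_info(pdev, "root port: %s\n", pci_name(root));
	else
		pci_info(pdev, "no PCIe root port found\n");
}
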
index 4f922afb607ac01d4122dc3641faa367884faaa6..45cf7b69d8521ddb7bdae96e5dd0ff01abb239c5 100644 (file)
@@ -155,7 +155,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * Loop over each sg element in the given sg_table object.
  */
 #define for_each_sgtable_sg(sgt, sg, i)                \
-       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+       for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
 
 /*
  * Loop over each sg element in the given *DMA mapped* sg_table object.
@@ -163,7 +163,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * of the each element.
  */
 #define for_each_sgtable_dma_sg(sgt, sg, i)    \
-       for_each_sg(sgt->sgl, sg, sgt->nents, i)
+       for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
 
 /**
  * sg_chain - Chain two sglists together
@@ -451,7 +451,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit.
  */
 #define for_each_sgtable_page(sgt, piter, pgoffset)    \
-       for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+       for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
 
 /**
  * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
@@ -465,7 +465,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * unit.
  */
 #define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset)     \
-       for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
+       for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
 
 
 /*
index fa067de9f1a94843f7402f2fd258d8b6339b59f0..d2b4204ba4d34984b344ae4b302de77bc9603c74 100644 (file)
@@ -19,6 +19,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING_BIT    21      /* switching to TRACED */
 #define JOBCTL_LISTENING_BIT   22      /* ptracer is listening for events */
 #define JOBCTL_TRAP_FREEZE_BIT 23      /* trap for cgroup freezer */
+#define JOBCTL_TASK_WORK_BIT   24      /* set by TWA_SIGNAL */
 
 #define JOBCTL_STOP_DEQUEUED   (1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING    (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,9 +29,10 @@ struct task_struct;
 #define JOBCTL_TRAPPING                (1UL << JOBCTL_TRAPPING_BIT)
 #define JOBCTL_LISTENING       (1UL << JOBCTL_LISTENING_BIT)
 #define JOBCTL_TRAP_FREEZE     (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_TASK_WORK       (1UL << JOBCTL_TASK_WORK_BIT)
 
 #define JOBCTL_TRAP_MASK       (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
-#define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+#define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK)
 
 extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
 extern void task_clear_jobctl_trapping(struct task_struct *task);
index 9fd550e7946a5889f8d94fa1a2c116e0041df031..791f4844efeb9d8d88cd71b74670b96b816c722d 100644 (file)
@@ -462,10 +462,104 @@ extern void uart_handle_cts_change(struct uart_port *uport,
 extern void uart_insert_char(struct uart_port *port, unsigned int status,
                 unsigned int overrun, unsigned int ch, unsigned int flag);
 
-extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch);
-extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch);
-extern void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags);
-extern int uart_handle_break(struct uart_port *port);
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+#define SYSRQ_TIMEOUT  (HZ * 5)
+
+bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       if (!port->sysrq)
+               return 0;
+
+       if (ch && time_before(jiffies, port->sysrq)) {
+               if (sysrq_mask()) {
+                       handle_sysrq(ch);
+                       port->sysrq = 0;
+                       return 1;
+               }
+               if (uart_try_toggle_sysrq(port, ch))
+                       return 1;
+       }
+       port->sysrq = 0;
+
+       return 0;
+}
+
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       if (!port->sysrq)
+               return 0;
+
+       if (ch && time_before(jiffies, port->sysrq)) {
+               if (sysrq_mask()) {
+                       port->sysrq_ch = ch;
+                       port->sysrq = 0;
+                       return 1;
+               }
+               if (uart_try_toggle_sysrq(port, ch))
+                       return 1;
+       }
+       port->sysrq = 0;
+
+       return 0;
+}
+
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+       int sysrq_ch;
+
+       if (!port->has_sysrq) {
+               spin_unlock_irqrestore(&port->lock, irqflags);
+               return;
+       }
+
+       sysrq_ch = port->sysrq_ch;
+       port->sysrq_ch = 0;
+
+       spin_unlock_irqrestore(&port->lock, irqflags);
+
+       if (sysrq_ch)
+               handle_sysrq(sysrq_ch);
+}
+#else  /* CONFIG_MAGIC_SYSRQ_SERIAL */
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       return 0;
+}
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       return 0;
+}
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+       spin_unlock_irqrestore(&port->lock, irqflags);
+}
+#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
+
+/*
+ * We do the SysRQ and SAK checking like this...
+ */
+static inline int uart_handle_break(struct uart_port *port)
+{
+       struct uart_state *state = port->state;
+
+       if (port->handle_break)
+               port->handle_break(port);
+
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+       if (port->has_sysrq && uart_console(port)) {
+               if (!port->sysrq) {
+                       port->sysrq = jiffies + SYSRQ_TIMEOUT;
+                       return 1;
+               }
+               port->sysrq = 0;
+       }
+#endif
+       if (port->flags & UPF_SAK)
+               do_SAK(state->port.tty);
+       return 0;
+}
 
 /*
  *     UART_ENABLE_MS - determine if port should enable modem status irqs
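
These newly inlined helpers share one pattern across UART drivers: a received BREAK arms port->sysrq with a five-second deadline (SYSRQ_TIMEOUT), and the next character inside that window is either dispatched immediately by uart_handle_sysrq_char() or stashed in port->sysrq_ch and run after the port lock is dropped by uart_unlock_and_check_sysrq(). The sketch below shows the deferred variant as a driver RX path might use it; it is illustrative only, and example_rx_ready()/example_rx_byte() are hypothetical stand-ins for real register accessors:

#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hypothetical hardware accessors, stand-ins for real FIFO/register reads. */
static bool example_rx_ready(struct uart_port *port) { return false; }
static unsigned int example_rx_byte(struct uart_port *port) { return 0; }

static void example_rx_irq(struct uart_port *port)
{
	unsigned long flags;
	unsigned int ch;

	spin_lock_irqsave(&port->lock, flags);

	while (example_rx_ready(port)) {
		ch = example_rx_byte(port);

		/* Stash a possible SysRq character instead of queueing it. */
		if (uart_prepare_sysrq_char(port, ch))
			continue;

		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}

	tty_flip_buffer_push(&port->state->port);

	/* Drops the lock, then runs any stashed SysRq outside of it. */
	uart_unlock_and_check_sysrq(port, flags);
}
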
index 08674cd14d5a5581d383daeb9c745ed61ff58e1d..1e9ed840b9fc101087c860dd6879de95afb950d3 100644 (file)
@@ -430,6 +430,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog,
                bpf_prog_put(prog);
 }
 
+static inline int psock_replace_prog(struct bpf_prog **pprog,
+                                    struct bpf_prog *prog,
+                                    struct bpf_prog *old)
+{
+       if (cmpxchg(pprog, old, prog) != old)
+               return -ENOENT;
+
+       if (old)
+               bpf_prog_put(old);
+
+       return 0;
+}
+
 static inline void psock_progs_drop(struct sk_psock_progs *progs)
 {
        psock_set_prog(&progs->msg_parser, NULL);
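
psock_replace_prog() gives the new sock_map_prog_detach() path compare-and-swap semantics: the slot is only changed if it still holds the program the caller expects, so a detach racing with a concurrent attach fails with -ENOENT instead of silently dropping someone else's program. A tiny userspace analogue of that semantics, purely illustrative:

#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Only install 'new' if the slot still holds the program we think is
 * attached; otherwise report -ENOENT, like psock_replace_prog(). */
static int replace_prog(_Atomic(void *) *slot, void *new, void *old)
{
	void *expected = old;

	if (!atomic_compare_exchange_strong(slot, &expected, new))
		return -ENOENT;
	return 0;
}

int main(void)
{
	int prog_a, prog_b;
	_Atomic(void *) slot = &prog_a;

	assert(replace_prog(&slot, NULL, &prog_b) == -ENOENT); /* wrong 'old' */
	assert(replace_prog(&slot, NULL, &prog_a) == 0);        /* detached */
	assert(atomic_load(&slot) == NULL);
	puts("ok");
	return 0;
}
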
index bd9a6a91c097e2c8c0806590353e44ee2a1b9d23..0fb93aafa4785e981aa92ef7e805495e28bb778c 100644 (file)
@@ -13,7 +13,10 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
        twork->func = func;
 }
 
-int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
+#define TWA_RESUME     1
+#define TWA_SIGNAL     2
+int task_work_add(struct task_struct *task, struct callback_head *twork, int);
+
 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
 void task_work_run(void);
 
index 07adfacd8088ac473f2b4a8d368312a6560e71a0..852d8fb36ab72390c5f417eaff9f9580eac69429 100644 (file)
@@ -400,7 +400,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co
 static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
                                                     struct sk_buff *skb)
 {
-       struct neighbour *n =  dst->ops->neigh_lookup(dst, skb, NULL);
+       struct neighbour *n = NULL;
+
+       /* The packets from tunnel devices (eg bareudp) may have only
+       /* Packets from tunnel devices (e.g. bareudp) may carry only
+        * metadata in skb's dst pointer, so the neigh_lookup op must
+        * be checked for NULL before it is called.
+        * metadata in the dst pointer of skb. Hence a pointer check of
+        * neigh_lookup is needed.
+        */
+       if (dst->ops->neigh_lookup)
+               n = dst->ops->neigh_lookup(dst, skb, NULL);
+
        return IS_ERR(n) ? NULL : n;
 }
 
index a7eba43fe4e4cab9deb228bbbff0f9dd6064ab5f..4b6e36288ddd3b356a262bae4d04912f4493c354 100644 (file)
@@ -372,7 +372,8 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
 }
 
 #ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog);
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+                                        struct bpf_prog *prog);
 #endif /* CONFIG_BPF_SYSCALL */
 
 #endif
index 74950663bb00d2be53d3cfb82bcb23ffc35e5324..6e5f1e1aa82267f53d7b4486fe85bfd204758a5e 100644 (file)
@@ -35,13 +35,6 @@ struct genl_info;
  *     do additional, common, filtering and return an error
  * @post_doit: called after an operation's doit callback, it may
  *     undo operations done by pre_doit, for example release locks
- * @mcast_bind: a socket bound to the given multicast group (which
- *     is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group.
- *     Note that unbind() will not be called symmetrically if the
- *     generic netlink family is removed while there are still open
- *     sockets.
- * @attrbuf: buffer to store parsed attributes (private)
  * @mcgrps: multicast groups used by this family
  * @n_mcgrps: number of multicast groups
  * @mcgrp_offset: starting number of multicast group IDs in this family
@@ -64,9 +57,6 @@ struct genl_family {
        void                    (*post_doit)(const struct genl_ops *ops,
                                             struct sk_buff *skb,
                                             struct genl_info *info);
-       int                     (*mcast_bind)(struct net *net, int group);
-       void                    (*mcast_unbind)(struct net *net, int group);
-       struct nlattr **        attrbuf;        /* private */
        const struct genl_ops * ops;
        const struct genl_multicast_group *mcgrps;
        unsigned int            n_ops;
index 0f0d1efe06ddcd1bcd67000bde1d7b88f0c74153..e1eaf17802889dbb8143ae9b9523de4614708ed5 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/ip.h>
 #include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 
 #include <net/inet_sock.h>
 #include <net/dsfield.h>
@@ -172,7 +173,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
 
 static inline int INET_ECN_set_ce(struct sk_buff *skb)
 {
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case cpu_to_be16(ETH_P_IP):
                if (skb_network_header(skb) + sizeof(struct iphdr) <=
                    skb_tail_pointer(skb))
@@ -191,7 +192,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 
 static inline int INET_ECN_set_ect1(struct sk_buff *skb)
 {
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case cpu_to_be16(ETH_P_IP):
                if (skb_network_header(skb) + sizeof(struct iphdr) <=
                    skb_tail_pointer(skb))
@@ -272,12 +273,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
 {
        __u8 inner;
 
-       if (skb->protocol == htons(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                inner = ip_hdr(skb)->tos;
-       else if (skb->protocol == htons(ETH_P_IPV6))
+               break;
+       case htons(ETH_P_IPV6):
                inner = ipv6_get_dsfield(ipv6_hdr(skb));
-       else
+               break;
+       default:
                return 0;
+       }
 
        return INET_ECN_decapsulate(skb, oiph->tos, inner);
 }
@@ -287,12 +292,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
 {
        __u8 inner;
 
-       if (skb->protocol == htons(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                inner = ip_hdr(skb)->tos;
-       else if (skb->protocol == htons(ETH_P_IPV6))
+               break;
+       case htons(ETH_P_IPV6):
                inner = ipv6_get_dsfield(ipv6_hdr(skb));
-       else
+               break;
+       default:
                return 0;
+       }
 
        return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
 }
index 076e5d7db7d3c45663d094aeef1010eeca461635..36025dea7612ac462a2582883a6ea1024c141081 100644 (file)
@@ -290,6 +290,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p, __u32 fwmark);
 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
 
+extern const struct header_ops ip_tunnel_header_ops;
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
+
 struct ip_tunnel_encap_ops {
        size_t (*encap_hlen)(struct ip_tunnel_encap *e);
        int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
index a8dce2a380c8e85e6c32bbe4244f2bac302e68de..0ca6a1b8718533adf2f37223bf4d8955df151246 100644 (file)
@@ -9,10 +9,13 @@
 #include <linux/bpf-netns.h>
 
 struct bpf_prog;
+struct bpf_prog_array;
 
 struct netns_bpf {
-       struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE];
-       struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE];
+       /* Array of programs to run compiled from progs or links */
+       struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
+       struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
+       struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
 };
 
 #endif /* __NETNS_BPF_H__ */
index 9092e697059e775af307be69a879386ebfd9924f..ac8c890a2657e35546ec51fe8b8a993a2bd0c91b 100644 (file)
@@ -136,17 +136,6 @@ static inline void qdisc_run(struct Qdisc *q)
        }
 }
 
-static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
-{
-       /* We need to take extra care in case the skb came via
-        * vlan accelerated path. In that case, use skb->vlan_proto
-        * as the original vlan header was already stripped.
-        */
-       if (skb_vlan_tag_present(skb))
-               return skb->vlan_proto;
-       return skb->protocol;
-}
-
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
index 3428619faae4340485b200f49d9cce4fb09086b3..1183507df95bfdad4fb159efd50a80e7e2190f9a 100644 (file)
@@ -533,7 +533,8 @@ enum sk_pacing {
  * be copied.
  */
 #define SK_USER_DATA_NOCOPY    1UL
-#define SK_USER_DATA_PTRMASK   ~(SK_USER_DATA_NOCOPY)
+#define SK_USER_DATA_BPF       2UL     /* Managed by BPF */
+#define SK_USER_DATA_PTRMASK   ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
 
 /**
  * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
index a4ff226505c99c06fc6df0e3b0253179e0eff5e0..6842990e2712bd194b60739c995855c2e21dab0f 100644 (file)
@@ -40,7 +40,7 @@ struct xsk_buff_pool {
        u32 headroom;
        u32 chunk_size;
        u32 frame_len;
-       bool cheap_dma;
+       bool dma_need_sync;
        bool unaligned;
        void *addrs;
        struct device *dev;
@@ -80,7 +80,7 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
 void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
 static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
 {
-       if (xskb->pool->cheap_dma)
+       if (!xskb->pool->dma_need_sync)
                return;
 
        xp_dma_sync_for_cpu_slow(xskb);
@@ -91,7 +91,7 @@ void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
 static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
                                          dma_addr_t dma, size_t size)
 {
-       if (pool->cheap_dma)
+       if (!pool->dma_need_sync)
                return;
 
        xp_dma_sync_for_device_slow(pool, dma, size);
index 6ce8effa0b128ffe0464e137bb343f36b9c48874..70cbc5095e7250aeb1fca7f4529516422d45cb2a 100644 (file)
@@ -66,6 +66,7 @@ struct snd_compr_runtime {
  * @direction: stream direction, playback/recording
  * @metadata_set: metadata set flag, true when set
  * @next_track: has userspace signal next track transition, true when set
+ * @partial_drain: undergoing partial_drain for stream, true when set
  * @private_data: pointer to DSP private data
  * @dma_buffer: allocated buffer if any
  */
@@ -78,6 +79,7 @@ struct snd_compr_stream {
        enum snd_compr_direction direction;
        bool metadata_set;
        bool next_track;
+       bool partial_drain;
        void *private_data;
        struct snd_dma_buffer dma_buffer;
 };
@@ -182,7 +184,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
        if (snd_BUG_ON(!stream))
                return;
 
-       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       /* for partial_drain case we are back to running state on success */
+       if (stream->partial_drain) {
+               stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+               stream->partial_drain = false; /* clear this flag as well */
+       } else {
+               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       }
 
        wake_up(&stream->runtime->sleep);
 }
index 974a71342aea681d330bff9074810d3c73f125e5..8bd33050b7bbb6e0c5e1d0bf2e1e7ffc11b2caa4 100644 (file)
@@ -3171,13 +3171,12 @@ union bpf_attr {
  * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
- *             0, on success;
- *             < 0, on error.
+ *             0 on success, or a negative error in case of failure.
  *
  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
  *     Description
@@ -3189,20 +3188,20 @@ union bpf_attr {
  * void bpf_ringbuf_submit(void *data, u64 flags)
  *     Description
  *             Submit reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
  * void bpf_ringbuf_discard(void *data, u64 flags)
  *     Description
  *             Discard reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
@@ -3210,16 +3209,18 @@ union bpf_attr {
  *     Description
  *             Query various characteristics of provided ring buffer. What
 *             exactly is queried is determined by *flags*:
- *               - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- *               - BPF_RB_RING_SIZE - the size of ring buffer;
- *               - BPF_RB_CONS_POS - consumer position (can wrap around);
- *               - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- *             Data returned is just a momentary snapshots of actual values
+ *
+ *             * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ *             * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ *             * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ *             * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ *             Data returned is just a momentary snapshot of actual values
  *             and could be inaccurate, so this facility should be used to
  *             power heuristics and for reporting, not to make 100% correct
  *             calculation.
  *     Return
- *             Requested value, or 0, if flags are not recognized.
+ *             Requested value, or 0, if *flags* are not recognized.
  *
  * int bpf_csum_level(struct sk_buff *skb, u64 level)
  *     Description
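
The reworded helper comments above feed the auto-generated BPF helper man pages; functionally, BPF_RB_NO_WAKEUP and BPF_RB_FORCE_WAKEUP let a producer trade notification latency for fewer consumer wakeups. A hedged BPF-side sketch of that trade-off (map layout and event struct are illustrative; builds against libbpf's bpf_helpers.h), which also respects the power-of-two max_entries rule enforced by the ringbuf.c change further down:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define BATCH 64

struct event {
	__u32 pid;
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 20);	/* must be a power-of-2 multiple of PAGE_SIZE */
} rb SEC(".maps");

static __u32 produced;

SEC("tracepoint/sched/sched_process_exit")
int handle_exit(void *ctx)
{
	struct event e = {
		.pid = (__u32)bpf_get_current_pid_tgid(),
	};
	/* Suppress wakeups for most samples, force one every BATCH samples. */
	__u64 flags = (++produced % BATCH) ? BPF_RB_NO_WAKEUP : BPF_RB_FORCE_WAKEUP;

	bpf_ringbuf_output(&rb, &e, sizeof(e), flags);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
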
index 1f412fbf561bb2262553c2000e6cf938fad23543..e103c1434e4b0ea8861a97a0753a54d63b5dbd57 100644 (file)
@@ -110,9 +110,12 @@ struct dsa_hw_desc {
        uint16_t        rsvd1;
        union {
                uint8_t         expected_res;
+               /* create delta record */
                struct {
                        uint64_t        delta_addr;
                        uint32_t        max_delta_size;
+                       uint32_t        delt_rsvd;
+                       uint8_t         expected_res_mask;
                };
                uint32_t        delta_rec_size;
                uint64_t        dest2;
index b6a835d37826384e87feb1446809e983379bda1f..0c2e27d28e0acdf825bab42cbffb452275f065ed 100644 (file)
 #define SW_LINEIN_INSERT       0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE         0x0e  /* set = device disabled */
 #define SW_PEN_INSERTED                0x0f  /* set = pen inserted */
-#define SW_MAX                 0x0f
+#define SW_MACHINE_COVER       0x10  /* set = cover closed */
+#define SW_MAX                 0x10
 #define SW_CNT                 (SW_MAX+1)
 
 /*
index 92c22699a5a74e44111926657e0c65ac74f135a2..7843742b8b741e98118ed79fee7ad947dc750826 100644 (file)
@@ -197,6 +197,7 @@ struct io_sqring_offsets {
  * sq_ring->flags
  */
 #define IORING_SQ_NEED_WAKEUP  (1U << 0) /* needs io_uring_enter wakeup */
+#define IORING_SQ_CQ_OVERFLOW  (1U << 1) /* CQ ring is overflown */
 
 struct io_cqring_offsets {
        __u32 head;
index a46aa8f3174d5315b5287fa26b7c3c471fcd86fa..0498af567f7060fa4360ae0f2b73e3aea6298179 100644 (file)
@@ -49,13 +49,13 @@ config CLANG_VERSION
 
 config CC_CAN_LINK
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
 
 config CC_CAN_LINK_STATIC
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
 
 config CC_HAS_ASM_GOTO
        def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
index 58c9af1d480820332a332685790027cdc99fcc52..9a1a98dd9e97c7c9576ec78513e6ced95f7bb4ae 100644 (file)
@@ -3746,7 +3746,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                                return false;
 
                        t = btf_type_skip_modifiers(btf, t->type, NULL);
-                       if (!btf_type_is_int(t)) {
+                       if (!btf_type_is_small_int(t)) {
                                bpf_log(log,
                                        "ret type %s not allowed for fmod_ret\n",
                                        btf_kind_str[BTF_INFO_KIND(t->info)]);
@@ -3768,7 +3768,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
        /* skip modifiers */
        while (btf_type_is_modifier(t))
                t = btf_type_by_id(btf, t->type);
-       if (btf_type_is_int(t) || btf_type_is_enum(t))
+       if (btf_type_is_small_int(t) || btf_type_is_enum(t))
                /* accessing a scalar */
                return true;
        if (!btf_type_is_ptr(t)) {
index 78cf061f817938560117a7a6bbed007b9660810e..310241ca79912a62007e83accfe6307cc2e8e01a 100644 (file)
@@ -19,18 +19,21 @@ struct bpf_netns_link {
         * with netns_bpf_mutex held.
         */
        struct net *net;
+       struct list_head node; /* node in list of links attached to net */
 };
 
 /* Protects updates to netns_bpf */
 DEFINE_MUTEX(netns_bpf_mutex);
 
 /* Must be called with netns_bpf_mutex held. */
-static void __net_exit bpf_netns_link_auto_detach(struct bpf_link *link)
+static void netns_bpf_run_array_detach(struct net *net,
+                                      enum netns_bpf_attach_type type)
 {
-       struct bpf_netns_link *net_link =
-               container_of(link, struct bpf_netns_link, link);
+       struct bpf_prog_array *run_array;
 
-       net_link->net = NULL;
+       run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
+                                       lockdep_is_held(&netns_bpf_mutex));
+       bpf_prog_array_free(run_array);
 }
 
 static void bpf_netns_link_release(struct bpf_link *link)
@@ -40,22 +43,18 @@ static void bpf_netns_link_release(struct bpf_link *link)
        enum netns_bpf_attach_type type = net_link->netns_type;
        struct net *net;
 
-       /* Link auto-detached by dying netns. */
-       if (!net_link->net)
-               return;
-
        mutex_lock(&netns_bpf_mutex);
 
-       /* Recheck after potential sleep. We can race with cleanup_net
-        * here, but if we see a non-NULL struct net pointer pre_exit
-        * has not happened yet and will block on netns_bpf_mutex.
+       /* We can race with cleanup_net, but if we see a non-NULL
+        * struct net pointer, pre_exit has not run yet and will
+        * block on netns_bpf_mutex.
         */
        net = net_link->net;
        if (!net)
                goto out_unlock;
 
-       net->bpf.links[type] = NULL;
-       RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+       netns_bpf_run_array_detach(net, type);
+       list_del(&net_link->node);
 
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
@@ -76,6 +75,7 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        enum netns_bpf_attach_type type = net_link->netns_type;
+       struct bpf_prog_array *run_array;
        struct net *net;
        int ret = 0;
 
@@ -93,8 +93,11 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
                goto out_unlock;
        }
 
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       WRITE_ONCE(run_array->items[0].prog, new_prog);
+
        old_prog = xchg(&link->prog, new_prog);
-       rcu_assign_pointer(net->bpf.progs[type], new_prog);
        bpf_prog_put(old_prog);
 
 out_unlock:
@@ -142,14 +145,38 @@ static const struct bpf_link_ops bpf_netns_link_ops = {
        .show_fdinfo = bpf_netns_link_show_fdinfo,
 };
 
+/* Must be called with netns_bpf_mutex held. */
+static int __netns_bpf_prog_query(const union bpf_attr *attr,
+                                 union bpf_attr __user *uattr,
+                                 struct net *net,
+                                 enum netns_bpf_attach_type type)
+{
+       __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+       struct bpf_prog_array *run_array;
+       u32 prog_cnt = 0, flags = 0;
+
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       if (run_array)
+               prog_cnt = bpf_prog_array_length(run_array);
+
+       if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+               return -EFAULT;
+       if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
+               return -EFAULT;
+       if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
+               return 0;
+
+       return bpf_prog_array_copy_to_user(run_array, prog_ids,
+                                          attr->query.prog_cnt);
+}
+
 int netns_bpf_prog_query(const union bpf_attr *attr,
                         union bpf_attr __user *uattr)
 {
-       __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
-       u32 prog_id, prog_cnt = 0, flags = 0;
        enum netns_bpf_attach_type type;
-       struct bpf_prog *attached;
        struct net *net;
+       int ret;
 
        if (attr->query.query_flags)
                return -EINVAL;
@@ -162,36 +189,25 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
        if (IS_ERR(net))
                return PTR_ERR(net);
 
-       rcu_read_lock();
-       attached = rcu_dereference(net->bpf.progs[type]);
-       if (attached) {
-               prog_cnt = 1;
-               prog_id = attached->aux->id;
-       }
-       rcu_read_unlock();
+       mutex_lock(&netns_bpf_mutex);
+       ret = __netns_bpf_prog_query(attr, uattr, net, type);
+       mutex_unlock(&netns_bpf_mutex);
 
        put_net(net);
-
-       if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
-               return -EFAULT;
-       if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
-               return -EFAULT;
-
-       if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
-               return 0;
-
-       if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
-               return -EFAULT;
-
-       return 0;
+       return ret;
 }
 
 int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
+       struct bpf_prog_array *run_array;
        enum netns_bpf_attach_type type;
+       struct bpf_prog *attached;
        struct net *net;
        int ret;
 
+       if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;
@@ -200,19 +216,47 @@ int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        mutex_lock(&netns_bpf_mutex);
 
        /* Attaching prog directly is not compatible with links */
-       if (net->bpf.links[type]) {
+       if (!list_empty(&net->bpf.links[type])) {
                ret = -EEXIST;
                goto out_unlock;
        }
 
        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
-               ret = flow_dissector_bpf_prog_attach(net, prog);
+               ret = flow_dissector_bpf_prog_attach_check(net, prog);
                break;
        default:
                ret = -EINVAL;
                break;
        }
+       if (ret)
+               goto out_unlock;
+
+       attached = net->bpf.progs[type];
+       if (attached == prog) {
+               /* The same program cannot be attached twice */
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       if (run_array) {
+               WRITE_ONCE(run_array->items[0].prog, prog);
+       } else {
+               run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+               if (!run_array) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               run_array->items[0].prog = prog;
+               rcu_assign_pointer(net->bpf.run_array[type], run_array);
+       }
+
+       net->bpf.progs[type] = prog;
+       if (attached)
+               bpf_prog_put(attached);
+
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
 
@@ -221,63 +265,74 @@ out_unlock:
 
 /* Must be called with netns_bpf_mutex held. */
 static int __netns_bpf_prog_detach(struct net *net,
-                                  enum netns_bpf_attach_type type)
+                                  enum netns_bpf_attach_type type,
+                                  struct bpf_prog *old)
 {
        struct bpf_prog *attached;
 
        /* Progs attached via links cannot be detached */
-       if (net->bpf.links[type])
+       if (!list_empty(&net->bpf.links[type]))
                return -EINVAL;
 
-       attached = rcu_dereference_protected(net->bpf.progs[type],
-                                            lockdep_is_held(&netns_bpf_mutex));
-       if (!attached)
+       attached = net->bpf.progs[type];
+       if (!attached || attached != old)
                return -ENOENT;
-       RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+       netns_bpf_run_array_detach(net, type);
+       net->bpf.progs[type] = NULL;
        bpf_prog_put(attached);
        return 0;
 }
 
-int netns_bpf_prog_detach(const union bpf_attr *attr)
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
 {
        enum netns_bpf_attach_type type;
+       struct bpf_prog *prog;
        int ret;
 
+       if (attr->target_fd)
+               return -EINVAL;
+
        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;
 
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
        mutex_lock(&netns_bpf_mutex);
-       ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type);
+       ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
        mutex_unlock(&netns_bpf_mutex);
 
+       bpf_prog_put(prog);
+
        return ret;
 }
 
 static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
                                 enum netns_bpf_attach_type type)
 {
-       struct bpf_prog *prog;
+       struct bpf_netns_link *net_link =
+               container_of(link, struct bpf_netns_link, link);
+       struct bpf_prog_array *run_array;
        int err;
 
        mutex_lock(&netns_bpf_mutex);
 
        /* Allow attaching only one prog or link for now */
-       if (net->bpf.links[type]) {
+       if (!list_empty(&net->bpf.links[type])) {
                err = -E2BIG;
                goto out_unlock;
        }
        /* Links are not compatible with attaching prog directly */
-       prog = rcu_dereference_protected(net->bpf.progs[type],
-                                        lockdep_is_held(&netns_bpf_mutex));
-       if (prog) {
+       if (net->bpf.progs[type]) {
                err = -EEXIST;
                goto out_unlock;
        }
 
        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
-               err = flow_dissector_bpf_prog_attach(net, link->prog);
+               err = flow_dissector_bpf_prog_attach_check(net, link->prog);
                break;
        default:
                err = -EINVAL;
@@ -286,7 +341,15 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
        if (err)
                goto out_unlock;
 
-       net->bpf.links[type] = link;
+       run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+       if (!run_array) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+       run_array->items[0].prog = link->prog;
+       rcu_assign_pointer(net->bpf.run_array[type], run_array);
+
+       list_add_tail(&net_link->node, &net->bpf.links[type]);
 
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
@@ -345,23 +408,34 @@ out_put_net:
        return err;
 }
 
+static int __net_init netns_bpf_pernet_init(struct net *net)
+{
+       int type;
+
+       for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
+               INIT_LIST_HEAD(&net->bpf.links[type]);
+
+       return 0;
+}
+
 static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
 {
        enum netns_bpf_attach_type type;
-       struct bpf_link *link;
+       struct bpf_netns_link *net_link;
 
        mutex_lock(&netns_bpf_mutex);
        for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
-               link = net->bpf.links[type];
-               if (link)
-                       bpf_netns_link_auto_detach(link);
-               else
-                       __netns_bpf_prog_detach(net, type);
+               netns_bpf_run_array_detach(net, type);
+               list_for_each_entry(net_link, &net->bpf.links[type], node)
+                       net_link->net = NULL; /* auto-detach link */
+               if (net->bpf.progs[type])
+                       bpf_prog_put(net->bpf.progs[type]);
        }
        mutex_unlock(&netns_bpf_mutex);
 }
 
 static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
+       .init = netns_bpf_pernet_init,
        .pre_exit = netns_bpf_pernet_pre_exit,
 };
 
index 21cde24386db4b73ed4388f8cb3c183e57a58160..cae9d505e04ace141bfdc64df04a4caabcc62052 100644 (file)
@@ -20,11 +20,14 @@ static struct reuseport_array *reuseport_array(struct bpf_map *map)
 /* The caller must hold the reuseport_lock */
 void bpf_sk_reuseport_detach(struct sock *sk)
 {
-       struct sock __rcu **socks;
+       uintptr_t sk_user_data;
 
        write_lock_bh(&sk->sk_callback_lock);
-       socks = sk->sk_user_data;
-       if (socks) {
+       sk_user_data = (uintptr_t)sk->sk_user_data;
+       if (sk_user_data & SK_USER_DATA_BPF) {
+               struct sock __rcu **socks;
+
+               socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
                WRITE_ONCE(sk->sk_user_data, NULL);
                /*
                 * Do not move this NULL assignment outside of
@@ -252,6 +255,7 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
        struct sock *free_osk = NULL, *osk, *nsk;
        struct sock_reuseport *reuse;
        u32 index = *(u32 *)key;
+       uintptr_t sk_user_data;
        struct socket *socket;
        int err, fd;
 
@@ -305,7 +309,9 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
        if (err)
                goto put_file_unlock;
 
-       WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
+       sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
+               SK_USER_DATA_BPF;
+       WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
        rcu_assign_pointer(array->ptrs[index], nsk);
        free_osk = osk;
        err = 0;
index 180414bb0d3e9a94d26225f15370b139d0d7689f..0af88bbc1c15307d3964acd8810453f7c0276208 100644 (file)
@@ -132,15 +132,6 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 {
        struct bpf_ringbuf *rb;
 
-       if (!data_sz || !PAGE_ALIGNED(data_sz))
-               return ERR_PTR(-EINVAL);
-
-#ifdef CONFIG_64BIT
-       /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
-       if (data_sz > RINGBUF_MAX_DATA_SZ)
-               return ERR_PTR(-E2BIG);
-#endif
-
        rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
        if (!rb)
                return ERR_PTR(-ENOMEM);
@@ -166,9 +157,16 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
                return ERR_PTR(-EINVAL);
 
        if (attr->key_size || attr->value_size ||
-           attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries))
+           !is_power_of_2(attr->max_entries) ||
+           !PAGE_ALIGNED(attr->max_entries))
                return ERR_PTR(-EINVAL);
 
+#ifdef CONFIG_64BIT
+       /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
+       if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
+               return ERR_PTR(-E2BIG);
+#endif
+
        rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
        if (!rb_map)
                return ERR_PTR(-ENOMEM);
index 8da159936bab17e4b322561457ca03f8608dffd3..0fd80ac81f705725a3ebc3234eadd4f7dae4d7cc 100644 (file)
@@ -2121,7 +2121,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
            !bpf_capable())
                return -EPERM;
 
-       if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
+       if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (is_perfmon_prog_type(type) && !perfmon_capable())
                return -EPERM;
@@ -2893,13 +2893,11 @@ static int bpf_prog_detach(const union bpf_attr *attr)
        switch (ptype) {
        case BPF_PROG_TYPE_SK_MSG:
        case BPF_PROG_TYPE_SK_SKB:
-               return sock_map_get_from_fd(attr, NULL);
+               return sock_map_prog_detach(attr, ptype);
        case BPF_PROG_TYPE_LIRC_MODE2:
                return lirc_prog_detach(attr);
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               return netns_bpf_prog_detach(attr);
+               return netns_bpf_prog_detach(attr, ptype);
        case BPF_PROG_TYPE_CGROUP_DEVICE:
        case BPF_PROG_TYPE_CGROUP_SKB:
        case BPF_PROG_TYPE_CGROUP_SOCK:
@@ -3139,7 +3137,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
        return NULL;
 }
 
-static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
+                                             const struct cred *f_cred)
 {
        const struct bpf_map *map;
        struct bpf_insn *insns;
@@ -3165,7 +3164,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
                    code == (BPF_JMP | BPF_CALL_ARGS)) {
                        if (code == (BPF_JMP | BPF_CALL_ARGS))
                                insns[i].code = BPF_JMP | BPF_CALL;
-                       if (!bpf_dump_raw_ok())
+                       if (!bpf_dump_raw_ok(f_cred))
                                insns[i].imm = 0;
                        continue;
                }
@@ -3221,7 +3220,8 @@ static int set_info_rec_size(struct bpf_prog_info *info)
        return 0;
 }
 
-static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
+static int bpf_prog_get_info_by_fd(struct file *file,
+                                  struct bpf_prog *prog,
                                   const union bpf_attr *attr,
                                   union bpf_attr __user *uattr)
 {
@@ -3290,11 +3290,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                struct bpf_insn *insns_sanitized;
                bool fault;
 
-               if (prog->blinded && !bpf_dump_raw_ok()) {
+               if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
                        info.xlated_prog_insns = 0;
                        goto done;
                }
-               insns_sanitized = bpf_insn_prepare_dump(prog);
+               insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
                if (!insns_sanitized)
                        return -ENOMEM;
                uinsns = u64_to_user_ptr(info.xlated_prog_insns);
@@ -3328,7 +3328,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        if (info.jited_prog_len && ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        uinsns = u64_to_user_ptr(info.jited_prog_insns);
                        ulen = min_t(u32, info.jited_prog_len, ulen);
 
@@ -3363,7 +3363,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.nr_jited_ksyms;
        info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
        if (ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        unsigned long ksym_addr;
                        u64 __user *user_ksyms;
                        u32 i;
@@ -3394,7 +3394,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.nr_jited_func_lens;
        info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
        if (ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        u32 __user *user_lens;
                        u32 func_len, i;
 
@@ -3451,7 +3451,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        else
                info.nr_jited_line_info = 0;
        if (info.nr_jited_line_info && ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        __u64 __user *user_linfo;
                        u32 i;
 
@@ -3497,7 +3497,8 @@ done:
        return 0;
 }
 
-static int bpf_map_get_info_by_fd(struct bpf_map *map,
+static int bpf_map_get_info_by_fd(struct file *file,
+                                 struct bpf_map *map,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3540,7 +3541,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
        return 0;
 }
 
-static int bpf_btf_get_info_by_fd(struct btf *btf,
+static int bpf_btf_get_info_by_fd(struct file *file,
+                                 struct btf *btf,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3555,7 +3557,8 @@ static int bpf_btf_get_info_by_fd(struct btf *btf,
        return btf_get_info_by_fd(btf, attr, uattr);
 }
 
-static int bpf_link_get_info_by_fd(struct bpf_link *link,
+static int bpf_link_get_info_by_fd(struct file *file,
+                                 struct bpf_link *link,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3608,15 +3611,15 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
                return -EBADFD;
 
        if (f.file->f_op == &bpf_prog_fops)
-               err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
+               err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
                                              uattr);
        else if (f.file->f_op == &bpf_map_fops)
-               err = bpf_map_get_info_by_fd(f.file->private_data, attr,
+               err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
                                             uattr);
        else if (f.file->f_op == &btf_fops)
-               err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
+               err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
        else if (f.file->f_op == &bpf_link_fops)
-               err = bpf_link_get_info_by_fd(f.file->private_data,
+               err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
                                              attr, uattr);
        else
                err = -EINVAL;
index 34cde841ab6819ebc61f992b38a61b66228ce8fb..94cead5a43e57e949e71d177594afca431468c49 100644 (file)
@@ -399,8 +399,7 @@ static bool reg_type_not_null(enum bpf_reg_type type)
        return type == PTR_TO_SOCKET ||
                type == PTR_TO_TCP_SOCK ||
                type == PTR_TO_MAP_VALUE ||
-               type == PTR_TO_SOCK_COMMON ||
-               type == PTR_TO_BTF_ID;
+               type == PTR_TO_SOCK_COMMON;
 }
 
 static bool reg_type_may_be_null(enum bpf_reg_type type)
@@ -9801,7 +9800,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
        int i, j, subprog_start, subprog_end = 0, len, subprog;
        struct bpf_insn *insn;
        void *old_bpf_func;
-       int err;
+       int err, num_exentries;
 
        if (env->subprog_cnt <= 1)
                return 0;
@@ -9876,6 +9875,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                func[i]->aux->nr_linfo = prog->aux->nr_linfo;
                func[i]->aux->jited_linfo = prog->aux->jited_linfo;
                func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
+               num_exentries = 0;
+               insn = func[i]->insnsi;
+               for (j = 0; j < func[i]->len; j++, insn++) {
+                       if (BPF_CLASS(insn->code) == BPF_LDX &&
+                           BPF_MODE(insn->code) == BPF_PROBE_MEM)
+                               num_exentries++;
+               }
+               func[i]->aux->num_exentries = num_exentries;
                func[i] = bpf_int_jit_compile(func[i]);
                if (!func[i]->jited) {
                        err = -ENOTSUPP;
index 1ea181a58465fd16889a2f18cbc3b6be48d3d956..dd247747ec14a622d47037e66e5b43dfbd4e6f24 100644 (file)
@@ -6439,18 +6439,8 @@ void cgroup_sk_alloc_disable(void)
 
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-       if (cgroup_sk_alloc_disabled)
-               return;
-
-       /* Socket clone path */
-       if (skcd->val) {
-               /*
-                * We might be cloning a socket which is left in an empty
-                * cgroup and the cgroup might have already been rmdir'd.
-                * Don't use cgroup_get_live().
-                */
-               cgroup_get(sock_cgroup_ptr(skcd));
-               cgroup_bpf_get(sock_cgroup_ptr(skcd));
+       if (cgroup_sk_alloc_disabled) {
+               skcd->no_refcnt = 1;
                return;
        }
 
@@ -6475,10 +6465,27 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
        rcu_read_unlock();
 }
 
+void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+{
+       if (skcd->val) {
+               if (skcd->no_refcnt)
+                       return;
+               /*
+                * We might be cloning a socket which is left in an empty
+                * cgroup and the cgroup might have already been rmdir'd.
+                * Don't use cgroup_get_live().
+                */
+               cgroup_get(sock_cgroup_ptr(skcd));
+               cgroup_bpf_get(sock_cgroup_ptr(skcd));
+       }
+}
+
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
        struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
+       if (skcd->no_refcnt)
+               return;
        cgroup_bpf_put(cgrp);
        cgroup_put(cgrp);
 }
index 61774aec46b4c8ee90f60daa384db9f7a9625d60..a790026e42d01006d8c9107e0a6cb303ba818023 100644 (file)
@@ -792,6 +792,19 @@ static void gdb_cmd_query(struct kgdb_state *ks)
                }
                break;
 #endif
+#ifdef CONFIG_HAVE_ARCH_KGDB_QXFER_PKT
+       case 'S':
+               if (!strncmp(remcom_in_buffer, "qSupported:", 11))
+                       strcpy(remcom_out_buffer, kgdb_arch_gdb_stub_feature);
+               break;
+       case 'X':
+               if (!strncmp(remcom_in_buffer, "qXfer:", 6))
+                       kgdb_arch_handle_qxfer_pkt(remcom_in_buffer,
+                                                  remcom_out_buffer);
+               break;
+#endif
+       default:
+               break;
        }
 }
 
index 93f578a8e613ba7d4d5cde3f62d94943ce679526..95866b647581007862dc39af279a350edcfa13be 100644 (file)
@@ -539,3 +539,9 @@ size_t dma_direct_max_mapping_size(struct device *dev)
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
 }
+
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       return !dev_is_dma_coherent(dev) ||
+               is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+}
index 98e3d873792ea4cf7bb5f84416dfd492f5da792d..a8c18c9a796fdc6525d4cf006fe56efd0f99fb81 100644 (file)
@@ -397,6 +397,16 @@ size_t dma_max_mapping_size(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_max_mapping_size);
 
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       if (dma_is_direct(ops))
+               return dma_direct_need_sync(dev, dma_addr);
+       return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+
 unsigned long dma_get_merge_boundary(struct device *dev)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
index 8cfa01243ed27b6fa3d7ce5718fc69b2d6a55cf1..39ca26fa41b570595428014ef96281cf5e094221 100644 (file)
@@ -239,12 +239,16 @@ void *dma_alloc_from_pool(struct device *dev, size_t size,
        }
 
        val = gen_pool_alloc(pool, size);
-       if (val) {
+       if (likely(val)) {
                phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
                *ret_page = pfn_to_page(__phys_to_pfn(phys));
                ptr = (void *)val;
                memset(ptr, 0, size);
+       } else {
+               WARN_ONCE(1, "DMA coherent pool depleted, increase size "
+                            "(recommended min coherent_pool=%zuK)\n",
+                         gen_pool_size(pool) >> 9);
        }
        if (gen_pool_avail(pool) < atomic_pool_size)
                schedule_work(&atomic_pool_work);
index 142b23645d82e76297298d0dd62fd0b11969685a..efc5493203ae0b744d0684ec10f531bb5c7b9557 100644 (file)
@@ -1977,7 +1977,7 @@ static __latent_entropy struct task_struct *copy_process(
         * to stop root fork bombs.
         */
        retval = -EAGAIN;
-       if (nr_threads >= max_threads)
+       if (data_race(nr_threads >= max_threads))
                goto bad_fork_cleanup_count;
 
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
index 16c8c605f4b0facfc751198a8450cc91405e2f1c..bb14e64f62a48eddc2f5ca1ede0e156fde55af15 100644 (file)
@@ -644,19 +644,20 @@ static inline int kallsyms_for_perf(void)
  * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
  * block even that).
  */
-int kallsyms_show_value(void)
+bool kallsyms_show_value(const struct cred *cred)
 {
        switch (kptr_restrict) {
        case 0:
                if (kallsyms_for_perf())
-                       return 1;
+                       return true;
        /* fallthrough */
        case 1:
-               if (has_capability_noaudit(current, CAP_SYSLOG))
-                       return 1;
+               if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
+                                    CAP_OPT_NOAUDIT) == 0)
+                       return true;
        /* fallthrough */
        default:
-               return 0;
+               return false;
        }
 }
 
@@ -673,7 +674,11 @@ static int kallsyms_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        reset_iter(iter, 0);
 
-       iter->show_value = kallsyms_show_value();
+       /*
+        * Instead of checking this on every s_show() call, cache
+        * the result here at open time.
+        */
+       iter->show_value = kallsyms_show_value(file->f_cred);
        return 0;
 }
 
index 4a904cc56d68f922dbf807fb54f9593905a6f7f0..2e97febeef77dfa96ee0565ef7cf097ef17bef86 100644 (file)
@@ -2448,7 +2448,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
        else
                kprobe_type = "k";
 
-       if (!kallsyms_show_value())
+       if (!kallsyms_show_value(pi->file->f_cred))
                addr = NULL;
 
        if (sym)
@@ -2540,7 +2540,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
         * If /proc/kallsyms is not showing kernel address, we won't
         * show them here either.
         */
-       if (!kallsyms_show_value())
+       if (!kallsyms_show_value(m->file->f_cred))
                seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
                           (void *)ent->start_addr);
        else
index 0c6573b98c366244928e54cf2ec7fb14082e93d3..aa183c9ac0a256eadb92f6d30f2d2460583bfec8 100644 (file)
@@ -1510,8 +1510,7 @@ static inline bool sect_empty(const Elf_Shdr *sect)
 }
 
 struct module_sect_attr {
-       struct module_attribute mattr;
-       char *name;
+       struct bin_attribute battr;
        unsigned long address;
 };
 
@@ -1521,13 +1520,18 @@ struct module_sect_attrs {
        struct module_sect_attr attrs[];
 };
 
-static ssize_t module_sect_show(struct module_attribute *mattr,
-                               struct module_kobject *mk, char *buf)
+static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
+                               struct bin_attribute *battr,
+                               char *buf, loff_t pos, size_t count)
 {
        struct module_sect_attr *sattr =
-               container_of(mattr, struct module_sect_attr, mattr);
-       return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
-                      (void *)sattr->address : NULL);
+               container_of(battr, struct module_sect_attr, battr);
+
+       if (pos != 0)
+               return -EINVAL;
+
+       return sprintf(buf, "0x%px\n",
+                      kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
 }
 
 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
@@ -1535,7 +1539,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
        unsigned int section;
 
        for (section = 0; section < sect_attrs->nsections; section++)
-               kfree(sect_attrs->attrs[section].name);
+               kfree(sect_attrs->attrs[section].battr.attr.name);
        kfree(sect_attrs);
 }
 
@@ -1544,42 +1548,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
        unsigned int nloaded = 0, i, size[2];
        struct module_sect_attrs *sect_attrs;
        struct module_sect_attr *sattr;
-       struct attribute **gattr;
+       struct bin_attribute **gattr;
 
        /* Count loaded sections and allocate structures */
        for (i = 0; i < info->hdr->e_shnum; i++)
                if (!sect_empty(&info->sechdrs[i]))
                        nloaded++;
        size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
-                       sizeof(sect_attrs->grp.attrs[0]));
-       size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
+                       sizeof(sect_attrs->grp.bin_attrs[0]));
+       size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
        sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
        if (sect_attrs == NULL)
                return;
 
        /* Setup section attributes. */
        sect_attrs->grp.name = "sections";
-       sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
+       sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
 
        sect_attrs->nsections = 0;
        sattr = &sect_attrs->attrs[0];
-       gattr = &sect_attrs->grp.attrs[0];
+       gattr = &sect_attrs->grp.bin_attrs[0];
        for (i = 0; i < info->hdr->e_shnum; i++) {
                Elf_Shdr *sec = &info->sechdrs[i];
                if (sect_empty(sec))
                        continue;
+               sysfs_bin_attr_init(&sattr->battr);
                sattr->address = sec->sh_addr;
-               sattr->name = kstrdup(info->secstrings + sec->sh_name,
-                                       GFP_KERNEL);
-               if (sattr->name == NULL)
+               sattr->battr.attr.name =
+                       kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+               if (sattr->battr.attr.name == NULL)
                        goto out;
                sect_attrs->nsections++;
-               sysfs_attr_init(&sattr->mattr.attr);
-               sattr->mattr.show = module_sect_show;
-               sattr->mattr.store = NULL;
-               sattr->mattr.attr.name = sattr->name;
-               sattr->mattr.attr.mode = S_IRUSR;
-               *(gattr++) = &(sattr++)->mattr.attr;
+               sattr->battr.read = module_sect_read;
+               sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
+               sattr->battr.attr.mode = 0400;
+               *(gattr++) = &(sattr++)->battr;
        }
        *gattr = NULL;
 
@@ -1669,7 +1672,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
                        continue;
                if (info->sechdrs[i].sh_type == SHT_NOTE) {
                        sysfs_bin_attr_init(nattr);
-                       nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+                       nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
                        nattr->attr.mode = S_IRUGO;
                        nattr->size = info->sechdrs[i].sh_size;
                        nattr->private = (void *) info->sechdrs[i].sh_addr;
@@ -2785,7 +2788,7 @@ void * __weak module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                        GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-                       NUMA_NO_NODE, __func__);
+                       NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 bool __weak module_init_section(const char *name)
@@ -4379,7 +4382,7 @@ static int modules_open(struct inode *inode, struct file *file)
 
        if (!err) {
                struct seq_file *m = file->private_data;
-               m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+               m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
        }
 
        return err;
index 29fc5d87a4cddd8a72e68edf077cb5567425ae59..4373f7adaa40a1934ba1646a14fc024c32be2b50 100644 (file)
@@ -335,7 +335,7 @@ static void padata_reorder(struct parallel_data *pd)
         *
         * Ensure reorder queue is read after pd->lock is dropped so we see
         * new objects from another task in padata_do_serial.  Pairs with
-        * smp_mb__after_atomic in padata_do_serial.
+        * smp_mb in padata_do_serial.
         */
        smp_mb();
 
@@ -418,7 +418,7 @@ void padata_do_serial(struct padata_priv *padata)
         * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
         * in padata_reorder.
         */
-       smp_mb__after_atomic();
+       smp_mb();
 
        padata_reorder(pd);
 }
index 16dd1e6b7c09fbd228cf7f745e2fe9308a1754a1..9eb39c20082c52a88f8926932111ba4214baf814 100644 (file)
@@ -723,7 +723,7 @@ kfree_perf_init(void)
                schedule_timeout_uninterruptible(1);
        }
 
-       pr_alert("kfree object size=%lu\n", kfree_mult * sizeof(struct kfree_obj));
+       pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));
 
        kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
                               GFP_KERNEL);
index 5ca48cc5da760d2a6ad6ef2497cd92be13c4f9f7..ee22ec78fd6d5c136429f5a2f67ca19d40307ba1 100644 (file)
@@ -2529,9 +2529,6 @@ bool get_signal(struct ksignal *ksig)
        struct signal_struct *signal = current->signal;
        int signr;
 
-       if (unlikely(current->task_works))
-               task_work_run();
-
        if (unlikely(uprobe_deny_signal()))
                return false;
 
@@ -2544,6 +2541,13 @@ bool get_signal(struct ksignal *ksig)
 
 relock:
        spin_lock_irq(&sighand->siglock);
+       current->jobctl &= ~JOBCTL_TASK_WORK;
+       if (unlikely(current->task_works)) {
+               spin_unlock_irq(&sighand->siglock);
+               task_work_run();
+               goto relock;
+       }
+
        /*
         * Every stopped thread goes here after wakeup. Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
index 825f28259a19a285399171df9007ab01c19b42b1..5c0848ca1287df065303b3abbc3b4e3cb5fffa81 100644 (file)
@@ -25,9 +25,10 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
  * 0 if succeeds or -ESRCH.
  */
 int
-task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
+task_work_add(struct task_struct *task, struct callback_head *work, int notify)
 {
        struct callback_head *head;
+       unsigned long flags;
 
        do {
                head = READ_ONCE(task->task_works);
@@ -36,8 +37,19 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
                work->next = head;
        } while (cmpxchg(&task->task_works, head, work) != head);
 
-       if (notify)
+       switch (notify) {
+       case TWA_RESUME:
                set_notify_resume(task);
+               break;
+       case TWA_SIGNAL:
+               if (lock_task_sighand(task, &flags)) {
+                       task->jobctl |= JOBCTL_TASK_WORK;
+                       signal_wake_up(task, 0);
+                       unlock_task_sighand(task, &flags);
+               }
+               break;
+       }
+
        return 0;
 }
 
index ffa7a76de08604f38ecdd2ee874d8f77cf3ddd7d..256f2486f9bd2f4d0dd0ef603df4955465836417 100644 (file)
@@ -3,6 +3,11 @@
 config HAVE_ARCH_KGDB
        bool
 
+# set if the architecture has its kgdb_arch_handle_qxfer_pkt
+# function to let the gdb stub handle XML packets sent from GDB.
+config HAVE_ARCH_KGDB_QXFER_PKT
+       bool
+
 menuconfig KGDB
        bool "KGDB: kernel debugger"
        depends on HAVE_ARCH_KGDB
index 50d1e9f2f5a77338dad83d3951c3d5a75947924a..6ed72dccfdb5d667cfa7a4a67d3d727eb5d417ad 100644 (file)
@@ -73,6 +73,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
  * @endbit: The index (in logical notation, compensated for quirks) where
  *         the packed value ends within pbuf. Must be smaller than, or equal
  *         to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
  * @op: If PACK, then uval will be treated as const pointer and copied (packed)
  *     into pbuf, between startbit and endbit.
  *     If UNPACK, then pbuf will be treated as const pointer and the logical
index 0463ad2ce06b75eecd52beb61012b1f9721dd3a9..26ecff8188817c598a1c3a1ef788ac0056bddb65 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -339,13 +339,13 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
                 */
                if (base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range_nid(size, alignment,
-                                       highmem_start, limit, nid, false);
+                                       highmem_start, limit, nid, true);
                        limit = highmem_start;
                }
 
                if (!addr) {
                        addr = memblock_alloc_range_nid(size, alignment, base,
-                                       limit, nid, false);
+                                       limit, nid, true);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
index f0ae9a6308cb4d53a420ffa2acfe38e0717f0699..385759c4ce4be6c054c9328773e5b24eba467005 100644 (file)
@@ -2028,7 +2028,7 @@ find_page:
 
                page = find_get_page(mapping, index);
                if (!page) {
-                       if (iocb->ki_flags & IOCB_NOWAIT)
+                       if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
                                goto would_block;
                        page_cache_sync_readahead(mapping,
                                        ra, filp,
@@ -2038,6 +2038,10 @@ find_page:
                                goto no_cached_page;
                }
                if (PageReadahead(page)) {
+                       if (iocb->ki_flags & IOCB_NOIO) {
+                               put_page(page);
+                               goto out;
+                       }
                        page_cache_async_readahead(mapping,
                                        ra, filp, page,
                                        index, last_index - index);
@@ -2160,6 +2164,11 @@ page_not_up_to_date_locked:
                }
 
 readpage:
+               if (iocb->ki_flags & IOCB_NOIO) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto would_block;
+               }
                /*
                 * A previous I/O error may have been due to temporary
                 * failures, eg. multipath errors.
@@ -2249,9 +2258,19 @@ EXPORT_SYMBOL_GPL(generic_file_buffered_read);
  *
  * This is the "read_iter()" routine for all filesystems
  * that can use the page cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
+ * be returned when no data can be read without waiting for I/O requests
+ * to complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
+ * requests shall be made for the read or for readahead.  When no data
+ * can be read, -EAGAIN shall be returned.  When readahead would be
+ * triggered, a partial, possibly empty read shall be returned.
+ *
  * Return:
  * * number of bytes copied, even for partial reads
- * * negative error code if nothing was read
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
  */
 ssize_t
 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
index 57ece74e3aae1e584c805b0d284e68162963f199..fab4485b9e52b6dd78ddcdb00906de2d1b815b76 100644 (file)
@@ -1593,7 +1593,7 @@ static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
 
        /* Use first found vma */
        pgoff_start = page_to_pgoff(hpage);
-       pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1;
+       pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
index f3772967355861556660db86d6a5dfb59c890971..40cd7016ae6fc60120a780611d17670556357b7c 100644 (file)
@@ -1160,22 +1160,11 @@ out:
        return rc;
 }
 
-/*
- * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move().  Work
- * around it.
- */
-#if defined(CONFIG_ARM) && \
-       defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
-#define ICE_noinline noinline
-#else
-#define ICE_noinline
-#endif
-
 /*
  * Obtain the lock on page, remove all ptes and migrate the page
  * to the newly allocated page in newpage.
  */
-static ICE_noinline int unmap_and_move(new_page_t get_new_page,
+static int unmap_and_move(new_page_t get_new_page,
                                   free_page_t put_new_page,
                                   unsigned long private, struct page *page,
                                   int force, enum migrate_mode mode,
index 5dd572d57ca9919b7e9821bf753131fe64f770fc..6b153dc05fe4882c76eaac4150c8c20e12956af3 100644 (file)
@@ -206,9 +206,28 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 
        /*
         * The destination pmd shouldn't be established, free_pgtables()
-        * should have release it.
+        * should have released it.
+        *
+        * However, there's a case during execve() where we use mremap
+        * to move the initial stack, and in that case the target area
+        * may overlap the source area (always moving down).
+        *
+        * If everything is PMD-aligned, that works fine, as moving
+        * each pmd down will clear the source pmd. But if we first
+        * have a few 4kB-only pages that get moved down, and then
+        * hit the "now the rest is PMD-aligned, let's do everything
+        * one pmd at a time", we will still have the old (now empty
+        * of any 4kB pages, but still there) PMD in the page table
+        * tree.
+        *
+        * Warn on it once - because we really should try to figure
+        * out how to do this better - but then say "I won't move
+        * this pmd".
+        *
+        * One alternative might be to just unmap the target pmd at
+        * this point, and verify that it really is empty. We'll see.
         */
-       if (WARN_ON(!pmd_none(*new_pmd)))
+       if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
                return false;
 
        /*
index 48eb0f1410d47c1622cf080331150b53c72ea2fa..e028b87ce294289f7b2dfb5eb57ac143e5dcc4f9 100644 (file)
@@ -7832,7 +7832,7 @@ void setup_per_zone_wmarks(void)
  * Initialise min_free_kbytes.
  *
  * For small machines we want it small (128k min).  For large machines
- * we want it large (64MB max).  But it is not linear, because network
+ * we want it large (256MB max).  But it is not linear, because network
  * bandwidth does not increase linearly with machine size.  We use
  *
  *     min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
index c8d6a07e23c579c5b097a62e229cee0145efd8c3..3dd7c972677be27b276c44942419a33a79b13214 100644 (file)
@@ -503,11 +503,10 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
        lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
 }
 
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &vlan_netdev_addr_lock_key,
-                                      subclass);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &vlan_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
 }
 
@@ -601,7 +600,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       vlan_dev_set_lockdep_class(dev, dev->lower_level);
+       vlan_dev_set_lockdep_class(dev);
 
        vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->vlan_pcpu_stats)
index bfd4ccd80847de61b44462df7b70f46dfb382acd..b03c469cd01fa8f22e821ae00ae764928f8a603d 100644 (file)
@@ -147,6 +147,20 @@ int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
        return a + (long)b + c + d + (long)e + f;
 }
 
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test_t *a;
+};
+
+int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
+{
+       return (long)arg;
+}
+
+int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
+{
+       return (long)arg->a;
+}
+
 int noinline bpf_modify_return_test(int a, int *b)
 {
        *b += 1;
@@ -185,6 +199,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr)
 {
+       struct bpf_fentry_test_t arg = {};
        u16 side_effect = 0, ret = 0;
        int b = 2, err = -EFAULT;
        u32 retval = 0;
@@ -197,7 +212,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                    bpf_fentry_test3(4, 5, 6) != 15 ||
                    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
                    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
-                   bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
+                   bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
+                   bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
+                   bpf_fentry_test8(&arg) != 0)
                        goto out;
                break;
        case BPF_MODIFY_RETURN:
index c0f0990f30b60415fa6b929415f5ac67bba4ed01..1905e01c3aa9a7f92f949877f82e72e45d90f8d4 100644 (file)
@@ -50,7 +50,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
        req.len = optlen;
        if (!bpfilter_ops.info.pid)
                goto out;
-       n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
+       n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
                           &pos);
        if (n != sizeof(req)) {
                pr_err("write fail %zd\n", n);
index 779e1eb754430fa43099b237f5cba5db00efe746..90592af9db619fcd7b87d153f473caf582888d88 100644 (file)
@@ -86,7 +86,7 @@ static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
 {
        struct ethhdr *eth_hdr;
        struct sk_buff *skb;
-       u16 *version;
+       __be16 *version;
 
        skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
        if (!skb)
index 83490bf73a13b3f2e0f19d83eab6e780ee8b7204..4c4a93abde680db8f27306be6529972bc440911b 100644 (file)
@@ -1007,7 +1007,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
 
                if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
-                   nsrcs_offset + sizeof(_nsrcs))
+                   nsrcs_offset + sizeof(__nsrcs))
                        return -EINVAL;
 
                _nsrcs = skb_header_pointer(skb, nsrcs_offset,
index 2130fe0194e64e03c35e41f24942faf6daa80d10..e0ea6dbbc97ed464f35e2d14a3db5c34b4b1a283 100644 (file)
@@ -430,7 +430,7 @@ struct net_bridge {
        struct hlist_head               fdb_list;
 
 #if IS_ENABLED(CONFIG_BRIDGE_MRP)
-       struct list_head                __rcu mrp_list;
+       struct list_head                mrp_list;
 #endif
 };
 
index 33b255e38ffecf563aad75d259ebd2203df1414b..315eb37d89f0f3ad4ae4dcc9b8654b769e18854e 100644 (file)
@@ -8,7 +8,7 @@
 
 struct br_mrp {
        /* list of mrp instances */
-       struct list_head                __rcu list;
+       struct list_head                list;
 
        struct net_bridge_port __rcu    *p_port;
        struct net_bridge_port __rcu    *s_port;
index 6393ba930097b49b26a70cf01b657da08514f552..54cd568e7c2f51bdc0da38d267f4276b6d76804e 100644 (file)
@@ -690,6 +690,15 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return;
 
+       /* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
+        * reasons:
+        * 1) This is always called without any addr_list_lock, so as the
+        *    outermost one here, it must be 0.
+        * 2) This is called by some callers after unlinking the upper device,
+        *    so the dev->lower_level becomes 1 again.
+        * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
+        * larger.
+        */
        netif_addr_lock_bh(from);
        netif_addr_lock_nested(to);
        __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
@@ -911,6 +920,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return;
 
+       /* See the above comments inside dev_uc_unsync(). */
        netif_addr_lock_bh(from);
        netif_addr_lock_nested(to);
        __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
index 73395384afe2f88d5b0d7483088b48a1226ac83f..82e1b5b0616758a470fb760635d92d9fe269d342 100644 (file)
@@ -5853,12 +5853,16 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
 {
        unsigned int iphdr_len;
 
-       if (skb->protocol == cpu_to_be16(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case cpu_to_be16(ETH_P_IP):
                iphdr_len = sizeof(struct iphdr);
-       else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
+               break;
+       case cpu_to_be16(ETH_P_IPV6):
                iphdr_len = sizeof(struct ipv6hdr);
-       else
+               break;
+       default:
                return 0;
+       }
 
        if (skb_headlen(skb) < iphdr_len)
                return 0;
index d02df0b6d0d99ae12783f15746fbdff49b603cc7..142a8824f0a8ef348b56b0497484f1e3af391ae3 100644 (file)
@@ -70,10 +70,10 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
 EXPORT_SYMBOL(skb_flow_dissector_init);
 
 #ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+                                        struct bpf_prog *prog)
 {
        enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
-       struct bpf_prog *attached;
 
        if (net == &init_net) {
                /* BPF flow dissector in the root namespace overrides
@@ -86,26 +86,17 @@ int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
                for_each_net(ns) {
                        if (ns == &init_net)
                                continue;
-                       if (rcu_access_pointer(ns->bpf.progs[type]))
+                       if (rcu_access_pointer(ns->bpf.run_array[type]))
                                return -EEXIST;
                }
        } else {
                /* Make sure root flow dissector is not attached
                 * when attaching to the non-root namespace.
                 */
-               if (rcu_access_pointer(init_net.bpf.progs[type]))
+               if (rcu_access_pointer(init_net.bpf.run_array[type]))
                        return -EEXIST;
        }
 
-       attached = rcu_dereference_protected(net->bpf.progs[type],
-                                            lockdep_is_held(&netns_bpf_mutex));
-       if (attached == prog)
-               /* The same program cannot be attached twice */
-               return -EINVAL;
-
-       rcu_assign_pointer(net->bpf.progs[type], prog);
-       if (attached)
-               bpf_prog_put(attached);
        return 0;
 }
 #endif /* CONFIG_BPF_SYSCALL */
@@ -903,7 +894,6 @@ bool __skb_flow_dissect(const struct net *net,
        struct flow_dissector_key_addrs *key_addrs;
        struct flow_dissector_key_tags *key_tags;
        struct flow_dissector_key_vlan *key_vlan;
-       struct bpf_prog *attached = NULL;
        enum flow_dissect_ret fdret;
        enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
        bool mpls_el = false;
@@ -960,14 +950,14 @@ bool __skb_flow_dissect(const struct net *net,
        WARN_ON_ONCE(!net);
        if (net) {
                enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+               struct bpf_prog_array *run_array;
 
                rcu_read_lock();
-               attached = rcu_dereference(init_net.bpf.progs[type]);
-
-               if (!attached)
-                       attached = rcu_dereference(net->bpf.progs[type]);
+               run_array = rcu_dereference(init_net.bpf.run_array[type]);
+               if (!run_array)
+                       run_array = rcu_dereference(net->bpf.run_array[type]);
 
-               if (attached) {
+               if (run_array) {
                        struct bpf_flow_keys flow_keys;
                        struct bpf_flow_dissector ctx = {
                                .flow_keys = &flow_keys,
@@ -975,6 +965,7 @@ bool __skb_flow_dissect(const struct net *net,
                                .data_end = data + hlen,
                        };
                        __be16 n_proto = proto;
+                       struct bpf_prog *prog;
 
                        if (skb) {
                                ctx.skb = skb;
@@ -985,7 +976,8 @@ bool __skb_flow_dissect(const struct net *net,
                                n_proto = skb->protocol;
                        }
 
-                       ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
+                       prog = READ_ONCE(run_array->items[0].prog);
+                       ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
                                               hlen, flags);
                        __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
                                                 target_container);
index 351afbf6bfbac1d7d86af2dc4a98c058dd9531e0..6a32a1fd34f8cd1306a43f1f14bd6e038ff2efe6 100644 (file)
@@ -683,7 +683,7 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
        return container_of(parser, struct sk_psock, parser);
 }
 
-static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
+static void sk_psock_skb_redirect(struct sk_buff *skb)
 {
        struct sk_psock *psock_other;
        struct sock *sk_other;
@@ -715,12 +715,11 @@ static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
        }
 }
 
-static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
-                                      struct sk_buff *skb, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
 {
        switch (verdict) {
        case __SK_REDIRECT:
-               sk_psock_skb_redirect(psock, skb);
+               sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
@@ -741,8 +740,8 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
+       sk_psock_tls_verdict_apply(skb, ret);
        rcu_read_unlock();
-       sk_psock_tls_verdict_apply(psock, skb, ret);
        return ret;
 }
 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
@@ -770,7 +769,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
                }
                goto out_free;
        case __SK_REDIRECT:
-               sk_psock_skb_redirect(psock, skb);
+               sk_psock_skb_redirect(skb);
                break;
        case __SK_DROP:
                /* fall-through */
@@ -782,11 +781,18 @@ out_free:
 
 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 {
-       struct sk_psock *psock = sk_psock_from_strp(strp);
+       struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
+       struct sock *sk;
 
        rcu_read_lock();
+       sk = strp->sk;
+       psock = sk_psock(sk);
+       if (unlikely(!psock)) {
+               kfree_skb(skb);
+               goto out;
+       }
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_orphan(skb);
@@ -794,8 +800,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
-       rcu_read_unlock();
        sk_psock_verdict_apply(psock, skb, ret);
+out:
+       rcu_read_unlock();
 }
 
 static int sk_psock_strp_read_done(struct strparser *strp, int err)
index d832c650287c375cd9e99e40c09f3ec354487716..2e5b7870e5d35d40de7d4fdc68bfe1bbf9c09de7 100644 (file)
@@ -1926,7 +1926,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                /* sk->sk_memcg will be populated at accept() time */
                newsk->sk_memcg = NULL;
 
-               cgroup_sk_alloc(&newsk->sk_cgrp_data);
+               cgroup_sk_clone(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
                filter = rcu_dereference(sk->sk_filter);
index 4059f94e9bb5bf7316227984151052b47e321b47..0971f17e8e5429427d48f60524eaf4c9d8f292f9 100644 (file)
@@ -70,11 +70,49 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
        struct fd f;
        int ret;
 
+       if (attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-       ret = sock_map_prog_update(map, prog, attr->attach_type);
+       ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
+       fdput(f);
+       return ret;
+}
+
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       u32 ufd = attr->target_fd;
+       struct bpf_prog *prog;
+       struct bpf_map *map;
+       struct fd f;
+       int ret;
+
+       if (attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       prog = bpf_prog_get(attr->attach_bpf_fd);
+       if (IS_ERR(prog)) {
+               ret = PTR_ERR(prog);
+               goto put_map;
+       }
+
+       if (prog->type != ptype) {
+               ret = -EINVAL;
+               goto put_prog;
+       }
+
+       ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
+put_prog:
+       bpf_prog_put(prog);
+put_map:
        fdput(f);
        return ret;
 }
@@ -1203,27 +1241,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
 }
 
 int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
-                        u32 which)
+                        struct bpf_prog *old, u32 which)
 {
        struct sk_psock_progs *progs = sock_map_progs(map);
+       struct bpf_prog **pprog;
 
        if (!progs)
                return -EOPNOTSUPP;
 
        switch (which) {
        case BPF_SK_MSG_VERDICT:
-               psock_set_prog(&progs->msg_parser, prog);
+               pprog = &progs->msg_parser;
                break;
        case BPF_SK_SKB_STREAM_PARSER:
-               psock_set_prog(&progs->skb_parser, prog);
+               pprog = &progs->skb_parser;
                break;
        case BPF_SK_SKB_STREAM_VERDICT:
-               psock_set_prog(&progs->skb_verdict, prog);
+               pprog = &progs->skb_verdict;
                break;
        default:
                return -EOPNOTSUPP;
        }
 
+       if (old)
+               return psock_replace_prog(pprog, prog, old);
+
+       psock_set_prog(pprog, prog);
        return 0;
 }
 
index f93f8ace6c561912fe7757d9eb0343d16aff2a37..6ada114bbcca24a2f70e9392f85de4a2d7b0353e 100644 (file)
@@ -274,7 +274,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (write && !ret) {
                if (jit_enable < 2 ||
-                   (jit_enable == 2 && bpf_dump_raw_ok())) {
+                   (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
                        *(int *)table->data = jit_enable;
                        if (jit_enable == 2)
                                pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
index 88fd07f47040ca57ca4a215b0d30d830a70c9ecc..dd8a1c1dc07ddbd120f293aa4e36feef060e9a18 100644 (file)
@@ -376,10 +376,17 @@ err_dev:
 }
 
 static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
-                                 const struct ethnl_dump_ctx *ctx)
+                                 const struct ethnl_dump_ctx *ctx,
+                                 struct netlink_callback *cb)
 {
+       void *ehdr;
        int ret;
 
+       ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                          &ethtool_genl_family, 0, ctx->ops->reply_cmd);
+       if (!ehdr)
+               return -EMSGSIZE;
+
        ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev);
        rtnl_lock();
        ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL);
@@ -395,6 +402,10 @@ out:
        if (ctx->ops->cleanup_data)
                ctx->ops->cleanup_data(ctx->reply_data);
        ctx->reply_data->dev = NULL;
+       if (ret < 0)
+               genlmsg_cancel(skb, ehdr);
+       else
+               genlmsg_end(skb, ehdr);
        return ret;
 }
 
@@ -411,7 +422,6 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
        int s_idx = ctx->pos_idx;
        int h, idx = 0;
        int ret = 0;
-       void *ehdr;
 
        rtnl_lock();
        for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
@@ -431,26 +441,15 @@ restart_chain:
                        dev_hold(dev);
                        rtnl_unlock();
 
-                       ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
-                                          cb->nlh->nlmsg_seq,
-                                          &ethtool_genl_family, 0,
-                                          ctx->ops->reply_cmd);
-                       if (!ehdr) {
-                               dev_put(dev);
-                               ret = -EMSGSIZE;
-                               goto out;
-                       }
-                       ret = ethnl_default_dump_one(skb, dev, ctx);
+                       ret = ethnl_default_dump_one(skb, dev, ctx, cb);
                        dev_put(dev);
                        if (ret < 0) {
-                               genlmsg_cancel(skb, ehdr);
                                if (ret == -EOPNOTSUPP)
                                        goto lock_and_cont;
                                if (likely(skb->len))
                                        ret = skb->len;
                                goto out;
                        }
-                       genlmsg_end(skb, ehdr);
 lock_and_cont:
                        rtnl_lock();
                        if (net->dev_base_seq != seq) {
index 478852ef98efb8ededf021d90fce8d6785f55eaa..a6f4e9f65b1486aa04f4bdb57f59acc5e7c83c8c 100644 (file)
@@ -415,6 +415,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec, u8 protocol_version,
                     struct netlink_ext_ack *extack)
 {
+       bool unregister = false;
        struct hsr_priv *hsr;
        int res;
 
@@ -466,25 +467,27 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        if (res)
                goto err_unregister;
 
+       unregister = true;
+
        res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
        if (res)
-               goto err_add_slaves;
+               goto err_unregister;
 
        res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
        if (res)
-               goto err_add_slaves;
+               goto err_unregister;
 
        hsr_debugfs_init(hsr, hsr_dev);
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
 
        return 0;
 
-err_add_slaves:
-       unregister_netdevice(hsr_dev);
 err_unregister:
        hsr_del_ports(hsr);
 err_add_master:
        hsr_del_self_node(hsr);
 
+       if (unregister)
+               unregister_netdevice(hsr_dev);
        return res;
 }
index 956a806649f7ef8df08c259acc4dbacc3d0e3ca3..e30515f898023abf821394c6374dd27fadeec9b1 100644 (file)
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 
        ipcm_init(&ipc);
        inet->tos = ip_hdr(skb)->tos;
-       sk->sk_mark = mark;
+       ipc.sockc.mark = mark;
        daddr = ipc.addr = ip_hdr(skb)->saddr;
        saddr = fib_compute_spec_dst(skb);
 
@@ -710,10 +710,10 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
        icmp_param.skb    = skb_in;
        icmp_param.offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
-       sk->sk_mark = mark;
        ipcm_init(&ipc);
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param.replyopts.opt;
+       ipc.sockc.mark = mark;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
                               type, code, &icmp_param);
index 090d3097ee15baa87695c530278761fb26534ad5..17206677d5033d15db6249801169e8191d61bcd6 100644 (file)
@@ -1702,7 +1702,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        sk->sk_sndbuf = sysctl_wmem_default;
-       sk->sk_mark = fl4.flowi4_mark;
+       ipc.sockc.mark = fl4.flowi4_mark;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
        if (unlikely(err)) {
index 181b7a2a024766a7370dbb657acac85ed1f49016..f8b419e2475c9f7ec55a2fb0e4c0906c1f5f7102 100644 (file)
@@ -844,3 +844,21 @@ void ip_tunnel_unneed_metadata(void)
        static_branch_dec(&ip_tunnel_metadata_cnt);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb)
+{
+       if (skb_network_header(skb) >= skb->head &&
+           (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) &&
+           ip_hdr(skb)->version == 4)
+               return htons(ETH_P_IP);
+       if (skb_network_header(skb) >= skb->head &&
+           (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) &&
+           ipv6_hdr(skb)->version == 6)
+               return htons(ETH_P_IPV6);
+       return 0;
+}
+EXPORT_SYMBOL(ip_tunnel_parse_protocol);
+
+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
+EXPORT_SYMBOL(ip_tunnel_header_ops);
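
The new ip_tunnel_parse_protocol() helper above decides the skb->protocol value by bounds-checking the network header and then looking at the IP version nibble. A rough userspace sketch of the same idea over a raw buffer, with illustrative constants standing in for the kernel's definitions:

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

#define EXAMPLE_ETH_P_IP   0x0800   /* stand-ins for ETH_P_IP / ETH_P_IPV6 */
#define EXAMPLE_ETH_P_IPV6 0x86DD

/* Return the network-order ethertype implied by the leading version nibble,
 * or 0 when the buffer is too short or the version is unknown. */
static uint16_t example_parse_protocol(const uint8_t *pkt, size_t len)
{
        if (len >= 20 && (pkt[0] >> 4) == 4)   /* minimal IPv4 header is 20 bytes */
                return htons(EXAMPLE_ETH_P_IP);
        if (len >= 40 && (pkt[0] >> 4) == 6)   /* fixed IPv6 header is 40 bytes */
                return htons(EXAMPLE_ETH_P_IPV6);
        return 0;
}

int main(void)
{
        uint8_t v4_pkt[20] = { 0x45 };         /* version 4, IHL 5 */

        return example_parse_protocol(v4_pkt, sizeof(v4_pkt)) ==
               htons(EXAMPLE_ETH_P_IP) ? 0 : 1;
}
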
index 1d9c8cff5ac332f3790540b4f02ef3d0157842ab..460ca1099e8acf82b639d4ec02393a93ec7ccd71 100644 (file)
@@ -441,6 +441,7 @@ static const struct net_device_ops vti_netdev_ops = {
 static void vti_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &vti_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->type               = ARPHRD_TUNNEL;
        ip_tunnel_setup(dev, vti_net_id);
 }
index 40fea52c82773fcaccb79ac94a155e4d7d30f8d5..75d35e76bec2c4b185c821156d84f31c2b7b0f7a 100644 (file)
@@ -361,6 +361,7 @@ static const struct net_device_ops ipip_netdev_ops = {
 static void ipip_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &ipip_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
 
        dev->type               = ARPHRD_TUNNEL;
        dev->flags              = IFF_NOARP;
index 535427292194eb57e8a268f882174710ccb76b30..df6fbefe44d4b4bc9507c5ac124da9ce4adaf39f 100644 (file)
@@ -786,6 +786,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                           inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
                           sk->sk_uid);
 
+       fl4.fl4_icmp_type = user_icmph.type;
+       fl4.fl4_icmp_code = user_icmph.code;
+
        security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
index 1d7076b78e630b7621cd7b87941edda9ebf1cc7c..a01efa062f6bcd16ef3e1f917a0a1397ac7a3507 100644 (file)
@@ -2027,7 +2027,7 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                      const struct sk_buff *hint)
 {
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rt = (struct rtable *)hint;
+       struct rtable *rt = skb_rtable(hint);
        struct net *net = dev_net(dev);
        int err = -EINVAL;
        u32 tag = 0;
index 810cc164f795f8e1e8ca747ed5df51bb20fec8a2..6f0caf9a866de912d1903a95bb63abe3f70f7578 100644 (file)
@@ -2691,6 +2691,9 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->window_clamp = 0;
        tp->delivered = 0;
        tp->delivered_ce = 0;
+       if (icsk->icsk_ca_ops->release)
+               icsk->icsk_ca_ops->release(sk);
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
        tcp_set_ca_state(sk, TCP_CA_Open);
        tp->is_sack_reneg = 0;
        tcp_clear_retrans(tp);
@@ -3246,10 +3249,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 #ifdef CONFIG_TCP_MD5SIG
        case TCP_MD5SIG:
        case TCP_MD5SIG_EXT:
-               if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
-                       err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
-               else
-                       err = -EINVAL;
+               err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
                break;
 #endif
        case TCP_USER_TIMEOUT:
@@ -4033,11 +4033,14 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data);
 
 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
 {
+       u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
        struct scatterlist sg;
 
-       sg_init_one(&sg, key->key, key->keylen);
-       ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
-       return crypto_ahash_update(hp->md5_req);
+       sg_init_one(&sg, key->key, keylen);
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
+
+       /* We use data_race() because tcp_md5_do_add() might change key->key under us */
+       return data_race(crypto_ahash_update(hp->md5_req));
 }
 EXPORT_SYMBOL(tcp_md5_hash_key);
 
index 3172e31987be4232af90e7b204742c5bb09ef6ca..62878cf26d9cc5c0ae44d5ecdadd0b7a5acf5365 100644 (file)
@@ -197,7 +197,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
        icsk->icsk_ca_setsockopt = 1;
        memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-       if (sk->sk_state != TCP_CLOSE)
+       if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                tcp_init_congestion_control(sk);
 }
 
index f3a0eb139b7633ebc1ddb801de232bcd3a0cbdc6..9615e72656d12e9c7298bf7087792d0209897b50 100644 (file)
@@ -4582,6 +4582,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+               sk->sk_data_ready(sk);
                tcp_drop(sk, skb);
                return;
        }
@@ -4828,6 +4829,7 @@ queue_and_out:
                        sk_forced_mem_schedule(sk, skb->truesize);
                else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+                       sk->sk_data_ready(sk);
                        goto drop;
                }
 
index ad6435ba6d72ffd8caf783bb25cad7ec151d6909..04bfcbbfee83aadf5bca0332275c57113abdbc75 100644 (file)
@@ -1111,9 +1111,21 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 
        key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
        if (key) {
-               /* Pre-existing entry - just update that one. */
-               memcpy(key->key, newkey, newkeylen);
-               key->keylen = newkeylen;
+               /* Pre-existing entry - just update that one.
+                * Note that the key might be used concurrently.
+                * data_race() tells KCSAN that we do not care about
+                * key mismatches, since changing the MD5 key on live
+                * flows can lead to packet drops.
+                */
+               data_race(memcpy(key->key, newkey, newkeylen));
+
+               /* Pairs with READ_ONCE() in tcp_md5_hash_key().
+                * Also note that a reader could catch the new key->keylen
+                * value but the old key->key[]; this is why we use __GFP_ZERO
+                * at sock_kmalloc() time below.
+                */
+               WRITE_ONCE(key->keylen, newkeylen);
+
                return 0;
        }
 
@@ -1129,7 +1141,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                rcu_assign_pointer(tp->md5sig_info, md5sig);
        }
 
-       key = sock_kmalloc(sk, sizeof(*key), gfp);
+       key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
        if (!key)
                return -ENOMEM;
        if (!tcp_alloc_md5sig_pool()) {
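
For the WRITE_ONCE()/READ_ONCE() pairing and the data_race() annotation in the two MD5 hunks above: the writer may change the key bytes and length while a reader is hashing, and the fix only promises that the reader works from a single length snapshot, with __GFP_ZERO covering the old-length/new-key window. A rough userspace analog using C11 atomics; the names are illustrative, not kernel APIs:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct example_key {
        _Atomic unsigned char keylen;
        unsigned char key[80];
};

/* Writer side: update the bytes first, then publish the new length. */
static void example_key_update(struct example_key *k, const char *newkey,
                               unsigned char newlen)
{
        memcpy(k->key, newkey, newlen);  /* may race with a reader (tolerated) */
        atomic_store_explicit(&k->keylen, newlen, memory_order_relaxed);
}

/* Reader side: take one snapshot of the length and use only that. */
static unsigned char example_key_len_snapshot(const struct example_key *k)
{
        return atomic_load_explicit(&k->keylen, memory_order_relaxed);
}

int main(void)
{
        struct example_key k = { 0 };

        example_key_update(&k, "secret", 6);
        printf("hashing %u key bytes\n",
               (unsigned int)example_key_len_snapshot(&k));
        return 0;
}
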
index a50e1990a845a258d4cc6a2a989d09068ea3a973..5f5b2f0b0e606530e661ee8c5ad7a94e8efee74b 100644 (file)
@@ -700,7 +700,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                                       unsigned int mss, struct sk_buff *skb,
                                       struct tcp_out_options *opts,
                                       const struct tcp_md5sig_key *md5,
-                                      struct tcp_fastopen_cookie *foc)
+                                      struct tcp_fastopen_cookie *foc,
+                                      enum tcp_synack_type synack_type)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -715,7 +716,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                 * rather than TS in order to fit in better with old,
                 * buggy kernels, but that was deemed to be unnecessary.
                 */
-               ireq->tstamp_ok &= !ireq->sack_ok;
+               if (synack_type != TCP_SYNACK_COOKIE)
+                       ireq->tstamp_ok &= !ireq->sack_ok;
        }
 #endif
 
@@ -3394,7 +3396,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 #endif
        skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
-                                            foc) + sizeof(*th);
+                                            foc, synack_type) + sizeof(*th);
 
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
index fc5000370030d67094ba11f15aaaaaa7ba519cde..9df8737ae0d3294319c5d91651b2b9317b0fba08 100644 (file)
@@ -566,7 +566,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
        fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
-       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -583,6 +582,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                fl6.flowi6_oif = np->ucast_oif;
 
        ipcm6_init_sk(&ipc6, np);
+       ipc6.sockc.mark = mark;
        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
        dst = icmpv6_route_lookup(net, skb, sk, &fl6);
@@ -751,7 +751,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        sk = icmpv6_xmit_lock(net);
        if (!sk)
                goto out_bh_enable;
-       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -779,6 +778,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        ipcm6_init_sk(&ipc6, np);
        ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
        ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+       ipc6.sockc.mark = mark;
 
        if (ip6_append_data(sk, icmpv6_getfrag, &msg,
                            skb->len + sizeof(struct icmp6hdr),
index 821d96c720b936ae732c388c02977eef213291bb..a18c378ca5f46a1648c86b4d4d7ff9da6e4422bb 100644 (file)
@@ -1846,6 +1846,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &ip6_tnl_netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6_dev_free;
 
index 1147f647b9a0f109f6178f7d11ce65e93ef8120c..0d964160a9dd559b6c2624ae129aaf899c9292ea 100644 (file)
@@ -905,6 +905,7 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &vti6_netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = vti6_dev_free;
 
index 82cbb46a2a4fe48c328e5c5522d00bb02019335d..f3279810d76523bcfc4ea0ce7e59f1ba6655340c 100644 (file)
@@ -431,9 +431,12 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
        struct fib6_info *sibling, *next_sibling;
        struct fib6_info *match = res->f6i;
 
-       if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
+       if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
                goto out;
 
+       if (match->nh && have_oif_match && res->nh)
+               return;
+
        /* We might have already computed the hash for ICMPv6 errors. In such
         * case it will always be non-zero. Otherwise now is the time to do it.
         */
@@ -3402,7 +3405,7 @@ static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
        if ((flags & RTF_REJECT) ||
            (dev && (dev->flags & IFF_LOOPBACK) &&
             !(addr_type & IPV6_ADDR_LOOPBACK) &&
-            !(flags & RTF_LOCAL)))
+            !(flags & (RTF_ANYCAST | RTF_LOCAL))))
                return true;
 
        return false;
index 1fbb4dfbb191bb82b7a8b32c11e74824533b08b2..5e2c34c0ac973643f2fdffee0e9c2d1214922425 100644 (file)
@@ -1421,6 +1421,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
        dev->netdev_ops         = &ipip6_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = ipip6_dev_free;
 
index 6d7ef78c88af059a4cbfb5d89f32ad6d1babfe74..6434d17e6e8eaccb1fad97615db78b38a765aaaa 100644 (file)
@@ -1028,6 +1028,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
 
        /* Queue the packet to IP for output */
        skb->ignore_df = 1;
+       skb_dst_drop(skb);
 #if IS_ENABLED(CONFIG_IPV6)
        if (l2tp_sk_is_v6(tunnel->sock))
                error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1099,10 +1100,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
                goto out_unlock;
        }
 
-       /* Get routing info from the tunnel socket */
-       skb_dst_drop(skb);
-       skb_dst_set(skb, sk_dst_check(sk, 0));
-
        inet = inet_sk(sk);
        fl = &inet->cork.fl;
        switch (tunnel->encap) {
index 54fb8d452a7b7398d8dab417664d6bb613f03723..6e53e43c19071cdb7f9b1ae35e227c24663366ff 100644 (file)
@@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;
+       if (!addr->sllc_arphrd)
+               addr->sllc_arphrd = ARPHRD_ETHER;
+       if (addr->sllc_arphrd != ARPHRD_ETHER)
+               goto out;
        rc = -ENODEV;
        if (sk->sk_bound_dev_if) {
                llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
@@ -328,7 +332,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
-       if (unlikely(addr->sllc_family != AF_LLC))
+       if (!addr->sllc_arphrd)
+               addr->sllc_arphrd = ARPHRD_ETHER;
+       if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER))
                goto out;
        dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
        rc = -ENODEV;
@@ -336,8 +342,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        if (sk->sk_bound_dev_if) {
                llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
                if (llc->dev) {
-                       if (!addr->sllc_arphrd)
-                               addr->sllc_arphrd = llc->dev->type;
                        if (is_zero_ether_addr(addr->sllc_mac))
                                memcpy(addr->sllc_mac, llc->dev->dev_addr,
                                       IFHWADDRLEN);
index aa5150929996d609ccf8075465e12c70dbd9fc18..02cde0fd08fe8616d8489ce7c174b15bdd88b214 100644 (file)
@@ -1105,11 +1105,8 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
                               ttl, lifetime, 0, ifmsh->preq_id++, sdata);
 
        spin_lock_bh(&mpath->state_lock);
-       if (mpath->flags & MESH_PATH_DELETED) {
-               spin_unlock_bh(&mpath->state_lock);
-               goto enddiscovery;
-       }
-       mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+       if (!(mpath->flags & MESH_PATH_DELETED))
+               mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
        spin_unlock_bh(&mpath->state_lock);
 
 enddiscovery:
index a88ab6fb16f20d738aaa74dcb21304bf2e08a4af..5c5af4b5fc080211f395cf8816937ee360b6ff9b 100644 (file)
@@ -2396,6 +2396,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
 
 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
 {
+       struct ieee80211_hdr *hdr = (void *)rx->skb->data;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
@@ -2406,6 +2407,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
        if (status->flag & RX_FLAG_DECRYPTED)
                return 0;
 
+       /* check mesh EAPOL frames first */
+       if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+                    ieee80211_is_data(fc))) {
+               struct ieee80211s_hdr *mesh_hdr;
+               u16 hdr_len = ieee80211_hdrlen(fc);
+               u16 ethertype_offset;
+               __be16 ethertype;
+
+               if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+                       goto drop_check;
+
+               /* make sure fixed part of mesh header is there, also checks skb len */
+               if (!pskb_may_pull(rx->skb, hdr_len + 6))
+                       goto drop_check;
+
+               mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+               ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+                                  sizeof(rfc1042_header);
+
+               if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+                   ethertype == rx->sdata->control_port_protocol)
+                       return 0;
+       }
+
+drop_check:
        /* Drop unencrypted frames if key is set. */
        if (unlikely(!ieee80211_has_protected(fc) &&
                     !ieee80211_is_any_nullfunc(fc) &&
index 7b1bacac39c6ee3a43d1a7755421d0c8f023d13a..cbc40b358ba264c6d8d68b434f4343d9d96bdd12 100644 (file)
@@ -639,11 +639,23 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
                struct ieee80211_sub_if_data *sdata;
                struct ieee80211_hdr *hdr = (void *)skb->data;
+               __be16 ethertype = 0;
+
+               if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3))
+                       skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN);
 
                rcu_read_lock();
                sdata = ieee80211_sdata_from_skb(local, skb);
                if (sdata) {
-                       if (ieee80211_is_any_nullfunc(hdr->frame_control))
+                       if (ethertype == sdata->control_port_protocol ||
+                           ethertype == cpu_to_be16(ETH_P_PREAUTH))
+                               cfg80211_control_port_tx_status(&sdata->wdev,
+                                                               cookie,
+                                                               skb->data,
+                                                               skb->len,
+                                                               acked,
+                                                               GFP_ATOMIC);
+                       else if (ieee80211_is_any_nullfunc(hdr->frame_control))
                                cfg80211_probe_status(sdata->dev, hdr->addr1,
                                                      cookie, acked,
                                                      info->status.ack_signal,
@@ -654,12 +666,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                                                        skb->data, skb->len,
                                                        acked, GFP_ATOMIC);
                        else
-                               cfg80211_control_port_tx_status(&sdata->wdev,
-                                                               cookie,
-                                                               skb->data,
-                                                               skb->len,
-                                                               acked,
-                                                               GFP_ATOMIC);
+                               pr_warn("Unknown status report in ack skb\n");
+
                }
                rcu_read_unlock();
 
index e9ce658141f51a2e6cc813dd03997e154a1f07a2..1a2941e5244fb8d8ce0a20d55816c67c047489d3 100644 (file)
@@ -3996,6 +3996,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        skb_list_walk_safe(skb, skb, next) {
                skb_mark_not_on_list(skb);
 
+               if (skb->protocol == sdata->control_port_protocol)
+                       ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
+
                skb = ieee80211_build_hdr(sdata, skb, info_flags,
                                          sta, ctrl_flags, cookie);
                if (IS_ERR(skb)) {
@@ -4206,7 +4209,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
            (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
                ra = sdata->u.mgd.bssid;
 
-       if (!is_valid_ether_addr(ra))
+       if (is_zero_ether_addr(ra))
                goto out_free;
 
        multicast = is_multicast_ether_addr(ra);
@@ -5371,7 +5374,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
                return -EINVAL;
 
        if (proto == sdata->control_port_protocol)
-               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
+                             IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
 
        if (unencrypted)
                flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
index df9a51425c6fc4769c06fcc0f09dbfa7ef554683..8f940be42f98a567b84a26d398200d3627d5efbc 100644 (file)
@@ -449,9 +449,9 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
 }
 
 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
-                                struct mptcp_ext *ext)
+                                struct sk_buff *skb, struct mptcp_ext *ext)
 {
-       if (!ext->use_map) {
+       if (!ext->use_map || !skb->len) {
                /* RFC6824 requires a DSS mapping with specific values
                 * if DATA_FIN is set but no data payload is mapped
                 */
@@ -503,7 +503,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
                        opts->ext_copy = *mpext;
 
                if (skb && tcp_fin && subflow->data_fin_tx_enable)
-                       mptcp_write_data_fin(subflow, &opts->ext_copy);
+                       mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
                ret = true;
        }
 
index 486959f70cf313b44b06fdaffef6827fbe04eaba..a8ce04a4bb72abef52524edad94dc7d69d39458a 100644 (file)
@@ -326,7 +326,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        set->variant = &bitmap_ip;
        if (!init_map_ip(set, map, first_ip, last_ip,
                         elements, hosts, netmask)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index 2310a316e0affc69676062f7d984126e6a0b8e83..2c625e0f49ec020581206445b3365663d9c19746 100644 (file)
@@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ipmac;
        if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index e56ced66f202d6d8b6e913f0349a9991583d4014..7138e080def4cfd7abf40a9ca485212773dda8ec 100644 (file)
@@ -274,7 +274,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_port;
        if (!init_map_port(set, map, first_port, last_port)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index 1ee43752d6d3ccb1eae1684369a8be5ea5b96656..521e970be4028de7659b437616f844744362f10b 100644 (file)
@@ -682,7 +682,7 @@ retry:
        }
        t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
        if (!t->hregion) {
-               kfree(t);
+               ip_set_free(t);
                ret = -ENOMEM;
                goto out;
        }
@@ -1533,7 +1533,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        }
        t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
        if (!t->hregion) {
-               kfree(t);
+               ip_set_free(t);
                kfree(h);
                return -ENOMEM;
        }
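
The four ipset hunks above replace kfree() with ip_set_free() on buffers that may have come from a vmalloc-style fallback allocator, so they must be released through the matching free routine. A userspace analog of that pairing rule, using malloc with an mmap fallback; the threshold and names are illustrative only:

#define _DEFAULT_SOURCE
#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>

#define EXAMPLE_FALLBACK_THRESHOLD (64 * 1024)

struct example_vhdr {
        size_t total;
        int    mapped;   /* remembers which allocator produced the buffer */
};

static void *example_alloc(size_t size)
{
        size_t total = sizeof(struct example_vhdr) + size;
        struct example_vhdr *h;

        if (total < EXAMPLE_FALLBACK_THRESHOLD) {
                h = malloc(total);
                if (!h)
                        return NULL;
                h->mapped = 0;
        } else {
                h = mmap(NULL, total, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (h == MAP_FAILED)
                        return NULL;
                h->mapped = 1;
        }
        h->total = total;
        return h + 1;
}

static void example_free(void *p)
{
        struct example_vhdr *h;

        if (!p)
                return;
        h = (struct example_vhdr *)p - 1;
        if (h->mapped)
                munmap(h, h->total);     /* a plain free() here would be the bug */
        else
                free(h);
}

int main(void)
{
        void *small = example_alloc(128);
        void *big   = example_alloc(1 << 20);

        example_free(small);
        example_free(big);
        return 0;
}
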
index 79cd9dde457b190ade6f737d19b0d34cf19b00d3..f33d72c5b06e123dd97c11933467b3197877d1d5 100644 (file)
@@ -2158,6 +2158,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
                err = __nf_conntrack_update(net, skb, ct, ctinfo);
                if (err < 0)
                        return err;
+
+               ct = nf_ct_get(skb, &ctinfo);
        }
 
        return nf_confirm_cthelper(skb, ct, ctinfo);
index 55ee680e9db180b2d2e6d56db194c006df8049d8..9395ee8a868dbedba93547cb7d948c53b21ec9cb 100644 (file)
@@ -351,22 +351,11 @@ int genl_register_family(struct genl_family *family)
                start = end = GENL_ID_VFS_DQUOT;
        }
 
-       if (family->maxattr && !family->parallel_ops) {
-               family->attrbuf = kmalloc_array(family->maxattr + 1,
-                                               sizeof(struct nlattr *),
-                                               GFP_KERNEL);
-               if (family->attrbuf == NULL) {
-                       err = -ENOMEM;
-                       goto errout_locked;
-               }
-       } else
-               family->attrbuf = NULL;
-
        family->id = idr_alloc_cyclic(&genl_fam_idr, family,
                                      start, end + 1, GFP_KERNEL);
        if (family->id < 0) {
                err = family->id;
-               goto errout_free;
+               goto errout_locked;
        }
 
        err = genl_validate_assign_mc_groups(family);
@@ -385,8 +374,6 @@ int genl_register_family(struct genl_family *family)
 
 errout_remove:
        idr_remove(&genl_fam_idr, family->id);
-errout_free:
-       kfree(family->attrbuf);
 errout_locked:
        genl_unlock_all();
        return err;
@@ -419,8 +406,6 @@ int genl_unregister_family(const struct genl_family *family)
                   atomic_read(&genl_sk_destructing_cnt) == 0);
        genl_unlock();
 
-       kfree(family->attrbuf);
-
        genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
 
        return 0;
@@ -485,30 +470,23 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
        if (!family->maxattr)
                return NULL;
 
-       if (family->parallel_ops) {
-               attrbuf = kmalloc_array(family->maxattr + 1,
-                                       sizeof(struct nlattr *), GFP_KERNEL);
-               if (!attrbuf)
-                       return ERR_PTR(-ENOMEM);
-       } else {
-               attrbuf = family->attrbuf;
-       }
+       attrbuf = kmalloc_array(family->maxattr + 1,
+                               sizeof(struct nlattr *), GFP_KERNEL);
+       if (!attrbuf)
+               return ERR_PTR(-ENOMEM);
 
        err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
                            family->policy, validate, extack);
        if (err) {
-               if (family->parallel_ops)
-                       kfree(attrbuf);
+               kfree(attrbuf);
                return ERR_PTR(err);
        }
        return attrbuf;
 }
 
-static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
-                                          struct nlattr **attrbuf)
+static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
 {
-       if (family->parallel_ops)
-               kfree(attrbuf);
+       kfree(attrbuf);
 }
 
 struct genl_start_context {
@@ -542,7 +520,7 @@ static int genl_start(struct netlink_callback *cb)
 no_attrs:
        info = genl_dumpit_info_alloc();
        if (!info) {
-               genl_family_rcv_msg_attrs_free(ctx->family, attrs);
+               genl_family_rcv_msg_attrs_free(attrs);
                return -ENOMEM;
        }
        info->family = ctx->family;
@@ -559,7 +537,7 @@ no_attrs:
        }
 
        if (rc) {
-               genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+               genl_family_rcv_msg_attrs_free(info->attrs);
                genl_dumpit_info_free(info);
                cb->data = NULL;
        }
@@ -588,7 +566,7 @@ static int genl_lock_done(struct netlink_callback *cb)
                rc = ops->done(cb);
                genl_unlock();
        }
-       genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+       genl_family_rcv_msg_attrs_free(info->attrs);
        genl_dumpit_info_free(info);
        return rc;
 }
@@ -601,7 +579,7 @@ static int genl_parallel_done(struct netlink_callback *cb)
 
        if (ops->done)
                rc = ops->done(cb);
-       genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+       genl_family_rcv_msg_attrs_free(info->attrs);
        genl_dumpit_info_free(info);
        return rc;
 }
@@ -694,7 +672,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
                family->post_doit(ops, skb, &info);
 
 out:
-       genl_family_rcv_msg_attrs_free(family, attrbuf);
+       genl_family_rcv_msg_attrs_free(attrbuf);
 
        return err;
 }
@@ -1166,60 +1144,11 @@ static struct genl_family genl_ctrl __ro_after_init = {
        .netnsok = true,
 };
 
-static int genl_bind(struct net *net, int group)
-{
-       struct genl_family *f;
-       int err = -ENOENT;
-       unsigned int id;
-
-       down_read(&cb_lock);
-
-       idr_for_each_entry(&genl_fam_idr, f, id) {
-               if (group >= f->mcgrp_offset &&
-                   group < f->mcgrp_offset + f->n_mcgrps) {
-                       int fam_grp = group - f->mcgrp_offset;
-
-                       if (!f->netnsok && net != &init_net)
-                               err = -ENOENT;
-                       else if (f->mcast_bind)
-                               err = f->mcast_bind(net, fam_grp);
-                       else
-                               err = 0;
-                       break;
-               }
-       }
-       up_read(&cb_lock);
-
-       return err;
-}
-
-static void genl_unbind(struct net *net, int group)
-{
-       struct genl_family *f;
-       unsigned int id;
-
-       down_read(&cb_lock);
-
-       idr_for_each_entry(&genl_fam_idr, f, id) {
-               if (group >= f->mcgrp_offset &&
-                   group < f->mcgrp_offset + f->n_mcgrps) {
-                       int fam_grp = group - f->mcgrp_offset;
-
-                       if (f->mcast_unbind)
-                               f->mcast_unbind(net, fam_grp);
-                       break;
-               }
-       }
-       up_read(&cb_lock);
-}
-
 static int __net_init genl_pernet_init(struct net *net)
 {
        struct netlink_kernel_cfg cfg = {
                .input          = genl_rcv,
                .flags          = NL_CFG_F_NONROOT_RECV,
-               .bind           = genl_bind,
-               .unbind         = genl_unbind,
        };
 
        /* we'll bump the group number right afterwards */
index 2d8d6131bc5f7e23b168af2dcd377d55c278cd8e..24a8c3c6da0dcaa43837e2f9efd117dacf5e856f 100644 (file)
@@ -166,6 +166,7 @@ static void __qrtr_node_release(struct kref *kref)
 {
        struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
        struct radix_tree_iter iter;
+       struct qrtr_tx_flow *flow;
        unsigned long flags;
        void __rcu **slot;
 
@@ -181,8 +182,9 @@ static void __qrtr_node_release(struct kref *kref)
 
        /* Free tx flow counters */
        radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
+               flow = *slot;
                radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
-               kfree(*slot);
+               kfree(flow);
        }
        kfree(node);
 }
@@ -427,7 +429,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
        unsigned int ver;
        size_t hdrlen;
 
-       if (len & 3)
+       if (len == 0 || len & 3)
                return -EINVAL;
 
        skb = netdev_alloc_skb(NULL, len);
@@ -441,6 +443,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 
        switch (ver) {
        case QRTR_PROTO_VER_1:
+               if (len < sizeof(*v1))
+                       goto err;
                v1 = data;
                hdrlen = sizeof(*v1);
 
@@ -454,6 +458,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                size = le32_to_cpu(v1->size);
                break;
        case QRTR_PROTO_VER_2:
+               if (len < sizeof(*v2))
+                       goto err;
                v2 = data;
                hdrlen = sizeof(*v2) + v2->optlen;
 
index ed7f2133acc2fb12a5419d0a96ca435dc7e3da78..f2fcab182095cbc39f991a4d2886aa2237aec1c8 100644 (file)
@@ -905,6 +905,17 @@ void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
 
+/* Check connectivity of all paths
+ */
+void rds_check_all_paths(struct rds_connection *conn)
+{
+       int i = 0;
+
+       do {
+               rds_conn_path_connect_if_down(&conn->c_path[i]);
+       } while (++i < conn->c_npaths);
+}
+
 void rds_conn_connect_if_down(struct rds_connection *conn)
 {
        WARN_ON(conn->c_trans->t_mp_capable);
index 6019b0c004a9df23fbaafd614c8b03492485b294..106e862996b94d377717745523b74b6db07e6a58 100644 (file)
@@ -778,6 +778,7 @@ void rds_conn_drop(struct rds_connection *conn);
 void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
 void rds_conn_connect_if_down(struct rds_connection *conn);
 void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
+void rds_check_all_paths(struct rds_connection *conn);
 void rds_for_each_conn_info(struct socket *sock, unsigned int len,
                          struct rds_info_iterator *iter,
                          struct rds_info_lengths *lens,
@@ -822,6 +823,12 @@ rds_conn_path_up(struct rds_conn_path *cp)
        return atomic_read(&cp->cp_state) == RDS_CONN_UP;
 }
 
+static inline int
+rds_conn_path_down(struct rds_conn_path *cp)
+{
+       return atomic_read(&cp->cp_state) == RDS_CONN_DOWN;
+}
+
 static inline int
 rds_conn_up(struct rds_connection *conn)
 {
index 68e2bdb08fd099fd930d0ea66ae037af6a3ba8d2..9a529a01cdc6a10a13d08efcd32947bf22290849 100644 (file)
@@ -1340,7 +1340,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                goto out;
        }
 
-       rds_conn_path_connect_if_down(cpath);
+       if (rds_conn_path_down(cpath))
+               rds_check_all_paths(conn);
 
        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret) {
index 43a243081e7d2a86624d31029dae20957f78f11c..f901421b0634d4408d4d7cc6881fa5e1716a471a 100644 (file)
@@ -43,17 +43,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&ca->tcf_tm);
        bstats_update(&ca->tcf_bstats, skb);
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                if (skb->len < sizeof(struct iphdr))
                        goto out;
 
                proto = NFPROTO_IPV4;
-       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               break;
+       case htons(ETH_P_IPV6):
                if (skb->len < sizeof(struct ipv6hdr))
                        goto out;
 
                proto = NFPROTO_IPV6;
-       } else {
+               break;
+       default:
                goto out;
        }
 
index cb8608f0a77a2a88671da430c79399c1fab0d77d..c60674cf25c4fd5d30484f72cc23e974a285aa23 100644 (file)
@@ -587,7 +587,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
                goto drop;
 
        update_flags = params->update_flags;
-       protocol = tc_skb_protocol(skb);
+       protocol = skb_protocol(skb, false);
 again:
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
index e9f3576cbf71ab702209337259f2105921497714..67504aece9ae5d4ad1773b644881c65d5213aee7 100644 (file)
@@ -624,7 +624,7 @@ static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
 {
        u8 family = NFPROTO_UNSPEC;
 
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                family = NFPROTO_IPV4;
                break;
@@ -748,6 +748,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
                          const struct nf_nat_range2 *range,
                          enum nf_nat_manip_type maniptype)
 {
+       __be16 proto = skb_protocol(skb, true);
        int hooknum, err = NF_ACCEPT;
 
        /* See HOOK2MANIP(). */
@@ -759,14 +760,13 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
-               if (skb->protocol == htons(ETH_P_IP) &&
+               if (proto == htons(ETH_P_IP) &&
                    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
                                                           hooknum))
                                err = NF_DROP;
                        goto out;
-               } else if (IS_ENABLED(CONFIG_IPV6) &&
-                          skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
                        __be16 frag_off;
                        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
                        int hdrlen = ipv6_skip_exthdr(skb,
@@ -925,6 +925,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        force = p->ct_action & TCA_CT_ACT_FORCE;
        tmpl = p->tmpl;
 
+       tcf_lastuse_update(&c->tcf_tm);
+
        if (clear) {
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
@@ -1550,4 +1552,3 @@ MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
 MODULE_DESCRIPTION("Connection tracking action");
 MODULE_LICENSE("GPL v2");
-
index 19649623493b158b3008c82ce2409ae80ffa6dc6..b5042f3ea079e2e4d77bb0786c3761ec5e93f7da 100644 (file)
@@ -96,19 +96,22 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
        action = READ_ONCE(ca->tcf_action);
 
        wlen = skb_network_offset(skb);
-       if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                wlen += sizeof(struct iphdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;
 
                proto = NFPROTO_IPV4;
-       } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
+               break;
+       case htons(ETH_P_IPV6):
                wlen += sizeof(struct ipv6hdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;
 
                proto = NFPROTO_IPV6;
-       } else {
+               break;
+       default:
                goto out;
        }
 
index be3f215cd0277b1a32e8da8634b5edd2f32b25af..8118e26409796aff8ccfe8569470b829dd6e6ad0 100644 (file)
@@ -82,7 +82,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
                        goto drop;
                break;
        case TCA_MPLS_ACT_PUSH:
-               new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
+               new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true)));
                if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
                                  skb->dev && skb->dev->type == ARPHRD_ETHER))
                        goto drop;
index b125b2be4467a46c57208c21385a41bf722f6fc4..b2b3faa57294c5caed90461ea74be8038c5352ad 100644 (file)
@@ -41,7 +41,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
        if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
                int wlen = skb_network_offset(skb);
 
-               switch (tc_skb_protocol(skb)) {
+               switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen))
index faa78b7dd96220ba4e222757eef597e9e704750d..e62beec0d844055d955d848ac604fa19ab50fe3d 100644 (file)
@@ -1538,7 +1538,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
 reclassify:
 #endif
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
-               __be16 protocol = tc_skb_protocol(skb);
+               __be16 protocol = skb_protocol(skb, false);
                int err;
 
                if (tp->protocol != protocol &&
index 80ae7b9fa90affd5bae5647aada9c556d1f2e65b..ab53a93b2f2ba94c5b8dbad647bc6f6f6a8e5465 100644 (file)
@@ -80,7 +80,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
        if (dst)
                return ntohl(dst);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+       return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
 }
 
 static u32 flow_get_proto(const struct sk_buff *skb,
@@ -104,7 +104,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb,
        if (flow->ports.ports)
                return ntohs(flow->ports.dst);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+       return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
 }
 
 static u32 flow_get_iif(const struct sk_buff *skb)
@@ -151,7 +151,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 static u32 flow_get_nfct_src(const struct sk_buff *skb,
                             const struct flow_keys *flow)
 {
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, src.u3.ip));
        case htons(ETH_P_IPV6):
@@ -164,7 +164,7 @@ fallback:
 static u32 flow_get_nfct_dst(const struct sk_buff *skb,
                             const struct flow_keys *flow)
 {
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, dst.u3.ip));
        case htons(ETH_P_IPV6):
index b2da3728608225a05e40c8a8c1d48964aa944056..e30bd969fc485e7c018463a7333949f32ff055ab 100644 (file)
@@ -313,7 +313,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                /* skb_flow_dissect() does not set n_proto in case an unknown
                 * protocol, so do it rather here.
                 */
-               skb_key.basic.n_proto = skb->protocol;
+               skb_key.basic.n_proto = skb_protocol(skb, false);
                skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
                skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
                                    fl_ct_info_to_flower_map,
index df00566d327de89788a00c96a4db693415b8ded4..c95cf86fb431ab8e4c82a0914fa3bbc401e17c6d 100644 (file)
@@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
        };
        int ret, network_offset;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                state.pf = NFPROTO_IPV4;
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
index 18755d29fd1594040dfea24387be8b48c8350241..3650117da47f129df4e5e5b01b68ca0dec2bc7d6 100644 (file)
@@ -212,7 +212,7 @@ static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em,
        struct nf_hook_state state;
        int ret;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
                        return 0;
index d99966a55c84fa0f5142ed72faeceb9baab86f5e..46254968d390fc50a1b77c1bf94971ded5233142 100644 (file)
@@ -195,7 +195,7 @@ META_COLLECTOR(int_priority)
 META_COLLECTOR(int_protocol)
 {
        /* Let userspace take care of the byte ordering */
-       dst->value = tc_skb_protocol(skb);
+       dst->value = skb_protocol(skb, false);
 }
 
 META_COLLECTOR(int_pkttype)
index ee12ca9f55b4fd59a6c0b826ad961ac3984c3ba2..1c281cc81f57789b787c77622cc2c026414f9c9d 100644 (file)
@@ -553,16 +553,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
        if (!p->link.q)
                p->link.q = &noop_qdisc;
        pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
+       p->link.vcc = NULL;
+       p->link.sock = NULL;
+       p->link.common.classid = sch->handle;
+       p->link.ref = 1;
 
        err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
                            extack);
        if (err)
                return err;
 
-       p->link.vcc = NULL;
-       p->link.sock = NULL;
-       p->link.common.classid = sch->handle;
-       p->link.ref = 1;
        tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
        return 0;
 }
index ca813697728eb795ff0fcd97c63cbe5d64c9f925..ebaeec1e5c82d99b25ac8bdfe24a878f52469e4b 100644 (file)
@@ -592,7 +592,7 @@ static bool cake_update_flowkeys(struct flow_keys *keys,
        bool rev = !skb->_nfct, upd = false;
        __be32 ip;
 
-       if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+       if (skb_protocol(skb, true) != htons(ETH_P_IP))
                return false;
 
        if (!nf_ct_get_tuple_skb(&tuple, skb))
@@ -1557,7 +1557,7 @@ static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
        u16 *buf, buf_;
        u8 dscp;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
                if (unlikely(!buf))
index 05605b30bef3abac1da2e0e821c871a54b3635ba..2b88710994d71e5339f82ef3fe53ecce961ea42f 100644 (file)
@@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        if (p->set_tc_index) {
                int wlen = skb_network_offset(skb);
 
-               switch (tc_skb_protocol(skb)) {
+               switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen) ||
@@ -303,7 +303,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
        index = skb->tc_index & (p->indices - 1);
        pr_debug("index %d->%d\n", skb->tc_index, index);
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
                                    p->mv[index].value);
@@ -320,7 +320,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 */
                if (p->mv[index].mask != 0xff || p->mv[index].value)
                        pr_warn("%s: unsupported protocol %d\n",
-                               __func__, ntohs(tc_skb_protocol(skb)));
+                               __func__, ntohs(skb_protocol(skb, true)));
                break;
        }
 
index 689ef6f3ded80968a46aaccc9dc6fd4db9506a85..2f1f0a3784083088bf9cfeb3b4c84e1391fb9e54 100644 (file)
@@ -239,7 +239,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
                char haddr[MAX_ADDR_LEN];
 
                neigh_ha_snapshot(haddr, n, dev);
-               err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+               err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)),
                                      haddr, NULL, skb->len);
 
                if (err < 0)
index d5627df24215d934d4a2ce55619f5e863d73589b..779f4142a11d8b377758586070e1b881baaf4379 100644 (file)
@@ -27,6 +27,7 @@
 
 #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
+#define SMC_CLC_RECV_BUF_LEN   100
 
 /* eye catcher "SMCR" EBCDIC for CLC messages */
 static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
@@ -36,7 +37,7 @@ static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
 /* check if received message has a correct header length and contains valid
  * heading and trailing eyecatchers
  */
-static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
 {
        struct smc_clc_msg_proposal_prefix *pclc_prfx;
        struct smc_clc_msg_accept_confirm *clc;
@@ -49,12 +50,9 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
                return false;
        switch (clcm->type) {
        case SMC_CLC_PROPOSAL:
-               if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
-                   clcm->path != SMC_TYPE_B)
-                       return false;
                pclc = (struct smc_clc_msg_proposal *)clcm;
                pclc_prfx = smc_clc_proposal_get_prefix(pclc);
-               if (ntohs(pclc->hdr.length) !=
+               if (ntohs(pclc->hdr.length) <
                        sizeof(*pclc) + ntohs(pclc->iparea_offset) +
                        sizeof(*pclc_prfx) +
                        pclc_prfx->ipv6_prefixes_cnt *
@@ -86,7 +84,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
        default:
                return false;
        }
-       if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+       if (check_trl &&
+           memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
            memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
                return false;
        return true;
@@ -276,7 +275,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        struct msghdr msg = {NULL, 0};
        int reason_code = 0;
        struct kvec vec = {buf, buflen};
-       int len, datlen;
+       int len, datlen, recvlen;
+       bool check_trl = true;
        int krflags;
 
        /* peek the first few bytes to determine length of data to receive
@@ -320,10 +320,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        }
        datlen = ntohs(clcm->length);
        if ((len < sizeof(struct smc_clc_msg_hdr)) ||
-           (datlen > buflen) ||
-           (clcm->version != SMC_CLC_V1) ||
-           (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
-            clcm->path != SMC_TYPE_B) ||
+           (clcm->version < SMC_CLC_V1) ||
            ((clcm->type != SMC_CLC_DECLINE) &&
             (clcm->type != expected_type))) {
                smc->sk.sk_err = EPROTO;
@@ -331,16 +328,38 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                goto out;
        }
 
+       if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N)
+               reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */
+
        /* receive the complete CLC message */
        memset(&msg, 0, sizeof(struct msghdr));
-       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
+       if (datlen > buflen) {
+               check_trl = false;
+               recvlen = buflen;
+       } else {
+               recvlen = datlen;
+       }
+       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
        krflags = MSG_WAITALL;
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
-       if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
+       if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
                smc->sk.sk_err = EPROTO;
                reason_code = -EPROTO;
                goto out;
        }
+       datlen -= len;
+       while (datlen) {
+               u8 tmp[SMC_CLC_RECV_BUF_LEN];
+
+               vec.iov_base = &tmp;
+               vec.iov_len = SMC_CLC_RECV_BUF_LEN;
+               /* receive remaining proposal message */
+               recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
+                                               SMC_CLC_RECV_BUF_LEN : datlen;
+               iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
+               len = sock_recvmsg(smc->clcsock, &msg, krflags);
+               datlen -= len;
+       }
        if (clcm->type == SMC_CLC_DECLINE) {
                struct smc_clc_msg_decline *dclc;
 
index 465876701b7556aedb32dfc85f2fba0f8ecb738f..76c2b150d040f3b4a10ae4f6e6c8d922a3107443 100644 (file)
@@ -25,6 +25,7 @@
 #define SMC_CLC_V1             0x1             /* SMC version                */
 #define SMC_TYPE_R             0               /* SMC-R only                 */
 #define SMC_TYPE_D             1               /* SMC-D only                 */
+#define SMC_TYPE_N             2               /* neither SMC-R nor SMC-D    */
 #define SMC_TYPE_B             3               /* SMC-R and SMC-D            */
 #define CLC_WAIT_TIME          (6 * HZ)        /* max. wait time on clcsock  */
 #define CLC_WAIT_TIME_SHORT    HZ              /* short wait time on clcsock */
@@ -46,6 +47,7 @@
 #define SMC_CLC_DECL_ISMVLANERR        0x03090000  /* err to reg vlan id on ism dev  */
 #define SMC_CLC_DECL_NOACTLINK 0x030a0000  /* no active smc-r link in lgr    */
 #define SMC_CLC_DECL_NOSRVLINK 0x030b0000  /* SMC-R link from srv not found  */
+#define SMC_CLC_DECL_VERSMISMAT        0x030c0000  /* SMC version mismatch           */
 #define SMC_CLC_DECL_SYNCERR   0x04000000  /* synchronization error          */
 #define SMC_CLC_DECL_PEERDECL  0x05000000  /* peer declined during handshake */
 #define SMC_CLC_DECL_INTERR    0x09990000  /* internal error                 */
index 7964a21e5e6fb51aef1abeeee2ae48c4c2ff8a8d..f69d205b3e11e51f7f89e5bdcdd0174cc699124e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/workqueue.h>
 #include <linux/wait.h>
 #include <linux/reboot.h>
+#include <linux/mutex.h>
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <rdma/ib_verbs.h>
@@ -247,7 +248,8 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
                if (smc_link_usable(lnk))
                        lnk->state = SMC_LNK_INACTIVE;
        }
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
+       wake_up_all(&lgr->llc_flow_waiter);
 }
 
 static void smc_lgr_free(struct smc_link_group *lgr);
@@ -1130,18 +1132,19 @@ static void smcr_link_up(struct smc_link_group *lgr,
                        return;
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* some other llc task is ongoing */
-                       wait_event_interruptible_timeout(lgr->llc_waiter,
-                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+                       wait_event_timeout(lgr->llc_flow_waiter,
+                               (list_empty(&lgr->list) ||
+                                lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
                                SMC_LLC_WAIT_TIME);
                }
-               if (list_empty(&lgr->list) ||
-                   !smc_ib_port_active(smcibdev, ibport))
-                       return; /* lgr or device no longer active */
-               link = smc_llc_usable_link(lgr);
-               if (!link)
-                       return;
-               smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
-                                     NULL, SMC_LLC_REQ);
+               /* lgr or device no longer active? */
+               if (!list_empty(&lgr->list) &&
+                   smc_ib_port_active(smcibdev, ibport))
+                       link = smc_llc_usable_link(lgr);
+               if (link)
+                       smc_llc_send_add_link(link, smcibdev->mac[ibport - 1],
+                                             gid, NULL, SMC_LLC_REQ);
+               wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
        }
 }
 
@@ -1195,13 +1198,17 @@ static void smcr_link_down(struct smc_link *lnk)
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* another llc task is ongoing */
                        mutex_unlock(&lgr->llc_conf_mutex);
-                       wait_event_interruptible_timeout(lgr->llc_waiter,
-                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+                       wait_event_timeout(lgr->llc_flow_waiter,
+                               (list_empty(&lgr->list) ||
+                                lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
                                SMC_LLC_WAIT_TIME);
                        mutex_lock(&lgr->llc_conf_mutex);
                }
-               smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true,
-                                        SMC_LLC_DEL_LOST_PATH);
+               if (!list_empty(&lgr->list))
+                       smc_llc_send_delete_link(to_lnk, del_link_id,
+                                                SMC_LLC_REQ, true,
+                                                SMC_LLC_DEL_LOST_PATH);
+               wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
        }
 }
 
@@ -1262,7 +1269,7 @@ static void smc_link_down_work(struct work_struct *work)
 
        if (list_empty(&lgr->list))
                return;
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
        mutex_lock(&lgr->llc_conf_mutex);
        smcr_link_down(link);
        mutex_unlock(&lgr->llc_conf_mutex);
@@ -1955,20 +1962,20 @@ static void smc_core_going_away(void)
        struct smc_ib_device *smcibdev;
        struct smcd_dev *smcd;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
                int i;
 
                for (i = 0; i < SMC_MAX_PORTS; i++)
                        set_bit(i, smcibdev->ports_going_away);
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list) {
                smcd->going_away = 1;
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 /* Clean up all SMC link groups */
@@ -1980,10 +1987,10 @@ static void smc_lgrs_shutdown(void)
 
        smc_smcr_terminate_all(NULL);
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list)
                smc_smcd_terminate_all(smcd);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 static int smc_core_reboot_event(struct notifier_block *this,
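Two patterns repeat in the smc_core hunks above: the SMC device lists are now protected by a mutex instead of a spinlock (presumably because code executed while holding the lock may sleep), and the LLC waits gained a list_empty(&lgr->list) escape so waiters give up once the link group is being removed. The following is a portable pthread sketch of the second pattern only, a timed wait whose predicate also honours a "going away" flag; all names here are illustrative.

/* Sketch: timed wait that also gives up when the owning object is being
 * torn down, similar to the list_empty(&lgr->list) checks added above.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct group {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool flow_busy;		/* condition the waiter is interested in */
	bool going_away;	/* set when the group is removed */
};

/* Returns 0 when the flow became free, -ETIMEDOUT or -ENODEV otherwise. */
static int wait_for_flow(struct group *g, int timeout_sec)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&g->lock);
	while (g->flow_busy && !g->going_away) {
		if (pthread_cond_timedwait(&g->cond, &g->lock, &deadline) ==
		    ETIMEDOUT) {
			rc = -ETIMEDOUT;
			break;
		}
	}
	if (!rc && g->going_away)
		rc = -ENODEV;
	pthread_mutex_unlock(&g->lock);
	return rc;
}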
index 86d160f0d187b450426f0623d812acd1be37cab8..c3ff512fd8911ffedb3b9e2915566684515995a9 100644 (file)
@@ -262,8 +262,10 @@ struct smc_link_group {
                        struct work_struct      llc_del_link_work;
                        struct work_struct      llc_event_work;
                                                /* llc event worker */
-                       wait_queue_head_t       llc_waiter;
+                       wait_queue_head_t       llc_flow_waiter;
                                                /* w4 next llc event */
+                       wait_queue_head_t       llc_msg_waiter;
+                                               /* w4 next llc msg */
                        struct smc_llc_flow     llc_flow_lcl;
                                                /* llc local control field */
                        struct smc_llc_flow     llc_flow_rmt;
index 562a52d01ad161749cb9bbd5a9398416bd292e55..7637fdebbb78f7b7d02ec94cac800c765d54ad04 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/scatterlist.h>
 #include <linux/wait.h>
+#include <linux/mutex.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
 
@@ -33,7 +34,7 @@
 #define SMC_QP_RNR_RETRY                       7 /* 7: infinite */
 
 struct smc_ib_devices smc_ib_devices = {       /* smc-registered ib devices */
-       .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
+       .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
        .list = LIST_HEAD_INIT(smc_ib_devices.list),
 };
 
@@ -565,9 +566,9 @@ static int smc_ib_add_dev(struct ib_device *ibdev)
        INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
        atomic_set(&smcibdev->lnk_cnt, 0);
        init_waitqueue_head(&smcibdev->lnks_deleted);
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_add_tail(&smcibdev->list, &smc_ib_devices.list);
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
        INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
                              smc_ib_global_event_handler);
@@ -602,9 +603,9 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
 {
        struct smc_ib_device *smcibdev = client_data;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        pr_warn_ratelimited("smc: removing ib device %s\n",
                            smcibdev->ibdev->name);
        smc_smcr_terminate_all(smcibdev);
index e6a696ae15f3e51fe1d72ab8f0c271324532aecb..ae6776e1e7264ae25590b8a1d643a09ad915bf87 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
+#include <linux/mutex.h>
 #include <linux/wait.h>
 #include <rdma/ib_verbs.h>
 #include <net/smc.h>
@@ -25,7 +26,7 @@
 
 struct smc_ib_devices {                        /* list of smc ib devices definition */
        struct list_head        list;
-       spinlock_t              lock;   /* protects list of smc ib devices */
+       struct mutex            mutex;  /* protects list of smc ib devices */
 };
 
 extern struct smc_ib_devices   smc_ib_devices; /* list of smc ib devices */
index 91f85fc09fb8dacf2d87b118efedba9f6f558e0e..998c525de785690426929732bdfc73f32dc731b0 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <asm/page.h>
 
@@ -17,7 +18,7 @@
 
 struct smcd_dev_list smcd_dev_list = {
        .list = LIST_HEAD_INIT(smcd_dev_list.list),
-       .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
+       .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
 };
 
 /* Test if an ISM communication is possible. */
@@ -317,9 +318,9 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
 
 int smcd_register_dev(struct smcd_dev *smcd)
 {
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_add_tail(&smcd->list, &smcd_dev_list.list);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 
        pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
                            dev_name(&smcd->dev), smcd->pnetid,
@@ -333,9 +334,9 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
 {
        pr_warn_ratelimited("smc: removing smcd device %s\n",
                            dev_name(&smcd->dev));
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_del_init(&smcd->list);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        smcd->going_away = 1;
        smc_smcd_terminate_all(smcd);
        flush_workqueue(smcd->event_wq);
index 4da946cbfa29c2d7c233f16c983eaf4b738bd6bb..81cc4537efd38592cf26e01c809f1752e48489bf 100644 (file)
 #define SMCD_ISM_H
 
 #include <linux/uio.h>
+#include <linux/mutex.h>
 
 #include "smc.h"
 
 struct smcd_dev_list { /* List of SMCD devices */
        struct list_head list;
-       spinlock_t lock;        /* Protects list of devices */
+       struct mutex mutex;     /* Protects list of devices */
 };
 
 extern struct smcd_dev_list    smcd_dev_list; /* list of smcd devices */
index 391237b601fed2e41f6fde6aa3bea34bebf180c8..c1a038689c63cbe9c985b2a6c635c28131e128fe 100644 (file)
@@ -186,6 +186,26 @@ static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
        flow->qentry = qentry;
 }
 
+static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
+                                 struct smc_llc_qentry *qentry)
+{
+       u8 msg_type = qentry->msg.raw.hdr.common.type;
+
+       if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
+           flow_type != msg_type && !lgr->delayed_event) {
+               lgr->delayed_event = qentry;
+               return;
+       }
+       /* drop parallel or already-in-progress llc requests */
+       if (flow_type != msg_type)
+               pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
+                            "LLC msg: msg %d flow %d role %d\n",
+                            SMC_LGR_ID_SIZE, &lgr->id,
+                            qentry->msg.raw.hdr.common.type,
+                            flow_type, lgr->role);
+       kfree(qentry);
+}
+
 /* try to start a new llc flow, initiated by an incoming llc msg */
 static bool smc_llc_flow_start(struct smc_llc_flow *flow,
                               struct smc_llc_qentry *qentry)
@@ -195,14 +215,7 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
        spin_lock_bh(&lgr->llc_flow_lock);
        if (flow->type) {
                /* a flow is already active */
-               if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
-                    qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
-                   !lgr->delayed_event) {
-                       lgr->delayed_event = qentry;
-               } else {
-                       /* forget this llc request */
-                       kfree(qentry);
-               }
+               smc_llc_flow_parallel(lgr, flow->type, qentry);
                spin_unlock_bh(&lgr->llc_flow_lock);
                return false;
        }
@@ -222,8 +235,8 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
        }
        if (qentry == lgr->delayed_event)
                lgr->delayed_event = NULL;
-       spin_unlock_bh(&lgr->llc_flow_lock);
        smc_llc_flow_qentry_set(flow, qentry);
+       spin_unlock_bh(&lgr->llc_flow_lock);
        return true;
 }
 
@@ -251,11 +264,11 @@ again:
                return 0;
        }
        spin_unlock_bh(&lgr->llc_flow_lock);
-       rc = wait_event_interruptible_timeout(lgr->llc_waiter,
-                       (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
-                        (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
-                         lgr->llc_flow_rmt.type == allowed_remote)),
-                       SMC_LLC_WAIT_TIME);
+       rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
+                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
+                                (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
+                                 lgr->llc_flow_rmt.type == allowed_remote))),
+                               SMC_LLC_WAIT_TIME * 10);
        if (!rc)
                return -ETIMEDOUT;
        goto again;
@@ -272,7 +285,7 @@ void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
            flow == &lgr->llc_flow_lcl)
                schedule_work(&lgr->llc_event_work);
        else
-               wake_up_interruptible(&lgr->llc_waiter);
+               wake_up(&lgr->llc_flow_waiter);
 }
 
 /* lnk is optional and used for early wakeup when link goes down, useful in
@@ -283,26 +296,32 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
                                    int time_out, u8 exp_msg)
 {
        struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
+       u8 rcv_msg;
 
-       wait_event_interruptible_timeout(lgr->llc_waiter,
-                                        (flow->qentry ||
-                                         (lnk && !smc_link_usable(lnk)) ||
-                                         list_empty(&lgr->list)),
-                                        time_out);
+       wait_event_timeout(lgr->llc_msg_waiter,
+                          (flow->qentry ||
+                           (lnk && !smc_link_usable(lnk)) ||
+                           list_empty(&lgr->list)),
+                          time_out);
        if (!flow->qentry ||
            (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
                smc_llc_flow_qentry_del(flow);
                goto out;
        }
-       if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
+       rcv_msg = flow->qentry->msg.raw.hdr.common.type;
+       if (exp_msg && rcv_msg != exp_msg) {
                if (exp_msg == SMC_LLC_ADD_LINK &&
-                   flow->qentry->msg.raw.hdr.common.type ==
-                   SMC_LLC_DELETE_LINK) {
+                   rcv_msg == SMC_LLC_DELETE_LINK) {
                        /* flow_start will delay the unexpected msg */
                        smc_llc_flow_start(&lgr->llc_flow_lcl,
                                           smc_llc_flow_qentry_clr(flow));
                        return NULL;
                }
+               pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
+                            "msg %d exp %d flow %d role %d flags %x\n",
+                            SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
+                            flow->type, lgr->role,
+                            flow->qentry->msg.raw.hdr.flags);
                smc_llc_flow_qentry_del(flow);
        }
 out:
@@ -1222,8 +1241,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
        smc_llc_send_message(lnk, &qentry->msg); /* response */
 
        if (smc_link_downing(&lnk_del->state)) {
-               smc_switch_conns(lgr, lnk_del, false);
-               smc_wr_tx_wait_no_pending_sends(lnk_del);
+               if (smc_switch_conns(lgr, lnk_del, false))
+                       smc_wr_tx_wait_no_pending_sends(lnk_del);
        }
        smcr_link_clear(lnk_del, true);
 
@@ -1297,8 +1316,8 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
                goto out; /* asymmetric link already deleted */
 
        if (smc_link_downing(&lnk_del->state)) {
-               smc_switch_conns(lgr, lnk_del, false);
-               smc_wr_tx_wait_no_pending_sends(lnk_del);
+               if (smc_switch_conns(lgr, lnk_del, false))
+                       smc_wr_tx_wait_no_pending_sends(lnk_del);
        }
        if (!list_empty(&lgr->list)) {
                /* qentry is either a request from peer (send it back to
@@ -1459,7 +1478,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* a flow is waiting for this message */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_add_link_work);
@@ -1474,7 +1493,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* a flow is waiting for this message */
                        smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
-                       wake_up_interruptible(&lgr->llc_waiter);
+                       wake_up(&lgr->llc_msg_waiter);
                        return;
                }
                break;
@@ -1485,7 +1504,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* DEL LINK REQ during ADD LINK SEQ */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_del_link_work);
@@ -1496,7 +1515,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* DEL LINK REQ during ADD LINK SEQ */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_del_link_work);
@@ -1581,7 +1600,7 @@ static void smc_llc_rx_response(struct smc_link *link,
        case SMC_LLC_DELETE_RKEY:
                /* assign responses to the local flow, we requested them */
                smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
-               wake_up_interruptible(&link->lgr->llc_waiter);
+               wake_up(&link->lgr->llc_msg_waiter);
                return;
        case SMC_LLC_CONFIRM_RKEY_CONT:
                /* not used because max links is 3 */
@@ -1616,7 +1635,7 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
        spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
        list_add_tail(&qentry->list, &lgr->llc_event_q);
        spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
-       schedule_work(&link->lgr->llc_event_work);
+       schedule_work(&lgr->llc_event_work);
 }
 
 /* copy received msg and add it to the event queue */
@@ -1677,7 +1696,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
        INIT_LIST_HEAD(&lgr->llc_event_q);
        spin_lock_init(&lgr->llc_event_q_lock);
        spin_lock_init(&lgr->llc_flow_lock);
-       init_waitqueue_head(&lgr->llc_waiter);
+       init_waitqueue_head(&lgr->llc_flow_waiter);
+       init_waitqueue_head(&lgr->llc_msg_waiter);
        mutex_init(&lgr->llc_conf_mutex);
        lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
 }
@@ -1686,7 +1706,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
 void smc_llc_lgr_clear(struct smc_link_group *lgr)
 {
        smc_llc_event_flush(lgr);
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_flow_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
        cancel_work_sync(&lgr->llc_event_work);
        cancel_work_sync(&lgr->llc_add_link_work);
        cancel_work_sync(&lgr->llc_del_link_work);
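The smc_llc hunks split the single llc_waiter into llc_msg_waiter (a running flow waiting for its next message) and llc_flow_waiter (tasks waiting to start a flow), so waking one class of waiters no longer disturbs the other, and parallel LLC requests are now dropped with a one-time warning instead of silently. Below is a compact sketch of the two-waiter design using two condition variables over one lock; the names are hypothetical.

/* Sketch: two condition variables over one lock, so "message arrived"
 * and "flow finished" wake different waiters, as in the split above.
 */
#include <pthread.h>
#include <stdbool.h>

struct llc_state {
	pthread_mutex_t lock;
	pthread_cond_t msg_waiter;	/* a running flow waits for its reply */
	pthread_cond_t flow_waiter;	/* others wait to start the next flow */
	bool flow_active;
	int pending_msgs;
};

static void deliver_msg(struct llc_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->pending_msgs++;
	pthread_cond_signal(&s->msg_waiter);	/* only the flow owner cares */
	pthread_mutex_unlock(&s->lock);
}

static void flow_stop(struct llc_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->flow_active = false;
	pthread_cond_signal(&s->flow_waiter);	/* wake the next flow starter */
	pthread_mutex_unlock(&s->lock);
}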
index 014d91b9778ec4ba5f701080b7ca64b3bf236c60..30e5fac7034e1b9ea1d49c8a1251db59ff4ff87c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/ctype.h>
+#include <linux/mutex.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
 
@@ -129,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                return rc;
 
        /* remove ib devices */
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) {
                        if (ibdev->pnetid_by_user[ibport] &&
@@ -149,9 +150,9 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                        }
                }
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        /* remove smcd devices */
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
                if (smcd_dev->pnetid_by_user &&
                    (!pnet_name ||
@@ -165,7 +166,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                        rc = 0;
                }
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return rc;
 }
 
@@ -240,14 +241,14 @@ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port,
        u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
        bool applied = false;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
                memcpy(ib_dev->pnetid[ib_port - 1], pnet_name,
                       SMC_MAX_PNETID_LEN);
                ib_dev->pnetid_by_user[ib_port - 1] = true;
                applied = true;
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        return applied;
 }
 
@@ -258,13 +259,13 @@ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name)
        u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
        bool applied = false;
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) {
                memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN);
                smcd_dev->pnetid_by_user = true;
                applied = true;
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return applied;
 }
 
@@ -300,7 +301,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
 {
        struct smc_ib_device *ibdev;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (!strncmp(ibdev->ibdev->name, ib_name,
                             sizeof(ibdev->ibdev->name)) ||
@@ -311,7 +312,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
        }
        ibdev = NULL;
 out:
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        return ibdev;
 }
 
@@ -320,7 +321,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
 {
        struct smcd_dev *smcd_dev;
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
                if (!strncmp(dev_name(&smcd_dev->dev), smcd_name,
                             IB_DEVICE_NAME_MAX - 1))
@@ -328,7 +329,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
        }
        smcd_dev = NULL;
 out:
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return smcd_dev;
 }
 
@@ -825,7 +826,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
        int i;
 
        ini->ib_dev = NULL;
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (ibdev == known_dev)
                        continue;
@@ -844,7 +845,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
                }
        }
 out:
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 }
 
 /* find alternate roce device with same pnet_id and vlan_id */
@@ -863,7 +864,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
 {
        struct smc_ib_device *ibdev;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                struct net_device *ndev;
                int i;
@@ -888,7 +889,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
                        }
                }
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 }
 
 /* Determine the corresponding IB device port based on the hardware PNETID.
@@ -924,7 +925,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
            smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid))
                return; /* pnetid could not be determined */
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
                if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
                    !ismdev->going_away) {
@@ -932,7 +933,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
                        break;
                }
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 /* PNET table analysis for a given sock:
index 7239ba9b99dc6eb9b0f437064ce1a64c88139d86..1e23cdd41eb1ec7740624e1269abe630c964380c 100644 (file)
@@ -169,6 +169,8 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 {
        *idx = link->wr_tx_cnt;
+       if (!smc_link_usable(link))
+               return -ENOLINK;
        for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
                if (!test_and_set_bit(*idx, link->wr_tx_mask))
                        return 0;
@@ -560,15 +562,15 @@ void smc_wr_free_link(struct smc_link *lnk)
 {
        struct ib_device *ibdev;
 
+       if (!lnk->smcibdev)
+               return;
+       ibdev = lnk->smcibdev->ibdev;
+
        if (smc_wr_tx_wait_no_pending_sends(lnk))
                memset(lnk->wr_tx_mask, 0,
                       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
                                                sizeof(*lnk->wr_tx_mask));
 
-       if (!lnk->smcibdev)
-               return;
-       ibdev = lnk->smcibdev->ibdev;
-
        if (lnk->wr_rx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
                                    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
index 5c4ec9386f81dfaf313bceade699668151022df0..c537272f9c7ed18522bfcc88b964cf60306858d5 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/tcp.h>
 #include <net/tcp_states.h>
 #include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include <asm/ioctls.h>
 
 #include <linux/sunrpc/types.h>
index ee3b8d0576b8939b42b5b40f10524d42735884c2..263d950e70e9adbc517ffcc49e74faff04c9688a 100644 (file)
@@ -921,6 +921,21 @@ static void link_prepare_wakeup(struct tipc_link *l)
 
 }
 
+/**
+ * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
+ *                                     the given skb should be next attempted
+ * @skb: skb to set a future retransmission time for
+ * @l: link the skb will be transmitted on
+ */
+static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
+                                             struct tipc_link *l)
+{
+       if (link_is_bc_sndlink(l))
+               TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+       else
+               TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
+}
+
 void tipc_link_reset(struct tipc_link *l)
 {
        struct sk_buff_head list;
@@ -1036,9 +1051,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                                return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
-                       /* next retransmit attempt */
-                       if (link_is_bc_sndlink(l))
-                               TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+                       tipc_link_set_skb_retransmit_time(skb, l);
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
@@ -1139,9 +1152,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
                if (unlikely(skb == l->backlog[imp].target_bskb))
                        l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
-               /* next retransmit attempt */
-               if (link_is_bc_sndlink(l))
-                       TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+               tipc_link_set_skb_retransmit_time(skb, l);
 
                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
@@ -1584,8 +1595,7 @@ release:
                        /* retransmit skb if unrestricted*/
                        if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
                                continue;
-                       TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ?
-                                       TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM;
+                       tipc_link_set_skb_retransmit_time(skb, l);
                        _skb = pskb_copy(skb, GFP_ATOMIC);
                        if (!_skb)
                                continue;
index 263ae395ad443fb2553a6f9af9df11363090f7da..0e07fb8585fb4fcbcfc3cd8541ddc3372b65a683 100644 (file)
@@ -5016,7 +5016,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                err = nl80211_parse_he_obss_pd(
                                        info->attrs[NL80211_ATTR_HE_OBSS_PD],
                                        &params.he_obss_pd);
-               goto out;
+               if (err)
+                       goto out;
        }
 
        if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
@@ -5024,7 +5025,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                                        info->attrs[NL80211_ATTR_HE_BSS_COLOR],
                                        &params.he_bss_color);
                if (err)
-                       return err;
+                       goto out;
        }
 
        nl80211_calculate_ap_params(&params);
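The nl80211 fix adds the missing error check after parsing the OBSS-PD attribute and routes the BSS-color error through the same cleanup label instead of returning directly, so anything acquired earlier in nl80211_start_ap() is released on every failure path. Here is a minimal standalone sketch of that single-exit shape; parse_a(), parse_b() and the malloc'd resource are stand-ins, not real APIs.

/* Sketch: single cleanup label, every parser checked the same way. */
#include <stdio.h>
#include <stdlib.h>

static int parse_a(int v) { return v < 0 ? -1 : 0; }
static int parse_b(int v) { return v > 100 ? -1 : 0; }

static int start_thing(int a, int b)
{
	char *res = malloc(32);	/* resource that must be freed on all paths */
	int err;

	if (!res)
		return -1;

	err = parse_a(a);
	if (err)
		goto out;	/* the check that was missing in the bug */

	err = parse_b(b);
	if (err)
		goto out;	/* returning err directly here would leak res */

	printf("configured %d/%d\n", a, b);
out:
	free(res);
	return err;
}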
index 540ed75e44821cc62e1ee927f9ae3c196487c862..08b80669f6495591fe4e9255ca0955f8ef83f7de 100644 (file)
@@ -2,9 +2,6 @@
 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
 
 #include "xsk_queue.h"
 
@@ -55,7 +52,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
        pool->free_heads_cnt = chunks;
        pool->headroom = headroom;
        pool->chunk_size = chunk_size;
-       pool->cheap_dma = true;
        pool->unaligned = unaligned;
        pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
        INIT_LIST_HEAD(&pool->free_list);
@@ -125,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
        }
 }
 
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
-       phys_addr_t paddr;
-       u32 i;
-
-       for (i = 0; i < pool->dma_pages_cnt; i++) {
-               paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
-               if (is_swiotlb_buffer(paddr))
-                       return false;
-       }
-#endif
-       return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
-       const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
-       if (ops) {
-               return !ops->sync_single_for_cpu &&
-                       !ops->sync_single_for_device;
-       }
-
-       if (!dma_is_direct(ops))
-               return false;
-
-       if (!xp_check_swiotlb_dma(pool))
-               return false;
-
-       if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) ||               \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) ||        \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-               return false;
-#endif
-       }
-#endif
-       return true;
-}
-
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -180,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
        pool->dev = dev;
        pool->dma_pages_cnt = nr_pages;
+       pool->dma_need_sync = false;
 
        for (i = 0; i < pool->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -188,14 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                        xp_dma_unmap(pool, attrs);
                        return -ENOMEM;
                }
+               if (dma_need_sync(dev, dma))
+                       pool->dma_need_sync = true;
                pool->dma_pages[i] = dma;
        }
 
        if (pool->unaligned)
                xp_check_dma_contiguity(pool);
-
-       pool->dev = dev;
-       pool->cheap_dma = xp_check_cheap_dma(pool);
        return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);
@@ -280,7 +234,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;
 
-       if (!pool->cheap_dma) {
+       if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
                                                 pool->frame_len,
                                                 DMA_BIDIRECTIONAL);
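The xsk_buff_pool change deletes the hand-rolled "is DMA syncing needed" heuristics (direct-mapping, swiotlb and coherence checks) and instead asks the DMA layer per mapping via dma_need_sync(), caching the answer in the pool so the hot allocation path only issues dma_sync_single_range_for_device() when required. The fragment below is a condensed, kernel-style sketch of that mapping loop; it assumes kernel context (it is not a standalone program) and uses a hypothetical struct name.

/* Kernel-style sketch (not standalone): map a page array and record
 * whether any of the mappings will need explicit cache syncs later.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/types.h>

struct my_pool {
	struct device *dev;
	dma_addr_t *dma_pages;
	u32 dma_pages_cnt;
	bool dma_need_sync;	/* checked on the hot path before syncing */
};

static int my_pool_map(struct my_pool *pool, struct device *dev,
		       struct page **pages, u32 nr_pages)
{
	u32 i;

	pool->dev = dev;
	pool->dma_need_sync = false;

	for (i = 0; i < nr_pages; i++) {
		dma_addr_t dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
						    DMA_BIDIRECTIONAL, 0);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;	/* real code unmaps what was mapped */
		if (dma_need_sync(dev, dma))
			pool->dma_need_sync = true;
		pool->dma_pages[i] = dma;
	}
	return 0;
}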
index c407ecbc5d462b952aa60cf41c98e8a5c7e39ddf..b615729812e5acd1c1be39f402db5a9a6061e25d 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
@@ -581,6 +582,7 @@ static const struct net_device_ops xfrmi_netdev_ops = {
 static void xfrmi_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &xfrmi_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->type               = ARPHRD_NONE;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
index 76c577ea4fd8b602878aab8b961b20bc2f3fe607..49c7a46cee073912c2ddc7103b862bf3450a4a87 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/fcntl.h>
 #define statx foo
 #define statx_timestamp foo_timestamp
+struct statx;
+struct statx_timestamp;
 #include <sys/stat.h>
 #undef statx
 #undef statx_timestamp
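The sample above keeps the existing trick of renaming statx/statx_timestamp via #define while <sys/stat.h> is pulled in, and now also forward-declares the (renamed) structs at file scope first; this is presumably so that a newer libc's statx() prototype does not introduce the struct types inside its parameter list, which the compiler flags as "declared inside parameter list". A tiny standalone illustration of that scoping rule, with a made-up struct name:

/* Sketch: why forward-declaring at file scope helps.  Without the first
 * declaration, "struct rec" in the prototype would have scope limited to
 * that parameter list and the compiler warns about it.
 */
struct rec;			/* file-scope forward declaration */

int rec_id(struct rec *r);	/* refers to the declaration above */

struct rec {			/* the real definition, later */
	int id;
};

int rec_id(struct rec *r)
{
	return r->id;
}

int main(void)
{
	struct rec r = { .id = 42 };
	return rec_id(&r) == 42 ? 0 : 1;
}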
index 4aea7cf71d11f5660563b1252d0bd2e492221283..62c275685b75e487a8646d8f7554122d1171009b 100644 (file)
@@ -35,6 +35,7 @@ KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation)
 # The following turn off the warnings enabled by -Wextra
 KBUILD_CFLAGS += -Wno-missing-field-initializers
 KBUILD_CFLAGS += -Wno-sign-compare
+KBUILD_CFLAGS += -Wno-type-limits
 
 KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1
 
@@ -66,6 +67,7 @@ KBUILD_CFLAGS += -Wshadow
 KBUILD_CFLAGS += $(call cc-option, -Wlogical-op)
 KBUILD_CFLAGS += -Wmissing-field-initializers
 KBUILD_CFLAGS += -Wsign-compare
+KBUILD_CFLAGS += -Wtype-limits
 KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized)
 KBUILD_CFLAGS += $(call cc-option, -Wunused-macros)
 
index 99ac59c598265829f3790898272ff9e5bbc021d4..916b2f7f70987c8e1a89ba895ac4c0ce6f4d2548 100644 (file)
@@ -212,6 +212,9 @@ $(foreach m, $(notdir $1), \
        $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
 endef
 
+quiet_cmd_copy = COPY    $@
+      cmd_copy = cp $< $@
+
 # Shipped files
 # ===========================================================================
 
@@ -259,6 +262,7 @@ quiet_cmd_gzip = GZIP    $@
 # DTC
 # ---------------------------------------------------------------------------
 DTC ?= $(objtree)/scripts/dtc/dtc
+DTC_FLAGS += -Wno-interrupt_provider
 
 # Disable noisy checks by default
 ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
@@ -274,7 +278,8 @@ endif
 
 ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),)
 DTC_FLAGS += -Wnode_name_chars_strict \
-       -Wproperty_name_chars_strict
+       -Wproperty_name_chars_strict \
+       -Winterrupt_provider
 endif
 
 DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
index 4b3c486f1399f809c0ccc250cf1419120628e822..b7955dbd71caac7ef87456d256c0c8fb8a69a04e 100644 (file)
@@ -1022,6 +1022,9 @@ static void check_i2c_bus_bridge(struct check *c, struct dt_info *dti, struct no
 }
 WARNING(i2c_bus_bridge, check_i2c_bus_bridge, NULL, &addr_size_cells);
 
+#define I2C_OWN_SLAVE_ADDRESS  (1U << 30)
+#define I2C_TEN_BIT_ADDRESS    (1U << 31)
+
 static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
 {
        struct property *prop;
@@ -1044,6 +1047,8 @@ static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node
        }
 
        reg = fdt32_to_cpu(*cells);
+       /* Ignore I2C_OWN_SLAVE_ADDRESS */
+       reg &= ~I2C_OWN_SLAVE_ADDRESS;
        snprintf(unit_addr, sizeof(unit_addr), "%x", reg);
        if (!streq(unitname, unit_addr))
                FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"",
@@ -1051,10 +1056,15 @@ static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node
 
        for (len = prop->val.len; len > 0; len -= 4) {
                reg = fdt32_to_cpu(*(cells++));
-               if (reg > 0x3ff)
+               /* Ignore I2C_OWN_SLAVE_ADDRESS */
+               reg &= ~I2C_OWN_SLAVE_ADDRESS;
+
+               if ((reg & I2C_TEN_BIT_ADDRESS) && ((reg & ~I2C_TEN_BIT_ADDRESS) > 0x3ff))
                        FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"",
                                  reg);
-
+               else if (reg > 0x7f)
+                       FAIL_PROP(c, dti, node, prop, "I2C address must be less than 7-bits, got \"0x%x\". Set I2C_TEN_BIT_ADDRESS for 10 bit addresses or fix the property",
+                                 reg);
        }
 }
 WARNING(i2c_bus_reg, check_i2c_bus_reg, NULL, &reg_format, &i2c_bus_bridge);
@@ -1547,6 +1557,28 @@ static bool node_is_interrupt_provider(struct node *node)
 
        return false;
 }
+
+static void check_interrupt_provider(struct check *c,
+                                    struct dt_info *dti,
+                                    struct node *node)
+{
+       struct property *prop;
+
+       if (!node_is_interrupt_provider(node))
+               return;
+
+       prop = get_property(node, "#interrupt-cells");
+       if (!prop)
+               FAIL(c, dti, node,
+                    "Missing #interrupt-cells in interrupt provider");
+
+       prop = get_property(node, "#address-cells");
+       if (!prop)
+               FAIL(c, dti, node,
+                    "Missing #address-cells in interrupt provider");
+}
+WARNING(interrupt_provider, check_interrupt_provider, NULL);
+
 static void check_interrupts_property(struct check *c,
                                      struct dt_info *dti,
                                      struct node *node)
@@ -1604,7 +1636,7 @@ static void check_interrupts_property(struct check *c,
 
        prop = get_property(irq_node, "#interrupt-cells");
        if (!prop) {
-               FAIL(c, dti, irq_node, "Missing #interrupt-cells in interrupt-parent");
+               /* We warn about that already in another test. */
                return;
        }
 
@@ -1828,6 +1860,7 @@ static struct check *check_table[] = {
        &deprecated_gpio_property,
        &gpios_property,
        &interrupts_property,
+       &interrupt_provider,
 
        &alias_paths,
 
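The updated I2C check in the dtc hunks first strips I2C_OWN_SLAVE_ADDRESS from each reg cell and then accepts up to 10-bit addresses only when I2C_TEN_BIT_ADDRESS is set, enforcing the 7-bit range otherwise. The following is a standalone sketch of the intended validation, outside of dtc's FAIL_PROP machinery:

/* Sketch: validate an I2C "reg" cell with the flag layout used above.
 * Returns 0 if acceptable, -1 otherwise.
 */
#include <stdint.h>
#include <stdio.h>

#define I2C_OWN_SLAVE_ADDRESS	(1U << 30)
#define I2C_TEN_BIT_ADDRESS	(1U << 31)

static int i2c_reg_ok(uint32_t reg)
{
	reg &= ~I2C_OWN_SLAVE_ADDRESS;	/* flag is not part of the address */

	if (reg & I2C_TEN_BIT_ADDRESS)
		return (reg & ~I2C_TEN_BIT_ADDRESS) <= 0x3ff ? 0 : -1;
	return reg <= 0x7f ? 0 : -1;
}

int main(void)
{
	printf("%d %d %d\n",
	       i2c_reg_ok(0x50),				/* ok: 7-bit  */
	       i2c_reg_ok(0x150),			/* bad: needs the flag */
	       i2c_reg_ok(I2C_TEN_BIT_ADDRESS | 0x150));	/* ok: 10-bit */
	return 0;
}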
index 6e74ecea55a39223b77e641070487f59e26cbd6e..a08f4159cd0366563832936ca098a453c30b057f 100644 (file)
@@ -51,6 +51,37 @@ extern int annotate;         /* annotate .dts with input source location */
 
 typedef uint32_t cell_t;
 
+static inline uint16_t dtb_ld16(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint16_t)bp[0] << 8)
+               | bp[1];
+}
+
+static inline uint32_t dtb_ld32(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint32_t)bp[0] << 24)
+               | ((uint32_t)bp[1] << 16)
+               | ((uint32_t)bp[2] << 8)
+               | bp[3];
+}
+
+static inline uint64_t dtb_ld64(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint64_t)bp[0] << 56)
+               | ((uint64_t)bp[1] << 48)
+               | ((uint64_t)bp[2] << 40)
+               | ((uint64_t)bp[3] << 32)
+               | ((uint64_t)bp[4] << 24)
+               | ((uint64_t)bp[5] << 16)
+               | ((uint64_t)bp[6] << 8)
+               | bp[7];
+}
 
 #define streq(a, b)    (strcmp((a), (b)) == 0)
 #define strstarts(s, prefix)   (strncmp((s), (prefix), strlen(prefix)) == 0)
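The new dtb_ld16/32/64 helpers assemble big-endian values byte by byte, so they behave the same on any host endianness and do not require the source pointer to be aligned, unlike the fdt32_to_cpu(*(fdt32_t *)p) casts they replace in the later hunks. A small usage sketch against an odd (unaligned) offset; it copies dtb_ld32() from the header above so it builds standalone:

/* Sketch: byte-wise big-endian load from an unaligned position. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t dtb_ld32(const void *p)
{
	const uint8_t *bp = (const uint8_t *)p;

	return ((uint32_t)bp[0] << 24)
		| ((uint32_t)bp[1] << 16)
		| ((uint32_t)bp[2] << 8)
		| bp[3];
}

int main(void)
{
	/* 0xdeadbeef stored big-endian at an odd (unaligned) offset */
	const uint8_t buf[] = { 0x00, 0xde, 0xad, 0xbe, 0xef };

	printf("0x%" PRIx32 "\n", dtb_ld32(buf + 1));	/* prints 0xdeadbeef */
	return 0;
}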
index bd6977eedcb860a16ff8f6424f00cc150bd0877b..07f10d2b5d798442e23ba0d2be03aac33c0efd43 100644 (file)
@@ -156,7 +156,7 @@ static void asm_emit_data(void *e, struct data d)
                emit_offset_label(f, m->ref, m->offset);
 
        while ((d.len - off) >= sizeof(uint32_t)) {
-               asm_emit_cell(e, fdt32_to_cpu(*((fdt32_t *)(d.val+off))));
+               asm_emit_cell(e, dtb_ld32(d.val + off));
                off += sizeof(uint32_t);
        }
 
index 524b520c848647d7033b3e15c7749f2ace65d70e..93e4a2b563486de74eff03b2051ac1ecc708b7fe 100644 (file)
@@ -436,7 +436,7 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
                        return struct_size;
        }
 
-       if (can_assume(LIBFDT_ORDER) |
+       if (can_assume(LIBFDT_ORDER) ||
            !fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) {
                /* no further work necessary */
                err = fdt_move(fdt, buf, bufsize);
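The one-character libfdt fix above matters because bitwise '|' evaluates both operands: even when can_assume(LIBFDT_ORDER) is true, fdt_blocks_misordered_() would still be evaluated, whereas '||' short-circuits the second check away. A tiny standalone demonstration of the evaluation difference, with a stand-in for the expensive check:

/* Sketch: '|' always evaluates both sides, '||' short-circuits. */
#include <stdbool.h>
#include <stdio.h>

static int calls;

static bool expensive_check(void)
{
	calls++;		/* count how often we are evaluated */
	return true;
}

int main(void)
{
	int r;

	calls = 0;
	r = true | expensive_check();	/* bitwise OR: both sides evaluated */
	printf("with '|':  %d call(s), r=%d\n", calls, r);

	calls = 0;
	r = true || expensive_check();	/* logical OR: short-circuited, 0 calls */
	printf("with '||': %d call(s), r=%d\n", calls, r);
	return 0;
}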
index 26759d5dfb8cd47c04c0c8114eae15bbd58f36bb..94ce4bb91a007067c25a24d46a67ae2f3adc52f0 100644 (file)
@@ -32,7 +32,7 @@ static int fdt_sw_probe_(void *fdt)
 /* 'memrsv' state:     Initial state after fdt_create()
  *
  * Allowed functions:
- *     fdt_add_reservmap_entry()
+ *     fdt_add_reservemap_entry()
  *     fdt_finish_reservemap()         [moves to 'struct' state]
  */
 static int fdt_sw_probe_memrsv_(void *fdt)
index 36fadcdea516a2294212e5aa002f485f03e06d4b..fe49b5d789382c6c56e3e3be9fd69ec0e885af00 100644 (file)
@@ -9,6 +9,10 @@
 #include "libfdt_env.h"
 #include "fdt.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #define FDT_FIRST_SUPPORTED_VERSION    0x02
 #define FDT_LAST_SUPPORTED_VERSION     0x11
 
@@ -2069,4 +2073,8 @@ int fdt_overlay_apply(void *fdt, void *fdto);
 
 const char *fdt_strerror(int errval);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LIBFDT_H */
index c9d980c8abfc4928bf9227bae3d5e16ce9c2ff85..061ba8c9c5e832654024c9c36b286cab8a02a9be 100644 (file)
@@ -110,13 +110,13 @@ static void write_propval_int(FILE *f, const char *p, size_t len, size_t width)
                        fprintf(f, "%02"PRIx8, *(const uint8_t*)p);
                        break;
                case 2:
-                       fprintf(f, "0x%02"PRIx16, fdt16_to_cpu(*(const fdt16_t*)p));
+                       fprintf(f, "0x%02"PRIx16, dtb_ld16(p));
                        break;
                case 4:
-                       fprintf(f, "0x%02"PRIx32, fdt32_to_cpu(*(const fdt32_t*)p));
+                       fprintf(f, "0x%02"PRIx32, dtb_ld32(p));
                        break;
                case 8:
-                       fprintf(f, "0x%02"PRIx64, fdt64_to_cpu(*(const fdt64_t*)p));
+                       fprintf(f, "0x%02"PRIx64, dtb_ld64(p));
                        break;
                }
                if (p + width < end)
@@ -183,7 +183,7 @@ static enum markertype guess_value_type(struct property *prop)
                        nnotcelllbl++;
        }
 
-       if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
+       if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul <= (len-nnul))
            && (nnotstringlbl == 0)) {
                return TYPE_STRING;
        } else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
index 61dd7112d6e4aebf1a283f85131f2195ce6a3af5..0714799446f883492a8f7a98f65ff8861a570ca7 100644 (file)
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.6.0-g87a656ae"
+#define DTC_VERSION "DTC 1.6.0-g9d7888cb"
index 5b6ea8ea862f0dccb3e666f2fe1721be4549cdc0..4e93c12dc658390b67b0f8254edd01ebbc0d47bd 100644 (file)
@@ -59,10 +59,10 @@ static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, ch
                        sprintf(buf, "0x%"PRIx8, *(uint8_t*)(data + off));
                        break;
                case 2:
-                       sprintf(buf, "0x%"PRIx16, fdt16_to_cpu(*(fdt16_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx16, dtb_ld16(data + off));
                        break;
                case 4:
-                       sprintf(buf, "0x%"PRIx32, fdt32_to_cpu(*(fdt32_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx32, dtb_ld32(data + off));
                        m = markers;
                        is_phandle = false;
                        for_each_marker_of_type(m, REF_PHANDLE) {
@@ -73,7 +73,7 @@ static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, ch
                        }
                        break;
                case 8:
-                       sprintf(buf, "0x%"PRIx64, fdt64_to_cpu(*(fdt64_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx64, dtb_ld64(data + off));
                        break;
                }
 
index ce0b99fb5847110a410f84efba711c85ef23d138..ae19fb0243b9bd81d85f8d6b9ffeb3872682d2b7 100644 (file)
@@ -78,7 +78,7 @@ config GCC_PLUGIN_RANDSTRUCT
          source tree isn't cleaned after kernel installation).
 
          The seed used for compilation is located at
-         scripts/gcc-plgins/randomize_layout_seed.h.  It remains after
+         scripts/gcc-plugins/randomize_layout_seed.h.  It remains after
          a make clean to allow for external modules to be compiled with
          the existing seed and will be removed by a make mrproper or
          make distclean.
index c0ac8f7b5f1abd532300cad586b47ed8e4d78ad3..4a616128a15484c6f9359cdd9ba35901dfebb41a 100644 (file)
@@ -4,27 +4,19 @@
  * Copyright (C) 2015 Boris Barbulovski <bbarbulovski@gmail.com>
  */
 
-#include <qglobal.h>
-
-#include <QMainWindow>
-#include <QList>
-#include <qtextbrowser.h>
 #include <QAction>
+#include <QApplication>
+#include <QCloseEvent>
+#include <QDebug>
+#include <QDesktopWidget>
 #include <QFileDialog>
+#include <QLabel>
+#include <QLayout>
+#include <QList>
 #include <QMenu>
-
-#include <qapplication.h>
-#include <qdesktopwidget.h>
-#include <qtoolbar.h>
-#include <qlayout.h>
-#include <qsplitter.h>
-#include <qlineedit.h>
-#include <qlabel.h>
-#include <qpushbutton.h>
-#include <qmenubar.h>
-#include <qmessagebox.h>
-#include <qregexp.h>
-#include <qevent.h>
+#include <QMenuBar>
+#include <QMessageBox>
+#include <QToolBar>
 
 #include <stdlib.h>
 
@@ -445,9 +437,10 @@ void ConfigList::updateList(ConfigItem* item)
        if (rootEntry != &rootmenu && (mode == singleMode ||
            (mode == symbolMode && rootEntry->parent != &rootmenu))) {
                item = (ConfigItem *)topLevelItem(0);
-               if (!item)
+               if (!item && mode != symbolMode) {
                        item = new ConfigItem(this, 0, true);
-               last = item;
+                       last = item;
+               }
        }
        if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) &&
            rootEntry->sym && rootEntry->prompt) {
@@ -545,7 +538,7 @@ void ConfigList::setRootMenu(struct menu *menu)
        rootEntry = menu;
        updateListAll();
        if (currentItem()) {
-               currentItem()->setSelected(hasFocus());
+               setSelected(currentItem(), hasFocus());
                scrollToItem(currentItem());
        }
 }
@@ -873,7 +866,7 @@ void ConfigList::focusInEvent(QFocusEvent *e)
 
        ConfigItem* item = (ConfigItem *)currentItem();
        if (item) {
-               item->setSelected(true);
+               setSelected(item, true);
                menu = item->menu;
        }
        emit gotFocus(menu);
@@ -1021,7 +1014,7 @@ ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name)
        : Parent(parent), sym(0), _menu(0)
 {
        setObjectName(name);
-
+       setOpenLinks(false);
 
        if (!objectName().isEmpty()) {
                configSettings->beginGroup(objectName());
@@ -1094,7 +1087,7 @@ void ConfigInfoView::menuInfo(void)
                        if (sym->name) {
                                head += " (";
                                if (showDebug())
-                                       head += QString().sprintf("<a href=\"s%p\">", sym);
+                                       head += QString().sprintf("<a href=\"s%s\">", sym->name);
                                head += print_filter(sym->name);
                                if (showDebug())
                                        head += "</a>";
@@ -1103,7 +1096,7 @@ void ConfigInfoView::menuInfo(void)
                } else if (sym->name) {
                        head += "<big><b>";
                        if (showDebug())
-                               head += QString().sprintf("<a href=\"s%p\">", sym);
+                               head += QString().sprintf("<a href=\"s%s\">", sym->name);
                        head += print_filter(sym->name);
                        if (showDebug())
                                head += "</a>";
@@ -1154,13 +1147,16 @@ QString ConfigInfoView::debug_info(struct symbol *sym)
                switch (prop->type) {
                case P_PROMPT:
                case P_MENU:
-                       debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu);
+                       debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name);
                        debug += print_filter(prop->text);
                        debug += "</a><br>";
                        break;
                case P_DEFAULT:
                case P_SELECT:
                case P_RANGE:
+               case P_COMMENT:
+               case P_IMPLY:
+               case P_SYMBOL:
                        debug += prop_get_type_name(prop->type);
                        debug += ": ";
                        expr_print(prop->expr, expr_print_help, &debug, E_NONE);
@@ -1226,13 +1222,62 @@ void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char
        QString str2 = print_filter(str);
 
        if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
-               *text += QString().sprintf("<a href=\"s%p\">", sym);
+               *text += QString().sprintf("<a href=\"s%s\">", sym->name);
                *text += str2;
                *text += "</a>";
        } else
                *text += str2;
 }
 
+void ConfigInfoView::clicked(const QUrl &url)
+{
+       QByteArray str = url.toEncoded();
+       const std::size_t count = str.size();
+       char *data = new char[count + 1];
+       struct symbol **result;
+       struct menu *m = NULL;
+
+       if (count < 1) {
+               qInfo() << "Clicked link is empty";
+               delete data;
+               return;
+       }
+
+       memcpy(data, str.constData(), count);
+       data[count] = '\0';
+
+       /* Seek for exact match */
+       data[0] = '^';
+       strcat(data, "$");
+       result = sym_re_search(data);
+       if (!result) {
+               qInfo() << "Clicked symbol is invalid:" << data;
+               delete data;
+               return;
+       }
+
+       sym = *result;
+
+       /* Seek for the menu which holds the symbol */
+       for (struct property *prop = sym->prop; prop; prop = prop->next) {
+                   if (prop->type != P_PROMPT && prop->type != P_MENU)
+                           continue;
+                   m = prop->menu;
+                   break;
+       }
+
+       if (!m) {
+               /* Symbol is not visible as a menu */
+               symbolInfo();
+               emit showDebugChanged(true);
+       } else {
+               emit menuSelected(m);
+       }
+
+       free(result);
+       delete data;
+}
+
 QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
 {
        QMenu* popup = Parent::createStandardContextMenu(pos);
@@ -1402,18 +1447,22 @@ ConfigMainWindow::ConfigMainWindow(void)
        addToolBar(toolBar);
 
        backAction = new QAction(QPixmap(xpm_back), "Back", this);
-         connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
-         backAction->setEnabled(false);
+       connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
+
        QAction *quitAction = new QAction("&Quit", this);
        quitAction->setShortcut(Qt::CTRL + Qt::Key_Q);
-         connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+       connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+
        QAction *loadAction = new QAction(QPixmap(xpm_load), "&Load", this);
        loadAction->setShortcut(Qt::CTRL + Qt::Key_L);
-         connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+       connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+
        saveAction = new QAction(QPixmap(xpm_save), "&Save", this);
        saveAction->setShortcut(Qt::CTRL + Qt::Key_S);
-         connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+       connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+
        conf_set_changed_callback(conf_changed);
+
        // Set saveAction's initial state
        conf_changed();
        configname = xstrdup(conf_get_configname());
@@ -1506,6 +1555,9 @@ ConfigMainWindow::ConfigMainWindow(void)
        helpMenu->addAction(showIntroAction);
        helpMenu->addAction(showAboutAction);
 
+       connect (helpText, SIGNAL (anchorClicked (const QUrl &)),
+                helpText, SLOT (clicked (const QUrl &)) );
+
        connect(configList, SIGNAL(menuChanged(struct menu *)),
                helpText, SLOT(setInfo(struct menu *)));
        connect(configList, SIGNAL(menuSelected(struct menu *)),
@@ -1611,21 +1663,11 @@ void ConfigMainWindow::searchConfig(void)
 void ConfigMainWindow::changeItens(struct menu *menu)
 {
        configList->setRootMenu(menu);
-
-       if (configList->rootEntry->parent == &rootmenu)
-               backAction->setEnabled(false);
-       else
-               backAction->setEnabled(true);
 }
 
 void ConfigMainWindow::changeMenu(struct menu *menu)
 {
        menuList->setRootMenu(menu);
-
-       if (menuList->rootEntry->parent == &rootmenu)
-               backAction->setEnabled(false);
-       else
-               backAction->setEnabled(true);
 }
 
 void ConfigMainWindow::setMenuLink(struct menu *menu)
@@ -1645,22 +1687,26 @@ void ConfigMainWindow::setMenuLink(struct menu *menu)
                        return;
                list->setRootMenu(parent);
                break;
-       case symbolMode:
+       case menuMode:
                if (menu->flags & MENU_ROOT) {
-                       configList->setRootMenu(menu);
+                       menuList->setRootMenu(menu);
                        configList->clearSelection();
-                       list = menuList;
-               } else {
                        list = configList;
+               } else {
                        parent = menu_get_parent_menu(menu->parent);
                        if (!parent)
                                return;
-                       item = menuList->findConfigItem(parent);
+
+                       /* Select the config view */
+                       item = configList->findConfigItem(parent);
                        if (item) {
-                               item->setSelected(true);
-                               menuList->scrollToItem(item);
+                               configList->setSelected(item, true);
+                               configList->scrollToItem(item);
                        }
-                       list->setRootMenu(parent);
+
+                       menuList->setRootMenu(parent);
+                       menuList->clearSelection();
+                       list = menuList;
                }
                break;
        case fullMode:
@@ -1673,9 +1719,10 @@ void ConfigMainWindow::setMenuLink(struct menu *menu)
        if (list) {
                item = list->findConfigItem(menu);
                if (item) {
-                       item->setSelected(true);
+                       list->setSelected(item, true);
                        list->scrollToItem(item);
                        list->setFocus();
+                       helpText->setInfo(menu);
                }
        }
 }
@@ -1688,25 +1735,11 @@ void ConfigMainWindow::listFocusChanged(void)
 
 void ConfigMainWindow::goBack(void)
 {
-       ConfigItem* item, *oldSelection;
-
-       configList->setParentMenu();
+qInfo() << __FUNCTION__;
        if (configList->rootEntry == &rootmenu)
-               backAction->setEnabled(false);
-
-       if (menuList->selectedItems().count() == 0)
                return;
 
-       item = (ConfigItem*)menuList->selectedItems().first();
-       oldSelection = item;
-       while (item) {
-               if (item->menu == configList->rootEntry) {
-                       oldSelection->setSelected(false);
-                       item->setSelected(true);
-                       break;
-               }
-               item = (ConfigItem*)item->parent();
-       }
+       configList->setParentMenu();
 }
 
 void ConfigMainWindow::showSingleView(void)
@@ -1718,6 +1751,8 @@ void ConfigMainWindow::showSingleView(void)
        fullViewAction->setEnabled(true);
        fullViewAction->setChecked(false);
 
+       backAction->setEnabled(true);
+
        menuView->hide();
        menuList->setRootMenu(0);
        configList->mode = singleMode;
@@ -1737,6 +1772,8 @@ void ConfigMainWindow::showSplitView(void)
        fullViewAction->setEnabled(true);
        fullViewAction->setChecked(false);
 
+       backAction->setEnabled(false);
+
        configList->mode = menuMode;
        if (configList->rootEntry == &rootmenu)
                configList->updateListAll();
@@ -1760,6 +1797,8 @@ void ConfigMainWindow::showFullView(void)
        fullViewAction->setEnabled(false);
        fullViewAction->setChecked(true);
 
+       backAction->setEnabled(false);
+
        menuView->hide();
        menuList->setRootMenu(0);
        configList->mode = fullMode;
index c879d79ce8170b625f7e1ba7d9d12b18f7f3ccd9..fb9e9729266fc5a14ee9b5f1becc1355f70a6eb4 100644 (file)
@@ -3,17 +3,17 @@
  * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
  */
 
-#include <QTextBrowser>
-#include <QTreeWidget>
-#include <QMainWindow>
+#include <QCheckBox>
+#include <QDialog>
 #include <QHeaderView>
-#include <qsettings.h>
+#include <QLineEdit>
+#include <QMainWindow>
 #include <QPushButton>
 #include <QSettings>
-#include <QLineEdit>
 #include <QSplitter>
-#include <QCheckBox>
-#include <QDialog>
+#include <QTextBrowser>
+#include <QTreeWidget>
+
 #include "expr.h"
 
 class ConfigView;
@@ -45,11 +45,17 @@ class ConfigList : public QTreeWidget {
 public:
        ConfigList(ConfigView* p, const char *name = 0);
        void reinit(void);
+       ConfigItem* findConfigItem(struct menu *);
        ConfigView* parent(void) const
        {
                return (ConfigView*)Parent::parent();
        }
-       ConfigItem* findConfigItem(struct menu *);
+       void setSelected(QTreeWidgetItem *item, bool enable) {
+               for (int i = 0; i < selectedItems().size(); i++)
+                       selectedItems().at(i)->setSelected(false);
+
+               item->setSelected(enable);
+       }
 
 protected:
        void keyPressEvent(QKeyEvent *e);
@@ -250,6 +256,7 @@ public slots:
        void setInfo(struct menu *menu);
        void saveSettings(void);
        void setShowDebug(bool);
+       void clicked (const QUrl &url);
 
 signals:
        void showDebugChanged(bool);
index e12c4900510f607bd7a09be7ce3ec7d150b5954b..1d20003243c3fb6f176dbefd76dea1e087c52458 100644 (file)
@@ -188,19 +188,7 @@ DEFINE_LSM(integrity) = {
 int integrity_kernel_read(struct file *file, loff_t offset,
                          void *addr, unsigned long count)
 {
-       mm_segment_t old_fs;
-       char __user *buf = (char __user *)addr;
-       ssize_t ret;
-
-       if (!(file->f_mode & FMODE_READ))
-               return -EBADF;
-
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       ret = __vfs_read(file, buf, count, &offset);
-       set_fs(old_fs);
-
-       return ret;
+       return __kernel_read(file, addr, count, &offset);
 }
 
 /*
index df93ac258e01350f495da3447a938721932c57b1..9d94080bdad82ff4571958627b8d75c006ea52d2 100644 (file)
@@ -30,7 +30,7 @@
 
 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
                     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
 
 /* digest size for IMA, fits SHA1 or MD5 */
 #define IMA_DIGEST_SIZE                SHA1_DIGEST_SIZE
index 220b14920c377affdc405a25d1222ad4cff4aa31..011c3c76af8658d4efd2b9140857842363ab9ab8 100644 (file)
@@ -823,13 +823,26 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
        if (rc != 0)
                return rc;
 
-       /* cumulative sha1 over tpm registers 0-7 */
+       /* cumulative digest over TPM registers 0-7 */
        for (i = TPM_PCR0; i < TPM_PCR8; i++) {
                ima_pcrread(i, &d);
                /* now accumulate with current aggregate */
                rc = crypto_shash_update(shash, d.digest,
                                         crypto_shash_digestsize(tfm));
        }
+       /*
+        * Extend cumulative digest over TPM registers 8-9, which contain
+        * measurement for the kernel command line (reg. 8) and image (reg. 9)
+        * in a typical PCR allocation. Registers 8-9 are only included in
+        * non-SHA1 boot_aggregate digests to avoid ambiguity.
+        */
+       if (alg_id != TPM_ALG_SHA1) {
+               for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+                       ima_pcrread(i, &d);
+                       rc = crypto_shash_update(shash, d.digest,
+                                               crypto_shash_digestsize(tfm));
+               }
+       }
        if (!rc)
                crypto_shash_final(shash, digest);
        return rc;
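
A minimal userspace sketch of how the extended aggregate could be re-computed for a SHA-256 bank (not part of the patch; it assumes the per-PCR sysfs files under /sys/class/tpm/tpm0/pcr-sha256/ and the typical PCR 0-9 allocation described in the comment above, and uses OpenSSL, so build with -lcrypto). Comparing the printed digest against the boot_aggregate entry in the IMA measurement list is left to the reader.

#include <stdio.h>
#include <openssl/sha.h>

static int read_pcr(int idx, unsigned char out[SHA256_DIGEST_LENGTH])
{
	char path[64], hex[80];
	FILE *f;
	int i;

	/* Assumed sysfs layout; adjust for other TPM devices or banks. */
	snprintf(path, sizeof(path), "/sys/class/tpm/tpm0/pcr-sha256/%d", idx);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(hex, sizeof(hex), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
		sscanf(hex + 2 * i, "%2hhx", &out[i]);
	return 0;
}

int main(void)
{
	unsigned char pcr[SHA256_DIGEST_LENGTH], digest[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;
	int i;

	SHA256_Init(&ctx);
	for (i = 0; i <= 9; i++) {	/* PCRs 0-7, plus 8-9 for non-SHA1 banks */
		if (read_pcr(i, pcr))
			return 1;
		SHA256_Update(&ctx, pcr, sizeof(pcr));
	}
	SHA256_Final(digest, &ctx);
	for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	return 0;
}
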
index 0ce3e73edd4227cc2b76b40a34b79e7945cd0190..70a7ad357bc6ab35aa4c7b6f7616222a8b88c805 100644 (file)
@@ -1414,7 +1414,22 @@ EXPORT_SYMBOL(security_inode_copy_up);
 
 int security_inode_copy_up_xattr(const char *name)
 {
-       return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
+       struct security_hook_list *hp;
+       int rc;
+
+       /*
+        * The implementation can return 0 (accept the xattr), 1 (discard the
+        * xattr), -EOPNOTSUPP if it does not know anything about the xattr or
+        * any other error code in case of an error.
+        */
+       hlist_for_each_entry(hp,
+               &security_hook_heads.inode_copy_up_xattr, list) {
+               rc = hp->hook.inode_copy_up_xattr(name);
+               if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
+                       return rc;
+       }
+
+       return LSM_RET_DEFAULT(inode_copy_up_xattr);
 }
 EXPORT_SYMBOL(security_inode_copy_up_xattr);
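
To illustrate the return-value convention the loop above relies on, here is a minimal, hypothetical LSM sketch (the "example" names are invented for this illustration and assume the 5.8-era registration macros):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/lsm_hooks.h>

/* 0 = keep the xattr on copy-up, 1 = discard it,
 * -EOPNOTSUPP = not handled here, defer to other LSMs or the default.
 */
static int example_inode_copy_up_xattr(const char *name)
{
	if (strncmp(name, "security.example.", strlen("security.example.")))
		return -EOPNOTSUPP;

	return 1;	/* drop our own attribute; it is rebuilt for the copy */
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(inode_copy_up_xattr, example_inode_copy_up_xattr),
};

static int __init example_lsm_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks), "example");
	return 0;
}

DEFINE_LSM(example) = {
	.name = "example",
	.init = example_lsm_init,
};
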
 
index 509290f2efa8ecee7971c272deba82a4136ba50d..0e53f6f319167c9e4dd3b1e342bf6b8e2fe1f321 100644 (file)
@@ -764,6 +764,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
        if (!retval) {
+               /* clear flags and stop any drain wait */
+               stream->partial_drain = false;
+               stream->metadata_set = false;
                snd_compr_drain_notify(stream);
                stream->runtime->total_bytes_available = 0;
                stream->runtime->total_bytes_transferred = 0;
@@ -921,6 +924,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
        if (stream->next_track == false)
                return -EPERM;
 
+       stream->partial_drain = true;
        retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
        if (retval) {
                pr_debug("Partial drain returned failure\n");
index e69a4ef0d6bdea61218aa9c9b95b06d0cfdcddce..08c10ac9d6c87a75374ac97e787ded3be15c4617 100644 (file)
@@ -91,6 +91,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file,
                {
                        struct snd_dm_fm_info info;
 
+                       memset(&info, 0, sizeof(info));
+
                        info.fm_mode = opl3->fm_mode;
                        info.rhythm = opl3->rhythm;
                        if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))
index 2c6d2becfe1a0bce6b1910e9c144d05bb6b1334e..824f4ac1a8ce787d142ad42e17aab63b07b04c1e 100644 (file)
@@ -72,6 +72,12 @@ static int compare_input_type(const void *ap, const void *bp)
        if (a->type != b->type)
                return (int)(a->type - b->type);
 
+       /* If the pair has both hs_mic and hp_mic, pick the hs_mic ahead of the hp_mic. */
+       if (a->is_headset_mic && b->is_headphone_mic)
+               return -1; /* don't swap */
+       else if (a->is_headphone_mic && b->is_headset_mic)
+               return 1; /* swap */
+
        /* In case one has boost and the other one has not,
           pick the one with boost first. */
        return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
index e2b21ef5d7d12994be0ca64d332d0d11c24486a0..41eaa89660c3ee9c21ada7e52bd84803129a7f8f 100644 (file)
@@ -259,7 +259,7 @@ static int hinfo_to_pcm_index(struct hda_codec *codec,
                if (get_pcm_rec(spec, pcm_idx)->stream == hinfo)
                        return pcm_idx;
 
-       codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       codec_warn(codec, "HDMI: hinfo %p not tied to a PCM\n", hinfo);
        return -EINVAL;
 }
 
@@ -277,7 +277,8 @@ static int hinfo_to_pin_index(struct hda_codec *codec,
                        return pin_idx;
        }
 
-       codec_dbg(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       codec_dbg(codec, "HDMI: hinfo %p (pcm %d) not registered\n", hinfo,
+                 hinfo_to_pcm_index(codec, hinfo));
        return -EINVAL;
 }
 
@@ -1804,33 +1805,43 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
 
 static int hdmi_parse_codec(struct hda_codec *codec)
 {
-       hda_nid_t nid;
+       hda_nid_t start_nid;
+       unsigned int caps;
        int i, nodes;
 
-       nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid);
-       if (!nid || nodes < 0) {
+       nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid);
+       if (!start_nid || nodes < 0) {
                codec_warn(codec, "HDMI: failed to get afg sub nodes\n");
                return -EINVAL;
        }
 
-       for (i = 0; i < nodes; i++, nid++) {
-               unsigned int caps;
-               unsigned int type;
+       /*
+        * hdmi_add_pin() assumes the total number of converters to
+        * be known, so first discover all converters
+        */
+       for (i = 0; i < nodes; i++) {
+               hda_nid_t nid = start_nid + i;
 
                caps = get_wcaps(codec, nid);
-               type = get_wcaps_type(caps);
 
                if (!(caps & AC_WCAP_DIGITAL))
                        continue;
 
-               switch (type) {
-               case AC_WID_AUD_OUT:
+               if (get_wcaps_type(caps) == AC_WID_AUD_OUT)
                        hdmi_add_cvt(codec, nid);
-                       break;
-               case AC_WID_PIN:
+       }
+
+       /* discover audio pins */
+       for (i = 0; i < nodes; i++) {
+               hda_nid_t nid = start_nid + i;
+
+               caps = get_wcaps(codec, nid);
+
+               if (!(caps & AC_WCAP_DIGITAL))
+                       continue;
+
+               if (get_wcaps_type(caps) == AC_WID_PIN)
                        hdmi_add_pin(codec, nid);
-                       break;
-               }
        }
 
        return 0;
index 737ef82a75fda2a6a8aecf7238df648c3fc6c1e9..194ffa8c66cedb9890d98073e14794423ed5ba18 100644 (file)
@@ -6149,6 +6149,9 @@ enum {
        ALC236_FIXUP_HP_MUTE_LED,
        ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
        ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
+       ALC269VC_FIXUP_ACER_HEADSET_MIC,
+       ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7327,6 +7330,35 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x90100120 }, /* use as internal speaker */
+                       { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */
+                       { 0x1a, 0x01011020 }, /* use as line out */
+                       { },
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+       [ALC269VC_FIXUP_ACER_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x02a11030 }, /* use as headset mic */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+       [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7342,10 +7374,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+       SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+       SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
@@ -7571,8 +7606,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
-       SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
index e4371932a55a66a6215b6478646570a8dcb1131e..4a82690aec16a91207038bd495d449ae2a37a254 100644 (file)
@@ -2,6 +2,7 @@
 # Renoir platform Support
 snd-rn-pci-acp3x-objs  := rn-pci-acp3x.o
 snd-acp3x-pdm-dma-objs := acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR)        += snd-rn-pci-acp3x.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR)        += snd-acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH)  += acp3x-rn.o
+snd-acp3x-rn-objs      := acp3x-rn.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR)       += snd-rn-pci-acp3x.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR)       += snd-acp3x-pdm-dma.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH)  += snd-acp3x-rn.o
index 3e9d2c6c51f9a60cacab882ddcf91f0fa2601ddc..7d6670abdb08e0fa9903b3ac08ae0790002bb2a8 100644 (file)
@@ -932,7 +932,9 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                        RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
                snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
                        RT5682_PWR_CBJ, RT5682_PWR_CBJ);
-
+               snd_soc_component_update_bits(component,
+                       RT5682_HP_CHARGE_PUMP_1,
+                       RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
 
@@ -956,6 +958,11 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                        rt5682->jack_type = SND_JACK_HEADPHONE;
                        break;
                }
+
+               snd_soc_component_update_bits(component,
+                       RT5682_HP_CHARGE_PUMP_1,
+                       RT5682_OSW_L_MASK | RT5682_OSW_R_MASK,
+                       RT5682_OSW_L_EN | RT5682_OSW_R_EN);
        } else {
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
index 0c813a45bba7c1e17144e1a54fc9c731d07d2e5c..69aeb0e71844d9f614e156f4e8a0cbcdb41d728f 100644 (file)
@@ -265,12 +265,20 @@ static int fsl_mqs_remove(struct platform_device *pdev)
 static int fsl_mqs_runtime_resume(struct device *dev)
 {
        struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+       int ret;
 
-       if (mqs_priv->ipg)
-               clk_prepare_enable(mqs_priv->ipg);
+       ret = clk_prepare_enable(mqs_priv->ipg);
+       if (ret) {
+               dev_err(dev, "failed to enable ipg clock\n");
+               return ret;
+       }
 
-       if (mqs_priv->mclk)
-               clk_prepare_enable(mqs_priv->mclk);
+       ret = clk_prepare_enable(mqs_priv->mclk);
+       if (ret) {
+               dev_err(dev, "failed to enable mclk clock\n");
+               clk_disable_unprepare(mqs_priv->ipg);
+               return ret;
+       }
 
        if (mqs_priv->use_gpr)
                regmap_write(mqs_priv->regmap, IOMUXC_GPR2,
@@ -292,11 +300,8 @@ static int fsl_mqs_runtime_suspend(struct device *dev)
                regmap_read(mqs_priv->regmap, REG_MQS_CTRL,
                            &mqs_priv->reg_mqs_ctrl);
 
-       if (mqs_priv->mclk)
-               clk_disable_unprepare(mqs_priv->mclk);
-
-       if (mqs_priv->ipg)
-               clk_disable_unprepare(mqs_priv->ipg);
+       clk_disable_unprepare(mqs_priv->mclk);
+       clk_disable_unprepare(mqs_priv->ipg);
 
        return 0;
 }
index d6219fba96995bf187284a813546c005340b4a70..de43267b9c8af77dca423d8a1bc6079323806ac9 100644 (file)
@@ -84,10 +84,10 @@ struct snd_usb_endpoint {
        dma_addr_t sync_dma;            /* DMA address of syncbuf */
 
        unsigned int pipe;              /* the data i/o pipe */
-       unsigned int framesize[2];      /* small/large frame sizes in samples */
-       unsigned int sample_rem;        /* remainder from division fs/fps */
+       unsigned int packsize[2];       /* small/large packet sizes in samples */
+       unsigned int sample_rem;        /* remainder from division fs/pps */
        unsigned int sample_accum;      /* sample accumulator */
-       unsigned int fps;               /* frames per second */
+       unsigned int pps;               /* packets per second */
        unsigned int freqn;             /* nominal sampling rate in fs/fps in Q16.16 format */
        unsigned int freqm;             /* momentary sampling rate in fs/fps in Q16.16 format */
        int        freqshift;           /* how much to shift the feedback value to get Q16.16 */
index 9bea7d3f99f88f169806aa9f11bdffe9daaa1a78..88760268fb5568eddbb7cdf90e7961664f31b717 100644 (file)
@@ -159,11 +159,11 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
                return ep->maxframesize;
 
        ep->sample_accum += ep->sample_rem;
-       if (ep->sample_accum >= ep->fps) {
-               ep->sample_accum -= ep->fps;
-               ret = ep->framesize[1];
+       if (ep->sample_accum >= ep->pps) {
+               ep->sample_accum -= ep->pps;
+               ret = ep->packsize[1];
        } else {
-               ret = ep->framesize[0];
+               ret = ep->packsize[0];
        }
 
        return ret;
@@ -1088,15 +1088,15 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 
        if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) {
                ep->freqn = get_usb_full_speed_rate(rate);
-               ep->fps = 1000;
+               ep->pps = 1000 >> ep->datainterval;
        } else {
                ep->freqn = get_usb_high_speed_rate(rate);
-               ep->fps = 8000;
+               ep->pps = 8000 >> ep->datainterval;
        }
 
-       ep->sample_rem = rate % ep->fps;
-       ep->framesize[0] = rate / ep->fps;
-       ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps;
+       ep->sample_rem = rate % ep->pps;
+       ep->packsize[0] = rate / ep->pps;
+       ep->packsize[1] = (rate + (ep->pps - 1)) / ep->pps;
 
        /* calculate the frequency in 16.16 format */
        ep->freqm = ep->freqn;
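
As a worked example of the bookkeeping set up here (with assumed parameters: a 44.1 kHz stream on a high-speed bus with datainterval = 1, i.e. one packet every two microframes), the standalone program below mirrors the accumulator logic of snd_usb_endpoint_next_packet_size() and shows that the small/large packet mix adds up to exactly one second of audio:

#include <stdio.h>

int main(void)
{
	unsigned int rate = 44100, datainterval = 1;
	unsigned int pps = 8000 >> datainterval;	/* 4000 packets per second */
	unsigned int sample_rem = rate % pps;		/* 100 */
	unsigned int packsize[2] = {
		rate / pps,				/* 11 samples (small packet) */
		(rate + pps - 1) / pps			/* 12 samples (large packet) */
	};
	unsigned int sample_accum = 0, large = 0, i;

	for (i = 0; i < pps; i++) {
		sample_accum += sample_rem;
		if (sample_accum >= pps) {
			sample_accum -= pps;
			large++;
		}
	}

	/* 3900 * 11 + 100 * 12 = 44100 samples, i.e. exactly one second. */
	printf("pps=%u small=%u large=%u large_count=%u samples=%u\n",
	       pps, packsize[0], packsize[1], large,
	       (pps - large) * packsize[0] + large * packsize[1]);
	return 0;
}
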
index a777d36c4f5a87d88e07cf0f151784d472c875b0..40b7cd13fed9c9e67462456ac4bbbb26d4ba6c55 100644 (file)
@@ -368,6 +368,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                goto add_sync_ep_from_ifnum;
        case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
        case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
+       case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
                ep = 0x81;
                ifnum = 2;
                goto add_sync_ep_from_ifnum;
index 4ec491011b19c2ba7196226dd2240a25eac065ce..9092cc0aa8072b4fc7a9a7ddf24a2917a022def2 100644 (file)
@@ -3633,4 +3633,56 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
        }
 },
 
+/*
+ * MacroSilicon MS2109 based HDMI capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have swapped L-R channels, but that's for userspace to deal
+ * with.
+ */
+{
+       USB_DEVICE(0x534d, 0x2109),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "MacroSilicon",
+               .product_name = "MS2109",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .iface = 3,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = 0,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                               USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_CONTINUOUS,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+
 #undef USB_DEVICE_VENDOR_SPEC
index df767afc690fea3c0eb66e0135f7437e37953e3a..45f8e1b02241f26d8136dbecf6183671cb780e14 100644 (file)
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START(memcpy_orig)
        retq
 SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 
 MCSAFE_TEST_CTL
index 4671fbf28842718fa74c9664dee74f8e129dbb7e..7f475d59a0974f17d7a9765c643b798a1dc6ee83 100644 (file)
@@ -18,8 +18,7 @@
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#if !defined(__ASSEMBLY__) && \
-       (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
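
As a quick sanity check of the example in the comment above, the same arithmetic can be reproduced in plain C with an equivalent expression (not the kernel macro):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int h = 39, l = 21;
	uint64_t mask = (~0ULL >> (63 - h)) & (~0ULL << l);

	/* Prints 0x000000ffffe00000, matching the comment. */
	printf("GENMASK_ULL(%d, %d) = %#018llx\n",
	       h, l, (unsigned long long)mask);
	return 0;
}
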
index 974a71342aea681d330bff9074810d3c73f125e5..8bd33050b7bbb6e0c5e1d0bf2e1e7ffc11b2caa4 100644 (file)
@@ -3171,13 +3171,12 @@ union bpf_attr {
  * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
- *             0, on success;
- *             < 0, on error.
+ *             0 on success, or a negative error in case of failure.
  *
  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
  *     Description
@@ -3189,20 +3188,20 @@ union bpf_attr {
  * void bpf_ringbuf_submit(void *data, u64 flags)
  *     Description
  *             Submit reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
  * void bpf_ringbuf_discard(void *data, u64 flags)
  *     Description
  *             Discard reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
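
For readers following the helper documentation, here is a minimal BPF-side sketch of the reserve/submit flow with BPF_RB_NO_WAKEUP; the map size, section name and event layout are assumptions made for the example (and it needs a ring-buffer-aware libbpf and clang), not something taken from this patch:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

struct event {
	__u32 pid;
};

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
	struct event *e;

	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;

	/* Batch-friendly: skip the consumer wakeup for this sample. */
	bpf_ringbuf_submit(e, BPF_RB_NO_WAKEUP);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
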
@@ -3210,16 +3209,18 @@ union bpf_attr {
  *     Description
  *             Query various characteristics of provided ring buffer. What
  *             exactly is queried is determined by *flags*:
- *               - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- *               - BPF_RB_RING_SIZE - the size of ring buffer;
- *               - BPF_RB_CONS_POS - consumer position (can wrap around);
- *               - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- *             Data returned is just a momentary snapshots of actual values
+ *
+ *             * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ *             * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ *             * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ *             * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ *             Data returned is just a momentary snapshot of actual values
  *             and could be inaccurate, so this facility should be used to
  *             power heuristics and for reporting, not to make 100% correct
  *             calculation.
  *     Return
- *             Requested value, or 0, if flags are not recognized.
+ *             Requested value, or 0, if *flags* are not recognized.
  *
  * int bpf_csum_level(struct sk_buff *skb, u64 level)
  *     Description
index 1b6015b21ba85979bdbf792cca6ef43319ae9b3f..dbef24ebcfcb9d251d5d6fa07e9f1d55d4bcbc82 100644 (file)
@@ -233,6 +233,8 @@ LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
 LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
                                 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
                                 __u64 *probe_offset, __u64 *probe_addr);
+
+enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
 LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
 
 #ifdef __cplusplus
index df59fd4fc95ba82c2f0ceb3caa7ef6a8ce75082a..e0af36b0e5d839e2fdd57f0f921c8ba17e4a3629 100644 (file)
 #include <stdbool.h>
 #include <stddef.h>
 #include <limits.h>
-#ifndef __WORDSIZE
-#define __WORDSIZE (__SIZEOF_LONG__ * 8)
-#endif
 
 static inline size_t hash_bits(size_t h, int bits)
 {
        /* shuffle bits and return requested number of upper bits */
-       return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+       /* LP64 case */
+       return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
+       return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
+#else
+#      error "Unsupported size_t size"
+#endif
 }
 
 typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
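
The replacement keeps the same multiplicative (Fibonacci) hashing scheme; the standalone snippet below illustrates the 64-bit branch with the same constant and shift (the keys are arbitrary demo values):

#include <stdio.h>
#include <stdint.h>

/* Same constant and shift as the __SIZEOF_LONG_LONG__ branch above. */
static inline size_t hash_bits64(uint64_t h, int bits)
{
	return (size_t)((h * 11400714819323198485llu) >> (64 - bits));
}

int main(void)
{
	uint64_t key;

	/* Even consecutive keys spread out across a 2^8-bucket table. */
	for (key = 1; key <= 4; key++)
		printf("key %llu -> bucket %zu\n",
		       (unsigned long long)key, hash_bits64(key, 8));
	return 0;
}
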
index 477c679ed94566289095b460bca29288806ea1b0..11e4725b8b1c010cf9d198f7457a520556267c59 100644 (file)
@@ -4818,7 +4818,13 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
                        err = -EINVAL;
                        goto out;
                }
-               prog = bpf_object__find_program_by_title(obj, sec_name);
+               prog = NULL;
+               for (i = 0; i < obj->nr_programs; i++) {
+                       if (!strcmp(obj->programs[i].section_name, sec_name)) {
+                               prog = &obj->programs[i];
+                               break;
+                       }
+               }
                if (!prog) {
                        pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
                                sec_name);
@@ -6653,7 +6659,7 @@ static const struct bpf_sec_def section_defs[] = {
                .expected_attach_type = BPF_TRACE_ITER,
                .is_attach_btf = true,
                .attach_fn = attach_iter),
-       BPF_EAPROG_SEC("xdp_devmap",            BPF_PROG_TYPE_XDP,
+       BPF_EAPROG_SEC("xdp_devmap/",           BPF_PROG_TYPE_XDP,
                                                BPF_XDP_DEVMAP),
        BPF_PROG_SEC("xdp",                     BPF_PROG_TYPE_XDP),
        BPF_PROG_SEC("perf_event",              BPF_PROG_TYPE_PERF_EVENT),
index 27f3b07fdae8bed2a5f02118b2933cca82ac46f0..f1640d651c8a86d340e13826dd8d533854da2bae 100644 (file)
@@ -361,6 +361,7 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
                break;
 
        case KBUFFER_TYPE_TIME_EXTEND:
+       case KBUFFER_TYPE_TIME_STAMP:
                extend = read_4(kbuf, data);
                data += 4;
                extend <<= TS_SHIFT;
@@ -369,10 +370,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
                *length = 0;
                break;
 
-       case KBUFFER_TYPE_TIME_STAMP:
-               data += 12;
-               *length = 0;
-               break;
        case 0:
                *length = read_4(kbuf, data) - 4;
                *length = (*length + 3) & ~3;
@@ -397,7 +394,11 @@ static unsigned int update_pointers(struct kbuffer *kbuf)
 
        type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
 
-       kbuf->timestamp += delta;
+       if (type_len == KBUFFER_TYPE_TIME_STAMP)
+               kbuf->timestamp = delta;
+       else
+               kbuf->timestamp += delta;
+
        kbuf->index = calc_index(kbuf, ptr);
        kbuf->next = kbuf->index + length;
 
@@ -454,7 +455,9 @@ static int __next_event(struct kbuffer *kbuf)
                if (kbuf->next >= kbuf->size)
                        return -1;
                type = update_pointers(kbuf);
-       } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);
+       } while (type == KBUFFER_TYPE_TIME_EXTEND ||
+                type == KBUFFER_TYPE_TIME_STAMP ||
+                type == KBUFFER_TYPE_PADDING);
 
        return 0;
 }
@@ -546,6 +549,34 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer)
        return 0;
 }
 
+/**
+ * kbuffer_subbuf_timestamp - read the timestamp from a sub buffer
+ * @kbuf:      The kbuffer to read from
+ * @subbuf:    The subbuffer to read from.
+ *
+ * Return the timestamp from a subbuffer.
+ */
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf)
+{
+       return kbuf->read_8(subbuf);
+}
+
+/**
+ * kbuffer_ptr_delta - read the delta field from a record
+ * @kbuf:      The kbuffer to read from
+ * @ptr:       The record in the buffer.
+ *
+ * Return the timestamp delta from a record
+ */
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr)
+{
+       unsigned int type_len_ts;
+
+       type_len_ts = read_4(kbuf, ptr);
+       return ts4host(kbuf, type_len_ts);
+}
+
+
 /**
  * kbuffer_read_event - read the next event in the kbuffer subbuffer
  * @kbuf:      The kbuffer to read from
index ed4d697fc137861d9b98a23daef9f19d5e58da59..5fa8292e341b3da8fbd7be8af4744e5726006bb8 100644 (file)
@@ -49,6 +49,8 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer);
 void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts);
 void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts);
 unsigned long long kbuffer_timestamp(struct kbuffer *kbuf);
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf);
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr);
 
 void *kbuffer_translate_data(int swap, void *data, unsigned int *size);
 
index 839ef52c1ac22851914cd186ae5279e86716af1e..6ce451293634e398e0d53c6d666eae6108d631cb 100644 (file)
@@ -641,6 +641,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                        }
                        evsel->core.attr.freq = 0;
                        evsel->core.attr.sample_period = 1;
+                       evsel->no_aux_samples = true;
                        intel_pt_evsel = evsel;
                        opts->full_auxtrace = true;
                }
index e108d90ae2edf8704818a7e7c17e70afecddd335..a37e7910e9e90bf8c6bf5edd00b42e78a7593d66 100644 (file)
@@ -852,20 +852,20 @@ static int record__open(struct record *rec)
         * event synthesis.
         */
        if (opts->initial_delay || target__has_cpu(&opts->target)) {
-               if (perf_evlist__add_dummy(evlist))
-                       return -ENOMEM;
+               pos = perf_evlist__get_tracking_event(evlist);
+               if (!evsel__is_dummy_event(pos)) {
+                       /* Set up dummy event. */
+                       if (perf_evlist__add_dummy(evlist))
+                               return -ENOMEM;
+                       pos = evlist__last(evlist);
+                       perf_evlist__set_tracking_event(evlist, pos);
+               }
 
-               /* Disable tracking of mmaps on lead event. */
-               pos = evlist__first(evlist);
-               pos->tracking = 0;
-               /* Set up dummy event. */
-               pos = evlist__last(evlist);
-               pos->tracking = 1;
                /*
                 * Enable the dummy event when the process is forked for
                 * initial_delay, immediately for system wide.
                 */
-               if (opts->initial_delay)
+               if (opts->initial_delay && !pos->immediate)
                        pos->core.attr.enable_on_exec = 1;
                else
                        pos->immediate = 1;
index 181d65e5a45054cc93263bf7fbf15ec89d3e30b1..447457786362d83786ed328356a210980eb69272 100644 (file)
@@ -462,7 +462,7 @@ static int perf_evsel__check_attr(struct evsel *evsel, struct perf_session *sess
                return -EINVAL;
 
        if (PRINT_FIELD(IREGS) &&
-           evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS))
+           evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(UREGS) &&
index 7bd73a904b4ee103a8ad8aa92fc2253517cb5109..d187e46c2683e8cd1ecccc6b1287c46598b0cacc 100644 (file)
@@ -1055,7 +1055,7 @@ def cbr(id, raw_buf):
        cbr = data[0]
        MHz = (data[4] + 500) / 1000
        percent = ((cbr * 1000 / data[2]) + 5) / 10
-       value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent)
+       value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
        cbr_file.write(value)
 
 def mwait(id, raw_buf):
index 26d7be785288293abf23f33f86776302c56dc0f7..7daa8bb70a5a09e85189bfd15da7f59b263f2fda 100755 (executable)
@@ -768,7 +768,8 @@ class CallGraphModel(CallGraphModelBase):
                                                " FROM calls"
                                                " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
                                                " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-                                               " WHERE symbols.name" + match +
+                                               " WHERE calls.id <> 0"
+                                               " AND symbols.name" + match +
                                                " GROUP BY comm_id, thread_id, call_path_id"
                                                " ORDER BY comm_id, thread_id, call_path_id")
 
@@ -963,7 +964,8 @@ class CallTreeModel(CallGraphModelBase):
                                                " FROM calls"
                                                " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
                                                " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-                                               " WHERE symbols.name" + match +
+                                               " WHERE calls.id <> 0"
+                                               " AND symbols.name" + match +
                                                " ORDER BY comm_id, thread_id, call_time, calls.id")
 
        def FindPath(self, query):
@@ -1050,6 +1052,7 @@ class TreeWindowBase(QMdiSubWindow):
                                child = self.model.index(row, 0, parent)
                                if child.internalPointer().dbid == dbid:
                                        found = True
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                        parent = child
                                        break
@@ -1127,6 +1130,7 @@ class CallTreeWindow(TreeWindowBase):
                                child = self.model.index(row, 0, parent)
                                if child.internalPointer().dbid == dbid:
                                        found = True
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                        parent = child
                                        break
@@ -1139,6 +1143,7 @@ class CallTreeWindow(TreeWindowBase):
                                return
                        last_child = None
                        for row in xrange(n):
+                               self.view.setExpanded(parent, True)
                                child = self.model.index(row, 0, parent)
                                child_call_time = child.internalPointer().call_time
                                if child_call_time < time:
@@ -1151,9 +1156,11 @@ class CallTreeWindow(TreeWindowBase):
                        if not last_child:
                                if not found:
                                        child = self.model.index(0, 0, parent)
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                return
                        found = True
+                       self.view.setExpanded(parent, True)
                        self.view.setCurrentIndex(last_child)
                        parent = last_child
 
index 61f3be9add6b146714e914eeab9d16c682a18b55..65780013f74573e045cdb499e38f24d39c02baff 100755 (executable)
@@ -17,6 +17,7 @@
 from __future__ import print_function
 import sys
 import os
+import io
 import argparse
 import json
 
@@ -81,7 +82,7 @@ class FlameGraphCLI:
 
         if self.args.format == "html":
             try:
-                with open(self.args.template) as f:
+                with io.open(self.args.template, encoding="utf-8") as f:
                     output_str = f.read().replace("/** @flamegraph_json **/",
                                                   json_str)
             except IOError as e:
@@ -93,11 +94,12 @@ class FlameGraphCLI:
             output_fn = self.args.output or "stacks.json"
 
         if output_fn == "-":
-            sys.stdout.write(output_str)
+            with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out:
+                out.write(output_str)
         else:
             print("dumping data to {}".format(output_fn))
             try:
-                with open(output_fn, "w") as out:
+                with io.open(output_fn, "w", encoding="utf-8") as out:
                     out.write(output_str)
             except IOError as e:
                 print("Error writing output file: {}".format(e), file=sys.stderr)
index f98a118dfc49eb4142d555f1ab477f098c315430..be9c4c0549bc83ae0dc2de335284b96e8908b994 100644 (file)
@@ -2288,6 +2288,11 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser
        return browser->he_selection->thread;
 }
 
+static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser)
+{
+       return browser->he_selection ? browser->he_selection->res_samples : NULL;
+}
+
 /* Check whether the browser is for 'top' or 'report' */
 static inline bool is_report_browser(void *timer)
 {
@@ -3357,16 +3362,16 @@ skip_annotation:
                                             &options[nr_options], NULL, NULL, evsel);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_NORMAL);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_NORMAL);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_ASM);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_ASM);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_SOURCE);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_SOURCE);
                nr_options += add_switch_opt(browser, &actions[nr_options],
                                             &options[nr_options]);
 skip_scripting:
@@ -3598,6 +3603,23 @@ static int __perf_evlist__tui_browse_hists(struct evlist *evlist,
                                    hbt, warn_lost_event);
 }
 
+static bool perf_evlist__single_entry(struct evlist *evlist)
+{
+       int nr_entries = evlist->core.nr_entries;
+
+       if (nr_entries == 1)
+              return true;
+
+       if (nr_entries == 2) {
+               struct evsel *last = evlist__last(evlist);
+
+               if (evsel__is_dummy_event(last))
+                       return true;
+       }
+
+       return false;
+}
+
 int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
                                  struct hist_browser_timer *hbt,
                                  float min_pcnt,
@@ -3608,7 +3630,7 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
        int nr_entries = evlist->core.nr_entries;
 
 single_entry:
-       if (nr_entries == 1) {
+       if (perf_evlist__single_entry(evlist)) {
                struct evsel *first = evlist__first(evlist);
 
                return perf_evsel__hists_browse(first, nr_entries, help,
index 173b4f0e0e6e6158c77ea1dcf15e43ddcfae9a8f..ab48be4cf2584a036b4a9bacfcd0e8e5bee7e2b7 100644 (file)
@@ -1566,6 +1566,18 @@ void perf_evlist__to_front(struct evlist *evlist,
        list_splice(&move, &evlist->core.entries);
 }
 
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (evsel->tracking)
+                       return evsel;
+       }
+
+       return evlist__first(evlist);
+}
+
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                                     struct evsel *tracking_evsel)
 {
index b6f325dfb4d24d8481421be75c98b9567a9b610a..a8081dfc19cf5b4e10131f59d05e42ac22599efd 100644 (file)
@@ -335,6 +335,7 @@ void perf_evlist__to_front(struct evlist *evlist,
        evlist__cpu_iter_start(evlist);                 \
        perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)
 
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist);
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                                     struct evsel *tracking_evsel);
 
index 96e5171dce41e458a897fbae387bc78ef37276f7..ef802f6d40c17a27e8560fad8ccb9fb65fd30ebc 100644 (file)
@@ -898,12 +898,6 @@ static void evsel__apply_config_terms(struct evsel *evsel,
        }
 }
 
-static bool is_dummy_event(struct evsel *evsel)
-{
-       return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
-              (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
-}
-
 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
 {
        struct evsel_config_term *term, *found_term = NULL;
@@ -1020,12 +1014,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
        if (callchain && callchain->enabled && !evsel->no_aux_samples)
                evsel__config_callchain(evsel, opts, callchain);
 
-       if (opts->sample_intr_regs) {
+       if (opts->sample_intr_regs && !evsel->no_aux_samples) {
                attr->sample_regs_intr = opts->sample_intr_regs;
                evsel__set_sample_bit(evsel, REGS_INTR);
        }
 
-       if (opts->sample_user_regs) {
+       if (opts->sample_user_regs && !evsel->no_aux_samples) {
                attr->sample_regs_user |= opts->sample_user_regs;
                evsel__set_sample_bit(evsel, REGS_USER);
        }
@@ -1161,7 +1155,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         * The software event will trigger -EOPNOTSUPP error out,
         * if BRANCH_STACK bit is set.
         */
-       if (is_dummy_event(evsel))
+       if (evsel__is_dummy_event(evsel))
                evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }
 
index 0f963c2a88a5d3d1734189d5774823a0238de6db..35e3f6d66085b3885290e315e7268f0852f2bb1c 100644 (file)
@@ -399,6 +399,12 @@ static inline bool evsel__has_br_stack(const struct evsel *evsel)
               evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
 }
 
+static inline bool evsel__is_dummy_event(struct evsel *evsel)
+{
+       return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
+              (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
+}
+
 struct perf_env *evsel__env(struct evsel *evsel);
 
 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
index e4dd8bf610ceb1c2929602eb0380972a778f09da..cb3c1e569a2dbafa5e091a78368b07a1af513c0c 100644 (file)
@@ -1735,6 +1735,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
        u64 sample_type = evsel->core.attr.sample_type;
        u64 id = evsel->core.id[0];
        u8 cpumode;
+       u64 regs[8 * sizeof(sample.intr_regs.mask)];
 
        if (intel_pt_skip_event(pt))
                return 0;
@@ -1784,8 +1785,8 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
        }
 
        if (sample_type & PERF_SAMPLE_REGS_INTR &&
-           items->mask[INTEL_PT_GP_REGS_POS]) {
-               u64 regs[sizeof(sample.intr_regs.mask)];
+           (items->mask[INTEL_PT_GP_REGS_POS] ||
+            items->mask[INTEL_PT_XMM_POS])) {
                u64 regs_mask = evsel->core.attr.sample_regs_intr;
                u64 *pos;
 
index 787b6d4ad7162e759b3c06a287e24396e42edfbe..f9b769f3437ddce6b6fb7bf59c34afeacf4784ea 100755 (executable)
@@ -82,7 +82,9 @@ def build_tests(linux: kunit_kernel.LinuxSourceTree,
                                        request.make_options)
        build_end = time.time()
        if not success:
-               return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel')
+               return KunitResult(KunitStatus.BUILD_FAILURE,
+                                  'could not build kernel',
+                                  build_end - build_start)
        if not success:
                return KunitResult(KunitStatus.BUILD_FAILURE,
                                   'could not build kernel',
index e75063d603b5be152fe1c66c74180326effe09ca..02ffc3a3e5dc7f4b85156de813460ee89847fa61 100644 (file)
@@ -10,7 +10,7 @@ import collections
 import re
 
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
-CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+)$'
+CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
 
 KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
 
index 64aac9dcd4314df3d9a1f973981899d69fca2a16..f13e0c0d666395595bd2cf71c8affe1deebe82f8 100644 (file)
@@ -265,11 +265,9 @@ def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
        return bubble_up_errors(lambda x: x.status, test_suite_list)
 
 def parse_test_result(lines: List[str]) -> TestResult:
-       if not lines:
-               return TestResult(TestStatus.NO_TESTS, [], lines)
        consume_non_diagnositic(lines)
-       if not parse_tap_header(lines):
-               return None
+       if not lines or not parse_tap_header(lines):
+               return TestResult(TestStatus.NO_TESTS, [], lines)
        test_suites = []
        test_suite = parse_test_suite(lines)
        while test_suite:
@@ -282,6 +280,8 @@ def parse_run_tests(kernel_output) -> TestResult:
        failed_tests = 0
        crashed_tests = 0
        test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
+       if test_result.status == TestStatus.NO_TESTS:
+               print_with_timestamp(red('[ERROR] ') + 'no kunit output detected')
        for test_suite in test_result.suites:
                if test_suite.status == TestStatus.SUCCESS:
                        print_suite_divider(green('[PASSED] ') + test_suite.name)
index 5bb7b118ebd941d797c995335cfffa411630cf01..f9eeaea94cad11dc3f50e49230155f3cd10e9fd3 100755 (executable)
@@ -170,6 +170,17 @@ class KUnitParserTest(unittest.TestCase):
                        result.status)
                file.close()
 
+       def test_no_kunit_output(self):
+               crash_log = get_absolute_path(
+                       'test_data/test_insufficient_memory.log')
+               file = open(crash_log)
+               print_mock = mock.patch('builtins.print').start()
+               result = kunit_parser.parse_run_tests(
+                       kunit_parser.isolate_kunit_output(file.readlines()))
+               print_mock.assert_any_call(StrContains("no kunit output detected"))
+               print_mock.stop()
+               file.close()
+
        def test_crashed_test(self):
                crashed_log = get_absolute_path(
                        'test_data/test_is_test_passed-crash.log')
diff --git a/tools/testing/kunit/test_data/test_insufficient_memory.log b/tools/testing/kunit/test_data/test_insufficient_memory.log
new file mode 100644 (file)
index 0000000..e69de29
index 83493bd5745c875d60cb71bfe929adc9bf5e0808..109d0345a2be5ae30de270311fbba2c1ade45628 100644 (file)
@@ -36,7 +36,7 @@ void test_fentry_fexit(void)
        fentry_res = (__u64 *)fentry_skel->bss;
        fexit_res = (__u64 *)fexit_skel->bss;
        printf("%lld\n", fentry_skel->bss->test1_result);
-       for (i = 0; i < 6; i++) {
+       for (i = 0; i < 8; i++) {
                CHECK(fentry_res[i] != 1, "result",
                      "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
                CHECK(fexit_res[i] != 1, "result",
index ea14e3ece81202ea8e4d922a1e0b041b2f2e2c99..f11f187990e95b87cb81d8457a976d8bb86f9fdd 100644 (file)
@@ -527,8 +527,8 @@ static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
 
        run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 
-       err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
-       CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
+       err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+       CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
 }
 
 static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
index 15cb554a66d8c8504bb6e98ce13a7ead7ae533ba..172c586b69969b29d1e67c108f9649ea69c1ea1f 100644 (file)
@@ -1,9 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Test that the flow_dissector program can be updated with a single
- * syscall by attaching a new program that replaces the existing one.
- *
- * Corner case - the same program cannot be attached twice.
+ * Tests for attaching, detaching, and replacing flow_dissector BPF program.
  */
 
 #define _GNU_SOURCE
@@ -116,7 +113,7 @@ static void test_prog_attach_prog_attach(int netns, int prog1, int prog2)
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
 
 out_detach:
-       err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err))
                perror("bpf_prog_detach");
        CHECK_FAIL(prog_is_attached(netns));
@@ -152,7 +149,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
        int err, link;
 
-       err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0);
+       err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
        if (CHECK_FAIL(err)) {
                perror("bpf_prog_attach(prog1)");
                return;
@@ -168,7 +165,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
                close(link);
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
 
-       err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err))
                perror("bpf_prog_detach");
        CHECK_FAIL(prog_is_attached(netns));
@@ -188,7 +185,7 @@ static void test_link_create_prog_attach(int netns, int prog1, int prog2)
 
        /* Expect failure attaching prog when link exists */
        errno = 0;
-       err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0);
+       err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
        if (CHECK_FAIL(!err || errno != EEXIST))
                perror("bpf_prog_attach(prog2) expected EEXIST");
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -211,7 +208,7 @@ static void test_link_create_prog_detach(int netns, int prog1, int prog2)
 
        /* Expect failure detaching prog when link exists */
        errno = 0;
-       err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(!err || errno != EINVAL))
                perror("bpf_prog_detach expected EINVAL");
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -231,7 +228,7 @@ static void test_prog_attach_detach_query(int netns, int prog1, int prog2)
        }
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
 
-       err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err)) {
                perror("bpf_prog_detach");
                return;
@@ -308,6 +305,31 @@ static void test_link_update_replace_old_prog(int netns, int prog1, int prog2)
        CHECK_FAIL(prog_is_attached(netns));
 }
 
+static void test_link_update_same_prog(int netns, int prog1, int prog2)
+{
+       DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
+       DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+       int err, link;
+
+       link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
+       if (CHECK_FAIL(link < 0)) {
+               perror("bpf_link_create(prog1)");
+               return;
+       }
+       CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+       /* Expect success updating the prog with the same one */
+       update_opts.flags = 0;
+       update_opts.old_prog_fd = 0;
+       err = bpf_link_update(link, prog1, &update_opts);
+       if (CHECK_FAIL(err))
+               perror("bpf_link_update");
+       CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+       close(link);
+       CHECK_FAIL(prog_is_attached(netns));
+}
+
 static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
 {
        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
@@ -571,6 +593,8 @@ static void run_tests(int netns)
                  test_link_update_no_old_prog },
                { "link update with replace old prog",
                  test_link_update_replace_old_prog },
+               { "link update with same prog",
+                 test_link_update_same_prog },
                { "link update invalid opts",
                  test_link_update_invalid_opts },
                { "link update invalid prog",
index e7b8753eac0b14e73bad2f95aa6c3b82f8efe645..75ecf956a2df9bc7101c7a73cfcae7a4b947b0d6 100644 (file)
@@ -25,7 +25,7 @@ struct bpf_iter__netlink {
        struct netlink_sock *sk;
 } __attribute__((preserve_access_index));
 
-static inline struct inode *SOCK_INODE(struct socket *socket)
+static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
 {
        return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
index 9365b686f84bc8ef7f15ec8b57f10a5c0af6a19d..5f645fdaba6f50a44cfd2a2ff49f37c34fdd52fa 100644 (file)
@@ -55,3 +55,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f)
                e == (void *)20 && f == 21;
        return 0;
 }
+
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test_t *a;
+};
+
+__u64 test7_result = 0;
+SEC("fentry/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+       if (arg == 0)
+               test7_result = 1;
+       return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fentry/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+       if (arg->a == 0)
+               test8_result = 1;
+       return 0;
+}
index bd1e17d8024ce7eee03782edd86372ae01117e51..0952affb22a68526eb57a416614a8f0705cf9649 100644 (file)
@@ -56,3 +56,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret)
                e == (void *)20 && f == 21 && ret == 111;
        return 0;
 }
+
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test *a;
+};
+
+__u64 test7_result = 0;
+SEC("fexit/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+       if (arg == 0)
+               test7_result = 1;
+       return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fexit/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+       if (arg->a == 0)
+               test8_result = 1;
+       return 0;
+}
index 057036ca111179fa737c4c670e4d745536598a2d..3dca4c2e24185480540fd7c283148b28ee6a08d2 100644 (file)
@@ -79,7 +79,7 @@ struct {
 
 struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
-       __uint(max_entries, 2);
+       __uint(max_entries, 3);
        __type(key, int);
        __type(value, int);
 } sock_skb_opts SEC(".maps");
@@ -94,6 +94,12 @@ struct {
 SEC("sk_skb1")
 int bpf_prog1(struct __sk_buff *skb)
 {
+       int *f, two = 2;
+
+       f = bpf_map_lookup_elem(&sock_skb_opts, &two);
+       if (f && *f) {
+               return *f;
+       }
        return skb->len;
 }
 
index 330811260123260949b58351176df511c887d226..0ac08649772228ebd7846e6a543ce78bbc25168f 100644 (file)
@@ -27,7 +27,7 @@ int xdp_dummy_prog(struct xdp_md *ctx)
 /* valid program on DEVMAP entry via SEC name;
  * has access to egress and ingress ifindex
  */
-SEC("xdp_devmap")
+SEC("xdp_devmap/map_prog")
 int xdp_dummy_dm(struct xdp_md *ctx)
 {
        char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
index 6a12a0e01e0731a390f22963954feabce648f0c2..754cf611723ee53f759bcf7a88f989033431ae1a 100644 (file)
@@ -789,19 +789,19 @@ static void test_sockmap(unsigned int tasks, void *data)
        }
 
        err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
-       if (err) {
+       if (!err) {
                printf("Failed empty parser prog detach\n");
                goto out_sockmap;
        }
 
        err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
-       if (err) {
+       if (!err) {
                printf("Failed empty verdict prog detach\n");
                goto out_sockmap;
        }
 
        err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
-       if (err) {
+       if (!err) {
                printf("Failed empty msg verdict prog detach\n");
                goto out_sockmap;
        }
@@ -1090,19 +1090,19 @@ static void test_sockmap(unsigned int tasks, void *data)
                assert(status == 0);
        }
 
-       err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
+       err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
        if (!err) {
                printf("Detached an invalid prog type.\n");
                goto out_sockmap;
        }
 
-       err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
+       err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
        if (err) {
                printf("Failed parser prog detach\n");
                goto out_sockmap;
        }
 
-       err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
+       err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
        if (err) {
                printf("Failed parser prog detach\n");
                goto out_sockmap;
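Editor's note: in the sockmap hunks above, the attachable fd is the map itself, so bpf_prog_detach2() takes both the program fd and the map fd. A minimal sketch of that pairing, with hypothetical helper and parameter names:

    #include <errno.h>
    #include <stdio.h>
    #include <bpf/bpf.h>

    /* Detach the stream parser and verdict programs from a sockmap. Passing
     * the program fd lets the kernel reject a detach that names the wrong
     * program for the given map and attach type.
     */
    static int detach_sockmap_progs(int parser_fd, int verdict_fd, int map_fd)
    {
            int err;

            err = bpf_prog_detach2(parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER);
            if (err)
                    fprintf(stderr, "parser detach: errno %d\n", errno);

            err = bpf_prog_detach2(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT);
            if (err)
                    fprintf(stderr, "verdict detach: errno %d\n", errno);

            return err;
    }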
index 37695fc8096ad622990b034749416b6234daf58e..78789b27e5730267d7b54ad8e1679aac3b35b6b1 100644 (file)
@@ -85,6 +85,7 @@ int txmsg_ktls_skb_drop;
 int txmsg_ktls_skb_redir;
 int ktls;
 int peek_flag;
+int skb_use_parser;
 
 static const struct option long_options[] = {
        {"help",        no_argument,            NULL, 'h' },
@@ -174,6 +175,7 @@ static void test_reset(void)
        txmsg_apply = txmsg_cork = 0;
        txmsg_ingress = txmsg_redir_skb = 0;
        txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
+       skb_use_parser = 0;
 }
 
 static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
@@ -1211,6 +1213,11 @@ run:
                }
        }
 
+       if (skb_use_parser) {
+               i = 2;
+               err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
+       }
+
        if (txmsg_drop)
                options->drop_expected = true;
 
@@ -1650,6 +1657,16 @@ static void test_txmsg_cork(int cgrp, struct sockmap_options *opt)
        test_send(opt, cgrp);
 }
 
+static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
+{
+       txmsg_pass = 1;
+       skb_use_parser = 512;
+       opt->iov_length = 256;
+       opt->iov_count = 1;
+       opt->rate = 2;
+       test_exec(cgrp, opt);
+}
+
 char *map_names[] = {
        "sock_map",
        "sock_map_txmsg",
@@ -1748,6 +1765,7 @@ struct _test test[] = {
        {"txmsg test pull-data", test_txmsg_pull},
        {"txmsg test pop-data", test_txmsg_pop},
        {"txmsg test push/pop data", test_txmsg_push_pop},
+       {"txmsg test ingress parser", test_txmsg_ingress_parser},
 };
 
 static int check_whitelist(struct _test *t, struct sockmap_options *opt)
index 3702dbcc90a773d1e4d87aae2ce52ce9a9289701..c82aa77958e50a14b014435d8b5ace0867409019 100755 (executable)
@@ -63,6 +63,8 @@ ALL_TESTS="$ALL_TESTS 0008:150:1"
 ALL_TESTS="$ALL_TESTS 0009:150:1"
 ALL_TESTS="$ALL_TESTS 0010:1:1"
 ALL_TESTS="$ALL_TESTS 0011:1:1"
+ALL_TESTS="$ALL_TESTS 0012:1:1"
+ALL_TESTS="$ALL_TESTS 0013:1:1"
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -470,6 +472,38 @@ kmod_test_0011()
        echo "$MODPROBE" > /proc/sys/kernel/modprobe
 }
 
+kmod_check_visibility()
+{
+       local name="$1"
+       local cmd="$2"
+
+       modprobe $DEFAULT_KMOD_DRIVER
+
+       local priv=$(eval $cmd)
+       local unpriv=$(capsh --drop=CAP_SYSLOG -- -c "$cmd")
+
+       if [ "$priv" = "$unpriv" ] || \
+          [ "${priv:0:3}" = "0x0" ] || \
+          [ "${unpriv:0:3}" != "0x0" ] ; then
+               echo "${FUNCNAME[0]}: FAIL, $name visible to unpriv: '$priv' vs '$unpriv'" >&2
+               exit 1
+       else
+               echo "${FUNCNAME[0]}: OK!"
+       fi
+}
+
+kmod_test_0012()
+{
+       kmod_check_visibility /proc/modules \
+               "grep '^${DEFAULT_KMOD_DRIVER}\b' /proc/modules | awk '{print \$NF}'"
+}
+
+kmod_test_0013()
+{
+       kmod_check_visibility '/sys/module/*/sections/*' \
+               "cat /sys/module/${DEFAULT_KMOD_DRIVER}/sections/.*text | head -n1"
+}
+
 list_tests()
 {
        echo "Test ID list:"
@@ -489,6 +523,8 @@ list_tests()
        echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()"
        echo "0010 x $(get_test_count 0010) - test nonexistent modprobe path"
        echo "0011 x $(get_test_count 0011) - test completely disabling module autoloading"
+       echo "0012 x $(get_test_count 0012) - test /proc/modules address visibility under CAP_SYSLOG"
+       echo "0013 x $(get_test_count 0013) - test /sys/module/*/sections/* visibility under CAP_SYSLOG"
 }
 
 usage()
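Editor's note: tests 0012/0013 above compare the module addresses a privileged reader sees against what a reader stripped of CAP_SYSLOG sees, where the latter should read back as zeros. The standalone sketch below inspects the address column of /proc/modules in the same spirit; it is illustrative only and assumes the usual six-field /proc/modules layout.

    #include <stdio.h>

    /* Print, for each loaded module, whether its load address is exposed or
     * reported as 0x0 (what a reader without CAP_SYSLOG should see).
     */
    int main(void)
    {
            char line[512], name[64];
            unsigned long addr;
            FILE *f = fopen("/proc/modules", "r");

            if (!f)
                    return 1;

            /* fields: name size refcount users state address [taint] */
            while (fgets(line, sizeof(line), f)) {
                    if (sscanf(line, "%63s %*s %*s %*s %*s %lx", name, &addr) == 2)
                            printf("%-24s %s\n", name,
                                   addr ? "address visible" : "address hidden (0x0)");
            }

            fclose(f);
            return 0;
    }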
index 0ac49d91a26023c3eb10804f4cf77fc004b4d080..862eee73455388c8c4c5d9c962f95a8438dfa781 100644 (file)
@@ -36,7 +36,7 @@ struct ksft_count {
 static struct ksft_count ksft_cnt;
 static unsigned int ksft_plan;
 
-static inline int ksft_test_num(void)
+static inline unsigned int ksft_test_num(void)
 {
        return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
                ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
index dee567f7576abdff8bc83112ec5131452fff6727..22dc2f3d428bab3c00a299aa589a6edd794ea3db 100755 (executable)
@@ -747,6 +747,19 @@ ipv6_fcnal_runtime()
        run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1"
        run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
 
+       # rpfilter and default route
+       $IP nexthop flush >/dev/null 2>&1
+       run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP"
+       run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1"
+       run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3"
+       run_cmd "$IP nexthop add id 93 group 91/92"
+       run_cmd "$IP -6 ro add default nhid 91"
+       run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+       log_test $? 0 "Nexthop with default route and rpfilter"
+       run_cmd "$IP -6 ro replace default nhid 93"
+       run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+       log_test $? 0 "Nexthop with multipath default route and rpfilter"
+
        # TO-DO:
        # existing route with old nexthop; append route with new nexthop
        # existing route with old nexthop; replace route with new
index 663062701d5aa6772d56d467181b187aa0b02587..3e5ff29ee1dd9f9e14c31b02ca1859145b1afd76 100755 (executable)
@@ -1,15 +1,10 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
-[ -f /dev/tpm0 ] || exit $ksft_skip
+[ -e /dev/tpm0 ] || exit $ksft_skip
 
-python -m unittest -v tpm2_tests.SmokeTest
-python -m unittest -v tpm2_tests.AsyncTest
-
-CLEAR_CMD=$(which tpm2_clear)
-if [ -n $CLEAR_CMD ]; then
-       tpm2_clear -T device
-fi
+python3 -m unittest -v tpm2_tests.SmokeTest
+python3 -m unittest -v tpm2_tests.AsyncTest
index 36c9d030a1c636e0719576d183116f61a99539ec..04c47b13fe8ac8cb27c242ea9a8f2c5eebf7e3e7 100755 (executable)
@@ -1,9 +1,9 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
-[ -f /dev/tpmrm0 ] || exit $ksft_skip
+[ -e /dev/tpmrm0 ] || exit $ksft_skip
 
-python -m unittest -v tpm2_tests.SpaceTest
+python3 -m unittest -v tpm2_tests.SpaceTest
index d0fcb66a88a68a0bf9e57d5da24aadfd695c12f9..f34486cd7342d51a9cfddab193ca01243fb0b2e4 100644 (file)
@@ -247,14 +247,14 @@ class ProtocolError(Exception):
 class AuthCommand(object):
     """TPMS_AUTH_COMMAND"""
 
-    def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
-                 hmac=''):
+    def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(),
+                 session_attributes=0, hmac=bytes()):
         self.session_handle = session_handle
         self.nonce = nonce
         self.session_attributes = session_attributes
         self.hmac = hmac
 
-    def __str__(self):
+    def __bytes__(self):
         fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
         return struct.pack(fmt, self.session_handle, len(self.nonce),
                            self.nonce, self.session_attributes, len(self.hmac),
@@ -268,11 +268,11 @@ class AuthCommand(object):
 class SensitiveCreate(object):
     """TPMS_SENSITIVE_CREATE"""
 
-    def __init__(self, user_auth='', data=''):
+    def __init__(self, user_auth=bytes(), data=bytes()):
         self.user_auth = user_auth
         self.data = data
 
-    def __str__(self):
+    def __bytes__(self):
         fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
         return struct.pack(fmt, len(self.user_auth), self.user_auth,
                            len(self.data), self.data)
@@ -296,8 +296,9 @@ class Public(object):
         return '>HHIH%us%usH%us' % \
             (len(self.auth_policy), len(self.parameters), len(self.unique))
 
-    def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
-                 parameters='', unique=''):
+    def __init__(self, object_type, name_alg, object_attributes,
+                 auth_policy=bytes(), parameters=bytes(),
+                 unique=bytes()):
         self.object_type = object_type
         self.name_alg = name_alg
         self.object_attributes = object_attributes
@@ -305,7 +306,7 @@ class Public(object):
         self.parameters = parameters
         self.unique = unique
 
-    def __str__(self):
+    def __bytes__(self):
         return struct.pack(self.__fmt(),
                            self.object_type,
                            self.name_alg,
@@ -343,7 +344,7 @@ def get_algorithm(name):
 
 def hex_dump(d):
     d = [format(ord(x), '02x') for x in d]
-    d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
+    d = [d[i: i + 16] for i in range(0, len(d), 16)]
     d = [' '.join(x) for x in d]
     d = os.linesep.join(d)
 
@@ -401,7 +402,7 @@ class Client:
         pcrsel_len = max((i >> 3) + 1, 3)
         pcrsel = [0] * pcrsel_len
         pcrsel[i >> 3] = 1 << (i & 7)
-        pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = ''.join(map(chr, pcrsel)).encode()
 
         fmt = '>HII IHB%us' % (pcrsel_len)
         cmd = struct.pack(fmt,
@@ -443,7 +444,7 @@ class Client:
             TPM2_CC_PCR_EXTEND,
             i,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             1, bank_alg, dig)
 
         self.send_cmd(cmd)
@@ -457,7 +458,7 @@ class Client:
                           TPM2_RH_NULL,
                           TPM2_RH_NULL,
                           16,
-                          '\0' * 16,
+                          ('\0' * 16).encode(),
                           0,
                           session_type,
                           TPM2_ALG_NULL,
@@ -472,7 +473,7 @@ class Client:
 
         for i in pcrs:
             pcr = self.read_pcr(i, bank_alg)
-            if pcr == None:
+            if pcr is None:
                 return None
             x += pcr
 
@@ -489,7 +490,7 @@ class Client:
         pcrsel = [0] * pcrsel_len
         for i in pcrs:
             pcrsel[i >> 3] |= 1 << (i & 7)
-        pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = ''.join(map(chr, pcrsel)).encode()
 
         fmt = '>HII IH%usIHB3s' % ds
         cmd = struct.pack(fmt,
@@ -497,7 +498,8 @@ class Client:
                           struct.calcsize(fmt),
                           TPM2_CC_POLICY_PCR,
                           handle,
-                          len(dig), str(dig),
+                          len(dig),
+                          bytes(dig),
                           1,
                           bank_alg,
                           pcrsel_len, pcrsel)
@@ -534,7 +536,7 @@ class Client:
 
         self.send_cmd(cmd)
 
-    def create_root_key(self, auth_value = ''):
+    def create_root_key(self, auth_value = bytes()):
         attributes = \
             Public.FIXED_TPM | \
             Public.FIXED_PARENT | \
@@ -570,11 +572,11 @@ class Client:
             TPM2_CC_CREATE_PRIMARY,
             TPM2_RH_OWNER,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             len(sensitive),
-            str(sensitive),
+            bytes(sensitive),
             len(public),
-            str(public),
+            bytes(public),
             0, 0)
 
         return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
@@ -587,7 +589,7 @@ class Client:
         attributes = 0
         if not policy_dig:
             attributes |= Public.USER_WITH_AUTH
-            policy_dig = ''
+            policy_dig = bytes()
 
         auth_cmd =  AuthCommand()
         sensitive = SensitiveCreate(user_auth=auth_value, data=data)
@@ -608,11 +610,11 @@ class Client:
             TPM2_CC_CREATE,
             parent_key,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             len(sensitive),
-            str(sensitive),
+            bytes(sensitive),
             len(public),
-            str(public),
+            bytes(public),
             0, 0)
 
         rsp = self.send_cmd(cmd)
@@ -635,7 +637,7 @@ class Client:
             TPM2_CC_LOAD,
             parent_key,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             blob)
 
         data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
@@ -653,7 +655,7 @@ class Client:
             TPM2_CC_UNSEAL,
             data_handle,
             len(auth_cmd),
-            str(auth_cmd))
+            bytes(auth_cmd))
 
         try:
             rsp = self.send_cmd(cmd)
@@ -675,7 +677,7 @@ class Client:
             TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
             TPM2_RH_LOCKOUT,
             len(auth_cmd),
-            str(auth_cmd))
+            bytes(auth_cmd))
 
         self.send_cmd(cmd)
 
@@ -693,7 +695,7 @@ class Client:
         more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
         rsp = rsp[9:]
 
-        for i in xrange(0, cnt):
+        for i in range(0, cnt):
             handle = struct.unpack('>I', rsp[:4])[0]
             handles.append(handle)
             rsp = rsp[4:]
index 728be7c69b764fe48592bd22e20d01a0fe19d68c..9d764306887b7008b5a75b2eab6584c2abb06e76 100644 (file)
@@ -20,8 +20,8 @@ class SmokeTest(unittest.TestCase):
         self.client.close()
 
     def test_seal_with_auth(self):
-        data = 'X' * 64
-        auth = 'A' * 15
+        data = ('X' * 64).encode()
+        auth = ('A' * 15).encode()
 
         blob = self.client.seal(self.root_key, data, auth, None)
         result = self.client.unseal(self.root_key, blob, auth, None)
@@ -30,8 +30,8 @@ class SmokeTest(unittest.TestCase):
     def test_seal_with_policy(self):
         handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
 
-        data = 'X' * 64
-        auth = 'A' * 15
+        data = ('X' * 64).encode()
+        auth = ('A' * 15).encode()
         pcrs = [16]
 
         try:
@@ -58,14 +58,15 @@ class SmokeTest(unittest.TestCase):
         self.assertEqual(data, result)
 
     def test_unseal_with_wrong_auth(self):
-        data = 'X' * 64
-        auth = 'A' * 20
+        data = ('X' * 64).encode()
+        auth = ('A' * 20).encode()
         rc = 0
 
         blob = self.client.seal(self.root_key, data, auth, None)
         try:
-            result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
-        except ProtocolError, e:
+            result = self.client.unseal(self.root_key, blob,
+                        auth[:-1] + 'B'.encode(), None)
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
@@ -73,8 +74,8 @@ class SmokeTest(unittest.TestCase):
     def test_unseal_with_wrong_policy(self):
         handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
 
-        data = 'X' * 64
-        auth = 'A' * 17
+        data = ('X' * 64).encode()
+        auth = ('A' * 17).encode()
         pcrs = [16]
 
         try:
@@ -91,7 +92,7 @@ class SmokeTest(unittest.TestCase):
         # This should succeed.
 
         ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
-        self.client.extend_pcr(1, 'X' * ds)
+        self.client.extend_pcr(1, ('X' * ds).encode())
 
         handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
 
@@ -108,7 +109,7 @@ class SmokeTest(unittest.TestCase):
 
         # Then, extend a PCR that is part of the policy and try to unseal.
         # This should fail.
-        self.client.extend_pcr(16, 'X' * ds)
+        self.client.extend_pcr(16, ('X' * ds).encode())
 
         handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
 
@@ -119,7 +120,7 @@ class SmokeTest(unittest.TestCase):
             self.client.policy_password(handle)
 
             result = self.client.unseal(self.root_key, blob, auth, handle)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
             self.client.flush_context(handle)
         except:
@@ -130,13 +131,13 @@ class SmokeTest(unittest.TestCase):
 
     def test_seal_with_too_long_auth(self):
         ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
-        data = 'X' * 64
-        auth = 'A' * (ds + 1)
+        data = ('X' * 64).encode()
+        auth = ('A' * (ds + 1)).encode()
 
         rc = 0
         try:
             blob = self.client.seal(self.root_key, data, auth, None)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
@@ -152,7 +153,7 @@ class SmokeTest(unittest.TestCase):
                               0xDEADBEEF)
 
             self.client.send_cmd(cmd)
-        except IOError, e:
+        except IOError as e:
             rejected = True
         except:
             pass
@@ -212,7 +213,7 @@ class SmokeTest(unittest.TestCase):
             self.client.tpm.write(cmd)
             rsp = self.client.tpm.read()
 
-        except IOError, e:
+        except IOError as e:
             # read the response
             rsp = self.client.tpm.read()
             rejected = True
@@ -283,7 +284,7 @@ class SpaceTest(unittest.TestCase):
         rc = 0
         try:
             space1.send_cmd(cmd)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
index 5f16821c7f63a69064e42ec33a456df6c0ac6b85..d2796ea98c5ac14057372dc6305a9c0e6aba679a 100644 (file)
@@ -70,10 +70,10 @@ all_64: $(BINARIES_64)
 
 EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
 
-$(BINARIES_32): $(OUTPUT)/%_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h
        $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
 
-$(BINARIES_64): $(OUTPUT)/%_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h
        $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 # x86_64 users should be encouraged to install 32-bit libraries
diff --git a/tools/testing/selftests/x86/helpers.h b/tools/testing/selftests/x86/helpers.h
new file mode 100644 (file)
index 0000000..f5ff2a2
--- /dev/null
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __SELFTESTS_X86_HELPERS_H
+#define __SELFTESTS_X86_HELPERS_H
+
+#include <asm/processor-flags.h>
+
+static inline unsigned long get_eflags(void)
+{
+       unsigned long eflags;
+
+       asm volatile (
+#ifdef __x86_64__
+               "subq $128, %%rsp\n\t"
+               "pushfq\n\t"
+               "popq %0\n\t"
+               "addq $128, %%rsp"
+#else
+               "pushfl\n\t"
+               "popl %0"
+#endif
+               : "=r" (eflags) :: "memory");
+
+       return eflags;
+}
+
+static inline void set_eflags(unsigned long eflags)
+{
+       asm volatile (
+#ifdef __x86_64__
+               "subq $128, %%rsp\n\t"
+               "pushq %0\n\t"
+               "popfq\n\t"
+               "addq $128, %%rsp"
+#else
+               "pushl %0\n\t"
+               "popfl"
+#endif
+               :: "r" (eflags) : "flags", "memory");
+}
+
+#endif /* __SELFTESTS_X86_HELPERS_H */
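Editor's note: the new helpers.h above centralises the EFLAGS accessors that several x86 selftests used to open-code (the subq/addq $128 dance keeps the x86-64 red zone intact while pushing and popping the flags). A minimal, hypothetical usage sketch, assuming it is compiled next to helpers.h as the selftests are:

    #include <stdio.h>
    #include "helpers.h"

    int main(void)
    {
            unsigned long flags = get_eflags();
            int df_seen;

            /* Set the direction flag, confirm it reads back, then restore the
             * original flags before calling into libc (the ABI expects DF clear).
             */
            set_eflags(flags | X86_EFLAGS_DF);
            df_seen = !!(get_eflags() & X86_EFLAGS_DF);
            set_eflags(flags);

            printf("[%s]\tDF %s set after set_eflags()\n",
                   df_seen ? "OK" : "FAIL", df_seen ? "was" : "was not");
            return df_seen ? 0 : 1;
    }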
index 1063328e275c90b33b07a2ac9d8c71d72d06d6bc..120ac741fe4405a9fe049672c52d88be6c79f7c6 100644 (file)
@@ -31,6 +31,8 @@
 #include <sys/ptrace.h>
 #include <sys/user.h>
 
+#include "helpers.h"
+
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
@@ -67,21 +69,6 @@ static unsigned char altstack_data[SIGSTKSZ];
 # define INT80_CLOBBERS
 #endif
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
 {
        ucontext_t *ctx = (ucontext_t*)ctx_void;
index bc0ecc2e862ef781c610d3f337e65af4f0948683..5b7abebbcbb9b8a6f7405d2ef8617de36d2a6671 100644 (file)
 #include <setjmp.h>
 #include <errno.h>
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
 
 /* Our sigaltstack scratch space. */
 static unsigned char altstack_data[SIGSTKSZ];
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
index 02309a195041318caf6f83214c94aa2dc55a1d3d..a108b80dd082309ed24e0c3fd687c3353210c71b 100644 (file)
 #include <signal.h>
 #include <err.h>
 #include <sys/syscall.h>
-#include <asm/processor-flags.h>
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
 
 static unsigned int nerrs;
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
@@ -59,6 +41,7 @@ static void do_it(unsigned long extraflags)
        set_eflags(get_eflags() | extraflags);
        syscall(SYS_getpid);
        flags = get_eflags();
+       set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
        if ((flags & extraflags) == extraflags) {
                printf("[OK]\tThe syscall worked and flags are still set\n");
        } else {
@@ -73,6 +56,12 @@ int main(void)
        printf("[RUN]\tSet NT and issue a syscall\n");
        do_it(X86_EFLAGS_NT);
 
+       printf("[RUN]\tSet AC and issue a syscall\n");
+       do_it(X86_EFLAGS_AC);
+
+       printf("[RUN]\tSet NT|AC and issue a syscall\n");
+       do_it(X86_EFLAGS_NT | X86_EFLAGS_AC);
+
        /*
         * Now try it again with TF set -- TF forces returns via IRET in all
         * cases except non-ptregs-using 64-bit full fast path syscalls.
@@ -80,8 +69,28 @@ int main(void)
 
        sethandler(SIGTRAP, sigtrap, 0);
 
+       printf("[RUN]\tSet TF and issue a syscall\n");
+       do_it(X86_EFLAGS_TF);
+
        printf("[RUN]\tSet NT|TF and issue a syscall\n");
        do_it(X86_EFLAGS_NT | X86_EFLAGS_TF);
 
+       printf("[RUN]\tSet AC|TF and issue a syscall\n");
+       do_it(X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+       printf("[RUN]\tSet NT|AC|TF and issue a syscall\n");
+       do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+       /*
+        * Now try DF.  This is evil and it's plausible that we will crash
+        * glibc, but glibc would have to do something rather surprising
+        * for this to happen.
+        */
+       printf("[RUN]\tSet DF and issue a syscall\n");
+       do_it(X86_EFLAGS_DF);
+
+       printf("[RUN]\tSet TF|DF and issue a syscall\n");
+       do_it(X86_EFLAGS_TF | X86_EFLAGS_DF);
+
        return nerrs == 0 ? 0 : 1;
 }
index a4f4d4cf22c3b47c40acf7ae4c6d25d335e3b313..c41f24b517f401c34fab8ae05a9d4ddf481eaa0b 100644 (file)
@@ -20,6 +20,8 @@
 #include <setjmp.h>
 #include <sys/uio.h>
 
+#include "helpers.h"
+
 #ifdef __x86_64__
 # define VSYS(x) (x)
 #else
@@ -493,21 +495,8 @@ static int test_process_vm_readv(void)
 }
 
 #ifdef __x86_64__
-#define X86_EFLAGS_TF (1UL << 8)
 static volatile sig_atomic_t num_vsyscall_traps;
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
-}
-
 static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
 {
        ucontext_t *ctx = (ucontext_t *)ctx_void;
index 0075ccd65407bb3dc7bec1907165299a4e410fa9..4c311e1af4c7a850ccf1f1b35d9820d010557281 100644 (file)
@@ -11,6 +11,8 @@
 #include <features.h>
 #include <stdio.h>
 
+#include "helpers.h"
+
 #if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
 
 int main()
@@ -53,27 +55,6 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                err(1, "sigaction");
 }
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
-
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static volatile sig_atomic_t nerrs;
 static unsigned long sysinfo;
 static bool got_sysinfo = false;
index a852af5c3214d731a8cd7eb0290c79baa6d7072c..0a68c9d3d3ab18ac56fb6883554f7a078b4134d4 100644 (file)
@@ -3350,7 +3350,8 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
                        if (kvm_sigmask.len != sizeof(compat_sigset_t))
                                goto out;
                        r = -EFAULT;
-                       if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
+                       if (get_compat_sigset(&sigset,
+                                             (compat_sigset_t __user *)sigmask_arg->sigset))
                                goto out;
                        r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
                } else