Merge tag 'driver-core-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 29 Mar 2019 22:07:29 +0000 (15:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 29 Mar 2019 22:07:29 +0000 (15:07 -0700)
Pull driver core fix from Greg KH:
 "Here is a single driver core patch for 5.1-rc3.

  After 5.1-rc1, all of the users of BUS_ATTR() are finally removed, so
  we can now drop this macro from include/linux/device.h so that no more
  new users will be created.

  This patch has been in linux-next for a while, with no reported
  issues"

* tag 'driver-core-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core:
  driver core: remove BUS_ATTR()
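
For reference, BUS_ATTR(_name, _mode, _show, _store) open-coded a struct bus_attribute; drivers were converted to the BUS_ATTR_RO()/BUS_ATTR_WO()/BUS_ATTR_RW() helpers, which derive the mode and callback names from the attribute name, before the old macro could be dropped here.  A minimal sketch of that replacement pattern follows; the "example" attribute and its contents are hypothetical, not taken from this commit:

	#include <linux/device.h>

	/*
	 * Instead of:
	 *   static BUS_ATTR(example, 0644, example_show, example_store);
	 * the _RW helper expects <name>_show()/<name>_store() callbacks and
	 * defines bus_attr_example with mode 0644.
	 */
	static ssize_t example_show(struct bus_type *bus, char *buf)
	{
		return sprintf(buf, "example\n");
	}

	static ssize_t example_store(struct bus_type *bus, const char *buf,
				     size_t count)
	{
		/* Parse and apply the written value here. */
		return count;
	}

	static BUS_ATTR_RW(example);	/* defines bus_attr_example */

The resulting attribute would then be registered in the usual way, e.g. with bus_create_file() against the bus in question.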

615 files changed:
Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt [moved from Documentation/devicetree/bindings/i2c/i2c-xscale.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt [moved from Documentation/devicetree/bindings/i2c/i2c-mtk.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-stu300.txt [moved from Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt [moved from Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-wmt.txt [moved from Documentation/devicetree/bindings/i2c/i2c-vt8500.txt with 100% similarity]
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
Documentation/devicetree/bindings/net/dsa/qca8k.txt
Documentation/filesystems/mount_api.txt
Documentation/i2c/busses/i2c-i801
Documentation/networking/msg_zerocopy.rst
Documentation/networking/netdev-FAQ.rst
Documentation/networking/nf_flowtable.txt
Documentation/networking/snmp_counter.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/abilis_tb100.dtsi
arch/arc/boot/dts/abilis_tb100_dvk.dts
arch/arc/boot/dts/abilis_tb101.dtsi
arch/arc/boot/dts/abilis_tb101_dvk.dts
arch/arc/boot/dts/abilis_tb10x.dtsi
arch/arc/boot/dts/axc001.dtsi
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/boot/dts/vdk_axc003.dtsi
arch/arc/boot/dts/vdk_axc003_idu.dtsi
arch/arc/boot/dts/vdk_axs10x_mb.dtsi
arch/arc/configs/hsdk_defconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/include/asm/perf_event.h
arch/arc/include/asm/spinlock.h
arch/arc/kernel/head.S
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/lib/Makefile
arch/arc/lib/memcpy-archs-unaligned.S [new file with mode: 0644]
arch/arc/plat-eznps/Kconfig
arch/arm/Kconfig
arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
arch/arm/boot/dts/ste-nomadik-nhk15.dts
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/mach-cns3xxx/core.c
arch/arm/mach-imx/cpuidle-imx6q.c
arch/arm/mach-imx/mach-imx51.c
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/nvidia/tegra186.dtsi
arch/arm64/boot/dts/renesas/r8a774c0.dtsi
arch/arm64/boot/dts/renesas/r8a77990.dtsi
arch/arm64/include/asm/cputype.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/stacktrace.c
arch/mips/bcm47xx/workarounds.c
arch/mips/include/asm/jump_label.h
arch/mips/include/uapi/asm/posix_types.h
arch/mips/kernel/vmlinux.lds.S
arch/mips/loongson64/lemote-2f/irq.c
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/vdso_datapage.h
arch/powerpc/kernel/cpu_setup_6xx.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/security.c
arch/powerpc/kernel/vdso64/gettimeofday.S
arch/powerpc/mm/hash_low_32.S
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit32.h
arch/powerpc/net/bpf_jit64.h
arch/powerpc/net/bpf_jit_comp64.c
arch/s390/include/asm/ap.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/lowcore.h
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/kernel/smp.c
arch/s390/kernel/vtime.c
arch/x86/boot/string.c
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/cpu_device_id.h
arch/x86/include/asm/processor-cyrix.h
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/hpet.c
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/mpparse.c
arch/x86/lib/csum-partial_64.c
arch/x86/mm/pti.c
block/bio.c
block/blk-cgroup.c
block/blk-flush.c
block/blk-iolatency.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
drivers/acpi/utils.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/libata-zpodd.c
drivers/auxdisplay/Kconfig
drivers/auxdisplay/Makefile
drivers/auxdisplay/charlcd.c
drivers/auxdisplay/hd44780.c
drivers/auxdisplay/panel.c
drivers/base/power/domain.c
drivers/base/swnode.c
drivers/block/loop.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/rbd.c
drivers/clocksource/clps711x-timer.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/tcb_clksrc.c
drivers/clocksource/timer-riscv.c
drivers/clocksource/timer-ti-dm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vkms/vkms_gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/mlx4/alias_GUID.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iova.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-imx-irqsteer.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-mmp.c
drivers/irqchip/irq-mvebu-sei.c
drivers/irqchip/irq-stm32-exti.c
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/device.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/habanalabs.h
drivers/misc/habanalabs/hw_queue.c
drivers/misc/habanalabs/memory.c
drivers/misc/habanalabs/mmu.c
drivers/mmc/host/alcor.c
drivers/mmc/host/davinci_mmc.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-omap.c
drivers/net/Kconfig
drivers/net/dsa/qca8k.c
drivers/net/dsa/qca8k.h
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlxsw/core_env.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ks8851.h
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/realtek/atp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/phy/Kconfig
drivers/net/phy/broadcom.c
drivers/net/phy/dp83822.c
drivers/net/phy/meson-gxl.c
drivers/net/phy/phy_device.c
drivers/net/tun.c
drivers/net/usb/aqc111.c
drivers/net/usb/cdc_ether.c
drivers/net/vxlan.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
drivers/net/wireless/mediatek/mt76/mt7603/soc.c
drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x02.h
drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt76x2/init.c
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt7601u/usb.c
drivers/nvme/host/multipath.c
drivers/nvme/host/tcp.c
drivers/nvme/target/core.c
drivers/nvme/target/io-cmd-file.c
drivers/parport/daisy.c
drivers/parport/probe.c
drivers/parport/share.c
drivers/pci/pci.h
drivers/pci/pcie/bw_notification.c
drivers/pci/probe.c
drivers/platform/chrome/cros_ec_debugfs.c
drivers/platform/chrome/wilco_ec/mailbox.c
drivers/s390/cio/chsc.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fc.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/soc/bcm/bcm2835-power.c
drivers/thermal/broadcom/bcm2835_thermal.c
drivers/thermal/cpu_cooling.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/intel/intel_powerclamp.c
drivers/thermal/mtk_thermal.c
drivers/thermal/samsung/exynos_tmu.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_core.h
drivers/virt/vboxguest/vboxguest_linux.c
drivers/virt/vboxguest/vboxguest_utils.c
drivers/virt/vboxguest/vboxguest_version.h
drivers/virt/vboxguest/vmmdev.h
fs/afs/fsclient.c
fs/afs/yfsclient.c
fs/block_dev.c
fs/btrfs/extent-tree.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/inode.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/file.c
fs/cifs/smb2maperror.c
fs/cifs/smb2pdu.c
fs/cifs/trace.h
fs/ext4/ext4_jbd2.h
fs/ext4/file.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/resize.c
fs/ext4/super.c
fs/io_uring.c
fs/iomap.c
fs/lockd/host.c
fs/locks.c
fs/nfs/client.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4proc.c
fs/notify/fanotify/fanotify_user.c
fs/notify/inotify/inotify_user.c
fs/proc/kcore.c
fs/udf/inode.c
fs/udf/truncate.c
fs/udf/udfdecl.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/scrub/btree.c
fs/xfs/scrub/dabtree.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_file.c
include/linux/atalk.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/bpf_verifier.h
include/linux/brcmphy.h
include/linux/ceph/libceph.h
include/linux/irq.h
include/linux/irqchip/arm-gic.h
include/linux/kcore.h
include/linux/mlx5/qp.h
include/linux/net.h
include/linux/parport.h
include/linux/sbitmap.h
include/linux/socket.h
include/linux/uio.h
include/linux/vbox_utils.h
include/misc/charlcd.h
include/net/act_api.h
include/net/sch_generic.h
include/net/sctp/checksum.h
include/net/sock.h
include/net/tc_act/tc_gact.h
include/net/xdp_sock.h
include/uapi/linux/bpf.h
include/uapi/linux/vbox_vmmdev_types.h
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/events/core.c
kernel/futex.c
kernel/irq/devres.c
kernel/irq/manage.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/time/jiffies.c
kernel/trace/ftrace.c
kernel/trace/trace_dynevent.c
kernel/trace/trace_events_hist.c
kernel/watchdog.c
kernel/workqueue.c
lib/rhashtable.c
lib/sbitmap.c
net/appletalk/aarp.c
net/appletalk/ddp.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/core/devlink.c
net/core/filter.c
net/core/net-sysfs.c
net/dccp/ipv6.c
net/ipv6/netfilter/ip6t_srh.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/mpls/mpls_iptunnel.c
net/ncsi/ncsi-netlink.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_objref.c
net/netfilter/nft_redir.c
net/netfilter/nft_set_rbtree.c
net/netlink/genetlink.c
net/nfc/llcp_sock.c
net/openvswitch/datapath.c
net/packet/af_packet.c
net/rose/rose_subr.c
net/rxrpc/output.c
net/sched/Kconfig
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/sch_cake.c
net/sctp/socket.c
net/socket.c
net/strparser/strparser.c
net/sunrpc/clnt.c
net/sunrpc/xprtsock.c
net/tipc/group.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/tipc/topsrv.c
net/xdp/xdp_umem.c
scripts/Makefile.build
scripts/coccinelle/free/put_device.cocci
scripts/coccinelle/misc/badty.cocci
scripts/kconfig/lxdialog/inputbox.c
scripts/kconfig/nconf.c
scripts/kconfig/nconf.gui.c
scripts/mod/modpost.c
security/selinux/ss/policydb.c
sound/core/oss/pcm_oss.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_synth.c
sound/drivers/opl3/opl3_voice.h
sound/firewire/motu/motu.c
sound/isa/sb/sb8.c
sound/pci/echoaudio/echoaudio.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
tools/arch/arm64/include/uapi/asm/unistd.h
tools/bpf/bpftool/prog.c
tools/build/Makefile.feature
tools/build/feature/test-all.c
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/in.h
tools/lib/bpf/Makefile
tools/lib/bpf/README.rst
tools/lib/bpf/btf.c
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/bpf/libbpf.map
tools/lib/bpf/xsk.c
tools/objtool/check.c
tools/perf/Documentation/Build.txt
tools/perf/Documentation/perf-config.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/tips.txt
tools/perf/Makefile.config
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/util/Build
tools/perf/arch/x86/util/archinsn.c [new file with mode: 0644]
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/builtin-list.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin.h
tools/perf/perf.c
tools/perf/perf.h
tools/perf/pmu-events/arch/powerpc/power8/other.json
tools/perf/pmu-events/arch/x86/amdfam17h/branch.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/core.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/other.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/mapfile.csv
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/tests/attr/test-record-C0
tools/perf/tests/attr/test-record-basic
tools/perf/tests/attr/test-record-branch-any
tools/perf/tests/attr/test-record-branch-filter-any
tools/perf/tests/attr/test-record-branch-filter-any_call
tools/perf/tests/attr/test-record-branch-filter-any_ret
tools/perf/tests/attr/test-record-branch-filter-hv
tools/perf/tests/attr/test-record-branch-filter-ind_call
tools/perf/tests/attr/test-record-branch-filter-k
tools/perf/tests/attr/test-record-branch-filter-u
tools/perf/tests/attr/test-record-count
tools/perf/tests/attr/test-record-data
tools/perf/tests/attr/test-record-freq
tools/perf/tests/attr/test-record-graph-default
tools/perf/tests/attr/test-record-graph-dwarf
tools/perf/tests/attr/test-record-graph-fp
tools/perf/tests/attr/test-record-group
tools/perf/tests/attr/test-record-group-sampling
tools/perf/tests/attr/test-record-group1
tools/perf/tests/attr/test-record-no-buffering
tools/perf/tests/attr/test-record-no-inherit
tools/perf/tests/attr/test-record-no-samples
tools/perf/tests/attr/test-record-period
tools/perf/tests/attr/test-record-raw
tools/perf/tests/backward-ring-buffer.c
tools/perf/tests/evsel-tp-sched.c
tools/perf/tests/expr.c
tools/perf/tests/openat-syscall-all-cpus.c
tools/perf/ui/browser.c
tools/perf/ui/browsers/Build
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/browsers/res_sample.c [new file with mode: 0644]
tools/perf/ui/browsers/scripts.c
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/archinsn.h [new file with mode: 0644]
tools/perf/util/bpf-event.c
tools/perf/util/bpf-event.h
tools/perf/util/build-id.c
tools/perf/util/config.c
tools/perf/util/data.c
tools/perf/util/data.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/env.c
tools/perf/util/env.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/map.c
tools/perf/util/ordered-events.c
tools/perf/util/parse-events.c
tools/perf/util/probe-event.c
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat.c
tools/perf/util/symbol.c
tools/perf/util/symbol_conf.h
tools/perf/util/time-utils.c
tools/perf/util/time-utils.h
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/prog_tests/map_lock.c
tools/testing/selftests/bpf/prog_tests/spinlock.c
tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_sock_fields.c
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/bpf/verifier/ref_tracking.c
tools/testing/selftests/bpf/verifier/sock.c
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json [new file with mode: 0644]
tools/testing/selftests/tc-testing/tc-tests/actions/police.json
tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json

index 8de96a4fb2d574095cb088744f405a1bab5e87f2..f977ea7617f68235f19d801d855e54bbd6f25b02 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
     - "renesas,irqc-r8a7793" (R-Car M2-N)
     - "renesas,irqc-r8a7794" (R-Car E2)
     - "renesas,intc-ex-r8a774a1" (RZ/G2M)
+    - "renesas,intc-ex-r8a774c0" (RZ/G2E)
     - "renesas,intc-ex-r8a7795" (R-Car H3)
     - "renesas,intc-ex-r8a7796" (R-Car M3-W)
     - "renesas,intc-ex-r8a77965" (R-Car M3-N)
index bbcb255c3150230978fba796b320a71c206ddbad..93a7469e70d4131fbc2d7f2daffd1917020709ee 100644 (file)
@@ -12,10 +12,15 @@ Required properties:
 Subnodes:
 
 The integrated switch subnode should be specified according to the binding
-described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of
-port and PHY id, each subnode describing a port needs to have a valid phandle
-referencing the internal PHY connected to it. The CPU port of this switch is
-always port 0.
+described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external
+mdio-bus each subnode describing a port needs to have a valid phandle
+referencing the internal PHY it is connected to. This is because there's no
+N:N mapping of port and PHY id.
+
+Don't use mixed external and internal mdio-bus configurations, as this is
+not supported by the hardware.
+
+The CPU port of this switch is always port 0.
 
 A CPU port node has the following optional node:
 
@@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties:
 - 'full-duplex' (boolean, optional), to indicate that full duplex is
   used. When absent, half duplex is assumed.
 
-Example:
+Examples:
 
+for the external mdio-bus configuration:
 
        &mdio0 {
                phy_port1: phy@0 {
@@ -55,12 +61,12 @@ Example:
                        reg = <4>;
                };
 
-               switch0@0 {
+               switch@10 {
                        compatible = "qca,qca8337";
                        #address-cells = <1>;
                        #size-cells = <0>;
 
-                       reg = <0>;
+                       reg = <0x10>;
 
                        ports {
                                #address-cells = <1>;
@@ -108,3 +114,56 @@ Example:
                        };
                };
        };
+
+for the internal master mdio-bus configuration:
+
+       &mdio0 {
+               switch@10 {
+                       compatible = "qca,qca8337";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       reg = <0x10>;
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               port@0 {
+                                       reg = <0>;
+                                       label = "cpu";
+                                       ethernet = <&gmac1>;
+                                       phy-mode = "rgmii";
+                                       fixed-link {
+                                               speed = 1000;
+                                               full-duplex;
+                                       };
+                               };
+
+                               port@1 {
+                                       reg = <1>;
+                                       label = "lan1";
+                               };
+
+                               port@2 {
+                                       reg = <2>;
+                                       label = "lan2";
+                               };
+
+                               port@3 {
+                                       reg = <3>;
+                                       label = "lan3";
+                               };
+
+                               port@4 {
+                                       reg = <4>;
+                                       label = "lan4";
+                               };
+
+                               port@5 {
+                                       reg = <5>;
+                                       label = "wan";
+                               };
+                       };
+               };
+       };
index 944d1965e917e9a91496637ab484d5197d68223d..00ff0cfccfa71cdce0d02ddd8608cf962ee99308 100644 (file)
@@ -12,11 +12,13 @@ CONTENTS
 
  (4) Filesystem context security.
 
- (5) VFS filesystem context operations.
+ (5) VFS filesystem context API.
 
- (6) Parameter description.
+ (6) Superblock creation helpers.
 
- (7) Parameter helper functions.
+ (7) Parameter description.
+
+ (8) Parameter helper functions.
 
 
 ========
@@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process:
 
  (7) Destroy the context.
 
-To support this, the file_system_type struct gains a new field:
+To support this, the file_system_type struct gains two new fields:
 
        int (*init_fs_context)(struct fs_context *fc);
+       const struct fs_parameter_description *parameters;
 
-which is invoked to set up the filesystem-specific parts of a filesystem
-context, including the additional space.
+The first is invoked to set up the filesystem-specific parts of a filesystem
+context, including the additional space, and the second points to the
+parameter description for validation at registration time and querying by a
+future system call.
 
 Note that security initialisation is done *after* the filesystem is called so
 that the namespaces may be adjusted first.
@@ -73,9 +78,9 @@ context.  This is represented by the fs_context structure:
                void                    *s_fs_info;
                unsigned int            sb_flags;
                unsigned int            sb_flags_mask;
+               unsigned int            s_iflags;
+               unsigned int            lsm_flags;
                enum fs_context_purpose purpose:8;
-               bool                    sloppy:1;
-               bool                    silent:1;
                ...
        };
 
@@ -141,6 +146,10 @@ The fs_context fields are as follows:
 
      Which bits SB_* flags are to be set/cleared in super_block::s_flags.
 
+ (*) unsigned int s_iflags
+
+     These will be bitwise-OR'd with s->s_iflags when a superblock is created.
+
  (*) enum fs_context_purpose
 
      This indicates the purpose for which the context is intended.  The
@@ -150,17 +159,6 @@ The fs_context fields are as follows:
        FS_CONTEXT_FOR_SUBMOUNT         -- New automatic submount of extant mount
        FS_CONTEXT_FOR_RECONFIGURE      -- Change an existing mount
 
- (*) bool sloppy
- (*) bool silent
-
-     These are set if the sloppy or silent mount options are given.
-
-     [NOTE] sloppy is probably unnecessary when userspace passes over one
-     option at a time since the error can just be ignored if userspace deems it
-     to be unimportant.
-
-     [NOTE] silent is probably redundant with sb_flags & SB_SILENT.
-
 The mount context is created by calling vfs_new_fs_context() or
 vfs_dup_fs_context() and is destroyed with put_fs_context().  Note that the
 structure is not refcounted.
@@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose:
      It should return 0 on success or a negative error code on failure.
 
 
-=================================
-VFS FILESYSTEM CONTEXT OPERATIONS
-=================================
+==========================
+VFS FILESYSTEM CONTEXT API
+==========================
 
-There are four operations for creating a filesystem context and
-one for destroying a context:
+There are four operations for creating a filesystem context and one for
+destroying a context:
 
- (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type,
-                                          struct dentry *reference,
-                                          unsigned int sb_flags,
-                                          unsigned int sb_flags_mask,
-                                          enum fs_context_purpose purpose);
+ (*) struct fs_context *fs_context_for_mount(
+               struct file_system_type *fs_type,
+               unsigned int sb_flags);
 
-     Create a filesystem context for a given filesystem type and purpose.  This
-     allocates the filesystem context, sets the superblock flags, initialises
-     the security and calls fs_type->init_fs_context() to initialise the
-     filesystem private data.
+     Allocate a filesystem context for the purpose of setting up a new mount,
+     whether that be with a new superblock or sharing an existing one.  This
+     sets the superblock flags, initialises the security and calls
+     fs_type->init_fs_context() to initialise the filesystem private data.
 
-     reference can be NULL or it may indicate the root dentry of a superblock
-     that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or
-     the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT).
-     This is provided as a source of namespace information.
+     fs_type specifies the filesystem type that will manage the context and
+     sb_flags presets the superblock flags stored therein.
+
+ (*) struct fs_context *fs_context_for_reconfigure(
+               struct dentry *dentry,
+               unsigned int sb_flags,
+               unsigned int sb_flags_mask);
+
+     Allocate a filesystem context for the purpose of reconfiguring an
+     existing superblock.  dentry provides a reference to the superblock to be
+     configured.  sb_flags and sb_flags_mask indicate which superblock flags
+     need changing and to what.
+
+ (*) struct fs_context *fs_context_for_submount(
+               struct file_system_type *fs_type,
+               struct dentry *reference);
+
+     Allocate a filesystem context for the purpose of creating a new mount for
+     an automount point or other derived superblock.  fs_type specifies the
+     filesystem type that will manage the context and the reference dentry
+     supplies the parameters.  Namespaces are propagated from the reference
+     dentry's superblock also.
+
+     Note that it's not a requirement that the reference dentry be of the same
+     filesystem type as fs_type.
 
  (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
 
@@ -390,20 +407,6 @@ context pointer or a negative error code.
 For the remaining operations, if an error occurs, a negative error code will be
 returned.
 
- (*) int vfs_get_tree(struct fs_context *fc);
-
-     Get or create the mountable root and superblock, using the parameters in
-     the filesystem context to select/configure the superblock.  This invokes
-     the ->validate() op and then the ->get_tree() op.
-
-     [NOTE] ->validate() could perhaps be rolled into ->get_tree() and
-     ->reconfigure().
-
- (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
-
-     Create a mount given the parameters in the specified filesystem context.
-     Note that this does not attach the mount to anything.
-
  (*) int vfs_parse_fs_param(struct fs_context *fc,
                            struct fs_parameter *param);
 
@@ -432,17 +435,80 @@ returned.
      clear the pointer, but then becomes responsible for disposing of the
      object.
 
- (*) int vfs_parse_fs_string(struct fs_context *fc, char *key,
+ (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
                             const char *value, size_t v_size);
 
-     A wrapper around vfs_parse_fs_param() that just passes a constant string.
+     A wrapper around vfs_parse_fs_param() that copies the value string it is
+     passed.
 
  (*) int generic_parse_monolithic(struct fs_context *fc, void *data);
 
      Parse a sys_mount() data page, assuming the form to be a text list
      consisting of key[=val] options separated by commas.  Each item in the
      list is passed to vfs_mount_option().  This is the default when the
-     ->parse_monolithic() operation is NULL.
+     ->parse_monolithic() method is NULL.
+
+ (*) int vfs_get_tree(struct fs_context *fc);
+
+     Get or create the mountable root and superblock, using the parameters in
+     the filesystem context to select/configure the superblock.  This invokes
+     the ->get_tree() method.
+
+ (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
+
+     Create a mount given the parameters in the specified filesystem context.
+     Note that this does not attach the mount to anything.
+
+
+===========================
+SUPERBLOCK CREATION HELPERS
+===========================
+
+A number of VFS helpers are available for use by filesystems for the creation
+or looking up of superblocks.
+
+ (*) struct super_block *
+     sget_fc(struct fs_context *fc,
+            int (*test)(struct super_block *sb, struct fs_context *fc),
+            int (*set)(struct super_block *sb, struct fs_context *fc));
+
+     This is the core routine.  If test is non-NULL, it searches for an
+     existing superblock matching the criteria held in the fs_context, using
+     the test function to match them.  If no match is found, a new superblock
+     is created and the set function is called to set it up.
+
+     Prior to the set function being called, fc->s_fs_info will be transferred
+     to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
+     success (ie. 0).
+
+The following helpers all wrap sget_fc():
+
+ (*) int vfs_get_super(struct fs_context *fc,
+                      enum vfs_get_super_keying keying,
+                      int (*fill_super)(struct super_block *sb,
+                                        struct fs_context *fc))
+
+     This creates/looks up a deviceless superblock.  The keying indicates how
+     many superblocks of this type may exist and in what manner they may be
+     shared:
+
+       (1) vfs_get_single_super
+
+           Only one such superblock may exist in the system.  Any further
+           attempt to get a new superblock gets this one (and any parameter
+           differences are ignored).
+
+       (2) vfs_get_keyed_super
+
+           Multiple superblocks of this type may exist and they're keyed on
+           their s_fs_info pointer (for example this may refer to a
+           namespace).
+
+       (3) vfs_get_independent_super
+
+           Multiple independent superblocks of this type may exist.  This
+           function never matches an existing one and always creates a new
+           one.
 
 
 =====================
@@ -454,35 +520,22 @@ There's a core description struct that links everything together:
 
        struct fs_parameter_description {
                const char      name[16];
-               u8              nr_params;
-               u8              nr_alt_keys;
-               u8              nr_enums;
-               bool            ignore_unknown;
-               bool            no_source;
-               const char *const *keys;
-               const struct constant_table *alt_keys;
                const struct fs_parameter_spec *specs;
                const struct fs_parameter_enum *enums;
        };
 
 For example:
 
-       enum afs_param {
+       enum {
                Opt_autocell,
                Opt_bar,
                Opt_dyn,
                Opt_foo,
                Opt_source,
-               nr__afs_params
        };
 
        static const struct fs_parameter_description afs_fs_parameters = {
                .name           = "kAFS",
-               .nr_params      = nr__afs_params,
-               .nr_alt_keys    = ARRAY_SIZE(afs_param_alt_keys),
-               .nr_enums       = ARRAY_SIZE(afs_param_enums),
-               .keys           = afs_param_keys,
-               .alt_keys       = afs_param_alt_keys,
                .specs          = afs_param_specs,
                .enums          = afs_param_enums,
        };
@@ -494,28 +547,24 @@ The members are as follows:
      The name to be used in error messages generated by the parse helper
      functions.
 
- (2) u8 nr_params;
-
-     The number of discrete parameter identifiers.  This indicates the number
-     of elements in the ->types[] array and also limits the values that may be
-     used in the values that the ->keys[] array maps to.
-
-     It is expected that, for example, two parameters that are related, say
-     "acl" and "noacl" with have the same ID, but will be flagged to indicate
-     that one is the inverse of the other.  The value can then be picked out
-     from the parse result.
+ (2) const struct fs_parameter_specification *specs;
 
- (3) const struct fs_parameter_specification *specs;
+     Table of parameter specifications, terminated with a null entry, where the
+     entries are of type:
 
-     Table of parameter specifications, where the entries are of type:
-
-       struct fs_parameter_type {
-               enum fs_parameter_spec  type:8;
-               u8                      flags;
+       struct fs_parameter_spec {
+               const char              *name;
+               u8                      opt;
+               enum fs_parameter_type  type:8;
+               unsigned short          flags;
        };
 
-     and the parameter identifier is the index to the array.  'type' indicates
-     the desired value type and must be one of:
+     The 'name' field is a string to match exactly to the parameter key (no
+     wildcards, patterns and no case-independence) and 'opt' is the value that
+     will be returned by the fs_parser() function in the case of a successful
+     match.
+
+     The 'type' field indicates the desired value type and must be one of:
 
        TYPE NAME               EXPECTED VALUE          RESULT IN
        ======================= ======================= =====================
@@ -525,85 +574,65 @@ The members are as follows:
        fs_param_is_u32_octal   32-bit octal int        result->uint_32
        fs_param_is_u32_hex     32-bit hex int          result->uint_32
        fs_param_is_s32         32-bit signed int       result->int_32
+       fs_param_is_u64         64-bit unsigned int     result->uint_64
        fs_param_is_enum        Enum value name         result->uint_32
        fs_param_is_string      Arbitrary string        param->string
        fs_param_is_blob        Binary blob             param->blob
        fs_param_is_blockdev    Blockdev path           * Needs lookup
        fs_param_is_path        Path                    * Needs lookup
-       fs_param_is_fd          File descriptor         param->file
-
-     And each parameter can be qualified with 'flags':
-
-       fs_param_v_optional     The value is optional
-       fs_param_neg_with_no    If key name is prefixed with "no", it is false
-       fs_param_neg_with_empty If value is "", it is false
-       fs_param_deprecated     The parameter is deprecated.
-
-     For example:
-
-       static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = {
-               [Opt_autocell]  = { fs_param_is flag },
-               [Opt_bar]       = { fs_param_is_enum },
-               [Opt_dyn]       = { fs_param_is flag },
-               [Opt_foo]       = { fs_param_is_bool, fs_param_neg_with_no },
-               [Opt_source]    = { fs_param_is_string },
-       };
+       fs_param_is_fd          File descriptor         result->int_32
 
      Note that if the value is of fs_param_is_bool type, fs_parse() will try
      to match any string value against "0", "1", "no", "yes", "false", "true".
 
-     [!] NOTE that the table must be sorted according to primary key name so
-        that ->keys[] is also sorted.
-
- (4) const char *const *keys;
-
-     Table of primary key names for the parameters.  There must be one entry
-     per defined parameter.  The table is optional if ->nr_params is 0.  The
-     table is just an array of names e.g.:
+     Each parameter can also be qualified with 'flags':
 
-       static const char *const afs_param_keys[nr__afs_params] = {
-               [Opt_autocell]  = "autocell",
-               [Opt_bar]       = "bar",
-               [Opt_dyn]       = "dyn",
-               [Opt_foo]       = "foo",
-               [Opt_source]    = "source",
-       };
-
-     [!] NOTE that the table must be sorted such that the table can be searched
-        with bsearch() using strcmp().  This means that the Opt_* values must
-        correspond to the entries in this table.
-
- (5) const struct constant_table *alt_keys;
-     u8 nr_alt_keys;
-
-     Table of additional key names and their mappings to parameter ID plus the
-     number of elements in the table.  This is optional.  The table is just an
-     array of { name, integer } pairs, e.g.:
+       fs_param_v_optional     The value is optional
+       fs_param_neg_with_no    result->negated set if key is prefixed with "no"
+       fs_param_neg_with_empty result->negated set if value is ""
+       fs_param_deprecated     The parameter is deprecated.
 
-       static const struct constant_table afs_param_keys[] = {
-               { "baz",        Opt_bar },
-               { "dynamic",    Opt_dyn },
+     These are wrapped with a number of convenience wrappers:
+
+       MACRO                   SPECIFIES
+       ======================= ===============================================
+       fsparam_flag()          fs_param_is_flag
+       fsparam_flag_no()       fs_param_is_flag, fs_param_neg_with_no
+       fsparam_bool()          fs_param_is_bool
+       fsparam_u32()           fs_param_is_u32
+       fsparam_u32oct()        fs_param_is_u32_octal
+       fsparam_u32hex()        fs_param_is_u32_hex
+       fsparam_s32()           fs_param_is_s32
+       fsparam_u64()           fs_param_is_u64
+       fsparam_enum()          fs_param_is_enum
+       fsparam_string()        fs_param_is_string
+       fsparam_blob()          fs_param_is_blob
+       fsparam_bdev()          fs_param_is_blockdev
+       fsparam_path()          fs_param_is_path
+       fsparam_fd()            fs_param_is_fd
+
+     all of which take two arguments, name string and option number - for
+     example:
+
+       static const struct fs_parameter_spec afs_param_specs[] = {
+               fsparam_flag    ("autocell",    Opt_autocell),
+               fsparam_flag    ("dyn",         Opt_dyn),
+               fsparam_string  ("source",      Opt_source),
+               fsparam_flag_no ("foo",         Opt_foo),
+               {}
        };
 
-     [!] NOTE that the table must be sorted such that strcmp() can be used with
-        bsearch() to search the entries.
-
-     The parameter ID can also be fs_param_key_removed to indicate that a
-     deprecated parameter has been removed and that an error will be given.
-     This differs from fs_param_deprecated where the parameter may still have
-     an effect.
-
-     Further, the behaviour of the parameter may differ when an alternate name
-     is used (for instance with NFS, "v3", "v4.2", etc. are alternate names).
+     An addition macro, __fsparam() is provided that takes an additional pair
+     of arguments to specify the type and the flags for anything that doesn't
+     match one of the above macros.
 
  (6) const struct fs_parameter_enum *enums;
-     u8 nr_enums;
 
-     Table of enum value names to integer mappings and the number of elements
-     stored therein.  This is of type:
+     Table of enum value names to integer mappings, terminated with a null
+     entry.  This is of type:
 
        struct fs_parameter_enum {
-               u8              param_id;
+               u8              opt;
                char            name[14];
                u8              value;
        };
@@ -621,11 +650,6 @@ The members are as follows:
      try to look the value up in the enum table and the result will be stored
      in the parse result.
 
- (7) bool no_source;
-
-     If this is set, fs_parse() will ignore any "source" parameter and not
-     pass it to the filesystem.
-
 The parser should be pointed to by the parser pointer in the file_system_type
 struct as this will provide validation on registration (if
 CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
@@ -650,9 +674,8 @@ process the parameters it is given.
                int             value;
        };
 
-     and it must be sorted such that it can be searched using bsearch() using
-     strcmp().  If a match is found, the corresponding value is returned.  If a
-     match isn't found, the not_found value is returned instead.
+     If a match is found, the corresponding value is returned.  If a match
+     isn't found, the not_found value is returned instead.
 
  (*) bool validate_constant_table(const struct constant_table *tbl,
                                  size_t tbl_size,
@@ -665,36 +688,36 @@ process the parameters it is given.
      should just be set to lie inside the low-to-high range.
 
      If all is good, true is returned.  If the table is invalid, errors are
-     logged to dmesg, the stack is dumped and false is returned.
+     logged to dmesg and false is returned.
+
+ (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+
+     This performs some validation checks on a parameter description.  It
+     returns true if the description is good and false if it is not.  It will
+     log errors to dmesg if validation fails.
 
  (*) int fs_parse(struct fs_context *fc,
-                 const struct fs_param_parser *parser,
+                 const struct fs_parameter_description *desc,
                  struct fs_parameter *param,
-                 struct fs_param_parse_result *result);
+                 struct fs_parse_result *result);
 
      This is the main interpreter of parameters.  It uses the parameter
-     description (parser) to look up the name of the parameter to use and to
-     convert that to a parameter ID (stored in result->key).
+     description to look up a parameter by key name and to convert that to an
+     option number (which it returns).
 
      If successful, and if the parameter type indicates the result is a
      boolean, integer or enum type, the value is converted by this function and
-     the result stored in result->{boolean,int_32,uint_32}.
+     the result stored in result->{boolean,int_32,uint_32,uint_64}.
 
      If a match isn't initially made, the key is prefixed with "no" and no
      value is present then an attempt will be made to look up the key with the
      prefix removed.  If this matches a parameter for which the type has flag
-     fs_param_neg_with_no set, then a match will be made and the value will be
-     set to false/0/NULL.
-
-     If the parameter is successfully matched and, optionally, parsed
-     correctly, 1 is returned.  If the parameter isn't matched and
-     parser->ignore_unknown is set, then 0 is returned.  Otherwise -EINVAL is
-     returned.
-
- (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+     fs_param_neg_with_no set, then a match will be made and result->negated
+     will be set to true.
 
-     This is validates the parameter description.  It returns true if the
-     description is good and false if it is not.
+     If the parameter isn't matched, -ENOPARAM will be returned; if the
+     parameter is matched, but the value is erroneous, -EINVAL will be
+     returned; otherwise the parameter's option number will be returned.
 
  (*) int fs_lookup_param(struct fs_context *fc,
                         struct fs_parameter *value,
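
Pulling the mount_api.txt changes above together, a filesystem's parameter table and its ->parse_param() method end up looking roughly like the sketch below.  This is illustrative only: "examplefs", its options and the private handling are made up, not part of this commit; only the fs_parse() calling convention and the fsparam_*() wrappers follow the description above.

	#include <linux/fs_context.h>
	#include <linux/fs_parser.h>

	enum {
		Opt_autocell,
		Opt_dyn,
		Opt_source,
	};

	/* Spec table built with the convenience wrappers, null-terminated. */
	static const struct fs_parameter_spec examplefs_param_specs[] = {
		fsparam_flag	("autocell",	Opt_autocell),
		fsparam_flag	("dyn",		Opt_dyn),
		fsparam_string	("source",	Opt_source),
		{}
	};

	static const struct fs_parameter_description examplefs_fs_parameters = {
		.name	= "examplefs",
		.specs	= examplefs_param_specs,
	};

	static int examplefs_parse_param(struct fs_context *fc,
					 struct fs_parameter *param)
	{
		struct fs_parse_result result;
		int opt;

		/*
		 * fs_parse() returns the matched option number, -ENOPARAM if
		 * the key is unknown, or -EINVAL if the value is malformed.
		 */
		opt = fs_parse(fc, &examplefs_fs_parameters, param, &result);
		if (opt < 0)
			return opt;

		switch (opt) {
		case Opt_source:
			/* param->string would be stashed in fc->fs_private here. */
			break;
		case Opt_autocell:
		case Opt_dyn:
			/* Flag options carry no value. */
			break;
		}
		return 0;
	}

The same description would also be pointed to by the new file_system_type->parameters field, so that it can be validated at registration time (with CONFIG_VALIDATE_FS_PARSER=y) and queried later, as noted above.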
index d1ee484a787d1b476cf13bcf7d7b53ac084fb63e..ee9984f3586897c870bd42b854f5d883b245621e 100644 (file)
@@ -36,6 +36,7 @@ Supported adapters:
   * Intel Cannon Lake (PCH)
   * Intel Cedar Fork (PCH)
   * Intel Ice Lake (PCH)
+  * Intel Comet Lake (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index 18c1415e7bfad8f6e6e9b03febaf47f83a0f9915..ace56204dd03b1de816a89e77ad1b0d05bdbbd03 100644 (file)
@@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code.
 
   patchset
     [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
-    http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
+    https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
 
 
 Interface
index 0ac5fa77f50173c139376a3f2c271faff2e5e569..8c7a713cf657a769f011dfd45676473e2ee94e2e 100644 (file)
@@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current
 version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
+Q: I made changes to only a few patches in a patch series should I resend only those changed?
+--------------------------------------------------------------------------------------------
+A: No, please resend the entire patch series and make sure you do number your
+patches such that it is clear this is the latest and greatest set of patches
+that can be applied.
+
+Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
+-------------------------------------------------------------------------------------------------------------------------------------------
+A: There is no revert possible, once it is pushed out, it stays like that.
+Please send incremental versions on top of what has been merged in order to fix
+the patches the way they would look like if your latest patch series was to be
+merged.
+
 Q: How can I tell what patches are queued up for backporting to the various stable releases?
 --------------------------------------------------------------------------------------------
 A: Normally Greg Kroah-Hartman collects stable commits himself, but for
index 54128c50d508ef27e5c6f2026fc5dddd0df47ead..ca2136c76042c4ded1aa1608ea38f405e04772da 100644 (file)
@@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass.
      /         \    /          \     |Routing |   /            \
   -->  ingress  ---> prerouting ---> |decision|   | postrouting |--> neigh_xmit
      \_________/    \__________/     ----------   \____________/          ^
-       |      ^          |               |               ^                |
-   flowtable  |          |          ____\/___            |                |
-       |      |          |         /         \           |                |
-    __\/___   |          --------->| forward |------------                |
+       |      ^                          |               ^                |
+   flowtable  |                     ____\/___            |                |
+       |      |                    /         \           |                |
+    __\/___   |                    | forward |------------                |
     |-----|   |                    \_________/                            |
     |-----|   |                 'flow offload' rule                       |
     |-----|   |                   adds entry to                           |
index 52b026be028f65f02aa8bf4a8816ab56f9e509ff..38a4edc4522b46f6ad3859f411eb46dfa4bc7f94 100644 (file)
@@ -413,7 +413,7 @@ algorithm.
 .. _F-RTO: https://tools.ietf.org/html/rfc5682
 
 TCP Fast Path
-============
+=============
 When kernel receives a TCP packet, it has two paths to handler the
 packet, one is fast path, another is slow path. The comment in kernel
 code provides a good explanation of them, I pasted them below::
@@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
 DSACK to the sender.
 
 * TcpExtTCPDSACKRecv
+
 The TCP stack receives a DSACK, which indicates an acknowledged
 duplicate packet is received.
 
@@ -690,7 +691,7 @@ The TCP stack receives a DSACK, which indicate an out of order
 duplicate packet is received.
 
 invalid SACK and DSACK
-====================
+======================
 When a SACK (or DSACK) block is invalid, a corresponding counter would
 be updated. The validation method is base on the start/end sequence
 number of the SACK block. For more details, please refer the comment
@@ -704,11 +705,13 @@ explaination:
 .. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
 
 * TcpExtTCPSACKDiscard
+
 This counter indicates how many SACK blocks are invalid. If the invalid
 SACK block is caused by ACK recording, the TCP stack will only ignore
 it and won't update this counter.
 
 * TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+
 When a DSACK block is invalid, one of these two counters would be
 updated. Which counter will be updated depends on the undo_marker flag
 of the TCP socket. If the undo_marker is not set, the TCP stack isn't
@@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
 will be updated. As implied in its name, it might be an old packet.
 
 SACK shift
-=========
+==========
 The linux networking stack stores data in sk_buff struct (skb for
 short). If a SACK block acrosses multiple skb, the TCP stack will try
 to re-arrange data in these skb. E.g. if a SACK block acknowledges seq
@@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
 discard, this operation is 'merge'.
 
 * TcpExtTCPSackShifted
+
 A skb is shifted
 
 * TcpExtTCPSackMerged
+
 A skb is merged
 
 * TcpExtTCPSackShiftFallback
+
 A skb should be shifted or merged, but the TCP stack doesn't do it for
 some reasons.
 
index e17ebf70b5480ecc232ce1f62aedf95a03b5f403..3e5a5d263f2992b77c4bbc884969af279513ee19 100644 (file)
@@ -8096,6 +8096,16 @@ F:       include/linux/iommu.h
 F:     include/linux/of_iommu.h
 F:     include/linux/iova.h
 
+IO_URING
+M:     Jens Axboe <axboe@kernel.dk>
+L:     linux-block@vger.kernel.org
+L:     linux-fsdevel@vger.kernel.org
+T:     git git://git.kernel.dk/linux-block
+T:     git git://git.kernel.dk/liburing
+S:     Maintained
+F:     fs/io_uring.c
+F:     include/uapi/linux/io_uring.h
+
 IP MASQUERADING
 M:     Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:     Maintained
index 99c0530489ef000781e25e8e677e5332d328628f..929f51ef307521fe46e3b8703c3e5e73caa1d090 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -31,26 +31,12 @@ _all:
 # descending is started. They are now explicitly listed as the
 # prepare rule.
 
-# Ugly workaround for Debian make-kpkg:
-# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
-# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
-# displays warning to discourage such abusage.
-ifneq ($(word 2, $(MAKEFILE_LIST)),)
-$(warning Do not include top Makefile of Linux Kernel)
-sub-make-done := 1
-MAKEFLAGS += -rR
-endif
-
-ifneq ($(sub-make-done),1)
+ifneq ($(sub_make_done),1)
 
 # Do not use make's built-in rules and variables
 # (this increases performance and avoids hard-to-debug behaviour)
 MAKEFLAGS += -rR
 
-# 'MAKEFLAGS += -rR' does not become immediately effective for old
-# GNU Make versions. Cancel implicit rules for this Makefile.
-$(lastword $(MAKEFILE_LIST)): ;
-
 # Avoid funny character set dependencies
 unexport LC_ALL
 LC_COLLATE=C
@@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \
 # 'sub-make' below.
 MAKEFLAGS += --include-dir=$(CURDIR)
 
+need-sub-make := 1
 else
 
 # Do not print "Entering directory ..." at all for in-tree build.
@@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory
 
 endif # ifneq ($(KBUILD_OUTPUT),)
 
+ifneq ($(filter 3.%,$(MAKE_VERSION)),)
+# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
+# We need to invoke sub-make to avoid implicit rules in the top Makefile.
+need-sub-make := 1
+# Cancel implicit rules for this Makefile.
+$(lastword $(MAKEFILE_LIST)): ;
+endif
+
+export sub_make_done := 1
+
+ifeq ($(need-sub-make),1)
+
 PHONY += $(MAKECMDGOALS) sub-make
 
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
 
 # Invoke a second make in the output directory, passing relevant variables
 sub-make:
-       $(Q)$(MAKE) sub-make-done=1 \
+       $(Q)$(MAKE) \
        $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
        -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
 
-else # sub-make-done
+endif # need-sub-make
+endif # sub_make_done
+
 # We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(need-sub-make),)
 
 # Do not print "Entering directory ...",
 # but we want to display it when entering to the output directory
@@ -497,7 +499,8 @@ outputmakefile:
 ifneq ($(KBUILD_SRC),)
        $(Q)ln -fsn $(srctree) source
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
-       $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
+       $(Q)test -e .gitignore || \
+       { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
 endif
 
 ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@@ -677,7 +680,7 @@ KBUILD_CFLAGS       += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS  += $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS  += $(call cc-option,-Oz,-Os)
+KBUILD_CFLAGS  += -Os
 else
 KBUILD_CFLAGS   += -O2
 endif
@@ -1757,7 +1760,7 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 endif   # ifeq ($(config-targets),1)
 endif   # ifeq ($(mixed-targets),1)
-endif   # sub-make-done
+endif   # need-sub-make
 
 PHONY += FORCE
 FORCE:
index df55672c59e6e4b99f5846f12944325c227cab03..c781e45d1d9953267b977bc094e0d1acde0ee623 100644 (file)
@@ -144,11 +144,11 @@ config ARC_CPU_770
          Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
          This core has a bunch of cool new features:
          -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
-                   Shared Address Spaces (for sharing TLB entries in MMU)
+                  Shared Address Spaces (for sharing TLB entries in MMU)
          -Caches: New Prog Model, Region Flush
          -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
 
-endif  #ISA_ARCOMPACT
+endif #ISA_ARCOMPACT
 
 config ARC_CPU_HS
        bool "ARC-HS"
@@ -198,7 +198,7 @@ config ARC_SMP_HALT_ON_RESET
          at designated entry point. Otherwise, all jump to a common
          entry point and spin-wait for the Master's signal.
 
-endif  #SMP
+endif #SMP
 
 config ARC_MCIP
        bool "ARConnect Multicore IP (MCIP) Support "
@@ -249,7 +249,7 @@ config ARC_CACHE_VIPT_ALIASING
        bool "Support VIPT Aliasing D$"
        depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
 
-endif  #ARC_CACHE
+endif #ARC_CACHE
 
 config ARC_HAS_ICCM
        bool "Use ICCM"
@@ -370,7 +370,7 @@ config ARC_FPU_SAVE_RESTORE
          based on actual usage of FPU by a task. Thus our implementation does
          this for all tasks in the system.
 
-endif  #ISA_ARCOMPACT
+endif #ISA_ARCOMPACT
 
 config ARC_CANT_LLSC
        def_bool n
@@ -386,6 +386,15 @@ config ARC_HAS_SWAPE
 
 if ISA_ARCV2
 
+config ARC_USE_UNALIGNED_MEM_ACCESS
+       bool "Enable unaligned access in HW"
+       default y
+       select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       help
+         The ARC HS architecture supports unaligned memory access
+         which is disabled by default. This option enables unaligned
+         access in hardware and allows software to make use of it.
+
 config ARC_HAS_LL64
        bool "Insn: 64bit LDD/STD"
        help
@@ -414,7 +423,7 @@ config ARC_IRQ_NO_AUTOSAVE
          This is programmable and can be optionally disabled, in which case
          software INTERRUPT_PROLOGUE/EPILOGUE do the needed work
 
-endif  # ISA_ARCV2
+endif # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
 
index df00578c279d4bc0ee03d71089769383440e7cf6..e2b991f75bc5b7bc0d8103d65e938df9b9c038ac 100644 (file)
@@ -28,6 +28,12 @@ cflags-$(CONFIG_ARC_HAS_SWAPE)               += -mswape
 
 ifdef CONFIG_ISA_ARCV2
 
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+cflags-y                               += -munaligned-access
+else
+cflags-y                               += -mno-unaligned-access
+endif
+
 ifndef CONFIG_ARC_HAS_LL64
 cflags-y                               += -mno-ll64
 endif
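To make the effect of the new option and the -munaligned-access flag concrete: with unaligned access enabled in hardware the compiler is free to emit plain word loads for misaligned addresses, whereas portable code otherwise has to assemble such values from byte accesses. A small illustrative userspace sketch (assumptions only, not taken from this patch) contrasting the two forms:

/*
 * Illustration of an unaligned 32-bit read. The memcpy() form is legal
 * everywhere because the compiler may lower it to byte loads; the cast
 * form relies on the CPU tolerating misaligned loads (e.g. ARC HS with
 * CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS=y) and is undefined behaviour in
 * strict C. It is shown only to make the hardware dependency visible.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint32_t safe, direct;

	memcpy(&safe, buf + 1, sizeof(safe));	/* works on any CPU  */
	direct = *(const uint32_t *)(buf + 1);	/* needs HW support  */

	printf("memcpy: 0x%08x  cast: 0x%08x\n",
	       (unsigned)safe, (unsigned)direct);
	return 0;
}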
index 02410b2114334466572c05e651b6227b02e415f0..c0bcd97522bbfcfa96b0f9e0fa992d104df474f7 100644 (file)
@@ -38,7 +38,7 @@
                        clock-div = <6>;
                };
 
-               iomux: iomux@FF10601c {
+               iomux: iomux@ff10601c {
                        /* Port 1 */
                        pctl_tsin_s0: pctl-tsin-s0 {   /* Serial TS-in 0 */
                                abilis,function = "mis0";
                        };
                };
 
-               gpioa: gpio@FF140000 {
+               gpioa: gpio@ff140000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF140000 0x1000>;
+                       reg = <0xff140000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioa";
                };
-               gpiob: gpio@FF141000 {
+               gpiob: gpio@ff141000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF141000 0x1000>;
+                       reg = <0xff141000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiob";
                };
-               gpioc: gpio@FF142000 {
+               gpioc: gpio@ff142000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF142000 0x1000>;
+                       reg = <0xff142000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioc";
                };
-               gpiod: gpio@FF143000 {
+               gpiod: gpio@ff143000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF143000 0x1000>;
+                       reg = <0xff143000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiod";
                };
-               gpioe: gpio@FF144000 {
+               gpioe: gpio@ff144000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF144000 0x1000>;
+                       reg = <0xff144000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioe";
                };
-               gpiof: gpio@FF145000 {
+               gpiof: gpio@ff145000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF145000 0x1000>;
+                       reg = <0xff145000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiof";
                };
-               gpiog: gpio@FF146000 {
+               gpiog: gpio@ff146000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF146000 0x1000>;
+                       reg = <0xff146000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiog";
                };
-               gpioh: gpio@FF147000 {
+               gpioh: gpio@ff147000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF147000 0x1000>;
+                       reg = <0xff147000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioh";
                };
-               gpioi: gpio@FF148000 {
+               gpioi: gpio@ff148000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF148000 0x1000>;
+                       reg = <0xff148000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <12>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioi";
                };
-               gpioj: gpio@FF149000 {
+               gpioj: gpio@ff149000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF149000 0x1000>;
+                       reg = <0xff149000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <32>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioj";
                };
-               gpiok: gpio@FF14a000 {
+               gpiok: gpio@ff14a000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14A000 0x1000>;
+                       reg = <0xff14a000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <22>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiok";
                };
-               gpiol: gpio@FF14b000 {
+               gpiol: gpio@ff14b000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14B000 0x1000>;
+                       reg = <0xff14b000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiol";
                };
-               gpiom: gpio@FF14c000 {
+               gpiom: gpio@ff14c000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14C000 0x1000>;
+                       reg = <0xff14c000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiom";
                };
-               gpion: gpio@FF14d000 {
+               gpion: gpio@ff14d000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14D000 0x1000>;
+                       reg = <0xff14d000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <5>;
index 3acf04db80302875d86d4e4de355c077bfabbd02..c968e677db46b01e88f344c52a6232f33fda9556 100644 (file)
        };
 
        soc100 {
-               uart@FF100000 {
+               uart@ff100000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
-               ethernet@FE100000 {
+               ethernet@fe100000 {
                        phy-mode = "rgmii";
                };
 
-               i2c0: i2c@FF120000 {
+               i2c0: i2c@ff120000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c1: i2c@FF121000 {
+               i2c1: i2c@ff121000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c2: i2c@FF122000 {
+               i2c2: i2c@ff122000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c3: i2c@FF123000 {
+               i2c3: i2c@ff123000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c4: i2c@FF124000 {
+               i2c4: i2c@ff124000 {
                        i2c-sda-hold-time-ns = <432>;
                };
 
index f9e7686044ebee0a4c49e1d4824810e49747256e..6a1615f58f052d3586dda6038e7b2ebc3a55d161 100644 (file)
@@ -38,7 +38,7 @@
                        clock-div = <6>;
                };
 
-               iomux: iomux@FF10601c {
+               iomux: iomux@ff10601c {
                        /* Port 1 */
                        pctl_tsin_s0: pctl-tsin-s0 {   /* Serial TS-in 0 */
                                abilis,function = "mis0";
                        };
                };
 
-               gpioa: gpio@FF140000 {
+               gpioa: gpio@ff140000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF140000 0x1000>;
+                       reg = <0xff140000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioa";
                };
-               gpiob: gpio@FF141000 {
+               gpiob: gpio@ff141000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF141000 0x1000>;
+                       reg = <0xff141000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiob";
                };
-               gpioc: gpio@FF142000 {
+               gpioc: gpio@ff142000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF142000 0x1000>;
+                       reg = <0xff142000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioc";
                };
-               gpiod: gpio@FF143000 {
+               gpiod: gpio@ff143000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF143000 0x1000>;
+                       reg = <0xff143000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiod";
                };
-               gpioe: gpio@FF144000 {
+               gpioe: gpio@ff144000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF144000 0x1000>;
+                       reg = <0xff144000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioe";
                };
-               gpiof: gpio@FF145000 {
+               gpiof: gpio@ff145000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF145000 0x1000>;
+                       reg = <0xff145000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiof";
                };
-               gpiog: gpio@FF146000 {
+               gpiog: gpio@ff146000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF146000 0x1000>;
+                       reg = <0xff146000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiog";
                };
-               gpioh: gpio@FF147000 {
+               gpioh: gpio@ff147000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF147000 0x1000>;
+                       reg = <0xff147000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioh";
                };
-               gpioi: gpio@FF148000 {
+               gpioi: gpio@ff148000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF148000 0x1000>;
+                       reg = <0xff148000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <12>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioi";
                };
-               gpioj: gpio@FF149000 {
+               gpioj: gpio@ff149000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF149000 0x1000>;
+                       reg = <0xff149000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <32>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioj";
                };
-               gpiok: gpio@FF14a000 {
+               gpiok: gpio@ff14a000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14A000 0x1000>;
+                       reg = <0xff14a000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <22>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiok";
                };
-               gpiol: gpio@FF14b000 {
+               gpiol: gpio@ff14b000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14B000 0x1000>;
+                       reg = <0xff14b000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiol";
                };
-               gpiom: gpio@FF14c000 {
+               gpiom: gpio@ff14c000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14C000 0x1000>;
+                       reg = <0xff14c000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiom";
                };
-               gpion: gpio@FF14d000 {
+               gpion: gpio@ff14d000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14D000 0x1000>;
+                       reg = <0xff14d000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <5>;
index 37d88c5dd181fc5c0de3eb8bd7526d84e56f483c..05143ce9c120434a0d64ce892ed593dc038d8cf1 100644 (file)
        };
 
        soc100 {
-               uart@FF100000 {
+               uart@ff100000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
-               ethernet@FE100000 {
+               ethernet@fe100000 {
                        phy-mode = "rgmii";
                };
 
-               i2c0: i2c@FF120000 {
+               i2c0: i2c@ff120000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c1: i2c@FF121000 {
+               i2c1: i2c@ff121000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c2: i2c@FF122000 {
+               i2c2: i2c@ff122000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c3: i2c@FF123000 {
+               i2c3: i2c@ff123000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c4: i2c@FF124000 {
+               i2c4: i2c@ff124000 {
                        i2c-sda-hold-time-ns = <432>;
                };
 
index 3121536b25a375883a5eca1b94a3e2a4db372680..2fbf1bdfe6de815f0865865338f225a897a204c5 100644 (file)
@@ -54,7 +54,7 @@
                #size-cells     = <1>;
                device_type     = "soc";
                ranges          = <0xfe000000 0xfe000000 0x02000000
-                               0x000F0000 0x000F0000 0x00010000>;
+                               0x000f0000 0x000f0000 0x00010000>;
                compatible      = "abilis,tb10x", "simple-bus";
 
                pll0: oscillator {
                        clock-output-names = "ahb_clk";
                };
 
-               iomux: iomux@FF10601c {
+               iomux: iomux@ff10601c {
                        compatible = "abilis,tb10x-iomux";
                        #gpio-range-cells = <3>;
-                       reg = <0xFF10601c 0x4>;
+                       reg = <0xff10601c 0x4>;
                };
 
                intc: interrupt-controller {
@@ -88,7 +88,7 @@
                };
                tb10x_ictl: pic@fe002000 {
                        compatible = "abilis,tb10x-ictl";
-                       reg = <0xFE002000 0x20>;
+                       reg = <0xfe002000 0x20>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
                        interrupt-parent = <&intc>;
                                        20 21 22 23 24 25 26 27 28 29 30 31>;
                };
 
-               uart@FF100000 {
+               uart@ff100000 {
                        compatible = "snps,dw-apb-uart";
-                       reg = <0xFF100000 0x100>;
+                       reg = <0xff100000 0x100>;
                        clock-frequency = <166666666>;
                        interrupts = <25 8>;
                        reg-shift = <2>;
                        reg-io-width = <4>;
                        interrupt-parent = <&tb10x_ictl>;
                };
-               ethernet@FE100000 {
+               ethernet@fe100000 {
                        compatible = "snps,dwmac-3.70a","snps,dwmac";
-                       reg = <0xFE100000 0x1058>;
+                       reg = <0xfe100000 0x1058>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <6 8>;
                        interrupt-names = "macirq";
                        clocks = <&ahb_clk>;
                        clock-names = "stmmaceth";
                };
-               dma@FE000000 {
+               dma@fe000000 {
                        compatible = "snps,dma-spear1340";
-                       reg = <0xFE000000 0x400>;
+                       reg = <0xfe000000 0x400>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <14 8>;
                        dma-channels = <6>;
                        multi-block = <1 1 1 1 1 1>;
                };
 
-               i2c0: i2c@FF120000 {
+               i2c0: i2c@ff120000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF120000 0x1000>;
+                       reg = <0xff120000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c1: i2c@FF121000 {
+               i2c1: i2c@ff121000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF121000 0x1000>;
+                       reg = <0xff121000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c2: i2c@FF122000 {
+               i2c2: i2c@ff122000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF122000 0x1000>;
+                       reg = <0xff122000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c3: i2c@FF123000 {
+               i2c3: i2c@ff123000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF123000 0x1000>;
+                       reg = <0xff123000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c4: i2c@FF124000 {
+               i2c4: i2c@ff124000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF124000 0x1000>;
+                       reg = <0xff124000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
 
-               spi0: spi@0xFE010000 {
+               spi0: spi@fe010000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        cell-index = <0>;
                        compatible = "abilis,tb100-spi";
                        num-cs = <1>;
-                       reg = <0xFE010000 0x20>;
+                       reg = <0xfe010000 0x20>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <26 8>;
                        clocks = <&ahb_clk>;
                };
-               spi1: spi@0xFE011000 {
+               spi1: spi@fe011000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        cell-index = <1>;
                        compatible = "abilis,tb100-spi";
                        num-cs = <2>;
-                       reg = <0xFE011000 0x20>;
+                       reg = <0xfe011000 0x20>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <10 8>;
                        clocks = <&ahb_clk>;
                        interrupts = <20 2>, <19 2>;
                        interrupt-names = "cmd_irq", "event_irq";
                };
-               tb10x_mdsc0: tb10x-mdscr@FF300000 {
+               tb10x_mdsc0: tb10x-mdscr@ff300000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF300000 0x7000>;
+                       reg = <0xff300000 0x7000>;
                        tb100-mdscr-manage-tsin;
                };
-               tb10x_mscr0: tb10x-mdscr@FF307000 {
+               tb10x_mscr0: tb10x-mdscr@ff307000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF307000 0x7000>;
+                       reg = <0xff307000 0x7000>;
                };
                tb10x_scr0: tb10x-mdscr@ff30e000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF30e000 0x4000>;
+                       reg = <0xff30e000 0x4000>;
                        tb100-mdscr-manage-tsin;
                };
                tb10x_scr1: tb10x-mdscr@ff312000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF312000 0x4000>;
+                       reg = <0xff312000 0x4000>;
                        tb100-mdscr-manage-tsin;
                };
                tb10x_wfb: tb10x-wfb@ff319000 {
index fdc266504ada273e6efaf72c18cc8c2e2f48edf2..37be3bf03ad632f75214f82ff984d9f781e68358 100644 (file)
@@ -41,7 +41,7 @@
                 * this GPIO block ORs all interrupts on CPU card (creg,..)
                 * to uplink only 1 IRQ to ARC core intc
                 */
-               dw-apb-gpio@0x2000 {
+               dw-apb-gpio@2000 {
                        compatible = "snps,dw-apb-gpio";
                        reg = < 0x2000 0x80 >;
                        #address-cells = <1>;
@@ -60,7 +60,7 @@
                        };
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <33333000>;
@@ -88,7 +88,7 @@
         * avoid duplicating the MB dtsi file given that IRQ from
         * this intc to cpu intc are different for axs101 and axs103
         */
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0x0 0xe0012000 0x0 0x200 >;
index d75d65ddf8e31db78c58fa9882b90c2e6be2ed4b..effa37536d7ad3a02668e0455ed93220c245c2cb 100644 (file)
@@ -55,7 +55,7 @@
                 * this GPIO block ORs all interrupts on CPU card (creg,..)
                 * to uplink only 1 IRQ to ARC core intc
                 */
-               dw-apb-gpio@0x2000 {
+               dw-apb-gpio@2000 {
                        compatible = "snps,dw-apb-gpio";
                        reg = < 0x2000 0x80 >;
                        #address-cells = <1>;
@@ -74,7 +74,7 @@
                        };
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <33333000>;
         * external DMA buffer located outside of IOC aperture.
         */
        axs10x_mb {
-               ethernet@0x18000 {
+               ethernet@18000 {
                        dma-coherent;
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        dma-coherent;
                };
 
-               ohci@0x60000 {
+               ohci@60000 {
                        dma-coherent;
                };
 
-               mmc@0x15000 {
+               mmc@15000 {
                        dma-coherent;
                };
        };
         * avoid duplicating the MB dtsi file given that IRQ from
         * this intc to cpu intc are different for axs101 and axs103
         */
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0x0 0xe0012000 0x0 0x200 >;
                #size-cells = <2>;
                ranges;
                /*
-                * Move frame buffer out of IOC aperture (0x8z-0xAz).
+                * Move frame buffer out of IOC aperture (0x8z-0xaz).
                 */
                frame_buffer: frame_buffer@be000000 {
                        compatible = "shared-dma-pool";
index a05bb737ea6392f5e77cd3830dceb8afe620943e..e401e59f61802f2ef33fcb12854a7f97f896200d 100644 (file)
@@ -62,7 +62,7 @@
                 * this GPIO block ORs all interrupts on CPU card (creg,..)
                 * to uplink only 1 IRQ to ARC core intc
                 */
-               dw-apb-gpio@0x2000 {
+               dw-apb-gpio@2000 {
                        compatible = "snps,dw-apb-gpio";
                        reg = < 0x2000 0x80 >;
                        #address-cells = <1>;
@@ -81,7 +81,7 @@
                        };
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <33333000>;
         * external DMA buffer located outside of IOC aperture.
         */
        axs10x_mb {
-               ethernet@0x18000 {
+               ethernet@18000 {
                        dma-coherent;
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        dma-coherent;
                };
 
-               ohci@0x60000 {
+               ohci@60000 {
                        dma-coherent;
                };
 
-               mmc@0x15000 {
+               mmc@15000 {
                        dma-coherent;
                };
        };
         * avoid duplicating the MB dtsi file given that IRQ from
         * this intc to cpu intc are different for axs101 and axs103
         */
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0x0 0xe0012000 0x0 0x200 >;
                #size-cells = <2>;
                ranges;
                /*
-                * Move frame buffer out of IOC aperture (0x8z-0xAz).
+                * Move frame buffer out of IOC aperture (0x8z-0xaz).
                 */
                frame_buffer: frame_buffer@be000000 {
                        compatible = "shared-dma-pool";
index 37bafd44e36d0fed9b85e80ea356cd78df0c1872..4ead6dc9af2f7e3823b332ee9d7ed5df5b920d25 100644 (file)
@@ -72,7 +72,7 @@
                        };
                };
 
-               gmac: ethernet@0x18000 {
+               gmac: ethernet@18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
                        mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        compatible = "generic-ehci";
                        reg = < 0x40000 0x100 >;
                        interrupts = < 8 >;
                };
 
-               ohci@0x60000 {
+               ohci@60000 {
                        compatible = "generic-ohci";
                        reg = < 0x60000 0x100 >;
                        interrupts = < 8 >;
                 * dw_mci_pltfm_prepare_command() is used in generic platform
                 * code.
                 */
-               mmc@0x15000 {
+               mmc@15000 {
                        compatible = "altr,socfpga-dw-mshc";
                        reg = < 0x15000 0x400 >;
                        fifo-depth = < 16 >;
                        bus-width = < 4 >;
                };
 
-               uart@0x20000 {
+               uart@20000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x20000 0x100>;
                        clock-frequency = <33333333>;
                        reg-io-width = <4>;
                };
 
-               uart@0x21000 {
+               uart@21000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x21000 0x100>;
                        clock-frequency = <33333333>;
                };
 
                /* UART muxed with USB data port (ttyS3) */
-               uart@0x22000 {
+               uart@22000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x22000 0x100>;
                        clock-frequency = <33333333>;
                        reg-io-width = <4>;
                };
 
-               i2c@0x1d000 {
+               i2c@1d000 {
                        compatible = "snps,designware-i2c";
                        reg = <0x1d000 0x100>;
                        clock-frequency = <400000>;
                        #sound-dai-cells = <0>;
                };
 
-               i2c@0x1f000 {
+               i2c@1f000 {
                        compatible = "snps,designware-i2c";
                        #address-cells = <1>;
                        #size-cells = <0>;
                                };
                        };
 
-                       eeprom@0x54{
+                       eeprom@54{
                                compatible = "atmel,24c01";
                                reg = <0x54>;
                                pagesize = <0x8>;
                        };
 
-                       eeprom@0x57{
+                       eeprom@57{
                                compatible = "atmel,24c04";
                                reg = <0x57>;
                                pagesize = <0x8>;
index 43f17b51ee89cca00a0b2eebb7ed045d49de03a0..69bc1c9e8e50d673729a6187fb4f1669971c9cb7 100644 (file)
                cgu_rst: reset-controller@8a0 {
                        compatible = "snps,hsdk-reset";
                        #reset-cells = <1>;
-                       reg = <0x8A0 0x4>, <0xFF0 0x4>;
+                       reg = <0x8a0 0x4>, <0xff0 0x4>;
                };
 
                core_clk: core-clk@0 {
                        compatible = "snps,hsdk-core-pll-clock";
-                       reg = <0x00 0x10>, <0x14B8 0x4>;
+                       reg = <0x00 0x10>, <0x14b8 0x4>;
                        #clock-cells = <0>;
                        clocks = <&input_clk>;
 
                        #clock-cells = <0>;
                };
 
+               dmac_core_clk: dmac-core-clk {
+                       compatible = "fixed-clock";
+                       clock-frequency = <400000000>;
+                       #clock-cells = <0>;
+               };
+
+               dmac_cfg_clk: dmac-gpu-cfg-clk {
+                       compatible = "fixed-clock";
+                       clock-frequency = <200000000>;
+                       #clock-cells = <0>;
+               };
+
                gmac: ethernet@8000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
                        reg = <0x60000 0x100>;
                        interrupts = <15>;
+                       resets = <&cgu_rst HSDK_USB_RESET>;
                        dma-coherent;
                };
 
                        compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
                        reg = <0x40000 0x100>;
                        interrupts = <15>;
+                       resets = <&cgu_rst HSDK_USB_RESET>;
                        dma-coherent;
                };
 
                                reg = <0>;
                        };
                };
+
+               dmac: dmac@80000 {
+                       compatible = "snps,axi-dma-1.01a";
+                       reg = <0x80000 0x400>;
+                       interrupts = <27>;
+                       clocks = <&dmac_core_clk>, <&dmac_cfg_clk>;
+                       clock-names = "core-clk", "cfgr-clk";
+
+                       dma-channels = <4>;
+                       snps,dma-masters = <2>;
+                       snps,data-width = <3>;
+                       snps,block-size = <4096 4096 4096 4096>;
+                       snps,priority = <0 1 2 3>;
+                       snps,axi-max-burst-len = <16>;
+               };
        };
 
        memory@80000000 {
index 0fd6ba985b164b7752c26544e5d4b6d9684c88be..84e8766c8ca2c6144167ea48e969145e6bebfe71 100644 (file)
@@ -36,7 +36,7 @@
                        #interrupt-cells = <1>;
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <2403200>;
@@ -49,7 +49,7 @@
 
        };
 
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0xe0012000 0x200 >;
index 28956f9a9f3db7e12042821ea72d5a4af0514e2a..eb7e705e8a2789722a449e5abf5d5ad122619f99 100644 (file)
@@ -44,7 +44,7 @@
                        #interrupt-cells = <1>;
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <2403200>;
@@ -57,7 +57,7 @@
 
        };
 
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0xe0012000 0x200 >;
index 48bb4b4cd234ed65a8b4002dbf060f3bc09b7e0d..925d5cc95dbbbf1419b920d4d3e73743ec794dd4 100644 (file)
@@ -36,7 +36,7 @@
                        };
                };
 
-               ethernet@0x18000 {
+               ethernet@18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
                        clock-names = "stmmaceth";
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        compatible = "generic-ehci";
                        reg = < 0x40000 0x100 >;
                        interrupts = < 8 >;
                };
 
-               uart@0x20000 {
+               uart@20000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x20000 0x100>;
                        clock-frequency = <2403200>;
@@ -65,7 +65,7 @@
                        reg-io-width = <4>;
                };
 
-               uart@0x21000 {
+               uart@21000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x21000 0x100>;
                        clock-frequency = <2403200>;
@@ -75,7 +75,7 @@
                        reg-io-width = <4>;
                };
 
-               uart@0x22000 {
+               uart@22000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x22000 0x100>;
                        clock-frequency = <2403200>;
                        interrupt-names = "arc_ps2_irq";
                };
 
-               mmc@0x15000 {
+               mmc@15000 {
                        compatible = "snps,dw-mshc";
                        reg = <0x15000 0x400>;
                        fifo-depth = <1024>;
         * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
         *
         * This node is intentionally put outside of MB above because
-        * it maps areas outside of MB's 0xEz-0xFz.
+        * it maps areas outside of MB's 0xez-0xfz.
         */
-       uio_ev: uio@0xD0000000 {
+       uio_ev: uio@d0000000 {
                compatible = "generic-uio";
-               reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
+               reg = <0xd0000000 0x2000 0xd1000000 0x2000 0x90000000 0x10000000 0xc0000000 0x10000000>;
                reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
                interrupt-parent = <&mb_intc>;
                interrupts = <23>;
index 6fd3d29546afd2e2f76f40e5a0af57757a5c369a..0e5fd29ed238b5a4dc715a364bde2d9ee88edb58 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index a27eafdc82602f6856b00c5fbd82db72c33cf238..a7d4be87b2f0a8440307f87b00729839d1c9ab3e 100644 (file)
@@ -82,6 +82,7 @@
 #define ECR_V_DTLB_MISS                        0x05
 #define ECR_V_PROTV                    0x06
 #define ECR_V_TRAP                     0x09
+#define ECR_V_MISALIGN                 0x0d
 #endif
 
 /* DTLB Miss and Protection Violation Cause Codes */
@@ -167,14 +168,6 @@ struct bcr_mpy {
 #endif
 };
 
-struct bcr_extn_xymem {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
-#else
-       unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
-#endif
-};
-
 struct bcr_iccm_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int base:16, pad:5, sz:3, ver:8;
@@ -312,7 +305,7 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
        struct bcr_isa_arcv2 isa;
-       const char *details, *name;
+       const char *release, *name;
        unsigned int vec_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
@@ -322,7 +315,6 @@ struct cpuinfo_arc {
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
        struct bcr_mpy extn_mpy;
-       struct bcr_extn_xymem extn_xymem;
 };
 
 extern struct cpuinfo_arc cpuinfo_arc700[];
index 8a4f77ea3238e6f017ae24ab6b55e2952637fe04..e66d0339e1d8617ac30050f53d26d926243266e6 100644 (file)
 #define ARCV2_IRQ_DEF_PRIO     1
 
 /* seed value for status register */
-#define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB       STATUS_AD_MASK
+#else
+#define __AD_ENB       0
+#endif
+
+#define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | __AD_ENB | \
                                        (ARCV2_IRQ_DEF_PRIO << 1))
 
 #ifndef __ASSEMBLY__
index 6958545390f0f847ed3a7745b7325964d7f23f17..9cd7ee4fad390e7806a812b715d2ac90bbe0e56f 100644 (file)
@@ -105,10 +105,10 @@ static const char * const arc_pmu_ev_hw_map[] = {
        [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
        /* All jump instructions that are taken */
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
-       [PERF_COUNT_ARC_BPOK]         = "bpok",   /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
        [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
 #else
+       [PERF_COUNT_ARC_BPOK]         = "bpok",   /* NP-NT, PT-T, PNT-NT */
        [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
 #endif
        [PERF_COUNT_ARC_LDC] = "imemrdc",       /* Instr: mem read cached */
index 2ba04a7db62128148ac303e79e95c2cb2ee2d534..daa914da796886de6a3ae3744e3428d30f3804c7 100644 (file)
@@ -21,8 +21,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        unsigned int val;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
@@ -34,6 +32,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");
 
+       /*
+        * ACQUIRE barrier to ensure load/store after taking the lock
+        * don't "bleed-up" out of the critical section (leak-in is allowed)
+        * http://www.spinics.net/lists/kernel/msg2010409.html
+        *
+        * ARCv2 only has load-load, store-store and all-all barrier
+        * thus need the full all-all barrier
+        */
        smp_mb();
 }
 
@@ -42,8 +48,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned int val, got_it = 0;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
@@ -67,9 +71,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        smp_mb();
 
-       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-       smp_mb();
+       WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
 /*
@@ -81,8 +83,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned int val;
 
-       smp_mb();
-
        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
@@ -113,8 +113,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned int val, got_it = 0;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
@@ -140,8 +138,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned int val;
 
-       smp_mb();
-
        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
@@ -175,8 +171,6 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned int val, got_it = 0;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
@@ -217,17 +211,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");
-
-       smp_mb();
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        smp_mb();
 
-       rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-
-       smp_mb();
+       WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
 }
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
@@ -237,10 +227,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
        /*
-        * This smp_mb() is technically superfluous, we only need the one
-        * after the lock for providing the ACQUIRE semantics.
-        * However doing the "right" thing was regressing hackbench
-        * so keeping this, pending further investigation
+        * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
+        * for ACQ and REL semantics respectively. However, EX-based spinlocks
+        * need the extra smp_mb to work around a hardware quirk.
         */
        smp_mb();
 
@@ -257,14 +246,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 #endif
        : "memory");
 
-       /*
-        * ACQUIRE barrier to ensure load/store after taking the lock
-        * don't "bleed-up" out of the critical section (leak-in is allowed)
-        * http://www.spinics.net/lists/kernel/msg2010409.html
-        *
-        * ARCv2 only has load-load, store-store and all-all barrier
-        * thus need the full all-all barrier
-        */
        smp_mb();
 }
 
@@ -309,8 +290,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        : "memory");
 
        /*
-        * superfluous, but keeping for now - see pairing version in
-        * arch_spin_lock above
+        * see pairing version/comment in arch_spin_lock above
         */
        smp_mb();
 }
@@ -344,7 +324,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
 
-       smp_mb();
        return ret;
 }
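
A minimal userspace sketch of the ACQUIRE/RELEASE contract described in the new spinlock comments above, written with C11 atomics instead of the kernel's llock/scond loop and full smp_mb(); the toy_* names are illustrative only, not kernel API:

    #include <stdatomic.h>

    /* Toy lock showing the ordering contract, not the ARC implementation. */
    typedef struct { atomic_uint slock; } toy_spinlock_t;

    static void toy_spin_lock(toy_spinlock_t *l)
    {
            unsigned int expected = 0;

            /* acquire: later loads/stores cannot move before the lock is taken */
            while (!atomic_compare_exchange_weak_explicit(&l->slock, &expected, 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                    expected = 0;
    }

    static void toy_spin_unlock(toy_spinlock_t *l)
    {
            /* release: earlier loads/stores cannot move past the unlock */
            atomic_store_explicit(&l->slock, 0, memory_order_release);
    }

On ARCv2 there is no dedicated acquire-only or release-only barrier, which is why the patch above keeps a full smp_mb() after lock and before unlock instead.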
 
index 30e090625916160acb23df6bfa44e86bcad7192f..8f6e0447dd1702b571b23a3f561f8ed032ae6abf 100644 (file)
        ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
        ; by default
        lr      r5, [status32]
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
        bset    r5, r5, STATUS_AD_BIT
+#else
+       ; Although disabled at reset, bootloader might have enabled it
+       bclr    r5, r5, STATUS_AD_BIT
+#endif
        kflag   r5
 #endif
 .endm
@@ -106,6 +111,7 @@ ENTRY(stext)
        ;    r2 = pointer to uboot provided cmdline or external DTB in mem
        ; These are handled later in handle_uboot_args()
        st      r0, [@uboot_tag]
+       st      r1, [@uboot_magic]
        st      r2, [@uboot_arg]
 
        ; setup "current" tsk and optionally cache it in dedicated r25
index cf18b3e5a934d34c684edcc7aa84533a10f932bf..c0d0124de089b4a456a98c20c5ab6f78913ab36d 100644 (file)
@@ -95,7 +95,7 @@ void arc_init_IRQ(void)
 
        /* setup status32, don't enable intr yet as kernel doesn't want */
        tmp = read_aux_reg(ARC_REG_STATUS32);
-       tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
+       tmp |= ARCV2_IRQ_DEF_PRIO << 1;
        tmp &= ~STATUS_IE_MASK;
        asm volatile("kflag %0  \n"::"r"(tmp));
 }
index 7b2340996cf80fc4ddc382c55d86acbe37d49bf5..a9c88b7e9182f6232b3f319a00865c8c9946b38f 100644 (file)
@@ -36,6 +36,7 @@ unsigned int intr_to_DE_cnt;
 
 /* Part of U-boot ABI: see head.S */
 int __initdata uboot_tag;
+int __initdata uboot_magic;
 char __initdata *uboot_arg;
 
 const struct machine_desc *machine_desc;
@@ -44,29 +45,24 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
 
 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_legacy_rel[] = {
+       /* ID.ARCVER,   Release */
 #ifdef CONFIG_ISA_ARCOMPACT
-       { 0x34, "R4.10"},
-       { 0x35, "R4.11"},
+       { 0x34,         "R4.10"},
+       { 0x35,         "R4.11"},
 #else
-       { 0x51, "R2.0" },
-       { 0x52, "R2.1" },
-       { 0x53, "R3.0" },
-       { 0x54, "R3.10a" },
+       { 0x51,         "R2.0" },
+       { 0x52,         "R2.1" },
+       { 0x53,         "R3.0" },
 #endif
-       { 0x00, NULL   }
+       { 0x00,         NULL   }
 };
 
-static const struct id_to_str arc_cpu_nm[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
-       { 0x20, "ARC 600"   },
-       { 0x30, "ARC 770"   },  /* 750 identified seperately */
-#else
-       { 0x40, "ARC EM"  },
-       { 0x50, "ARC HS38"  },
-       { 0x54, "ARC HS48"  },
-#endif
-       { 0x00, "Unknown"   }
+static const struct id_to_str arc_cpu_rel[] = {
+       /* UARCH.MAJOR, Release */
+       {  0,           "R3.10a"},
+       {  1,           "R3.50a"},
+       {  0xFF,        NULL   }
 };
 
 static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
@@ -116,31 +112,72 @@ static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
        }
 }
 
+static void decode_arc_core(struct cpuinfo_arc *cpu)
+{
+       struct bcr_uarch_build_arcv2 uarch;
+       const struct id_to_str *tbl;
+
+       /*
+        * Up to (and including) the first core4 release (0x54) things were
+        * simple: AUX IDENTITY.ARCVER was sufficient to identify the arc family
+        * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
+        */
+
+       if (cpu->core.family < 0x54) { /* includes arc700 */
+
+               for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
+                       if (cpu->core.family == tbl->id) {
+                               cpu->release = tbl->str;
+                               break;
+                       }
+               }
+
+               if (is_isa_arcompact())
+                       cpu->name = "ARC700";
+               else if (tbl->str)
+                       cpu->name = "HS38";
+               else
+                       cpu->name = cpu->release = "Unknown";
+
+               return;
+       }
+
+       /*
+        * However, the subsequent HS release (same 0x54) allows HS38 or HS48
+        * configurations and encodes this info in a different BCR.
+        * The BCR was introduced in 0x54 so it can't be read unconditionally.
+        */
+
+       READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+       if (uarch.prod == 4) {
+               cpu->name = "HS48";
+               cpu->extn.dual = 1;
+
+       } else {
+               cpu->name = "HS38";
+       }
+
+       for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+               if (uarch.maj == tbl->id) {
+                       cpu->release = tbl->str;
+                       break;
+               }
+       }
+}
+
 static void read_arc_build_cfg_regs(void)
 {
        struct bcr_timer timer;
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
-       const struct id_to_str *tbl;
        struct bcr_isa_arcv2 isa;
        struct bcr_actionpoint ap;
 
        FIX_PTR(cpu);
 
        READ_BCR(AUX_IDENTITY, cpu->core);
-
-       for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
-               if (cpu->core.family == tbl->id) {
-                       cpu->details = tbl->str;
-                       break;
-               }
-       }
-
-       for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
-               if ((cpu->core.family & 0xF4) == tbl->id)
-                       break;
-       }
-       cpu->name = tbl->str;
+       decode_arc_core(cpu);
 
        READ_BCR(ARC_REG_TIMERS_BCR, timer);
        cpu->extn.timer0 = timer.t0;
@@ -151,16 +188,6 @@ static void read_arc_build_cfg_regs(void)
 
        READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
-       cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */
-       cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
-       cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;        /* 1,3 */
-       cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
-       cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
-       cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
-                               IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
-
-       READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
-
        /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
        read_decode_ccm_bcr(cpu);
 
@@ -198,30 +225,12 @@ static void read_arc_build_cfg_regs(void)
                cpu->bpu.num_pred = 2048 << bpu.pte;
                cpu->bpu.ret_stk = 4 << bpu.rse;
 
-               if (cpu->core.family >= 0x54) {
-
-                       struct bcr_uarch_build_arcv2 uarch;
-
-                       /*
-                        * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
-                        * dual issue only (HS4x). But next uarch rev (1:0)
-                        * allows it be configured for single issue (HS3x)
-                        * Ensure we fiddle with dual issue only on HS4x
-                        */
-                       READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
-
-                       if (uarch.prod == 4) {
-                               unsigned int exec_ctrl;
-
-                               /* dual issue hardware always present */
-                               cpu->extn.dual = 1;
-
-                               READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+               /* if dual issue hardware, is it enabled ? */
+               if (cpu->extn.dual) {
+                       unsigned int exec_ctrl;
 
-                               /* dual issue hardware enabled ? */
-                               cpu->extn.dual_enb = !(exec_ctrl & 1);
-
-                       }
+                       READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+                       cpu->extn.dual_enb = !(exec_ctrl & 1);
                }
        }
 
@@ -263,7 +272,8 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
        struct bcr_identity *core = &cpu->core;
-       int i, n = 0, ua = 0;
+       char mpy_opt[16];
+       int n = 0;
 
        FIX_PTR(cpu);
 
@@ -272,7 +282,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       core->family, core->cpu_id, core->chip_id);
 
        n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
-                      cpu_id, cpu->name, cpu->details,
+                      cpu_id, cpu->name, cpu->release,
                       is_isa_arcompact() ? "ARCompact" : "ARCv2",
                       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
                       IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
@@ -283,61 +293,50 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
                       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
 
-#ifdef __ARC_UNALIGNED__
-       ua = 1;
-#endif
-       n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
-                          IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
-                          IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
-                          IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
-
-       if (i)
-               n += scnprintf(buf + n, len - n, "\n\t\t: ");
-
        if (cpu->extn_mpy.ver) {
-               if (cpu->extn_mpy.ver <= 0x2) { /* ARCompact */
-                       n += scnprintf(buf + n, len - n, "mpy ");
+               if (is_isa_arcompact()) {
+                       scnprintf(mpy_opt, 16, "mpy");
                } else {
+
                        int opt = 2;    /* stock MPY/MPYH */
 
                        if (cpu->extn_mpy.dsp)  /* OPT 7-9 */
                                opt = cpu->extn_mpy.dsp + 6;
 
-                       n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
+                       scnprintf(mpy_opt, 16, "mpy[opt %d] ", opt);
                }
        }
 
        n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
-                      IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
-                      IS_AVAIL1(cpu->extn.norm, "norm "),
-                      IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
-                      IS_AVAIL1(cpu->extn.swap, "swap "),
-                      IS_AVAIL1(cpu->extn.minmax, "minmax "),
-                      IS_AVAIL1(cpu->extn.crc, "crc "),
-                      IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
-
-       if (cpu->bpu.ver)
+                      IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+                      IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+                      IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
+                      IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
+                      IS_AVAIL1(cpu->isa.div_rem, "div_rem "));
+
+       if (cpu->bpu.ver) {
                n += scnprintf(buf + n, len - n,
                              "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
                              IS_AVAIL1(cpu->bpu.full, "full"),
                              IS_AVAIL1(!cpu->bpu.full, "partial"),
                              cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
 
-       if (is_isa_arcv2()) {
-               struct bcr_lpb lpb;
+               if (is_isa_arcv2()) {
+                       struct bcr_lpb lpb;
 
-               READ_BCR(ARC_REG_LPB_BUILD, lpb);
-               if (lpb.ver) {
-                       unsigned int ctl;
-                       ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+                       READ_BCR(ARC_REG_LPB_BUILD, lpb);
+                       if (lpb.ver) {
+                               unsigned int ctl;
+                               ctl = read_aux_reg(ARC_REG_LPB_CTRL);
 
-                       n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
-                               lpb.entries,
-                               IS_DISABLED_RUN(!ctl));
+                               n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
+                                              lpb.entries,
+                                              IS_DISABLED_RUN(!ctl));
+                       }
                }
+               n += scnprintf(buf + n, len - n, "\n");
        }
 
-       n += scnprintf(buf + n, len - n, "\n");
        return buf;
 }
 
@@ -390,11 +389,6 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                }
        }
 
-       n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
-                       EF_ARC_OSABI_CURRENT >> 8,
-                       EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
-                       "no-legacy-syscalls" : "64-bit data any register aligned");
-
        return buf;
 }
 
@@ -497,6 +491,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
 #define UBOOT_TAG_NONE         0
 #define UBOOT_TAG_CMDLINE      1
 #define UBOOT_TAG_DTB          2
+/* We always pass 0 as magic from U-boot */
+#define UBOOT_MAGIC_VALUE      0
 
 void __init handle_uboot_args(void)
 {
@@ -511,6 +507,11 @@ void __init handle_uboot_args(void)
                goto ignore_uboot_args;
        }
 
+       if (uboot_magic != UBOOT_MAGIC_VALUE) {
+               pr_warn(IGNORE_ARGS "non zero uboot magic\n");
+               goto ignore_uboot_args;
+       }
+
        if (uboot_tag != UBOOT_TAG_NONE &&
             uboot_arg_invalid((unsigned long)uboot_arg)) {
                pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
index 215f515442e03d53ee3a18ade4c62e2a06987b3b..b0aa8c02833137c42a95d2c48a187f45fc1fc479 100644 (file)
@@ -145,7 +145,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
        } else if (vec == ECR_V_PROTV) {
                if (cause_code == ECR_C_PROTV_INST_FETCH)
                        pr_cont("Execute from Non-exec Page\n");
-               else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+               else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
+                        IS_ENABLED(CONFIG_ISA_ARCOMPACT))
                        pr_cont("Misaligned r/w from 0x%08lx\n", address);
                else
                        pr_cont("%s access not allowed on page\n",
@@ -161,6 +162,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
                        pr_cont("Bus Error from Data Mem\n");
                else
                        pr_cont("Bus Error, check PRM\n");
+       } else if (vec == ECR_V_MISALIGN) {
+               pr_cont("Misaligned r/w from 0x%08lx\n", address);
 #endif
        } else if (vec == ECR_V_TRAP) {
                if (regs->ecr_param == 5)
index b1656d15609750910512c9e00799c8d736f665b2..f7537b466b23dea34ca3e5772239f96f206a9124 100644 (file)
@@ -8,4 +8,10 @@
 lib-y  := strchr-700.o strcpy-700.o strlen.o memcmp.o
 
 lib-$(CONFIG_ISA_ARCOMPACT)    += memcpy-700.o memset.o strcmp.o
-lib-$(CONFIG_ISA_ARCV2)                += memcpy-archs.o memset-archs.o strcmp-archs.o
+lib-$(CONFIG_ISA_ARCV2)                += memset-archs.o strcmp-archs.o
+
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+lib-$(CONFIG_ISA_ARCV2)                += memcpy-archs-unaligned.o
+else
+lib-$(CONFIG_ISA_ARCV2)                += memcpy-archs.o
+endif
diff --git a/arch/arc/lib/memcpy-archs-unaligned.S b/arch/arc/lib/memcpy-archs-unaligned.S
new file mode 100644 (file)
index 0000000..28993a7
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ARCv2 memcpy implementation optimized for unaligned memory access.
+ *
+ * Copyright (C) 2019 Synopsys
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_ARC_HAS_LL64
+# define LOADX(DST,RX)         ldd.ab  DST, [RX, 8]
+# define STOREX(SRC,RX)                std.ab  SRC, [RX, 8]
+# define ZOLSHFT               5
+# define ZOLAND                        0x1F
+#else
+# define LOADX(DST,RX)         ld.ab   DST, [RX, 4]
+# define STOREX(SRC,RX)                st.ab   SRC, [RX, 4]
+# define ZOLSHFT               4
+# define ZOLAND                        0xF
+#endif
+
+ENTRY_CFI(memcpy)
+       mov     r3, r0          ; don't clobber ret val
+
+       lsr.f   lp_count, r2, ZOLSHFT
+       lpnz    @.Lcopy32_64bytes
+       ;; LOOP START
+       LOADX   (r6, r1)
+       LOADX   (r8, r1)
+       LOADX   (r10, r1)
+       LOADX   (r4, r1)
+       STOREX  (r6, r3)
+       STOREX  (r8, r3)
+       STOREX  (r10, r3)
+       STOREX  (r4, r3)
+.Lcopy32_64bytes:
+
+       and.f   lp_count, r2, ZOLAND ;Last remaining 31 bytes
+       lpnz    @.Lcopyremainingbytes
+       ;; LOOP START
+       ldb.ab  r5, [r1, 1]
+       stb.ab  r5, [r3, 1]
+.Lcopyremainingbytes:
+
+       j       [blink]
+END_CFI(memcpy)
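
The new routine above copies in a main zero-overhead loop of four LOADX/STOREX pairs per iteration (32 or 64 bytes depending on LL64), then finishes with a byte-wise tail. A C-level reference of the same structure, assuming unaligned 64-bit accesses are permitted; this is an illustration only, not the kernel's memcpy:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    void *toy_memcpy(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            /* main loop: four 64-bit (possibly unaligned) copies per iteration */
            while (n >= 32) {
                    uint64_t a, b, c, e;

                    memcpy(&a, s,      8); memcpy(&b, s +  8, 8);
                    memcpy(&c, s + 16, 8); memcpy(&e, s + 24, 8);
                    memcpy(d,      &a, 8); memcpy(d +  8, &b, 8);
                    memcpy(d + 16, &c, 8); memcpy(d + 24, &e, 8);
                    s += 32; d += 32; n -= 32;
            }

            /* byte-wise tail, like the second zero-overhead loop above */
            while (n--)
                    *d++ = *s++;

            return dst;
    }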
index 8eff057efcaebeae04b1fb801c003418090551eb..2eaecfb063a7336f2a78c54e31ab09f2576c63f6 100644 (file)
@@ -26,8 +26,8 @@ config EZNPS_MTM_EXT
        help
          Here we add new hierarchy for CPUs topology.
          We got:
-               Core
-               Thread
+           Core
+           Thread
          At the new thread level each CPU represent one HW thread.
          At highest hierarchy each core contain 16 threads,
          any of them seem like CPU from Linux point of view.
@@ -35,10 +35,10 @@ config EZNPS_MTM_EXT
          core and HW scheduler round robin between them.
 
 config EZNPS_MEM_ERROR_ALIGN
-       bool "ARC-EZchip Memory error as an exception"
-       depends on EZNPS_MTM_EXT
-       default n
-       help
+       bool "ARC-EZchip Memory error as an exception"
+       depends on EZNPS_MTM_EXT
+       default n
+       help
          On the real chip of the NPS, user memory errors are handled
          as a machine check exception, which is fatal, whereas on
          simulator platform for NPS, is handled as a Level 2 interrupt
index 054ead960f983a99a9f241ce1427fe0e1cd6cb8a..850b4805e2d171436e539b326867d6ce08a6f9d6 100644 (file)
@@ -596,6 +596,7 @@ config ARCH_DAVINCI
        select HAVE_IDE
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
+       select REGMAP_MMIO
        select RESET_CONTROLLER
        select SPARSE_IRQ
        select USE_OF
index 5641d162dfdb0c106eed6f7f4dc4f7c120930970..28e7513ce61713a084bc5f91f96cc2426d3f50a8 100644 (file)
@@ -93,7 +93,7 @@
 };
 
 &hdmi {
-       hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+       hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
 };
 
 &pwm {
index b715ab0fa1ffc09c24e101b4f506b9f9bb900550..e8d800fec63790925701a460afa8415c9706d8dc 100644 (file)
                        reg = <2>;
                };
 
-               switch@0 {
+               switch@10 {
                        compatible = "qca,qca8334";
-                       reg = <0>;
+                       reg = <10>;
 
                        switch_ports: ports {
                                #address-cells = <1>;
                                ethphy0: port@0 {
                                        reg = <0>;
                                        label = "cpu";
-                                       phy-mode = "rgmii";
+                                       phy-mode = "rgmii-id";
                                        ethernet = <&fec>;
 
                                        fixed-link {
index 1d1b4bd0670ffd094d2939ed9c91095d8ae8ba39..a4217f564a5347a568830e2032dd3fac2ae1c80f 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        vmcc-supply = <&reg_sd3_vmmc>;
        cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
-       bus-witdh = <4>;
+       bus-width = <4>;
        no-1-8-v;
        status = "okay";
 };
        pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
        vmcc-supply = <&reg_sd4_vmmc>;
-       bus-witdh = <8>;
+       bus-width = <8>;
        no-1-8-v;
        non-removable;
        status = "okay";
index 433bf09a1954c5ff05e1f3b3255c326fb69bf615..027df06c5dc7d60c9711ebef8b9333e2fe0c9a58 100644 (file)
@@ -91,6 +91,7 @@
        pinctrl-0 = <&pinctrl_enet>;
        phy-handle = <&ethphy>;
        phy-mode = "rgmii";
+       phy-reset-duration = <10>; /* in msecs */
        phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
        phy-supply = <&vdd_eth_io_reg>;
        status = "disabled";
index f6fb6783c1933154049768297372832f68586a04..54cfe72295aa47a278ee8d5ffae5c688b6d8b4fa 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2016 Freescale Semiconductor, Inc.
  * Copyright (C) 2017 NXP
index 04066f9cb8a31c643cba82ea337dd22cb7a85626..f2f6558a00f188937ca55ee5f44e689da5e1bf7a 100644 (file)
                gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
                gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
                /*
-                * It's not actually active high, but the frameworks assume
-                * the polarity of the passed-in GPIO is "normal" (active
-                * high) then actively drives the line low to select the
-                * chip.
+                * This chipselect is active high. Just setting the flags
+                * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings;
+                * it will be ignored, and only the special "spi-cs-high" flag
+                * really counts.
                 */
                cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+               spi-cs-high;
                num-chipselects = <1>;
 
                /*
index 8661dd9b064a5cdfd4a8801a8b98e9c9f45d7dc0..b37f8e675e4081b200bfd9b9a97d565efce1d1f7 100644 (file)
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_IIO=y
 CONFIG_FSL_MX25_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMX1=y
+CONFIG_PWM_IMX27=y
 CONFIG_EXT4_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_VFAT_FS=y
index 5586a5074a96b6a84165e32f59ea2fa0800b484a..50fb01d70b1030ca6d2f721b30eaa8078894b589 100644 (file)
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
 CONFIG_MPL3115=y
 CONFIG_PWM=y
 CONFIG_PWM_FSL_FTM=y
-CONFIG_PWM_IMX=y
+CONFIG_PWM_IMX27=y
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_VF610_OCOTP=y
 CONFIG_TEE=y
index 7d5a44a06648de2fd8e5e15beef19762d6925e81..f676592d840210558a5daf54e1f7c265be06a918 100644 (file)
@@ -90,7 +90,7 @@ void __init cns3xxx_map_io(void)
 /* used by entry-macro.S */
 void __init cns3xxx_init_irq(void)
 {
-       gic_init(0, 29, IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
+       gic_init(IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
                 IOMEM(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
 }
 
index bfeb25aaf9a2a7a48857a3896fb682d7d94568a8..326e870d712394fad445033defd8e3ff5975ebdd 100644 (file)
 #include "cpuidle.h"
 #include "hardware.h"
 
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
 {
-       if (atomic_inc_return(&master) == num_online_cpus()) {
-               /*
-                * With this lock, we prevent other cpu to exit and enter
-                * this function again and become the master.
-                */
-               if (!spin_trylock(&master_lock))
-                       goto idle;
+       spin_lock(&cpuidle_lock);
+       if (++num_idle_cpus == num_online_cpus())
                imx6_set_lpm(WAIT_UNCLOCKED);
-               cpu_do_idle();
-               imx6_set_lpm(WAIT_CLOCKED);
-               spin_unlock(&master_lock);
-               goto done;
-       }
+       spin_unlock(&cpuidle_lock);
 
-idle:
        cpu_do_idle();
-done:
-       atomic_dec(&master);
+
+       spin_lock(&cpuidle_lock);
+       if (num_idle_cpus-- == num_online_cpus())
+               imx6_set_lpm(WAIT_CLOCKED);
+       spin_unlock(&cpuidle_lock);
 
        return index;
 }
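
The rework above replaces the atomic "master" bookkeeping with a counter of idle CPUs protected by a spinlock: the last CPU to enter idle switches the SoC to WAIT_UNCLOCKED, and the first CPU to wake restores WAIT_CLOCKED. A condensed sketch of the pattern, where enter_low_power()/leave_low_power() are hypothetical stand-ins for the imx6_set_lpm() calls:

    #include <linux/cpumask.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(idle_lock);
    static int idle_cpus;

    static void cpu_enter_idle(void)
    {
            spin_lock(&idle_lock);
            if (++idle_cpus == num_online_cpus())
                    enter_low_power();      /* last CPU to go idle */
            spin_unlock(&idle_lock);

            cpu_do_idle();

            spin_lock(&idle_lock);
            /* post-decrement: only the first CPU to wake still sees the full count */
            if (idle_cpus-- == num_online_cpus())
                    leave_low_power();
            spin_unlock(&idle_lock);
    }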
index c7169c2f94c4fd8cc018caa790c7b170e778eaf3..08c7892866c2df48732d15b9aa64329d0b009b75 100644 (file)
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
                return;
 
        m4if_base = of_iomap(np, 0);
+       of_node_put(np);
        if (!m4if_base) {
                pr_err("Unable to map M4IF registers\n");
                return;
index 117b2541ef3d16dfe09ba67784e9faf3b43634e6..7e34b9eba5de151572ba479d73ccf82ba18e8beb 100644 (file)
@@ -159,7 +159,6 @@ config ARM64
        select IRQ_DOMAIN
        select IRQ_FORCED_THREADING
        select MODULES_USE_ELF_RELA
-       select MULTI_IRQ_HANDLER
        select NEED_DMA_MAP_STATE
        select NEED_SG_DMA_LENGTH
        select OF
index 70498a033cf57408ccdefe374c5fa8e1d22e785d..b5ca9c50876d9a23947dde5d7fe553104c9c0805 100644 (file)
@@ -27,6 +27,7 @@ config ARCH_BCM2835
        bool "Broadcom BCM2835 family"
        select TIMER_OF
        select GPIOLIB
+       select MFD_CORE
        select PINCTRL
        select PINCTRL_BCM2835
        select ARM_AMBA
index bb2045be8814036ddced1d4a7ec5b42951343832..97aeb946ed5e7473639ec94a498512d48a12ca8b 100644 (file)
                nvidia,default-trim = <0x9>;
                nvidia,dqs-trim = <63>;
                mmc-hs400-1_8v;
-               supports-cqe;
                status = "disabled";
        };
 
index 61a0afb74e6310b2b4c16bcf9939f6eab7db6258..1ea684af99c4a19b674f2ab90e38680584b09cf4 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the RZ/G2E (R8A774C0) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
                                 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index a69faa60ea4da4bb06a257af39881138a026c6d1..d2ad665fe2d925db040e50d2d9341b5535ddd167 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the R-Car E3 (R8A77990) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a77990-cpg-mssr.h>
                                 <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index 2afb1338b48a482c4a528a96ebda0ffb310fb987..5f1437099b9979ac983ae0896272229e2b04f1e3 100644 (file)
@@ -77,6 +77,7 @@
 #define ARM_CPU_IMP_QCOM               0x51
 #define ARM_CPU_IMP_NVIDIA             0x4E
 #define ARM_CPU_IMP_FUJITSU            0x46
+#define ARM_CPU_IMP_HISI               0x48
 
 #define ARM_CPU_PART_AEM_V8            0xD0F
 #define ARM_CPU_PART_FOUNDATION                0xD00
 
 #define FUJITSU_CPU_PART_A64FX         0x001
 
+#define HISI_CPU_PART_TSV110           0xD01
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
 #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
 #define MIDR_FUJITSU_ERRATUM_010001            MIDR_FUJITSU_A64FX
-#define MIDR_FUJITSU_ERRATUM_010001_MASK       (~MIDR_VARIANT(1))
+#define MIDR_FUJITSU_ERRATUM_010001_MASK       (~MIDR_CPU_VAR_REV(1, 0))
 #define TCR_CLEAR_FUJITSU_ERRATUM_010001       (TCR_NFD1 | TCR_NFD0)
 
 #ifndef __ASSEMBLY__
index e24e94d2876717b8ef3ac0311aa419049255134e..4061de10cea6ccb0ccaa890c4f2dd98d4a2bcd91 100644 (file)
@@ -963,6 +963,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+               MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
                { /* sentinel */ }
        };
        char const *str = "command line option";
index 7fb6f3aa5ceb7172c91277067da624085b158433..7a679caf45856e75c21860aaa7522c080fb48e41 100644 (file)
@@ -91,8 +91,6 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
        unsigned long probe_addr = (unsigned long)p->addr;
-       extern char __start_rodata[];
-       extern char __end_rodata[];
 
        if (probe_addr & 0x3)
                return -EINVAL;
@@ -100,10 +98,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        /* copy instruction */
        p->opcode = le32_to_cpu(*p->addr);
 
-       if (in_exception_text(probe_addr))
-               return -EINVAL;
-       if (probe_addr >= (unsigned long) __start_rodata &&
-           probe_addr <= (unsigned long) __end_rodata)
+       if (search_exception_tables(probe_addr))
                return -EINVAL;
 
        /* decode instruction */
@@ -476,26 +471,37 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
        return DBG_HOOK_HANDLED;
 }
 
-bool arch_within_kprobe_blacklist(unsigned long addr)
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
 {
-       if ((addr >= (unsigned long)__kprobes_text_start &&
-           addr < (unsigned long)__kprobes_text_end) ||
-           (addr >= (unsigned long)__entry_text_start &&
-           addr < (unsigned long)__entry_text_end) ||
-           (addr >= (unsigned long)__idmap_text_start &&
-           addr < (unsigned long)__idmap_text_end) ||
-           (addr >= (unsigned long)__hyp_text_start &&
-           addr < (unsigned long)__hyp_text_end) ||
-           !!search_exception_tables(addr))
-               return true;
-
-       if (!is_kernel_in_hyp_mode()) {
-               if ((addr >= (unsigned long)__hyp_idmap_text_start &&
-                   addr < (unsigned long)__hyp_idmap_text_end))
-                       return true;
-       }
-
-       return false;
+       int ret;
+
+       ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+                                       (unsigned long)__entry_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+                                       (unsigned long)__irqentry_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
+                                       (unsigned long)__exception_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
+                                       (unsigned long)__idmap_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
+                                       (unsigned long)__hyp_text_end);
+       if (ret || is_kernel_in_hyp_mode())
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
+                                       (unsigned long)__hyp_idmap_text_end);
+       return ret;
 }
 
 void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
index 1a29f2695ff24849304ad1f2106203b5114c5071..d908b5e9e949c6745598abd90f6c3afef89ae040 100644 (file)
@@ -143,6 +143,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
 
 static noinline void __save_stack_trace(struct task_struct *tsk,
        struct stack_trace *trace, unsigned int nosched)
index 46eddbec8d9fdec090ee273e1e5ba3cd573ba612..0ab95dd431b3c0b33fd400dd631ae183c796d8c2 100644 (file)
@@ -24,6 +24,7 @@ void __init bcm47xx_workarounds(void)
        case BCM47XX_BOARD_NETGEAR_WNR3500L:
                bcm47xx_workarounds_enable_usb_power(12);
                break;
+       case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
        case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
                bcm47xx_workarounds_enable_usb_power(21);
                break;
index e77672539e8ed8f6744c03d49eaeb20c76d80b78..e4456e450f946d5c9c55b52d78aeee60d3a2a0e2 100644 (file)
 #endif
 
 #ifdef CONFIG_CPU_MICROMIPS
-#define NOP_INSN "nop32"
+#define B_INSN "b32"
 #else
-#define NOP_INSN "nop"
+#define B_INSN "b"
 #endif
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\t" NOP_INSN "\n\t"
-               "nop\n\t"
+       asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+               "2:\tnop\n\t"
                ".pushsection __jump_table,  \"aw\"\n\t"
                WORD_INSN " 1b, %l[l_yes], %0\n\t"
                ".popsection\n\t"
index 6aa49c10f88f7e2073bc2bd4202d02cab6b0f46a..f0ccb5b90ce95b1e927f46ad850560e3745fea74 100644 (file)
 typedef long           __kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
-#if (_MIPS_SZLONG == 32)
-typedef struct {
-       long    val[2];
-} __kernel_fsid_t;
-#define __kernel_fsid_t __kernel_fsid_t
-#endif
-
 #include <asm-generic/posix_types.h>
 
 #endif /* _ASM_POSIX_TYPES_H */
index cb7e9ed7a453cd8982fca4da2b3ee659d52181cb..33ee0d18fb0adc21d952c98fbf77b5ed8c514d6a 100644 (file)
@@ -140,6 +140,13 @@ SECTIONS
        PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 #endif
 
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+       .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
+               *(.appended_dtb)
+               KEEP(*(.appended_dtb))
+       }
+#endif
+
 #ifdef CONFIG_RELOCATABLE
        . = ALIGN(4);
 
@@ -164,11 +171,6 @@ SECTIONS
        __appended_dtb = .;
        /* leave space for appended DTB */
        . += 0x100000;
-#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
-       .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
-               *(.appended_dtb)
-               KEEP(*(.appended_dtb))
-       }
 #endif
        /*
         * Align to 64K in attempt to eliminate holes before the
index 9e33e45aa17c5d6881d6bc8cd5ca3c90d42098d0..b213cecb8e3ac4e76573e334c42cbde7c88636f4 100644 (file)
@@ -103,7 +103,7 @@ static struct irqaction ip6_irqaction = {
 static struct irqaction cascade_irqaction = {
        .handler = no_action,
        .name = "cascade",
-       .flags = IRQF_NO_THREAD,
+       .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 };
 
 void __init mach_init_irq(void)
index d34ad1657d7b2c44cdb16683bfef359b651fd02d..598cdcdd13553dea4a80a9b72196dbee8987cd61 100644 (file)
@@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
 #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&  \
        defined (CONFIG_PPC_64K_PAGES)
 #define MAX_PHYSMEM_BITS        51
-#else
+#elif defined(CONFIG_SPARSEMEM)
 #define MAX_PHYSMEM_BITS        46
 #endif
 
index c5698a523bb189dee5650398603c4ac8a0f5bc27..23f7ed796f38829a054b5c0851b04e581990bcbf 100644 (file)
 /* Misc instructions for BPF compiler */
 #define PPC_INST_LBZ                   0x88000000
 #define PPC_INST_LD                    0xe8000000
+#define PPC_INST_LDX                   0x7c00002a
 #define PPC_INST_LHZ                   0xa0000000
 #define PPC_INST_LWZ                   0x80000000
 #define PPC_INST_LHBRX                 0x7c00062c
 #define PPC_INST_STB                   0x98000000
 #define PPC_INST_STH                   0xb0000000
 #define PPC_INST_STD                   0xf8000000
+#define PPC_INST_STDX                  0x7c00012a
 #define PPC_INST_STDU                  0xf8000001
 #define PPC_INST_STW                   0x90000000
 #define PPC_INST_STWU                  0x94000000
index 1afe90ade595e161016af3ca712b65f9990407f4..bbc06bd72b1f2497ef0a1120e631d4243f3f432a 100644 (file)
@@ -82,10 +82,10 @@ struct vdso_data {
        __u32 icache_block_size;                /* L1 i-cache block size     */
        __u32 dcache_log_block_size;            /* L1 d-cache log block size */
        __u32 icache_log_block_size;            /* L1 i-cache log block size */
-       __s32 wtom_clock_sec;                   /* Wall to monotonic clock */
-       __s32 wtom_clock_nsec;
-       struct timespec stamp_xtime;    /* xtime as at tb_orig_stamp */
-       __u32 stamp_sec_fraction;       /* fractional seconds of stamp_xtime */
+       __u32 stamp_sec_fraction;               /* fractional seconds of stamp_xtime */
+       __s32 wtom_clock_nsec;                  /* Wall to monotonic clock nsec */
+       __s64 wtom_clock_sec;                   /* Wall to monotonic clock sec */
+       struct timespec stamp_xtime;            /* xtime as at tb_orig_stamp */
        __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls  */
        __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 };
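
The reorder above widens wtom_clock_sec from __s32 to __s64 and regroups the surrounding 32-bit fields so the 64-bit member stays naturally aligned. A standalone check of that layout with a toy struct (not the real vdso_data):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Toy layout mirroring the reordered fields above. */
    struct toy_vdso_tail {
            uint32_t stamp_sec_fraction;
            int32_t  wtom_clock_nsec;
            int64_t  wtom_clock_sec;    /* 8-byte member, needs 8-byte alignment */
    };

    int main(void)
    {
            /* the two 32-bit fields fill the first 8 bytes, so the 64-bit
             * field lands at offset 8 with no padding */
            printf("%zu\n", offsetof(struct toy_vdso_tail, wtom_clock_sec));
            return 0;
    }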
index 6f1c11e0691f2dd937f77861f08a0778f91e8a30..7534ecff5e925b434e4613298503d66435326205 100644 (file)
@@ -24,9 +24,6 @@ BEGIN_MMU_FTR_SECTION
        li      r10,0
        mtspr   SPRN_SPRG_603_LRU,r10           /* init SW LRU tracking */
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
-       lis     r10, (swapper_pg_dir - PAGE_OFFSET)@h
-       ori     r10, r10, (swapper_pg_dir - PAGE_OFFSET)@l
-       mtspr   SPRN_SPRG_PGDIR, r10
 
 BEGIN_FTR_SECTION
        bl      __init_fpu_registers
index ce6a972f25849ea87774be809da0faa3d84d96f3..48051c8977c5603a1ac9f8b730c0283ab04497d8 100644 (file)
@@ -855,6 +855,9 @@ __secondary_start:
        li      r3,0
        stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
 #endif
+       lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
+       ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
+       mtspr   SPRN_SPRG_PGDIR, r4
 
        /* enable MMU and jump to start_secondary */
        li      r4,MSR_KERNEL
@@ -942,6 +945,9 @@ start_here:
        li      r3,0
        stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
 #endif
+       lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
+       ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
+       mtspr   SPRN_SPRG_PGDIR, r4
 
        /* stack */
        lis     r1,init_thread_union@ha
index 9b8631533e02a4559a4dfc4c23240db58e39192c..b33bafb8fcea1f7a964ad99e203ee0a2cf3103cb 100644 (file)
@@ -190,29 +190,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
 
-       if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
-               bool comma = false;
+       if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");
 
-               if (bcs) {
+               if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
-                       comma = true;
-               }
 
-               if (ccd) {
-                       if (comma)
-                               seq_buf_printf(&s, ", ");
-                       seq_buf_printf(&s, "Indirect branch cache disabled");
-                       comma = true;
-               }
-
-               if (comma)
+               if (bcs && ccd)
                        seq_buf_printf(&s, ", ");
 
-               seq_buf_printf(&s, "Software count cache flush");
+               if (ccd)
+                       seq_buf_printf(&s, "Indirect branch cache disabled");
+       } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+               seq_buf_printf(&s, "Mitigation: Software count cache flush");
 
                if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
-                       seq_buf_printf(&s, "(hardware accelerated)");
+                       seq_buf_printf(&s, " (hardware accelerated)");
        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
index a4ed9edfd5f0b694288478683858f0ed3f306516..1f324c28705bc799b48172c232d6f193e6520eae 100644 (file)
@@ -92,7 +92,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
         * At this point, r4,r5 contain our sec/nsec values.
         */
 
-       lwa     r6,WTOM_CLOCK_SEC(r3)
+       ld      r6,WTOM_CLOCK_SEC(r3)
        lwa     r9,WTOM_CLOCK_NSEC(r3)
 
        /* We now have our result in r6,r9. We create a fake dependency
@@ -125,7 +125,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
        bne     cr6,75f
 
        /* CLOCK_MONOTONIC_COARSE */
-       lwa     r6,WTOM_CLOCK_SEC(r3)
+       ld      r6,WTOM_CLOCK_SEC(r3)
        lwa     r9,WTOM_CLOCK_NSEC(r3)
 
        /* check if counter has updated */
index 1f13494efb2bfa9b50996ce6fda0711b9e933008..a6c491f18a04e2cfee3e61fbb6a0e37f955f5f16 100644 (file)
@@ -70,12 +70,12 @@ _GLOBAL(hash_page)
        lis     r0,KERNELBASE@h         /* check if kernel address */
        cmplw   0,r4,r0
        ori     r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
-       mfspr   r5, SPRN_SPRG_PGDIR     /* virt page-table root */
+       mfspr   r5, SPRN_SPRG_PGDIR     /* phys page-table root */
        blt+    112f                    /* assume user more likely */
-       lis     r5,swapper_pg_dir@ha    /* if kernel address, use */
-       addi    r5,r5,swapper_pg_dir@l  /* kernel page table */
+       lis     r5, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
+       addi    r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
        rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
-112:   tophys(r5, r5)
+112:
 #ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
        lwz     r8,0(r5)                /* get pmd entry */
index 549e9490ff2aabd79e7e8a7acc970d8a599cba79..dcac37745b05cfcc70b89fafcda172e3406956a0 100644 (file)
@@ -51,6 +51,8 @@
 #define PPC_LIS(r, i)          PPC_ADDIS(r, 0, i)
 #define PPC_STD(r, base, i)    EMIT(PPC_INST_STD | ___PPC_RS(r) |            \
                                     ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STDX(r, base, b)   EMIT(PPC_INST_STDX | ___PPC_RS(r) |           \
+                                    ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_STDU(r, base, i)   EMIT(PPC_INST_STDU | ___PPC_RS(r) |           \
                                     ___PPC_RA(base) | ((i) & 0xfffc))
 #define PPC_STW(r, base, i)    EMIT(PPC_INST_STW | ___PPC_RS(r) |            \
@@ -65,7 +67,9 @@
 #define PPC_LBZ(r, base, i)    EMIT(PPC_INST_LBZ | ___PPC_RT(r) |            \
                                     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LD(r, base, i)     EMIT(PPC_INST_LD | ___PPC_RT(r) |             \
-                                    ___PPC_RA(base) | IMM_L(i))
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_LDX(r, base, b)    EMIT(PPC_INST_LDX | ___PPC_RT(r) |            \
+                                    ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_LWZ(r, base, i)    EMIT(PPC_INST_LWZ | ___PPC_RT(r) |            \
                                     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHZ(r, base, i)    EMIT(PPC_INST_LHZ | ___PPC_RT(r) |            \
                                        ___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) |          \
                                        ___PPC_RA(a) | ___PPC_RB(b))
-
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
-#else
-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
-#endif
-
 #define PPC_CMPWI(a, i)                EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPDI(a, i)                EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPW(a, b)         EMIT(PPC_INST_CMPW | ___PPC_RA(a) |           \
index dc50a8d4b3b972a479aa2b00b1ea1c46db2977e2..21744d8aa053118f138f4a98d4097da2b2262fa6 100644 (file)
@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #define PPC_NTOHS_OFFS(r, base, i)     PPC_LHZ_OFFS(r, base, i)
 #endif
 
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+
 #define SEEN_DATAREF 0x10000 /* might call external helpers */
 #define SEEN_XREG    0x20000 /* X reg is used */
 #define SEEN_MEM     0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
index 3609be4692b35e948f3ceff98c39c1b3bdaea744..47f441f351a6211c854ab3e7569a8ffa9e3dd943 100644 (file)
@@ -68,6 +68,26 @@ static const int b2p[] = {
 /* PPC NVR range -- update this if we ever use NVRs below r27 */
 #define BPF_PPC_NVR_MIN                27
 
+/*
+ * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
+ * so ensure that it isn't in use already.
+ */
+#define PPC_BPF_LL(r, base, i) do {                                          \
+                               if ((i) % 4) {                                \
+                                       PPC_LI(b2p[TMP_REG_2], (i));          \
+                                       PPC_LDX(r, base, b2p[TMP_REG_2]);     \
+                               } else                                        \
+                                       PPC_LD(r, base, i);                   \
+                               } while(0)
+#define PPC_BPF_STL(r, base, i) do {                                         \
+                               if ((i) % 4) {                                \
+                                       PPC_LI(b2p[TMP_REG_2], (i));          \
+                                       PPC_STDX(r, base, b2p[TMP_REG_2]);    \
+                               } else                                        \
+                                       PPC_STD(r, base, i);                  \
+                               } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+
 #define SEEN_FUNC      0x1000 /* might call external helpers */
 #define SEEN_STACK     0x2000 /* uses BPF stack */
 #define SEEN_TAILCALL  0x4000 /* uses tail calls */
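
The TMP_REG_2 fallback in PPC_BPF_LL/PPC_BPF_STL above exists because the DS-form ld/std instructions can only encode displacements that are a multiple of 4, while the X-form ldx/stdx take the offset from a register. A toy selector showing the same decision; the register choice here is illustrative, not the JIT's emitter:

    #include <stdio.h>

    static void emit_ll(int dst, int base, int off)
    {
            if (off % 4)
                    /* displacement not encodable in DS-form: go via a scratch reg */
                    printf("li   r31, %d\nldx  r%d, r%d, r31\n", off, dst, base);
            else
                    printf("ld   r%d, %d(r%d)\n", dst, off, base);
    }

    int main(void)
    {
            emit_ll(3, 1, 16);      /* multiple of 4: single DS-form ld */
            emit_ll(3, 1, 14);      /* not a multiple of 4: li + X-form ldx */
            return 0;
    }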
index 4194d3cfb60cd0702487a83174f85b29806520ec..21a1dcd4b156c4bc926eccd5bd2375383a7705d2 100644 (file)
@@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *   goto out;
         */
-       PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+       PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
        PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
        PPC_BCC(COND_GT, out);
 
@@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        /* prog = array->ptrs[index]; */
        PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
        PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
-       PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+       PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
 
        /*
         * if (prog == NULL)
@@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        PPC_BCC(COND_EQ, out);
 
        /* goto *(prog->bpf_func + prologue_size); */
-       PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+       PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
 #ifdef PPC64_ELF_ABI_v1
        /* skip past the function descriptor */
        PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@@ -606,7 +606,7 @@ bpf_alu32_trunc:
                                 * the instructions generated will remain the
                                 * same across all passes
                                 */
-                               PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
+                               PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
                                PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
                                PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
                                break;
@@ -662,7 +662,7 @@ emit_clear:
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                src_reg = b2p[TMP_REG_1];
                        }
-                       PPC_STD(src_reg, dst_reg, off);
+                       PPC_BPF_STL(src_reg, dst_reg, off);
                        break;
 
                /*
@@ -709,7 +709,7 @@ emit_clear:
                        break;
                /* dst = *(u64 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_DW:
-                       PPC_LD(dst_reg, src_reg, off);
+                       PPC_BPF_LL(dst_reg, src_reg, off);
                        break;
 
                /*
index 1a6a7092d94209d4ee330003cfd3d2ccf713b916..e94a0a28b5ebe22b944ea73b1ac48bdcf52d9e63 100644 (file)
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
        return reg1;
 }
 
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void){};
+#endif
+
 #endif /* _ASM_S390_AP_H_ */
index 7d22a474a040ddd3d0e76c84075db6ab17bb2263..f74639a05f0ffc33f638c264af58c48933e36139 100644 (file)
@@ -252,11 +252,14 @@ do {                                                              \
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK  (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK        (is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK MMAP_RND_MASK
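
For scale, with s390's 4KB page size the masks work out as the updated comment states: BRK_RND_MASK 0x1fff covers 8K pages, i.e. about 32MB of program-break randomization for 64-bit, versus 0x7ff (2K pages, 8MB) for 31-bit, while the unchanged MMAP_RND_MASK 0x3ff80 still allows roughly 1GB of mmap-base randomization (0x3ff80 * 4KB is just under 0x40000000).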
index cc0947e08b6ffef09419a52eb04f817535016127..5b9f10b1e55dec03c2878a6ab510cb0d128002e5 100644 (file)
@@ -91,52 +91,53 @@ struct lowcore {
        __u64   hardirq_timer;                  /* 0x02e8 */
        __u64   softirq_timer;                  /* 0x02f0 */
        __u64   steal_timer;                    /* 0x02f8 */
-       __u64   last_update_timer;              /* 0x0300 */
-       __u64   last_update_clock;              /* 0x0308 */
-       __u64   int_clock;                      /* 0x0310 */
-       __u64   mcck_clock;                     /* 0x0318 */
-       __u64   clock_comparator;               /* 0x0320 */
-       __u64   boot_clock[2];                  /* 0x0328 */
+       __u64   avg_steal_timer;                /* 0x0300 */
+       __u64   last_update_timer;              /* 0x0308 */
+       __u64   last_update_clock;              /* 0x0310 */
+       __u64   int_clock;                      /* 0x0318 */
+       __u64   mcck_clock;                     /* 0x0320 */
+       __u64   clock_comparator;               /* 0x0328 */
+       __u64   boot_clock[2];                  /* 0x0330 */
 
        /* Current process. */
-       __u64   current_task;                   /* 0x0338 */
-       __u64   kernel_stack;                   /* 0x0340 */
+       __u64   current_task;                   /* 0x0340 */
+       __u64   kernel_stack;                   /* 0x0348 */
 
        /* Interrupt, DAT-off and restartstack. */
-       __u64   async_stack;                    /* 0x0348 */
-       __u64   nodat_stack;                    /* 0x0350 */
-       __u64   restart_stack;                  /* 0x0358 */
+       __u64   async_stack;                    /* 0x0350 */
+       __u64   nodat_stack;                    /* 0x0358 */
+       __u64   restart_stack;                  /* 0x0360 */
 
        /* Restart function and parameter. */
-       __u64   restart_fn;                     /* 0x0360 */
-       __u64   restart_data;                   /* 0x0368 */
-       __u64   restart_source;                 /* 0x0370 */
+       __u64   restart_fn;                     /* 0x0368 */
+       __u64   restart_data;                   /* 0x0370 */
+       __u64   restart_source;                 /* 0x0378 */
 
        /* Address space pointer. */
-       __u64   kernel_asce;                    /* 0x0378 */
-       __u64   user_asce;                      /* 0x0380 */
-       __u64   vdso_asce;                      /* 0x0388 */
+       __u64   kernel_asce;                    /* 0x0380 */
+       __u64   user_asce;                      /* 0x0388 */
+       __u64   vdso_asce;                      /* 0x0390 */
 
        /*
         * The lpp and current_pid fields form a
         * 64-bit value that is set as program
         * parameter with the LPP instruction.
         */
-       __u32   lpp;                            /* 0x0390 */
-       __u32   current_pid;                    /* 0x0394 */
+       __u32   lpp;                            /* 0x0398 */
+       __u32   current_pid;                    /* 0x039c */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x0398 */
-       __u32   softirq_pending;                /* 0x039c */
-       __u32   preempt_count;                  /* 0x03a0 */
-       __u32   spinlock_lockval;               /* 0x03a4 */
-       __u32   spinlock_index;                 /* 0x03a8 */
-       __u32   fpu_flags;                      /* 0x03ac */
-       __u64   percpu_offset;                  /* 0x03b0 */
-       __u64   vdso_per_cpu_data;              /* 0x03b8 */
-       __u64   machine_flags;                  /* 0x03c0 */
-       __u64   gmap;                           /* 0x03c8 */
-       __u8    pad_0x03d0[0x0400-0x03d0];      /* 0x03d0 */
+       __u32   cpu_nr;                         /* 0x03a0 */
+       __u32   softirq_pending;                /* 0x03a4 */
+       __u32   preempt_count;                  /* 0x03a8 */
+       __u32   spinlock_lockval;               /* 0x03ac */
+       __u32   spinlock_index;                 /* 0x03b0 */
+       __u32   fpu_flags;                      /* 0x03b4 */
+       __u64   percpu_offset;                  /* 0x03b8 */
+       __u64   vdso_per_cpu_data;              /* 0x03c0 */
+       __u64   machine_flags;                  /* 0x03c8 */
+       __u64   gmap;                           /* 0x03d0 */
+       __u8    pad_0x03d8[0x0400-0x03d8];      /* 0x03d8 */
 
        /* br %r1 trampoline */
        __u16   br_r1_trampoline;               /* 0x0400 */
index c6fad208c2fa5a8ffaad40d554c7597097d3e4fa..b6854812d2ed56f11cbd03865c16b26290518611 100644 (file)
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-       struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
        struct perf_event_attr *attr = &event->attr;
+       struct cpu_cf_events *cpuhw;
        enum cpumf_ctr_set i;
        int err = 0;
 
-       debug_sprintf_event(cf_diag_dbg, 5,
-                           "%s event %p cpu %d authorized %#x\n", __func__,
-                           event, event->cpu, cpuhw->info.auth_ctl);
+       debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+                           event, event->cpu);
 
        event->hw.config = attr->config;
        event->hw.config_base = 0;
-       local64_set(&event->count, 0);
 
-       /* Add all authorized counter sets to config_base */
+       /* Add all authorized counter sets to config_base. The
+        * hardware init function is either called per CPU or just once
+        * for all CPUs (event->cpu == -1).  This depends on whether
+        * counting is started for all CPUs or on a per-workload basis,
+        * where the perf event moves from one CPU to another CPU.
+        * Checking the authorization on any CPU is fine as the hardware
+        * applies the same authorization settings to all CPUs.
+        */
+       cpuhw = &get_cpu_var(cpu_cf_events);
        for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
                if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
                        event->hw.config_base |= cpumf_ctr_ctl[i];
+       put_cpu_var(cpu_cf_events);
 
        /* No authorized counter sets, nothing to count/sample */
        if (!event->hw.config_base) {
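
For reference, the get_cpu_var()/put_cpu_var() pairing introduced above is the usual way to read per-CPU data from a path that may run on any CPU: it disables preemption so the task cannot migrate between taking the pointer and reading the data. A minimal sketch, assuming a hypothetical per-CPU structure (my_pcpu_data and read_local_auth() are illustrative names, not part of this patch):

    #include <linux/percpu.h>
    #include <linux/types.h>

    /* Hypothetical per-CPU data; stands in for cpu_cf_events above. */
    struct my_pcpu_data {
            u32 auth_ctl;
    };
    static DEFINE_PER_CPU(struct my_pcpu_data, my_pcpu_data);

    static u32 read_local_auth(void)
    {
            struct my_pcpu_data *p;
            u32 auth;

            p = &get_cpu_var(my_pcpu_data);  /* disables preemption */
            auth = p->auth_ctl;              /* no migration while we read */
            put_cpu_var(my_pcpu_data);       /* re-enables preemption */

            return auth;
    }

Because the hardware applies the same authorization mask on every CPU, reading it on whichever CPU initializes the event gives the same answer, which is what the comment in the hunk relies on.
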
index 3fe1c77c361b98a9a4443bf1a2941f486d024030..bd197baf1dc337f018af35eeb19635b1c95998b7 100644 (file)
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
-       lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+       lc->user_timer = lc->system_timer =
+               lc->steal_timer = lc->avg_steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
index 98f850e00008e99a1e64e8f20a74bbaaf4910636..a69a0911ed0e82720b10b124d0153681f2c821ea 100644 (file)
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-       u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+       u64 timer, clock, user, guest, system, hardirq, softirq;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
        if (softirq)
                account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
-       steal = S390_lowcore.steal_timer;
-       if ((s64) steal > 0) {
-               S390_lowcore.steal_timer = 0;
-               account_steal_time(cputime_to_nsecs(steal));
-       }
-
        return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+       u64 steal, avg_steal;
+
        if (do_account_vtime(tsk))
                virt_timer_expire();
+
+       steal = S390_lowcore.steal_timer;
+       avg_steal = S390_lowcore.avg_steal_timer / 2;
+       if ((s64) steal > 0) {
+               S390_lowcore.steal_timer = 0;
+               account_steal_time(steal);
+               avg_steal += steal;
+       }
+       S390_lowcore.avg_steal_timer = avg_steal;
 }
 
 /*
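
The avg_steal_timer maintained above is a simple exponentially decaying average: every flush halves the previous value and adds whatever steal time accumulated since the last flush, so with a constant per-interval steal time s the reading converges toward 2*s and decays geometrically once stealing stops. A minimal sketch of that update rule (decay_avg_steal() is a hypothetical name, kernel-style types):

    #include <linux/types.h>

    /* Illustrative only; mirrors the update done in vtime_flush() above. */
    static u64 decay_avg_steal(u64 avg, u64 steal)
    {
            avg /= 2;               /* halve the previous average */
            if ((s64) steal > 0)
                    avg += steal;   /* add the newly observed steal time */
            return avg;
    }
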
index 315a67b8896b9588c78b6128014f4702cdf965af..90154df8f12504e501a2c2b287e4fabba955f033 100644 (file)
@@ -13,8 +13,9 @@
  */
 
 #include <linux/types.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <linux/errno.h>
+#include <linux/limits.h>
 #include <asm/asm.h>
 #include "ctype.h"
 #include "string.h"
index 6461a16b45594b144f8fe2390fe42973e4fc8725..e4ba467a9fc65b0ec21d7c9f868c9d013a8fddd9 100644 (file)
@@ -103,9 +103,13 @@ static int hv_cpu_init(unsigned int cpu)
        u64 msr_vp_index;
        struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
        void **input_arg;
+       struct page *pg;
 
        input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
-       *input_arg = page_address(alloc_page(GFP_KERNEL));
+       pg = alloc_page(GFP_KERNEL);
+       if (unlikely(!pg))
+               return -ENOMEM;
+       *input_arg = page_address(pg);
 
        hv_get_vp_index(msr_vp_index);
 
index 3417110574c12212f7185213bfe566ac2b541a1e..31c379c1da41c48b7f4ee89d3c100d62895ff5b2 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CPU_DEVICE_ID
-#define _CPU_DEVICE_ID 1
+#ifndef _ASM_X86_CPU_DEVICE_ID
+#define _ASM_X86_CPU_DEVICE_ID
 
 /*
  * Declare drivers belonging to specific x86 CPUs
@@ -9,8 +9,6 @@
 
 #include <linux/mod_devicetable.h>
 
-extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
-
 /*
  * Match specific microcode revisions.
  *
@@ -22,21 +20,22 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
  */
 
 struct x86_cpu_desc {
-       __u8    x86_family;
-       __u8    x86_vendor;
-       __u8    x86_model;
-       __u8    x86_stepping;
-       __u32   x86_microcode_rev;
+       u8      x86_family;
+       u8      x86_vendor;
+       u8      x86_model;
+       u8      x86_stepping;
+       u32     x86_microcode_rev;
 };
 
-#define INTEL_CPU_DESC(mod, step, rev) {                       \
-       .x86_family = 6,                                        \
-       .x86_vendor = X86_VENDOR_INTEL,                         \
-       .x86_model = mod,                                       \
-       .x86_stepping = step,                                   \
-       .x86_microcode_rev = rev,                               \
+#define INTEL_CPU_DESC(model, stepping, revision) {            \
+       .x86_family             = 6,                            \
+       .x86_vendor             = X86_VENDOR_INTEL,             \
+       .x86_model              = (model),                      \
+       .x86_stepping           = (stepping),                   \
+       .x86_microcode_rev      = (revision),                   \
 }
 
+extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
 extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
 
-#endif
+#endif /* _ASM_X86_CPU_DEVICE_ID */
index aaedd73ea2c66a21c2456e8519841ebd6c56b18e..df700a6cc869bb6309bc1069e3e3c030b47281f7 100644 (file)
@@ -3,19 +3,6 @@
  * NSC/Cyrix CPU indexed register access. Must be inlined instead of
  * macros to ensure correct access ordering
  * Access order is always 0x22 (=offset), 0x23 (=value)
- *
- * When using the old macros a line like
- *   setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
- * gets expanded to:
- *  do {
- *    outb((CX86_CCR2), 0x22);
- *    outb((({
- *        outb((CX86_CCR2), 0x22);
- *        inb(0x23);
- *    }) | 0x88), 0x23);
- *  } while (0);
- *
- * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
  */
 
 static inline u8 getCx86(u8 reg)
@@ -29,11 +16,3 @@ static inline void setCx86(u8 reg, u8 data)
        outb(reg, 0x22);
        outb(data, 0x23);
 }
-
-#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86_old(reg, data) do { \
-       outb((reg), 0x22); \
-       outb((data), 0x23); \
-} while (0)
-
index 58176b56354e4977ff20fb6140ef119f912f9fb8..294ed4392a0ecd965b6b527ba499d3c1be1d1fce 100644 (file)
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "AGP: " fmt
 
 #include <linux/kernel.h>
+#include <linux/kcore.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/memblock.h>
@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
 
 int fix_aperture __initdata = 1;
 
-#ifdef CONFIG_PROC_VMCORE
+#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
 /*
  * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
  * use the same range because it will remain configured in the northbridge.
@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
  */
 static unsigned long aperture_pfn_start, aperture_page_count;
 
-static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+static int gart_mem_pfn_is_ram(unsigned long pfn)
 {
        return likely((pfn < aperture_pfn_start) ||
                      (pfn >= aperture_pfn_start + aperture_page_count));
 }
 
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void __init exclude_from_core(u64 aper_base, u32 aper_order)
 {
        aperture_pfn_start = aper_base >> PAGE_SHIFT;
        aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
-       WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+#ifdef CONFIG_PROC_VMCORE
+       WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
+#ifdef CONFIG_PROC_KCORE
+       WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
 }
 #else
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void exclude_from_core(u64 aper_base, u32 aper_order)
 {
 }
 #endif
@@ -474,7 +480,7 @@ out:
                         * may have allocated the range over its e820 RAM
                         * and fixed up the northbridge
                         */
-                       exclude_from_vmcore(last_aper_base, last_aper_order);
+                       exclude_from_core(last_aper_base, last_aper_order);
 
                        return 1;
                }
@@ -520,7 +526,7 @@ out:
         * overlap with the first kernel's memory. We can't access the
         * range through vmcore even though it should be part of the dump.
         */
-       exclude_from_vmcore(aper_alloc, aper_order);
+       exclude_from_core(aper_alloc, aper_order);
 
        /* Fix up the north bridges */
        for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
index d12226f60168e1d844be6dded3c666509326e7a2..1d9b8aaea06c8c9c7d14b0c30d51ded3bac83d7d 100644 (file)
@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
 
        /* Load/Store Serialize to mem access disable (=reorder it) */
-       setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
+       setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
        /* set load/store serialize from 1GB to 4GB */
        ccr3 |= 0xe0;
        setCx86(CX86_CCR3, ccr3);
@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
        pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
        /* CCR2 bit 2: unlock NW bit */
-       setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
        /* set 'Not Write-through' */
        write_cr0(read_cr0() | X86_CR0_NW);
        /* CCR2 bit 2: lock NW bit and set WT1 */
-       setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
 }
 
 /*
@@ -153,14 +153,14 @@ static void geode_configure(void)
        local_irq_save(flags);
 
        /* Suspend on halt power saving and enable #SUSP pin */
-       setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
 
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN */
 
 
        /* FPU fast, DTE cache, Mem bypass */
-       setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
+       setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
        setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
 
        set_cx86_memwb();
@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
                /* GXm supports extended cpuid levels 'ala' AMD */
                if (c->cpuid_level == 2) {
                        /* Enable cxMMX extensions (GX1 Datasheet 54) */
-                       setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
 
                        /*
                         * GXm : 0x30 ... 0x5f GXm  datasheet 51
@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
                if (dir1 > 7) {
                        dir0_msn++;  /* M II */
                        /* Enable MMX extensions (App note 108) */
-                       setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
                } else {
                        /* A 6x86MX - it has the bug. */
                        set_cpu_bug(c, X86_BUG_COMA);
index 97f9ada9cedaf4e7cde47e819f7b4a0d6ac39d77..5260185cbf7ba1a77ecc30bdd61a99a2338b159b 100644 (file)
@@ -608,6 +608,8 @@ static int microcode_reload_late(void)
        if (ret > 0)
                microcode_check();
 
+       pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);
+
        return ret;
 }
 
index dfd3aca82c61cbe345f462a62ea6b52fa0b81516..fb32925a2e62bc462c22429c4a5a5c73fca3c028 100644 (file)
@@ -905,6 +905,8 @@ int __init hpet_enable(void)
                return 0;
 
        hpet_set_mapping();
+       if (!hpet_virt_address)
+               return 0;
 
        /*
         * Read the period and check for a sane value:
index ff9bfd40429efeb7b4868d370628356e28265ec1..d7308302100276539e5c78798ee7ff013d719aa8 100644 (file)
@@ -354,6 +354,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 #endif
        default:
                WARN_ON_ONCE(1);
+               return -EINVAL;
        }
 
        /*
index 3482460d984d0395830c6227a39e639a5aa87b07..1bfe5c6e6cfe1a1e414b20dc4c92bce29ef9fbf7 100644 (file)
@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
                        mpf_base = base;
                        mpf_found = true;
 
-                       pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
-                               base, base + sizeof(*mpf) - 1, mpf);
+                       pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
+                               base, base + sizeof(*mpf) - 1);
 
                        memblock_reserve(base, sizeof(*mpf));
                        if (mpf->physptr)
index 9baca3e054bef32acd14a538d14289adee71fee0..e7925d668b680269fb2442766deaf416dc42f9a1 100644 (file)
@@ -94,7 +94,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
                                    : "m" (*(unsigned long *)buff), 
                                    "r" (zero),  "0" (result));
                                --count; 
-                                       buff += 8;
+                               buff += 8;
                        }
                        result = add32_with_carry(result>>32,
                                                  result&0xffffffff); 
index 4fee5c3003ed78ab9566fe92f09e0d14968c9865..139b28a01ce47f90b770b0eea4a98f35664a6ef5 100644 (file)
@@ -77,7 +77,7 @@ static void __init pti_print_if_secure(const char *reason)
                pr_info("%s\n", reason);
 }
 
-enum pti_mode {
+static enum pti_mode {
        PTI_AUTO = 0,
        PTI_FORCE_OFF,
        PTI_FORCE_ON
@@ -602,7 +602,7 @@ static void pti_clone_kernel_text(void)
        set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
 }
 
-void pti_set_kernel_image_nonglobal(void)
+static void pti_set_kernel_image_nonglobal(void)
 {
        /*
         * The identity map is created with PMDs, regardless of the
index 71a78d9fb8b722d767f9062813e344a3bc8b90b7..b64cedc7f87cf1cf5f24bf4c50c808ae6a59f210 100644 (file)
@@ -849,20 +849,14 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
        size = bio_add_page(bio, bv->bv_page, len,
                                bv->bv_offset + iter->iov_offset);
        if (size == len) {
-               struct page *page;
-               int i;
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+                       struct page *page;
+                       int i;
+
+                       mp_bvec_for_each_page(page, bv, i)
+                               get_page(page);
+               }
 
-               /*
-                * For the normal O_DIRECT case, we could skip grabbing this
-                * reference and then not have to put them again when IO
-                * completes. But this breaks some in-kernel users, like
-                * splicing to/from a loop device, where we release the pipe
-                * pages unconditionally. If we can fix that case, we can
-                * get rid of the get here and the need to call
-                * bio_release_pages() at IO completion time.
-                */
-               mp_bvec_for_each_page(page, bv, i)
-                       get_page(page);
                iov_iter_advance(iter, size);
                return 0;
        }
@@ -925,10 +919,12 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  * This takes either an iterator pointing to user memory, or one pointing to
  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
  * map them into the kernel. On IO completion, the caller should put those
- * pages. For now, when adding kernel pages, we still grab a reference to the
- * page. This isn't strictly needed for the common case, but some call paths
- * end up releasing pages from eg a pipe and we can't easily control these.
- * See comment in __bio_iov_bvec_add_pages().
+ * pages. If we're adding kernel pages, and the caller told us it's safe to
+ * do so, we just have to add the pages to the bio directly. We don't grab an
+ * extra reference to those pages (the user should already have that), and we
+ * don't put the page on IO completion. The caller needs to check if the bio is
+ * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
+ * released.
  *
  * The function tries, but does not guarantee, to pin as many pages as
  * fit into the bio, or are requested in *iter, whatever is smaller. If
@@ -940,6 +936,13 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        const bool is_bvec = iov_iter_is_bvec(iter);
        unsigned short orig_vcnt = bio->bi_vcnt;
 
+       /*
+        * If this is a BVEC iter, then the pages are kernel pages. If the
+        * caller asked us not to take page references, don't release the
+        * pages on IO completion either.
+        */
+       if (is_bvec && iov_iter_bvec_no_ref(iter))
+               bio_set_flag(bio, BIO_NO_PAGE_REF);
+
        do {
                int ret;
 
@@ -1696,7 +1699,8 @@ static void bio_dirty_fn(struct work_struct *work)
                next = bio->bi_private;
 
                bio_set_pages_dirty(bio);
-               bio_release_pages(bio);
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+                       bio_release_pages(bio);
                bio_put(bio);
        }
 }
@@ -1713,7 +1717,8 @@ void bio_check_pages_dirty(struct bio *bio)
                        goto defer;
        }
 
-       bio_release_pages(bio);
+       if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+               bio_release_pages(bio);
        bio_put(bio);
        return;
 defer:
index 77f37ef8ef06a68da12d380eec7f790030313ad4..617a2b3f758219b3dc7d9b6a592a68eea8edb060 100644 (file)
@@ -1736,8 +1736,8 @@ out:
 
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
- * @q - the request queue IO was submitted on
- * @use_memdelay - do we charge this to memory delay for PSI
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
  *
  * This is called by the IO controller when we know there's delay accumulated
  * for the blkg for this task.  We do not pass the blkg because there are places
@@ -1769,8 +1769,9 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
 
 /**
  * blkcg_add_delay - add delay to this blkg
- * @now - the current time in nanoseconds
- * @delta - how many nanoseconds of delay to add
+ * @blkg: blkg of interest
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
  *
  * Charge @delta to the blkg's current delay accumulation.  This is used to
  * throttle tasks if an IO controller thinks we need more throttling.
index 6e0f2d97fc6d8f0a5b14e6dbea23f817706bef7a..d95f9489201526081abf8b8bdcc65a525cf4c179 100644 (file)
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        } else {
-               blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+               blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = -1;
        }
 
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 
        if (q->elevator) {
                WARN_ON(rq->tag < 0);
-               blk_mq_put_driver_tag_hctx(hctx, rq);
+               blk_mq_put_driver_tag(rq);
        }
 
        /*
index 2620baa1f6993db5e6706a36240ab4c6a2931038..507212d75ee2c473c1ca8fd38f9eeedc1d6894ee 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/blk-mq.h>
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
+#include "blk.h"
 
 #define DEFAULT_SCALE_COOKIE 1000000U
 
index a9c181603cbda2df9f9b59d9c447b19d3f9c2889..3ff3d7b4996973458fa44a89133ed4ec5b65b2d4 100644 (file)
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 }
 
 /*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctxs, the dispatch list or the elevator
+ * have pending work in this hardware queue.
  */
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
@@ -782,7 +783,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
 }
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
@@ -1072,7 +1072,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
        spin_lock(&hctx->dispatch_wait_lock);
-       list_del_init(&wait->entry);
+       if (!list_empty(&wait->entry)) {
+               struct sbitmap_queue *sbq;
+
+               list_del_init(&wait->entry);
+               sbq = &hctx->tags->bitmap_tags;
+               atomic_dec(&sbq->ws_active);
+       }
        spin_unlock(&hctx->dispatch_wait_lock);
 
        blk_mq_run_hw_queue(hctx, true);
@@ -1088,13 +1094,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                                 struct request *rq)
 {
+       struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
        struct wait_queue_head *wq;
        wait_queue_entry_t *wait;
        bool ret;
 
        if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
-               if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-                       set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+               blk_mq_sched_mark_restart_hctx(hctx);
 
                /*
                 * It's possible that a tag was freed in the window between the
@@ -1111,7 +1117,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
        if (!list_empty_careful(&wait->entry))
                return false;
 
-       wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+       wq = &bt_wait_ptr(sbq, hctx)->wait;
 
        spin_lock_irq(&wq->lock);
        spin_lock(&hctx->dispatch_wait_lock);
@@ -1121,6 +1127,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                return false;
        }
 
+       atomic_inc(&sbq->ws_active);
        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq, wait);
 
@@ -1141,6 +1148,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
         * someone else gets the wakeup.
         */
        list_del_init(&wait->entry);
+       atomic_dec(&sbq->ws_active);
        spin_unlock(&hctx->dispatch_wait_lock);
        spin_unlock_irq(&wq->lock);
 
@@ -2857,7 +2865,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        /*
         * Default to classic polling
         */
-       q->poll_nsec = -1;
+       q->poll_nsec = BLK_MQ_POLL_CLASSIC;
 
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
@@ -3392,7 +3400,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 {
        struct request *rq;
 
-       if (q->poll_nsec == -1)
+       if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                return false;
 
        if (!blk_qc_t_is_internal(cookie))
index c11353a3749dc3422cb564e848e7225634414a6c..d704fc7766f45458fd7f186a0111c859e09baafc 100644 (file)
@@ -41,6 +41,8 @@ void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+                               bool kick_requeue_list);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
@@ -222,15 +224,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
        }
 }
 
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
-                                      struct request *rq)
-{
-       if (rq->tag == -1 || rq->internal_tag == -1)
-               return;
-
-       __blk_mq_put_driver_tag(hctx, rq);
-}
-
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
        if (rq->tag == -1 || rq->internal_tag == -1)
index 59685918167e51d80a2808861b3f2f7744dec3a7..422327089e0fd963dbfaf346be68f7951da15df2 100644 (file)
@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
 {
        int val;
 
-       if (q->poll_nsec == -1)
-               val = -1;
+       if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+               val = BLK_MQ_POLL_CLASSIC;
        else
                val = q->poll_nsec / 1000;
 
@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
        if (err < 0)
                return err;
 
-       if (val == -1)
-               q->poll_nsec = -1;
-       else
+       if (val == BLK_MQ_POLL_CLASSIC)
+               q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+       else if (val >= 0)
                q->poll_nsec = val * 1000;
+       else
+               return -EINVAL;
 
        return count;
 }
index 78db97687f26a1512130ffadda01f3372dd4ae34..c4b06cc075f937f8a4c8b4c7b76cb3344b25d0f0 100644 (file)
@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
        match.hrv = hrv;
 
        dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+       put_device(dev);
        return !!dev;
 }
 EXPORT_SYMBOL(acpi_dev_present);
index 8685882da64cdaf60dcbac09d9c61735905b5300..4b9c7ca492e6db85dad979a67c7baed7cedd972d 100644 (file)
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
        size_t object_size = 0;
 
        read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-       if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+       if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+           !IS_ALIGNED(offset, sizeof(u32)))
                return 0;
        binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
                                      offset, read_size);
index 6389467670a0bc171522a2035ae4788bb700d616..195f120c4e8c9aefa9f6e57e8ce400a8ddde95fb 100644 (file)
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+       mm = alloc->vma_vm_mm;
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!down_write_trylock(&mm->mmap_sem))
+               goto err_down_write_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);
-       if (vma) {
-               if (!mmget_not_zero(alloc->vma_vm_mm))
-                       goto err_mmget;
-               mm = alloc->vma_vm_mm;
-               if (!down_read_trylock(&mm->mmap_sem))
-                       goto err_down_write_mmap_sem_failed;
-       }
 
        list_lru_isolate(lru, item);
        spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                zap_page_range(vma, page_addr, PAGE_SIZE);
 
                trace_binder_unmap_user_end(alloc, index);
-
-               up_read(&mm->mmap_sem);
-               mmput(mm);
        }
+       up_write(&mm->mmap_sem);
+       mmput(mm);
 
        trace_binder_unmap_kernel_start(alloc, index);
 
index b3ed8f9953a862ea3ae67ef065ca5469330a44e0..173e6f2dd9af0f12afdc1fee7e372cfa4291e0aa 100644 (file)
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-       char buf[16];
+       char *buf;
        unsigned int ret;
-       struct rm_feature_desc *desc = (void *)(buf + 8);
+       struct rm_feature_desc *desc;
        struct ata_taskfile tf;
        static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
                        2,      /* only 1 feature descriptor requested */
                        0, 3,   /* 3, removable medium feature */
                        0, 0, 0,/* reserved */
-                       0, sizeof(buf),
+                       0, 16,
                        0, 0, 0,
        };
 
+       buf = kzalloc(16, GFP_KERNEL);
+       if (!buf)
+               return ODD_MECH_TYPE_UNSUPPORTED;
+       desc = (void *)(buf + 8);
+
        ata_tf_init(dev, &tf);
        tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;
        tf.protocol = ATAPI_PROT_PIO;
-       tf.lbam = sizeof(buf);
+       tf.lbam = 16;
 
        ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-                               buf, sizeof(buf), 0);
-       if (ret)
+                               buf, 16, 0);
+       if (ret) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (be16_to_cpu(desc->feature_code) != 3)
+       if (be16_to_cpu(desc->feature_code) != 3) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_SLOT;
-       else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+       } else if (desc->mech_type == 1 && desc->load == 0 &&
+                  desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_DRAWER;
-       else
+       } else {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 }
 
 /* Test if ODD is zero power ready by sense code */
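
The change above moves the 16-byte response buffer off the stack because it is handed to the device as a DMA target, and stack memory is not a valid DMA buffer (notably with vmapped stacks). A minimal sketch of the resulting allocate-check-free shape; issue_get_configuration() and parse_feature_desc() are hypothetical stand-ins for the ata_exec_internal() call and descriptor parsing in the patch:

    #include <linux/libata.h>       /* struct ata_device */
    #include <linux/slab.h>         /* kzalloc(), kfree() */

    static int read_removable_medium_feature(struct ata_device *dev)
    {
            void *buf;
            int ret;

            buf = kzalloc(16, GFP_KERNEL);          /* DMA-safe, unlike the stack */
            if (!buf)
                    return -ENOMEM;

            ret = issue_get_configuration(dev, buf, 16);    /* hypothetical */
            if (!ret)
                    ret = parse_feature_desc(buf + 8);      /* hypothetical */

            kfree(buf);                             /* freed on every path */
            return ret;
    }
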
index 57410f9c5d44cc11b9156159a49bacf6173168ab..c52c738e554a2dfe8e660973ce8a2685d9a1934b 100644 (file)
@@ -164,9 +164,7 @@ config ARM_CHARLCD
          line and the Linux version on the second line, but that's
          still useful.
 
-endif # AUXDISPLAY
-
-menuconfig PANEL
+menuconfig PARPORT_PANEL
        tristate "Parallel port LCD/Keypad Panel support"
        depends on PARPORT
        select CHARLCD
@@ -178,7 +176,7 @@ menuconfig PANEL
          compiled as a module, or linked into the kernel and started at boot.
          If you don't understand what all this is about, say N.
 
-if PANEL
+if PARPORT_PANEL
 
 config PANEL_PARPORT
        int "Default parallel port number (0=LPT1)"
@@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL
 
          Default for the 'BL' pin in custom profile is '0' (uncontrolled).
 
+endif # PARPORT_PANEL
+
 config PANEL_CHANGE_MESSAGE
        bool "Change LCD initialization message ?"
+       depends on CHARLCD
        default "n"
        ---help---
          This allows you to replace the boot message indicating the kernel version
@@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE
          An empty message will only clear the display at driver init time. Any other
          printf()-formatted message is valid with newline and escape codes.
 
-endif # PANEL
+choice
+       prompt "Backlight initial state"
+       default CHARLCD_BL_FLASH
+
+       config CHARLCD_BL_OFF
+               bool "Off"
+               help
+                 Backlight is initially turned off
+
+       config CHARLCD_BL_ON
+               bool "On"
+               help
+                 Backlight is initially turned on
+
+       config CHARLCD_BL_FLASH
+               bool "Flash"
+               help
+                 Backlight is flashed briefly on init
+
+endchoice
+
+endif # AUXDISPLAY
+
+config PANEL
+       tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
+       depends on PARPORT
+       select AUXDISPLAY
+       select PARPORT_PANEL
 
 config CHARLCD
        tristate "Character LCD core support" if COMPILE_TEST
index 7ac6776ca3f674683c538aca2d7473198e01a3a9..cf54b5efb07e00ae1249dfbc95ef4c88a0f08386 100644 (file)
@@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B)      += cfag12864b.o cfag12864bfb.o
 obj-$(CONFIG_IMG_ASCII_LCD)    += img-ascii-lcd.o
 obj-$(CONFIG_HD44780)          += hd44780.o
 obj-$(CONFIG_HT16K33)          += ht16k33.o
-obj-$(CONFIG_PANEL)             += panel.o
+obj-$(CONFIG_PARPORT_PANEL)    += panel.o
index 60e0b772673f3bd0c631efb6e45b0f624f75aa8a..92745efefb540e5d723f5bbaa3aefd11735b0f29 100644 (file)
@@ -91,7 +91,7 @@ struct charlcd_priv {
        unsigned long long drvdata[0];
 };
 
-#define to_priv(p)     container_of(p, struct charlcd_priv, lcd)
+#define charlcd_to_priv(p)     container_of(p, struct charlcd_priv, lcd)
 
 /* Device single-open policy control */
 static atomic_t charlcd_available = ATOMIC_INIT(1);
@@ -105,7 +105,7 @@ static void long_sleep(int ms)
 /* turn the backlight on or off */
 static void charlcd_backlight(struct charlcd *lcd, int on)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        if (!lcd->ops->backlight)
                return;
@@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work)
 /* turn the backlight on for a little while */
 void charlcd_poke(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        if (!lcd->ops->backlight)
                return;
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke);
 
 static void charlcd_gotoxy(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
        unsigned int addr;
 
        /*
@@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd)
 
 static void charlcd_home(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        priv->addr.x = 0;
        priv->addr.y = 0;
@@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd)
 
 static void charlcd_print(struct charlcd *lcd, char c)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        if (priv->addr.x < lcd->bwidth) {
                if (lcd->char_conv)
@@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd)
 /* clears the display and resets X/Y */
 static void charlcd_clear_display(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
        priv->addr.x = 0;
@@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd)
 static int charlcd_init_display(struct charlcd *lcd)
 {
        void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
        u8 init;
 
        if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
@@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
 
 static inline int handle_lcd_special_code(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        /* LCD special codes */
 
@@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
 
 static void charlcd_write_char(struct charlcd *lcd, char c)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        /* first, we'll test if we're in escape mode */
        if ((c != '\n') && priv->esc_seq.len >= 0) {
@@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
 
 static int charlcd_open(struct inode *inode, struct file *file)
 {
-       struct charlcd_priv *priv = to_priv(the_charlcd);
+       struct charlcd_priv *priv = charlcd_to_priv(the_charlcd);
        int ret;
 
        ret = -EBUSY;
@@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
        }
 }
 
+#ifdef CONFIG_PANEL_BOOT_MESSAGE
+#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
+#else
+#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n"
+#endif
+
+#ifdef CONFIG_CHARLCD_BL_ON
+#define LCD_INIT_BL "\x1b[L+"
+#elif defined(CONFIG_CHARLCD_BL_FLASH)
+#define LCD_INIT_BL "\x1b[L*"
+#else
+#define LCD_INIT_BL "\x1b[L-"
+#endif
+
 /* initialize the LCD driver */
 static int charlcd_init(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
        int ret;
 
        if (lcd->ops->backlight) {
@@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd)
                return ret;
 
        /* display a short message */
-#ifdef CONFIG_PANEL_CHANGE_MESSAGE
-#ifdef CONFIG_PANEL_BOOT_MESSAGE
-       charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
-#endif
-#else
-       charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
-#endif
+       charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT);
+
        /* clear the display on the next device opening */
        priv->must_clear = true;
        charlcd_home(lcd);
@@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size)
 }
 EXPORT_SYMBOL_GPL(charlcd_alloc);
 
+void charlcd_free(struct charlcd *lcd)
+{
+       kfree(charlcd_to_priv(lcd));
+}
+EXPORT_SYMBOL_GPL(charlcd_free);
+
 static int panel_notify_sys(struct notifier_block *this, unsigned long code,
                            void *unused)
 {
@@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register);
 
 int charlcd_unregister(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        unregister_reboot_notifier(&panel_notifier);
        charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
index 9ad93ea42fdc73e81242ea358ffb357612833367..ab15b64707ad22ef429a7ee6911d72ec9a284bd5 100644 (file)
@@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev)
        return 0;
 
 fail:
-       kfree(lcd);
+       charlcd_free(lcd);
        return ret;
 }
 
@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
        struct charlcd *lcd = platform_get_drvdata(pdev);
 
        charlcd_unregister(lcd);
+
+       charlcd_free(lcd);
        return 0;
 }
 
index 21b9b2f2470a26d1f2d1c2d5eb4237fe3902af82..e06de63497cf8f00edde8d2d7bcffa5b25cc8e81 100644 (file)
@@ -1620,7 +1620,7 @@ err_lcd_unreg:
        if (lcd.enabled)
                charlcd_unregister(lcd.charlcd);
 err_unreg_device:
-       kfree(lcd.charlcd);
+       charlcd_free(lcd.charlcd);
        lcd.charlcd = NULL;
        parport_unregister_device(pprt);
        pprt = NULL;
@@ -1647,7 +1647,7 @@ static void panel_detach(struct parport *port)
        if (lcd.enabled) {
                charlcd_unregister(lcd.charlcd);
                lcd.initialized = false;
-               kfree(lcd.charlcd);
+               charlcd_free(lcd.charlcd);
                lcd.charlcd = NULL;
        }
 
index 76c9969b7124c11cf1638a82cc223999ae5e1b8a..96a6dc9d305c88b842258f4ed91cc7b7b569d506 100644 (file)
@@ -1469,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
        if (IS_ERR(gpd_data))
                return PTR_ERR(gpd_data);
 
-       genpd_lock(genpd);
-
        ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
        if (ret)
                goto out;
 
+       genpd_lock(genpd);
+
        dev_pm_domain_set(dev, &genpd->domain);
 
        genpd->device_count++;
@@ -1482,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
- out:
        genpd_unlock(genpd);
-
+ out:
        if (ret)
                genpd_free_dev_data(dev, gpd_data);
        else
@@ -1533,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
        genpd->device_count--;
        genpd->max_off_time_changed = true;
 
-       if (genpd->detach_dev)
-               genpd->detach_dev(genpd, dev);
-
        dev_pm_domain_set(dev, NULL);
 
        list_del_init(&pdd->list_node);
 
        genpd_unlock(genpd);
 
+       if (genpd->detach_dev)
+               genpd->detach_dev(genpd, dev);
+
        genpd_free_dev_data(dev, gpd_data);
 
        return 0;
index 1fad9291f6aaa6d893c45186a8a24e495f5169fc..7fc5a18e02ad5de5cf7269c79b33a7cecb3bfc21 100644 (file)
@@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
                                                val, nval);
 }
 
-struct fwnode_handle *
+static struct fwnode_handle *
 software_node_get_parent(const struct fwnode_handle *fwnode)
 {
        struct software_node *swnode = to_software_node(fwnode);
@@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
                        NULL;
 }
 
-struct fwnode_handle *
+static struct fwnode_handle *
 software_node_get_next_child(const struct fwnode_handle *fwnode,
                             struct fwnode_handle *child)
 {
index 1e6edd568214f40400a5fc7eff474b316a4afda0..bf1c61cab8eb1cec135432fe52f1145653cfacd7 100644 (file)
@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
                        return -EBADF;
 
                l = f->f_mapping->host->i_bdev->bd_disk->private_data;
-               if (l->lo_state == Lo_unbound) {
+               if (l->lo_state != Lo_bound) {
                        return -EINVAL;
                }
                f = l->lo_backing_file;
index 96670eefaeb2c3458964110a39bddd942ff9fde1..377a694dc22814b9d040a64a9d3ffd7666f5a6a4 100644 (file)
@@ -749,8 +749,12 @@ static int pcd_detect(void)
                return 0;
 
        printk("%s: No CD-ROM drive found\n", name);
-       for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+       for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+               blk_cleanup_queue(cd->disk->queue);
+               cd->disk->queue = NULL;
+               blk_mq_free_tag_set(&cd->tag_set);
                put_disk(cd->disk);
+       }
        pi_unregister_driver(par_drv);
        return -1;
 }
index e92e7a8eeeb2bf066d522277ead805324aecde9e..103b617cdc3184c0a381e569fbb8d8c81894585c 100644 (file)
@@ -761,8 +761,12 @@ static int pf_detect(void)
                return 0;
 
        printk("%s: No ATAPI disk detected\n", name);
-       for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+       for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+               blk_cleanup_queue(pf->disk->queue);
+               pf->disk->queue = NULL;
+               blk_mq_free_tag_set(&pf->tag_set);
                put_disk(pf->disk);
+       }
        pi_unregister_driver(par_drv);
        return -1;
 }
@@ -1047,13 +1051,15 @@ static void __exit pf_exit(void)
        int unit;
        unregister_blkdev(major, name);
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
-               if (!pf->present)
-                       continue;
-               del_gendisk(pf->disk);
+               if (pf->present)
+                       del_gendisk(pf->disk);
+
                blk_cleanup_queue(pf->disk->queue);
                blk_mq_free_tag_set(&pf->tag_set);
                put_disk(pf->disk);
-               pi_release(pf->pi);
+
+               if (pf->present)
+                       pi_release(pf->pi);
        }
 }
 
index 4ba967d65cf963c6f2a2084a62f2dbad1935c3bf..2210c1b9491ba2e9f690dd4a26b209b64f4ad925 100644 (file)
@@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private)
                pctx->opts->queue_depth = intval;
                break;
        case Opt_alloc_size:
-               if (intval < 1) {
+               if (intval < SECTOR_SIZE) {
                        pr_err("alloc_size out of range\n");
                        return -EINVAL;
                }
@@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
 }
 
-static int wait_for_latest_osdmap(struct ceph_client *client)
-{
-       u64 newest_epoch;
-       int ret;
-
-       ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
-       if (ret)
-               return ret;
-
-       if (client->osdc.osdmap->epoch >= newest_epoch)
-               return 0;
-
-       ceph_osdc_maybe_request_map(&client->osdc);
-       return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
-                                    client->options->mount_timeout);
-}
-
 /*
  * Get a ceph client with specific addr and configuration, if one does
  * not exist create it.  Either way, ceph_opts is consumed by this
@@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
                 * Using an existing client.  Make sure ->pg_pools is up to
                 * date before we look up the pool id in do_rbd_add().
                 */
-               ret = wait_for_latest_osdmap(rbdc->client);
+               ret = ceph_wait_for_latest_osdmap(rbdc->client,
+                                       rbdc->client->options->mount_timeout);
                if (ret) {
                        rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
                        rbd_put_client(rbdc);
@@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        q->limits.max_sectors = queue_max_hw_sectors(q);
        blk_queue_max_segments(q, USHRT_MAX);
        blk_queue_max_segment_size(q, UINT_MAX);
-       blk_queue_io_min(q, objset_bytes);
-       blk_queue_io_opt(q, objset_bytes);
+       blk_queue_io_min(q, rbd_dev->opts->alloc_size);
+       blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
 
        if (rbd_dev->opts->trim) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-               q->limits.discard_granularity = objset_bytes;
+               q->limits.discard_granularity = rbd_dev->opts->alloc_size;
                blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
                blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
        }
index a8dd80576c95a0dd24c751cad4387a6754ad9de5..857f8c0862744268eed2b1eee8beae301bbe0bbd 100644 (file)
@@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void)
        return ~readw(tcd);
 }
 
-static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
+static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base)
 {
-       unsigned long rate;
-
-       if (!base)
-               return -ENOMEM;
-       if (IS_ERR(clock))
-               return PTR_ERR(clock);
-
-       rate = clk_get_rate(clock);
+       unsigned long rate = clk_get_rate(clock);
 
        tcd = base;
 
@@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
                              clocksource_mmio_readw_down);
 
        sched_clock_register(clps711x_sched_clock_read, 16, rate);
-
-       return 0;
 }
 
 static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
@@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
        struct clock_event_device *clkevt;
        unsigned long rate;
 
-       if (!irq)
-               return -EINVAL;
-       if (!base)
-               return -ENOMEM;
-       if (IS_ERR(clock))
-               return PTR_ERR(clock);
-
        clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
        if (!clkevt)
                return -ENOMEM;
@@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
                           "clps711x-timer", clkevt);
 }
 
-void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
-                                unsigned int irq)
-{
-       struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
-       struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
-
-       BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
-       BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
-}
-
-#ifdef CONFIG_TIMER_OF
 static int __init clps711x_timer_init(struct device_node *np)
 {
        unsigned int irq = irq_of_parse_and_map(np, 0);
        struct clk *clock = of_clk_get(np, 0);
        void __iomem *base = of_iomap(np, 0);
 
+       if (!base)
+               return -ENOMEM;
+       if (!irq)
+               return -EINVAL;
+       if (IS_ERR(clock))
+               return PTR_ERR(clock);
+
        switch (of_alias_get_id(np, "timer")) {
        case CLPS711X_CLKSRC_CLOCKSOURCE:
-               return _clps711x_clksrc_init(clock, base);
+               clps711x_clksrc_init(clock, base);
+               break;
        case CLPS711X_CLKSRC_CLOCKEVENT:
                return _clps711x_clkevt_init(clock, base, irq);
        default:
                return -EINVAL;
        }
+
+       return 0;
 }
 TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
-#endif
index 54f8a331b53a0735e21e6befb4c894dae8f76f97..37671a5d4ed9fe1e59236a543b4c2b28be89f20e 100644 (file)
@@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-struct irqaction gic_compare_irqaction = {
+static struct irqaction gic_compare_irqaction = {
        .handler = gic_compare_interrupt,
        .percpu_dev_id = &gic_clockevent_device,
        .flags = IRQF_PERCPU | IRQF_TIMER,
index 43f4d5c4d6fa4fdb8f4581d71010e4b54ecd18da..f987027ca56645dab3a604e9e872676d7c3406c7 100644 (file)
@@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs)
        return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
 }
 
-void tc_clksrc_suspend(struct clocksource *cs)
+static void tc_clksrc_suspend(struct clocksource *cs)
 {
        int i;
 
@@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs)
        bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
 }
 
-void tc_clksrc_resume(struct clocksource *cs)
+static void tc_clksrc_resume(struct clocksource *cs)
 {
        int i;
 
index e8163693e936e92a54a5ed8a6145cbe4618f9cde..5e6038fbf115d10bc82cc77548b709c3145e4a43 100644 (file)
@@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
 static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
        .name           = "riscv_clocksource",
        .rating         = 300,
-       .mask           = CLOCKSOURCE_MASK(BITS_PER_LONG),
+       .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .read           = riscv_clocksource_rdtime,
 };
@@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
                return error;
        }
 
-       sched_clock_register(riscv_sched_clock,
-                       BITS_PER_LONG, riscv_timebase);
+       sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
 
        error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
                         "clockevents/riscv/timer:starting",
index c364027638e1aeccdb02c76963ad3212a13096bc..3352da6ed61f39139eb46aba585dfa0136697e80 100644 (file)
@@ -586,8 +586,8 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
 }
 
 /* Optimized set_load which removes costly spin wait in timer_start */
-int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
-                            unsigned int load)
+static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
+                                       int autoreload, unsigned int load)
 {
        u32 l;
 
index bfa9062ce6b9fed957a5e52c592dc57ff257a02e..16fcb56c232b55eef2e36027b624ddb6986aa68d 100644 (file)
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;
 
+       vm->bulk_moveable &= list_empty(&vm->evicted);
+
        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
 
index 600259b4e29184a5ce05f3441151a7a2f2bf7223..2fe8397241ea4c128ed7fffc924955ac483daec8 100644 (file)
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
                }
 
                ring->vm_inv_eng = inv_eng - 1;
-               change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
+               vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
 
                dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
                         ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
index fb27783d7a542d565e1c002d03fc051d055039be..81127f7d6ed193c9cb996b685577a807f5e5646e 100644 (file)
@@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc(
        struct amdgpu_dm_connector *aconnector =
                        to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
+       int vrefresh = drm_mode_vrefresh(mode);
 
        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
-               aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+                                       vrefresh >= aconnector->min_vfreq &&
+                                       vrefresh <= aconnector->max_vfreq;
 
        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
index 381581b01d485e581df8bcebcd6983b01bc8a488..05bbc2b622fc1094a2a8f85ce060d0805eae0f7e 100644 (file)
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
        synchronize_srcu(&drm_unplug_srcu);
 
        drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
+       drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
index 0e9349ff2d16a64dd6628ab47de8f9ab0271d632..af2ab640cadbb05105325a0de2b31ae5f5c70ccf 100644 (file)
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                                best_depth = fmt->depth;
                }
        }
-       if (sizes.surface_depth != best_depth) {
+       if (sizes.surface_depth != best_depth && best_depth) {
                DRM_INFO("requested bpp %d, scaled depth down to %d",
                         sizes.surface_bpp, best_depth);
                sizes.surface_depth = best_depth;
index 83a5bbca6e7e089f10d75ea723ac982b7df61356..7caa3c7ed9789901e4aa5df2c2204326cfe39c27 100644 (file)
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
        drm_close_helper(filp);
 
-       if (!--dev->open_count) {
+       if (!--dev->open_count)
                drm_lastclose(dev);
-               if (drm_dev_is_unplugged(dev))
-                       drm_put_dev(dev);
-       }
+
        mutex_unlock(&drm_global_mutex);
 
        drm_minor_release(minor);
index 0573eab0e190f6d76d0e970a9b0b37c67c6dfe3a..f35e4ab55b270132871aea56af219483349e43d9 100644 (file)
@@ -20,6 +20,7 @@
 #include "regs-vp.h"
 
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
        mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
 }
 
-static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+static bool mixer_is_synced(struct mixer_context *ctx)
 {
-       /* block update on vsync */
-       mixer_reg_writemask(ctx, MXR_STATUS, enable ?
-                       MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+       u32 base, shadow;
 
+       if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+           ctx->mxr_ver == MXR_VER_128_0_0_184)
+               return !(mixer_reg_read(ctx, MXR_CFG) &
+                        MXR_CFG_LAYER_UPDATE_COUNT_MASK);
+
+       if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+           vp_reg_read(ctx, VP_SHADOW_UPDATE))
+               return false;
+
+       base = mixer_reg_read(ctx, MXR_CFG);
+       shadow = mixer_reg_read(ctx, MXR_CFG_S);
+       if (base != shadow)
+               return false;
+
+       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+       if (base != shadow)
+               return false;
+
+       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
+       if (base != shadow)
+               return false;
+
+       return true;
+}
+
+static int mixer_wait_for_sync(struct mixer_context *ctx)
+{
+       ktime_t timeout = ktime_add_us(ktime_get(), 100000);
+
+       while (!mixer_is_synced(ctx)) {
+               usleep_range(1000, 2000);
+               if (ktime_compare(ktime_get(), timeout) > 0)
+                       return -ETIMEDOUT;
+       }
+       return 0;
+}
+
+static void mixer_disable_sync(struct mixer_context *ctx)
+{
+       mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
+}
+
+static void mixer_enable_sync(struct mixer_context *ctx)
+{
+       if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+           ctx->mxr_ver == MXR_VER_128_0_0_184)
+               mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+       mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
        if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
-               vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
-                       VP_SHADOW_UPDATE_ENABLE : 0);
+               vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
 }
 
 static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
 
        spin_lock_irqsave(&ctx->reg_slock, flags);
 
-       vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
        /* interlace or progressive scan mode */
        val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
        vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
        vp_regs_dump(ctx);
 }
 
-static void mixer_layer_update(struct mixer_context *ctx)
-{
-       mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
-}
-
 static void mixer_graph_buffer(struct mixer_context *ctx,
                               struct exynos_drm_plane *plane)
 {
@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
        mixer_cfg_layer(ctx, win, priority, true);
        mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
 
-       /* layer update mandatory for mixer 16.0.33.0 */
-       if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
-               ctx->mxr_ver == MXR_VER_128_0_0_184)
-               mixer_layer_update(ctx);
-
        spin_unlock_irqrestore(&ctx->reg_slock, flags);
 
        mixer_regs_dump(ctx);
@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
 static irqreturn_t mixer_irq_handler(int irq, void *arg)
 {
        struct mixer_context *ctx = arg;
-       u32 val, base, shadow;
+       u32 val;
 
        spin_lock(&ctx->reg_slock);
 
@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
                val &= ~MXR_INT_STATUS_VSYNC;
 
                /* interlace scan need to check shadow register */
-               if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
-                       if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
-                           vp_reg_read(ctx, VP_SHADOW_UPDATE))
-                               goto out;
-
-                       base = mixer_reg_read(ctx, MXR_CFG);
-                       shadow = mixer_reg_read(ctx, MXR_CFG_S);
-                       if (base != shadow)
-                               goto out;
-
-                       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
-                       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
-                       if (base != shadow)
-                               goto out;
-
-                       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
-                       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
-                       if (base != shadow)
-                               goto out;
-               }
+               if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
+                   && !mixer_is_synced(ctx))
+                       goto out;
 
                drm_crtc_handle_vblank(&ctx->crtc->base);
        }
@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
 
 static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
 {
-       struct mixer_context *mixer_ctx = crtc->ctx;
+       struct mixer_context *ctx = crtc->ctx;
 
-       if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+       if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
                return;
 
-       mixer_vsync_set_update(mixer_ctx, false);
+       if (mixer_wait_for_sync(ctx))
+               dev_err(ctx->dev, "timeout waiting for VSYNC\n");
+       mixer_disable_sync(ctx);
 }
 
 static void mixer_update_plane(struct exynos_drm_crtc *crtc,
@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
        if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
                return;
 
-       mixer_vsync_set_update(mixer_ctx, true);
+       mixer_enable_sync(mixer_ctx);
        exynos_crtc_handle_event(crtc);
 }
 
@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        exynos_drm_pipe_clk_enable(crtc, true);
 
-       mixer_vsync_set_update(ctx, false);
+       mixer_disable_sync(ctx);
 
        mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        mixer_commit(ctx);
 
-       mixer_vsync_set_update(ctx, true);
+       mixer_enable_sync(ctx);
 
        set_bit(MXR_BIT_POWERED, &ctx->flags);
 }
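The new mixer_wait_for_sync() above replaces the open-coded shadow-register comparison with a bounded poll: re-test mixer_is_synced(), sleep a little, and give up after roughly 100 ms. Below is a hedged userspace analogue of that polling-with-deadline pattern; device_is_synced() is a stand-in and the 100 ms budget simply mirrors the hunk.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the hardware "shadow registers caught up?" test. */
static bool device_is_synced(void)
{
	return false;   /* pretend the hardware never catches up */
}

static long long elapsed_ns(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) * 1000000000LL + (b->tv_nsec - a->tv_nsec);
}

/* Poll, sleeping between attempts, and give up after ~100 ms. */
static int wait_for_sync(void)
{
	struct timespec start, now;
	struct timespec nap = { .tv_sec = 0, .tv_nsec = 1 * 1000 * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!device_is_synced()) {
		nanosleep(&nap, NULL);   /* back off instead of busy-spinning */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (elapsed_ns(&start, &now) > 100LL * 1000 * 1000)
			return -ETIMEDOUT;
	}
	return 0;
}

int main(void)
{
	printf("wait_for_sync() = %d\n", wait_for_sync());
	return 0;
}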
index 35b4ec3f7618b887e5661d0d652cca99b6ed02c6..3592d04c33b283cac0abd2f432ce313194d2b606 100644 (file)
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        }
 
        if (index_mode) {
-               if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+               if (guest_gma >= I915_GTT_PAGE_SIZE) {
                        ret = -EFAULT;
                        goto err;
                }
index c7103dd2d8d571fde462f173dcc67efc0973cc69..d7052ab7908c8d9c7872df64cb5cd68ec8f13b4e 100644 (file)
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
        }
 
        list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
        list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
        return mm;
 }
 
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
                if (ret)
                        return ret;
 
+               mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                list_move_tail(&mm->ppgtt_mm.lru_list,
                               &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+               mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
        }
 
        return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
        struct intel_vgpu_mm *mm;
        struct list_head *pos, *n;
 
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
        list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
                        continue;
 
                list_del_init(&mm->ppgtt_mm.lru_list);
+               mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
                invalidate_ppgtt_mm(mm);
                return 1;
        }
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                }
        }
        INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_init(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
        list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
                if (mm->type == INTEL_GVT_MM_PPGTT) {
+                       mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        list_del_init(&mm->ppgtt_mm.lru_list);
+                       mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        if (mm->ppgtt_mm.shadowed)
                                invalidate_ppgtt_mm(mm);
                }
index d8cb04cc946dff3e19466ff387089db96c226d53..edb610dc5d8689e49f22310b310133b9cb3ee921 100644 (file)
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
        void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
+       struct mutex ppgtt_mm_lock;
        struct list_head ppgtt_mm_lru_list_head;
 
        struct page *scratch_page;
index 7d84cfb9051ac886579648ac7bb2cc5e2a70b3fa..7902fb162d09441f9b4f65447f5e6619b8792c01 100644 (file)
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
        {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
        {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+       {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
index 1bb8f936fdaa75f2ee738bdf3235a247fac90fe8..159192c097cc7eb7424070e8cec052f3f5e5b1f7 100644 (file)
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
        int i = 0;
 
        if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-               return -1;
+               return -EINVAL;
 
        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        if (workload->shadow)
                return 0;
 
-       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-       if (ret < 0) {
-               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-               return ret;
-       }
-
        /* pin shadow context by gvt even the shadow context will be pinned
         * when i915 alloc request. That is because gvt will update the guest
         * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+       struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;
 
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+       if (ret < 0) {
+               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+               goto err_req;
+       }
+
        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        ret = prepare_workload(workload);
 out:
+       if (ret) {
+               /* We might still need to add a request with
+                * a clean ctx to retire it properly.
+                */
+               rq = fetch_and_zero(&workload->req);
+               i915_request_put(rq);
+       }
+
        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                                ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
                goto out;
        }
 
-       if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+       if (!scheduler->current_vgpu->active ||
+           list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;
 
        /*
index 9adc7bb9e69ccfec96e468f95435b83e084ffcce..a67a63b5aa84a09d675793dc118fce8829315917 100644 (file)
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
                                 INTEL_DEVID(dev_priv) == 0x5915 || \
                                 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x591C || \
-                                INTEL_DEVID(dev_priv) == 0x87C0)
+                                INTEL_DEVID(dev_priv) == 0x87C0 || \
+                                INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)   (IS_SKYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
index 30d516e975c64697b2b45019ba747c6251b7ecf1..8558e81fdc2af85dd52486b7c8d55580fe997373 100644 (file)
@@ -1734,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
         * pages from.
         */
        if (!obj->base.filp) {
-               i915_gem_object_put(obj);
-               return -ENXIO;
+               addr = -ENXIO;
+               goto err;
+       }
+
+       if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+               addr = -EINVAL;
+               goto err;
        }
 
        addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1749,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                struct vm_area_struct *vma;
 
                if (down_write_killable(&mm->mmap_sem)) {
-                       i915_gem_object_put(obj);
-                       return -EINTR;
+                       addr = -EINTR;
+                       goto err;
                }
                vma = find_vma(mm, addr);
                if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1768,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        i915_gem_object_put(obj);
 
        args->addr_ptr = (u64)addr;
-
        return 0;
 
 err:
        i915_gem_object_put(obj);
-
        return addr;
 }
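The mmap ioctl now rejects an (offset, size) window that does not fit inside the object before calling vm_mmap(). The subtle part of such checks is testing "offset + size > limit" without letting the addition itself wrap. The helper below is a hedged stand-in under my own name (range_exceeds), not the kernel's range_overflows() macro.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [start, start + size) does not fit inside an object of size limit. */
static bool range_exceeds(uint64_t start, uint64_t size, uint64_t limit)
{
	/* Compare against the remaining room instead of computing start + size,
	 * which could wrap around in 64-bit arithmetic. */
	return start > limit || size > limit - start;
}

int main(void)
{
	printf("%d\n", range_exceeds(4096, 4096, 8192));            /* 0: fits exactly */
	printf("%d\n", range_exceeds(UINT64_MAX - 10, 4096, 8192)); /* 1: would wrap */
	return 0;
}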
 
index 9a65341fec097e500ace05a410b62f6f19390d21..aa6791255252f1800b2609285fb399653625b000 100644 (file)
@@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
                        i915_error_generate_code(error, engines));
        if (engines) {
                /* Just show the first executing process, more is confusing */
-               i = ffs(engines);
+               i = __ffs(engines);
                len += scnprintf(error->error_msg + len,
                                 sizeof(error->error_msg) - len,
                                 ", in %s [%d]",
index 638a586469f97be9fb83bbbcb152c518e7d46e1e..047855dd8c6b828ce42f926680f7d8466883d3cc 100644 (file)
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE   _MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK  0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define   GEN11_GT_VEBOX_DISABLE_MASK  (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK  (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)      _MMIO_TRANS2(tran, \
                                                     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE                 (1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK     (0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT    0
 
index b508d8a735e0347637274aebb2a5eaed29dda2fd..4364f42cac6b88cfd8eef1f82783a863e483884d 100644 (file)
@@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
                info->supports_dvi = (port != PORT_A && port != PORT_E);
                info->supports_hdmi = info->supports_dvi;
                info->supports_dp = (port != PORT_E);
+               info->supports_edp = (port == PORT_A);
        }
 }
 
index 32dce7176f6381dc2a0429691dccc2eafc7fe360..b9b0ea4e2404d6cfce2c37be5d331591fb88fe6e 100644 (file)
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
                        struct i915_gem_context *ctx;
 
                        ctx = live_context(i915, file);
-                       if (!ctx)
+                       if (IS_ERR(ctx))
                                break;
 
                        /* We will need some GGTT space for the rq's context */
index 2281ed3eb7747757620288069f32d48a53b9ea15..8a4ebcb6405cee2427d0889ea49a0d871d2cc5ba 100644 (file)
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
        ret = drm_dev_register(drm, 0);
        if (ret)
-               goto free_drm;
+               goto uninstall_irq;
 
        drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
+uninstall_irq:
+       drm_irq_uninstall(drm);
 free_drm:
        drm_dev_put(drm);
 
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-       struct drm_device *drm = dev_get_drvdata(dev);
-       struct meson_drm *priv = drm->dev_private;
+       struct meson_drm *priv = dev_get_drvdata(dev);
+       struct drm_device *drm = priv->drm;
 
        if (priv->canvas) {
                meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
        }
 
        drm_dev_unregister(drm);
+       drm_irq_uninstall(drm);
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
index e28814f4ea6cd2e05724ee46a0892b261d3d4cef..563953ec6ad03fd904c2e5c38de8cbe1dc2edce0 100644 (file)
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
        /* If sink max TMDS clock, we reject the mode */
-       if (mode->clock > connector->display_info.max_tmds_clock)
+       if (connector->display_info.max_tmds_clock &&
+           mode->clock > connector->display_info.max_tmds_clock)
                return MODE_BAD;
 
        /* Check against non-VIC supported modes */
index 88a52f6b39fe333df24c33dce9aef2535d6a1b09..7dfbbbc1beea6ad1f5fa10cd535bffa130badebe 100644 (file)
@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
        }
 
        ret = pm_runtime_get_sync(drm->dev);
-       if (IS_ERR_VALUE(ret) && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES)
                return ret;
        ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
        pm_runtime_put_autosuspend(drm->dev);
index aa9fec80492d167f720a07ee58f8e0196d858c3a..40c47d6a7d783d72c869937b2a6ad946086a20cf 100644 (file)
@@ -100,12 +100,10 @@ static void
 nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
 {
        struct nouveau_dmem_chunk *chunk;
-       struct nouveau_drm *drm;
        unsigned long idx;
 
        chunk = (void *)hmm_devmem_page_get_drvdata(page);
        idx = page_to_pfn(page) - chunk->pfn_first;
-       drm = chunk->drm;
 
        /*
         * FIXME:
@@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
-       list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
-               ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
-               /* FIXME handle pin failure */
-               WARN_ON(ret);
-       }
        mutex_unlock(&drm->dmem->mutex);
 }
 
@@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
        list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
                nouveau_bo_unpin(chunk->bo);
        }
-       list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
-               nouveau_bo_unpin(chunk->bo);
-       }
        mutex_unlock(&drm->dmem->mutex);
 }
 
@@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
         */
        drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
                                           device, size);
-       if (drm->dmem->devmem == NULL) {
+       if (IS_ERR(drm->dmem->devmem)) {
                kfree(drm->dmem);
                drm->dmem = NULL;
                return;
index c7d4c6073ea59b70c56559288def3fb7fd6fe215..0d4ade9d4722c340b706b82d7ea7bb587db5f293 100644 (file)
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
        clk_disable(vop->hclk);
 }
 
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+       if (win->phy->scl && win->phy->scl->ext) {
+               VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+       }
+
+       VOP_WIN_SET(vop, win, enable, 0);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win = vop_win->data;
 
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
        }
        spin_unlock(&vop->reg_lock);
 
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 
        spin_lock(&vop->reg_lock);
 
-       VOP_WIN_SET(vop, win, enable, 0);
+       vop_win_disable(vop, win);
 
        spin_unlock(&vop->reg_lock);
 }
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
                int channel = i * 2 + 1;
 
                VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
                VOP_WIN_SET(vop, win, gate, 1);
        }
 
index ba9b3cfb8c3d247fae80f8026cc520936e5b954c..b3436c2aed6892b585ca221a9ac711027350310e 100644 (file)
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
                                              struct drm_plane_state *old_state)
 {
-       struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
        struct tegra_plane *p = to_tegra_plane(plane);
+       struct tegra_dc *dc;
        u32 value;
 
        /* rien ne va plus */
        if (!old_state || !old_state->crtc)
                return;
 
+       dc = to_tegra_dc(old_state->crtc);
+
        /*
         * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
         * on planes that are already disabled. Make sure we fallback to the
index 39bfed9623de28f0e62a0297f8e84b7151c28238..982ce37ecde1b0c9fc6ef07c9819b98541248151 100644 (file)
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
        if (vic->booted)
                return 0;
 
+#ifdef CONFIG_IOMMU_API
        if (vic->config->supports_sid) {
                struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
                u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
                        vic_writel(vic, value, VIC_THI_STREAMID1);
                }
        }
+#endif
 
        /* setup clockgating registers */
        vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
index 66885c24590f0147ce1a510991a546c4f2bbe427..c1bd5e3d9e4aee80bb185cc38307fb389fe54c2f 100644 (file)
 #include "udl_connector.h"
 #include "udl_drv.h"
 
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
-                                                          u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+                              size_t len)
 {
        int ret, i;
        u8 *read_buff;
+       struct udl_device *udl = data;
 
        read_buff = kmalloc(2, GFP_KERNEL);
        if (!read_buff)
-               return false;
+               return -1;
 
-       for (i = 0; i < EDID_LENGTH; i++) {
-               int bval = (i + block_idx * EDID_LENGTH) << 8;
+       for (i = 0; i < len; i++) {
+               int bval = (i + block * EDID_LENGTH) << 8;
                ret = usb_control_msg(udl->udev,
                                      usb_rcvctrlpipe(udl->udev, 0),
                                          (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
                if (ret < 1) {
                        DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
                        kfree(read_buff);
-                       return false;
+                       return -1;
                }
-               buff[i] = read_buff[1];
+               buf[i] = read_buff[1];
        }
 
        kfree(read_buff);
-       return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
-                        int *result_buff_size)
-{
-       int i, extensions;
-       u8 *block_buff = NULL, *buff_ptr;
-
-       block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
-       if (block_buff == NULL)
-               return false;
-
-       if (udl_get_edid_block(udl, 0, block_buff) &&
-           memchr_inv(block_buff, 0, EDID_LENGTH)) {
-               extensions = ((struct edid *)block_buff)->extensions;
-               if (extensions > 0) {
-                       /* we have to read all extensions one by one */
-                       *result_buff_size = EDID_LENGTH * (extensions + 1);
-                       *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
-                       buff_ptr = *result_buff;
-                       if (buff_ptr == NULL) {
-                               kfree(block_buff);
-                               return false;
-                       }
-                       memcpy(buff_ptr, block_buff, EDID_LENGTH);
-                       kfree(block_buff);
-                       buff_ptr += EDID_LENGTH;
-                       for (i = 1; i < extensions; ++i) {
-                               if (udl_get_edid_block(udl, i, buff_ptr)) {
-                                       buff_ptr += EDID_LENGTH;
-                               } else {
-                                       kfree(*result_buff);
-                                       *result_buff = NULL;
-                                       return false;
-                               }
-                       }
-                       return true;
-               }
-               /* we have only base edid block */
-               *result_buff = block_buff;
-               *result_buff_size = EDID_LENGTH;
-               return true;
-       }
-
-       kfree(block_buff);
-
-       return false;
+       return 0;
 }
 
 static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-       u8 *edid_buff = NULL;
-       int edid_buff_size = 0;
        struct udl_device *udl = connector->dev->dev_private;
        struct udl_drm_connector *udl_connector =
                                        container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
                udl_connector->edid = NULL;
        }
 
-
-       if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+       udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+       if (!udl_connector->edid)
                return connector_status_disconnected;
 
-       udl_connector->edid = (struct edid *)edid_buff;
-       
        return connector_status_connected;
 }
 
index d5a23295dd80c1a9c1f2cc202c4c93048fc163ef..bb7b58407039bbbb099a371b9a432dc12983f886 100644 (file)
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
        *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
 
 out:
-       drm_gem_object_put(&gobj->base);
+       drm_gem_object_put_unlocked(&gobj->base);
 unlock:
        mutex_unlock(&udl->gem_lock);
        return ret;
index 5930facd6d2d85cca81cb9c1f5247a6be3632546..11a8f99ba18c5f007734abef1003cc44d5e778a1 100644 (file)
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
        ret = drm_gem_handle_create(file, &obj->base, handle);
        drm_gem_object_put_unlocked(&obj->base);
        if (ret)
-               goto err;
+               return ERR_PTR(ret);
 
        return &obj->base;
-
-err:
-       __vgem_gem_destroy(obj);
-       return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
index 138b0bb325cf9662cd59b5a54158947dc691a2d9..69048e73377dc97855aa3b71491008e5993a5304 100644 (file)
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 
        ret = drm_gem_handle_create(file, &obj->gem, handle);
        drm_gem_object_put_unlocked(&obj->gem);
-       if (ret) {
-               drm_gem_object_release(&obj->gem);
-               kfree(obj);
+       if (ret)
                return ERR_PTR(ret);
-       }
 
        return &obj->gem;
 }
index b913a56f3426669f21582e271fac9add830bb91d..2a9112515f464c320628d64b8a9d92c645f730dd 100644 (file)
@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
-       struct drm_display_mode *old_mode;
        struct drm_display_mode *mode;
        int ret;
 
-       old_mode = par->set_mode;
        mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
        if (!mode) {
                DRM_ERROR("Could not create new fb mode.\n");
@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
        mode->vdisplay = var->yres;
        vmw_guess_mode_timing(mode);
 
-       if (old_mode && drm_mode_equal(old_mode, mode)) {
-               drm_mode_destroy(vmw_priv->dev, mode);
-               mode = old_mode;
-               old_mode = NULL;
-       } else if (!vmw_kms_validate_mode_vram(vmw_priv,
+       if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        mode->hdisplay *
                                        DIV_ROUND_UP(var->bits_per_pixel, 8),
                                        mode->vdisplay)) {
@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
        schedule_delayed_work(&par->local_work, 0);
 
 out_unlock:
-       if (old_mode)
-               drm_mode_destroy(vmw_priv->dev, old_mode);
+       if (par->set_mode)
+               drm_mode_destroy(vmw_priv->dev, par->set_mode);
        par->set_mode = mode;
 
        mutex_unlock(&par->bo_mutex);
index b93c558dd86e0121741284becc87434a27b39d2a..7da752ca1c34bd06497e1491d264921c33011c80 100644 (file)
@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 
        id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
        if (id < 0)
-               return id;
+               return (id != -ENOMEM ? 0 : id);
 
        spin_lock(&gman->lock);
 
index f2c6819712013046246002346af928bd1ab16bc0..f8979abb9a19ca963bf9625fc911ab74590b388a 100644 (file)
@@ -131,6 +131,7 @@ config I2C_I801
            Cannon Lake (PCH)
            Cedar Fork (PCH)
            Ice Lake (PCH)
+           Comet Lake (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index c91e145ef5a56dbb1a512c23611f06ad7d22aa05..679c6c41f64b49babf8b0a7505d56a2c4093f6c7 100644 (file)
@@ -71,6 +71,7 @@
  * Cannon Lake-LP (PCH)                0x9da3  32      hard    yes     yes     yes
  * Cedar Fork (PCH)            0x18df  32      hard    yes     yes     yes
  * Ice Lake-LP (PCH)           0x34a3  32      hard    yes     yes     yes
+ * Comet Lake (PCH)            0x02a3  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS       0xa223
 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS       0xa2a3
 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS         0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS            0x02a3
 
 struct i801_mux_config {
        char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
        { 0, }
 };
 
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
        case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
        case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+       case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
                priv->features |= FEATURE_I2C_BLOCK_READ;
                priv->features |= FEATURE_IRQ;
                priv->features |= FEATURE_SMBUS_PEC;
index c5a881172524a6badd8f088850fb60c57120b1ce..337410f4086082d8f57c69afaaf9c3ccb9452de2 100644 (file)
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 
                rcu_read_lock();
                in = __in_dev_get_rcu(upper_dev);
-               local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
+               if (!in->ifa_list)
+                       local_ipaddr = 0;
+               else
+                       local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
                rcu_read_unlock();
        } else {
                local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
        case NETDEV_UP:
                /* Fall through */
        case NETDEV_CHANGEADDR:
+
+               /* Just skip if no need to handle ARP cache */
+               if (!local_ipaddr)
+                       break;
+
                i40iw_manage_arp_cache(iwdev,
                                       netdev->dev_addr,
                                       &local_ipaddr,
index 782499abcd9868d63b5f789ee002a0594b00c4d6..2a0b59a4b6ebc3c34ff9ff3308af7e337d2001f3 100644 (file)
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
        unsigned long flags;
 
        for (i = 0 ; i < dev->num_ports; i++) {
-               cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
                det = &sriov->alias_guid.ports_guid[i];
+               cancel_delayed_work_sync(&det->alias_guid_work);
                spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
                while (!list_empty(&det->cb_list)) {
                        cb_ctx = list_entry(det->cb_list.next,
index eaa055007f28edfa6ac34e6fd88550ed879f12eb..9e08df7914aa2e142c8926a1230516f5c214d326 100644 (file)
@@ -20,6 +20,7 @@
 
 enum devx_obj_flags {
        DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
+       DEVX_OBJ_FLAGS_DCT = 1 << 1,
 };
 
 struct devx_async_data {
@@ -39,7 +40,10 @@ struct devx_obj {
        u32                     dinlen; /* destroy inbox length */
        u32                     dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
        u32                     flags;
-       struct mlx5_ib_devx_mr  devx_mr;
+       union {
+               struct mlx5_ib_devx_mr  devx_mr;
+               struct mlx5_core_dct    core_dct;
+       };
 };
 
 struct devx_umem {
@@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in)
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
                                        MLX5_GET(arm_rq_in, in, srq_number));
                break;
-       case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
                                        MLX5_GET(drain_dct_in, in, dctn));
@@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in)
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
-       case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
@@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
        if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
                devx_cleanup_mkey(obj);
 
-       ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+       if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+               ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+       else
+               ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+                                   sizeof(out));
        if (ib_is_destroy_retryable(ret, why, uobject))
                return ret;
 
@@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                devx_set_umem_valid(cmd_in);
        }
 
-       err = mlx5_cmd_exec(dev->mdev, cmd_in,
-                           cmd_in_len,
-                           cmd_out, cmd_out_len);
+       if (opcode == MLX5_CMD_OP_CREATE_DCT) {
+               obj->flags |= DEVX_OBJ_FLAGS_DCT;
+               err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
+                                          cmd_in, cmd_in_len,
+                                          cmd_out, cmd_out_len);
+       } else {
+               err = mlx5_cmd_exec(dev->mdev, cmd_in,
+                                   cmd_in_len,
+                                   cmd_out, cmd_out_len);
+       }
+
        if (err)
                goto obj_free;
 
@@ -1214,7 +1228,11 @@ err_copy:
        if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
                devx_cleanup_mkey(obj);
 obj_destroy:
-       mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+       if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+               mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+       else
+               mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+                             sizeof(out));
 obj_free:
        kfree(obj);
        return err;
index 994c19d012118b11a4a3ab3d3349ffad3002f645..531ff20b32ade6ccb4d0b3533bc1f8ceceed1b26 100644 (file)
@@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+               *active_width = IB_WIDTH_2X;
+               *active_speed = IB_SPEED_EDR;
+               break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
+       case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
+               *active_width = IB_WIDTH_4X;
+               *active_speed = IB_SPEED_EDR;
+               break;
        case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
                *active_width = IB_WIDTH_2X;
                *active_speed = IB_SPEED_HDR;
index 6b1f0e76900b23778a11248b2c74985f8cc4ff7c..7cd006da1daef05cd335dc77cda8281e179630c4 100644 (file)
@@ -3729,6 +3729,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
                struct mlx5_ib_modify_qp_resp resp = {};
+               u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
                u32 min_resp_len = offsetof(typeof(resp), dctn) +
                                   sizeof(resp.dctn);
 
@@ -3747,7 +3748,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
 
                err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
-                                          MLX5_ST_SZ_BYTES(create_dct_in));
+                                          MLX5_ST_SZ_BYTES(create_dct_in), out,
+                                          sizeof(out));
                if (err)
                        return err;
                resp.dctn = qp->dct.mdct.mqp.qpn;
index b319e51c379bd664999e7d710c5e1240cc21a42d..21cb088d66877a4bd6c8c914e72eff5c385de031 100644 (file)
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
        /* Everything is mapped - write the right values into s->dma_address */
        for_each_sg(sglist, s, nelems, i) {
-               s->dma_address += address + s->offset;
+               /*
+                * Add in the remaining piece of the scatter-gather offset that
+                * was masked out when we were determining the physical address
+                * via (sg_phys(s) & PAGE_MASK) earlier.
+                */
+               s->dma_address += address + (s->offset & ~PAGE_MASK);
                s->dma_length   = s->length;
        }
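The map_sg() fix adds back the part of the scatterlist offset that lies below the page boundary, which had been masked off when the physical address was page-aligned for mapping. The split itself is the usual PAGE_MASK arithmetic; here is a hedged standalone sketch with PAGE_SIZE hard-coded to 4 KiB.

#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))   /* mirrors the kernel's convention */

int main(void)
{
	unsigned long long addr = 0x12345ab0ULL;

	/* The aligned part is what gets mapped; the low bits are the in-page
	 * offset that must be added back to the returned address. */
	printf("page base: 0x%llx\n", addr & PAGE_MASK);
	printf("in-page:   0x%llx\n", addr & ~PAGE_MASK);
	return 0;
}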
 
index 87274b54febd0eb3cfb1dd3358609fa037c90095..28cb713d728ceef9eb7f37caa746a546617e1dbb 100644 (file)
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
        u32 pmen;
        unsigned long flags;
 
+       if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+               return;
+
        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
        ctx_lo = context[0].lo;
 
-       sdev->did = domain->iommu_did[iommu->seq_id];
+       sdev->did = FLPT_DEFAULT_DID;
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
        if (!(ctx_lo & CONTEXT_PASIDE)) {
index f8d3ba2475237f4477994a7c8b8b1cae0cfe3310..2de8122e218fde5856867252679b0b95682f3619 100644 (file)
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                curr_iova = rb_entry(curr, struct iova, node);
        } while (curr && new_pfn <= curr_iova->pfn_hi);
 
-       if (limit_pfn < size || new_pfn < iovad->start_pfn)
+       if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+               iovad->max32_alloc_size = size;
                goto iova32_full;
+       }
 
        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
        return 0;
 
 iova32_full:
-       iovad->max32_alloc_size = size;
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return -ENOMEM;
 }
index 83364fedbf0ab57962a7b325663ab910d6173a0a..5e4ca139e4eacaa70eb5cba5b7ce4124aed93381 100644 (file)
@@ -275,14 +275,14 @@ out_free:
        return ret;
 }
 
-int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
        struct device_node *parent)
 {
        return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
 }
 IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
 
-int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
        struct device_node *parent)
 {
        return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
index 2dd1ff0cf558050e8ae9cfa7ea81c26d12a012e5..7577755bdcf4f38588438634c7484999a51d927c 100644 (file)
@@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
        ra = container_of(a, struct lpi_range, entry);
        rb = container_of(b, struct lpi_range, entry);
 
-       return rb->base_id - ra->base_id;
+       return ra->base_id - rb->base_id;
 }
 
 static void merge_lpi_ranges(void)
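Flipping the operands in lpi_range_cmp() reverses the sort, so the LPI ranges come out in ascending base_id order. The comparator convention is the same one qsort() uses: return a negative value when the first element should sort first. A small hedged example follows; using a comparison instead of plain subtraction also avoids signed overflow on extreme values.

#include <stdio.h>
#include <stdlib.h>

/* Ascending order: negative when a should sort before b. */
static int cmp_ascending(const void *pa, const void *pb)
{
	int a = *(const int *)pa, b = *(const int *)pb;

	return (a > b) - (a < b);
}

int main(void)
{
	int ids[] = { 40, 8192, 0, 96 };

	qsort(ids, 4, sizeof(ids[0]), cmp_ascending);
	for (int i = 0; i < 4; i++)
		printf("%d ", ids[i]);   /* prints: 0 40 96 8192 */
	printf("\n");
	return 0;
}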
index ba2a37a27a54ff9fc3abab3bf98da61d64965b06..fd3110c171bad165737c5bb274f1adda7dbf6daa 100644 (file)
@@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
 #endif
 }
 
-static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+static int gic_init_bases(struct gic_chip_data *gic,
                          struct fwnode_handle *handle)
 {
-       irq_hw_number_t hwirq_base;
-       int gic_irqs, irq_base, ret;
+       int gic_irqs, ret;
 
        if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
                /* Frankein-GIC without banked registers... */
@@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
        } else {                /* Legacy support */
                /*
                 * For primary GICs, skip over SGIs.
-                * For secondary GICs, skip over PPIs, too.
+                * No secondary GIC support whatsoever.
                 */
-               if (gic == &gic_data[0] && (irq_start & 31) > 0) {
-                       hwirq_base = 16;
-                       if (irq_start != -1)
-                               irq_start = (irq_start & ~31) + 16;
-               } else {
-                       hwirq_base = 32;
-               }
+               int irq_base;
 
-               gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+               gic_irqs -= 16; /* calculate # of irqs to allocate */
 
-               irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+               irq_base = irq_alloc_descs(16, 16, gic_irqs,
                                           numa_node_id());
                if (irq_base < 0) {
-                       WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
-                            irq_start);
-                       irq_base = irq_start;
+                       WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
+                       irq_base = 16;
                }
 
                gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
-                                       hwirq_base, &gic_irq_domain_ops, gic);
+                                                   16, &gic_irq_domain_ops, gic);
        }
 
        if (WARN_ON(!gic->domain)) {
@@ -1195,7 +1187,6 @@ error:
 }
 
 static int __init __gic_init_bases(struct gic_chip_data *gic,
-                                  int irq_start,
                                   struct fwnode_handle *handle)
 {
        char *name;
@@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
                gic_init_chip(gic, NULL, name, false);
        }
 
-       ret = gic_init_bases(gic, irq_start, handle);
+       ret = gic_init_bases(gic, handle);
        if (ret)
                kfree(name);
 
        return ret;
 }
 
-void __init gic_init(unsigned int gic_nr, int irq_start,
-                    void __iomem *dist_base, void __iomem *cpu_base)
+void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
 {
        struct gic_chip_data *gic;
 
-       if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
-               return;
-
        /*
         * Non-DT/ACPI systems won't run a hypervisor, so let's not
         * bother with these...
         */
        static_branch_disable(&supports_deactivate_key);
 
-       gic = &gic_data[gic_nr];
+       gic = &gic_data[0];
        gic->raw_dist_base = dist_base;
        gic->raw_cpu_base = cpu_base;
 
-       __gic_init_bases(gic, irq_start, NULL);
+       __gic_init_bases(gic, NULL);
 }
 
 static void gic_teardown(struct gic_chip_data *gic)
@@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
        if (ret)
                return ret;
 
-       ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+       ret = gic_init_bases(*gic, &dev->of_node->fwnode);
        if (ret) {
                gic_teardown(*gic);
                return ret;
@@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
        if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
                static_branch_disable(&supports_deactivate_key);
 
-       ret = __gic_init_bases(gic, -1, &node->fwnode);
+       ret = __gic_init_bases(gic, &node->fwnode);
        if (ret) {
                gic_teardown(gic);
                return ret;
@@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
                return -ENOMEM;
        }
 
-       ret = __gic_init_bases(gic, -1, domain_handle);
+       ret = __gic_init_bases(gic, domain_handle);
        if (ret) {
                pr_err("Failed to initialise GIC\n");
                irq_domain_free_fwnode(domain_handle);
index d1098f4da6a4c567adbcb392ab269bdf14c419b1..88df3d00052c00b3be13292215a679b67d58f776 100644 (file)
@@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 
        raw_spin_lock_init(&data->lock);
 
-       of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
-       of_property_read_u32(np, "fsl,channel", &data->channel);
+       ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
+       if (ret)
+               return ret;
+       ret = of_property_read_u32(np, "fsl,channel", &data->channel);
+       if (ret)
+               return ret;
 
        /*
         * There is one output irq for each group of 64 inputs.
index 567b29c476081056232f13eed15a27c8d8d4fa64..98b6e1d4b1a68cf8a247f4c75a67d09fd11b6b08 100644 (file)
@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
        void __iomem *base = d->chip_data;
        u32 val;
 
+       if (!msg->address_lo && !msg->address_hi)
+               return;
        base += get_mbigen_vec_reg(d->hwirq);
        val = readl_relaxed(base);
 
index 3496b61a312aef87cc9668189fd9047a844e8ca3..8eed478f3b7e5d1fd7de2a20dd7c456350e5b0d5 100644 (file)
@@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
        return 0;
 }
 
-const struct irq_domain_ops mmp_irq_domain_ops = {
+static const struct irq_domain_ops mmp_irq_domain_ops = {
        .map            = mmp_irq_domain_map,
        .xlate          = mmp_irq_domain_xlate,
 };
index add4c9c934c8abda564b25904dc7b9f479ca7afc..18832ccc8ff8751d2562f56e53466b6b5e5b84fb 100644 (file)
@@ -478,7 +478,7 @@ dispose_irq:
        return ret;
 }
 
-struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
        .ap_range = {
                .first = 0,
                .size = 21,
index a93296b9b45debecfb723e780f3f381b15660d2e..7bd1d4cb2e194679078ca67f782a48505641d5e7 100644 (file)
@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
        const struct stm32_exti_bank *stm32_bank;
        struct stm32_exti_chip_data *chip_data;
        void __iomem *base = h_data->base;
-       u32 irqs_mask;
 
        stm32_bank = h_data->drv_data->exti_banks[bank_idx];
        chip_data = &h_data->chips_data[bank_idx];
@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
 
        raw_spin_lock_init(&chip_data->rlock);
 
-       /* Determine number of irqs supported */
-       writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
-       irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
-
        /*
         * This IP has no reset, so after hot reboot we should
         * clear registers to avoid residue
         */
        writel_relaxed(0, base + stm32_bank->imr_ofst);
        writel_relaxed(0, base + stm32_bank->emr_ofst);
-       writel_relaxed(0, base + stm32_bank->rtsr_ofst);
-       writel_relaxed(0, base + stm32_bank->ftsr_ofst);
-       writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
-       if (stm32_bank->fpr_ofst != UNDEF_REG)
-               writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
 
        pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
 
index 4d85645c87f78721a83fcef94be1feb3bce8c094..0928fd1f0e0c134943c7dab3aeec7d0e5699eaf5 100644 (file)
@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
        if (m->clock2)
                test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
 
-       if (ent->device == 0xB410) {
+       if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+           ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
                test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
                test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
                test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
index 3525236ed8d9d702e25fac066926ba1933fe4edc..19c84214a7ea8890543ea8341033ed1ceb89df12 100644 (file)
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
 
        /* We also need to update CI for internal queues */
        if (cs->submitted) {
+               int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+               WARN_ONCE((cs_cnt < 0),
+                       "hl%d: error in CS active cnt %d\n",
+                       hdev->id, cs_cnt);
+
                hl_int_hw_queue_update_ci(cs);
 
                spin_lock(&hdev->hw_queues_mirror_lock);
index a53c12aff6ad9cebd9be4a2b031b9a93ea6c72b3..974a87789bd8689d1530daa8890bac3b3b32d38c 100644 (file)
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        enum vm_type_t *vm_type;
        bool once = true;
+       u64 j;
        int i;
 
        if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
                        } else {
                                phys_pg_pack = hnode->ptr;
                                seq_printf(s,
-                                       "    0x%-14llx      %-10u       %-4u\n",
+                                       "    0x%-14llx      %-10llu       %-4u\n",
                                        hnode->vaddr, phys_pg_pack->total_size,
                                        phys_pg_pack->handle);
                        }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
                                                phys_pg_pack->page_size);
                        seq_puts(s, "   physical address\n");
                        seq_puts(s, "---------------------\n");
-                       for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+                       for (j = 0 ; j < phys_pg_pack->npages ; j++) {
                                seq_printf(s, "    0x%-14llx\n",
-                                               phys_pg_pack->pages[i]);
+                                               phys_pg_pack->pages[j]);
                        }
                }
                spin_unlock(&vm->idr_lock);
index de46aa6ed1542438c5d5952ff77c9cc17dadc5a6..77d51be66c7e84045558fff78eea0a8e9a70439e 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 
+#define HL_PLDM_PENDING_RESET_PER_SEC  (HL_PENDING_RESET_PER_SEC * 10)
+
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 {
        if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
        spin_lock_init(&hdev->hw_queues_mirror_lock);
        atomic_set(&hdev->in_reset, 0);
        atomic_set(&hdev->fd_open_cnt, 0);
+       atomic_set(&hdev->cs_active_cnt, 0);
 
        return 0;
 
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
 
        pci_save_state(hdev->pdev);
 
+       /* Block future CS/VM/JOB completion operations */
+       rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+       if (rc) {
+               dev_err(hdev->dev, "Can't suspend while in reset\n");
+               return -EIO;
+       }
+
+       /* This blocks all other stuff that is not blocked by in_reset */
+       hdev->disabled = true;
+
+       /*
+        * Flush anyone that is inside the critical section of enqueue
+        * jobs to the H/W
+        */
+       hdev->asic_funcs->hw_queues_lock(hdev);
+       hdev->asic_funcs->hw_queues_unlock(hdev);
+
+       /* Flush processes that are sending message to CPU */
+       mutex_lock(&hdev->send_cpu_message_lock);
+       mutex_unlock(&hdev->send_cpu_message_lock);
+
        rc = hdev->asic_funcs->suspend(hdev);
        if (rc)
                dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
 
        pci_set_power_state(hdev->pdev, PCI_D0);
        pci_restore_state(hdev->pdev);
-       rc = pci_enable_device(hdev->pdev);
+       rc = pci_enable_device_mem(hdev->pdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to enable PCI device in resume\n");
                return rc;
        }
 
+       pci_set_master(hdev->pdev);
+
        rc = hdev->asic_funcs->resume(hdev);
        if (rc) {
-               dev_err(hdev->dev,
-                       "Failed to enable PCI access from device CPU\n");
-               return rc;
+               dev_err(hdev->dev, "Failed to resume device after suspend\n");
+               goto disable_device;
+       }
+
+
+       hdev->disabled = false;
+       atomic_set(&hdev->in_reset, 0);
+
+       rc = hl_device_reset(hdev, true, false);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to reset device during resume\n");
+               goto disable_device;
        }
 
        return 0;
+
+disable_device:
+       pci_clear_master(hdev->pdev);
+       pci_disable_device(hdev->pdev);
+
+       return rc;
 }
 
 static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
        struct hl_device_reset_work *device_reset_work =
                container_of(work, struct hl_device_reset_work, reset_work);
        struct hl_device *hdev = device_reset_work->hdev;
-       u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+       u16 pending_total, pending_cnt;
        struct task_struct *task = NULL;
 
+       if (hdev->pldm)
+               pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+       else
+               pending_total = HL_PENDING_RESET_PER_SEC;
+
+       pending_cnt = pending_total;
+
        /* Flush all processes that are inside hl_open */
        mutex_lock(&hdev->fd_open_cnt_lock);
 
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
                }
        }
 
+       pending_cnt = pending_total;
+
+       while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+               pending_cnt--;
+
+               ssleep(1);
+       }
+
+       if (atomic_read(&hdev->fd_open_cnt))
+               dev_crit(hdev->dev,
+                       "Going to hard reset with open user contexts\n");
+
        mutex_unlock(&hdev->fd_open_cnt_lock);
 
        hl_device_reset(hdev, true, true);
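The suspend path added above relies on a common flush idiom: raise a "disabled"/"in_reset" flag first, then take and immediately release the locks the hot paths hold, so any thread already inside those critical sections is guaranteed to have left them before suspend continues. A minimal userspace sketch of the same idea (pthreads, illustrative names only, not habanalabs code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool disabled;

    /* Hot path: checks the flag and does its work under the lock. */
    static void enqueue_job(int job)
    {
            pthread_mutex_lock(&queue_lock);
            if (!disabled)
                    printf("enqueued job %d\n", job);
            pthread_mutex_unlock(&queue_lock);
    }

    /* Suspend path: raise the flag, then lock+unlock once to "flush"
     * any thread that raced past the flag check before it was set.
     */
    static void suspend_flush(void)
    {
            disabled = true;                    /* block new work */
            pthread_mutex_lock(&queue_lock);
            pthread_mutex_unlock(&queue_lock);
            /* From here on, nobody is still inside the critical section. */
    }

    int main(void)
    {
            enqueue_job(1);
            suspend_flush();
            enqueue_job(2);                     /* rejected: disabled */
            return 0;
    }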
index 238dd57c541bdf1e632f8ff008f69bafc3e5e59a..ea979ebd62fb8c5f30d08b052a0e481325470ece 100644 (file)
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
-       WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
 /*
  * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
  *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
-       WREG32(mmMME_QM_GLBL_CFG1, 0);
-       WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC0_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC1_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC2_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC3_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC4_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC5_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC6_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC7_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
 static void goya_dma_stall(struct hl_device *hdev)
 {
        WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
 {
        int rc;
 
-       rc = goya_stop_internal_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop internal queues\n");
-               return rc;
-       }
-
-       rc = goya_stop_external_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop external queues\n");
-               return rc;
-       }
-
        rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
        if (rc)
                dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
 
 int goya_resume(struct hl_device *hdev)
 {
-       int rc;
-
-       goya_resume_external_queues(hdev);
-       goya_resume_internal_queues(hdev);
-
-       rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
-       if (rc)
-               dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
-       return rc;
+       return goya_init_iatu(hdev);
 }
 
 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 
        *dma_handle = hdev->asic_prop.sram_base_address;
 
-       base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+       base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
 
        switch (queue_id) {
        case GOYA_QUEUE_ID_MME:
index a7c95e9f9b9a8808efa70651e66c34625ac82d0a..a8ee52c880cd800651681b866048126b2e9fc478 100644 (file)
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
  * @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
  * @mapping_cnt: number of shared mappings.
  * @asid: the context related to this list.
- * @npages: num physical pages in the pack.
  * @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
  * @flags: HL_MEM_* flags related to this list.
  * @handle: the provided handle related to this list.
  * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
 struct hl_vm_phys_pg_pack {
        enum vm_type_t          vm_type; /* must be first */
        u64                     *pages;
+       u64                     npages;
+       u64                     total_size;
        atomic_t                mapping_cnt;
        u32                     asid;
-       u32                     npages;
        u32                     page_size;
-       u32                     total_size;
        u32                     flags;
        u32                     handle;
        u32                     offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
  * @cb_pool_lock: protects the CB pool.
  * @user_ctx: current user context executing.
  * @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
  *             value is saved so in case of hard-reset, KMD will restore this
  *             value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ *                 means already in H/W queues)
  * @major: habanalabs KMD major.
  * @high_pll: high PLL profile frequency.
  * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
        struct hl_ctx                   *user_ctx;
 
        atomic64_t                      dram_used_mem;
+       u64                             timeout_jiffies;
+       u64                             max_power;
        atomic_t                        in_reset;
        atomic_t                        curr_pll_profile;
        atomic_t                        fd_open_cnt;
-       u64                             timeout_jiffies;
-       u64                             max_power;
+       atomic_t                        cs_active_cnt;
        u32                             major;
        u32                             high_pll;
        u32                             soft_reset_cnt;
index 67bece26417cbe930fa018abdb33c88ba8618b23..ef3bb695136025971c76b916a97dde8a4b36905b 100644 (file)
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
                spin_unlock(&hdev->hw_queues_mirror_lock);
        }
 
-       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+       atomic_inc(&hdev->cs_active_cnt);
+
+       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                if (job->ext_queue)
                        ext_hw_queue_schedule_job(job);
                else
                        int_hw_queue_schedule_job(job);
-       }
 
        cs->submitted = true;
 
index 3a12fd1a5274479e89406947991fd709203e6726..ce1fda40a8b8112572b9a26db139c8aa6de76f8e 100644 (file)
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
-       u64 paddr = 0;
-       u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-       int handle, rc, i;
+       u64 paddr = 0, total_size, num_pgs, i;
+       u32 num_curr_pgs, page_size, page_shift;
+       int handle, rc;
        bool contiguous;
 
        num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
-                               "failed to allocate %u huge contiguous pages\n",
+                               "failed to allocate %llu huge contiguous pages\n",
                                num_pgs);
                        return -ENOMEM;
                }
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        phys_pg_pack->flags = args->flags;
        phys_pg_pack->contiguous = contiguous;
 
-       phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
                                        page_size);
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
 pages_arr_err:
        kfree(phys_pg_pack);
 pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_vm *vm = &hdev->vm;
-       int i;
+       u64 i;
 
        if (!phys_pg_pack->created_from_userptr) {
                if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                }
        }
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
        kfree(phys_pg_pack);
 }
 
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-               struct hl_va_range *va_range, u32 size, u64 hint_addr,
+               struct hl_va_range *va_range, u64 size, u64 hint_addr,
                bool is_userptr)
 {
        struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
        }
 
        if (!new_va_block) {
-               dev_err(hdev->dev, "no available va block for size %u\n", size);
+               dev_err(hdev->dev, "no available va block for size %llu\n",
+                               size);
                goto out;
        }
 
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct scatterlist *sg;
        dma_addr_t dma_addr;
-       u64 page_mask;
-       u32 npages, total_npages, page_size = PAGE_SIZE;
+       u64 page_mask, total_npages;
+       u32 npages, page_size = PAGE_SIZE;
        bool first = true, is_huge_page_opt = true;
        int rc, i, j;
 
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
        page_mask = ~(((u64) page_size) - 1);
 
-       phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+                                               GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 next_vaddr = vaddr, paddr;
+       u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
        u32 page_size = phys_pg_pack->page_size;
-       int i, rc = 0, mapped_pg_cnt = 0;
+       int rc = 0;
 
        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
                if (rc) {
                        dev_err(hdev->dev,
-                               "map failed for handle %u, npages: %d, mapped: %d",
+                               "map failed for handle %u, npages: %llu, mapped: %llu",
                                phys_pg_pack->handle, phys_pg_pack->npages,
                                mapped_pg_cnt);
                        goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
        struct hl_vm_hash_node *hnode = NULL;
        struct hl_userptr *userptr = NULL;
        enum vm_type_t *vm_type;
-       u64 next_vaddr;
+       u64 next_vaddr, i;
        u32 page_size;
        bool is_userptr;
-       int i, rc;
+       int rc;
 
        /* protect from double entrance */
        mutex_lock(&ctx->mem_hash_lock);
index 2f2e99cb27439433bd4527350b2347a6856cab5d..3a5a2cec83051b08c1b838372aaf29c0f1b99e13 100644 (file)
@@ -832,7 +832,7 @@ err:
 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 real_virt_addr;
+       u64 real_virt_addr, real_phys_addr;
        u32 real_page_size, npages;
        int i, rc, mapped_cnt = 0;
 
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 
        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;
+       real_phys_addr = phys_addr;
 
        for (i = 0 ; i < npages ; i++) {
-               rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+               rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
                                real_page_size);
                if (rc)
                        goto err;
 
                real_virt_addr += real_page_size;
+               real_phys_addr += real_page_size;
                mapped_cnt++;
        }
 
index c712b7deb3a9d88e62416005d2444e908a4fb664..82a97866e0cf4c857cbb25c5487a3c6f80addfa0 100644 (file)
@@ -1044,14 +1044,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
        mmc->caps2 = MMC_CAP2_NO_SDIO;
        mmc->ops = &alcor_sdc_ops;
 
-       /* Hardware cannot do scatter lists */
+       /* The hardware does DMA data transfer of 4096 bytes to/from a single
+        * buffer address. Scatterlists are not supported, but upon DMA
+        * completion (signalled via IRQ), the original vendor driver does
+        * then immediately set up another DMA transfer of the next 4096
+        * bytes.
+        *
+        * This means that we need to handle the I/O in 4096 byte chunks.
+        * Lacking a way to limit the sglist entries to 4096 bytes, we instead
+        * impose that only one segment is provided, with maximum size 4096,
+        * which also happens to be the minimum size. This means that the
+        * single-entry sglist handled by this driver can be handed directly
+        * to the hardware, nice and simple.
+        *
+        * Unfortunately though, that means we only do 4096 bytes of I/O
+        * per MMC command. A future improvement would be to make the driver
+        * accept sg lists and entries of any size, and simply iterate
+        * through them 4096 bytes at a time.
+        */
        mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
        mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
-
-       mmc->max_blk_size = mmc->max_seg_size;
-       mmc->max_blk_count = mmc->max_segs;
-
-       mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
+       mmc->max_req_size = mmc->max_seg_size;
 }
 
 static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
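The new comment above explains the constraint behind these limits: the controller only DMAs a single 4096-byte buffer per transfer, so a larger request has to be driven as back-to-back 4096-byte chunks. A rough, self-contained illustration of that chunking loop (plain C; transfer_chunk() is a hypothetical stand-in for programming one DMA transfer, not an alcor_pci function):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define CHUNK_SIZE 4096u

    /* Stand-in for programming one DMA transfer of at most CHUNK_SIZE bytes. */
    static void transfer_chunk(const unsigned char *buf, size_t len)
    {
            printf("DMA %zu bytes starting at %p\n", len, (const void *)buf);
    }

    /* Drive a large request as consecutive 4096-byte transfers, the way the
     * vendor driver re-arms DMA from its completion interrupt.
     */
    static void transfer_request(const unsigned char *buf, size_t total)
    {
            size_t done = 0;

            while (done < total) {
                    size_t len = total - done;

                    if (len > CHUNK_SIZE)
                            len = CHUNK_SIZE;
                    transfer_chunk(buf + done, len);
                    done += len;
            }
    }

    int main(void)
    {
            static unsigned char req[10000];

            memset(req, 0xa5, sizeof(req));
            transfer_request(req, sizeof(req));   /* 4096 + 4096 + 1808 bytes */
            return 0;
    }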
index 49e0daf2ef5e1a99cf13eb1d7b46d71f0195a9c5..f37003df1e016f0b3b9cec2368ae23a3b7482dd2 100644 (file)
@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
 {
 }
 #endif
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+static void init_mmcsd_host(struct mmc_davinci_host *host)
 {
 
        mmc_davinci_reset_ctrl(host, 1);
index d54612257b068441ae3ffe3aaa9b7d7f85a69182..45f7b9b53d48267f448f4d19c357391e9621a90b 100644 (file)
@@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
        struct scatterlist *sg;
        int i;
 
-       for_each_sg(data->sg, sg, data->sg_len, i) {
-               void *buf = kmap_atomic(sg_page(sg) + sg->offset);
-               buffer_swap32(buf, sg->length);
-               kunmap_atomic(buf);
-       }
+       for_each_sg(data->sg, sg, data->sg_len, i)
+               buffer_swap32(sg_virt(sg), sg->length);
 }
 #else
 static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
 {
        struct mmc_data *data = host->req->data;
        struct scatterlist *sg;
-       void *buf;
        int stat, i;
 
        host->data = data;
@@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
 
        if (data->flags & MMC_DATA_READ) {
                for_each_sg(data->sg, sg, data->sg_len, i) {
-                       buf = kmap_atomic(sg_page(sg) + sg->offset);
-                       stat = mxcmci_pull(host, buf, sg->length);
-                       kunmap(buf);
+                       stat = mxcmci_pull(host, sg_virt(sg), sg->length);
                        if (stat)
                                return stat;
                        host->datasize += sg->length;
                }
        } else {
                for_each_sg(data->sg, sg, data->sg_len, i) {
-                       buf = kmap_atomic(sg_page(sg) + sg->offset);
-                       stat = mxcmci_push(host, buf, sg->length);
-                       kunmap(buf);
+                       stat = mxcmci_push(host, sg_virt(sg), sg->length);
                        if (stat)
                                return stat;
                        host->datasize += sg->length;
index c907bf502a123b5b588d8a70e3446fca1da20a66..c1d3f0e3892131a46192a68e12807d39b1a36c69 100644 (file)
@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 {
        struct dma_async_tx_descriptor *tx;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        struct dma_slave_config config;
        struct dma_chan *chan;
        unsigned int nob = data->blocks;
index 71e13844df6c0deaa1a498140e8f553a04ad3148..8742e27e4e8bca8acdb73c37e334cc7f22b2d01f 100644 (file)
@@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        struct renesas_sdhi *priv;
        struct resource *res;
        int irq, ret, i;
+       u16 ver;
 
        of_data = of_device_get_match_data(&pdev->dev);
 
@@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        if (ret)
                goto efree;
 
+       ver = sd_ctrl_read16(host, CTL_VERSION);
+       /* GEN2_SDR104 is first known SDHI to use 32bit block count */
+       if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
+               mmc_data->max_blk_count = U16_MAX;
+
        ret = tmio_mmc_host_probe(host);
        if (ret < 0)
                goto edisclk;
 
        /* One Gen2 SDHI incarnation does NOT have a CBSY bit */
-       if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
+       if (ver == SDHI_VER_GEN2_SDR50)
                mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
 
        /* Enable tuning iff we have an SCC and a supported mode */
index b1a66ca3821a51f97be942ee9596897008e64215..5bbed477c9b1ee6546f066e55fd9946ed4a160b6 100644 (file)
@@ -1056,6 +1056,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
                        mmc->f_max = 48000000;
        }
 
+       if (!mmc_can_gpio_ro(mmc))
+               mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
        pltfm_host->clk = devm_clk_get(dev, "fck");
        if (IS_ERR(pltfm_host->clk)) {
                ret = PTR_ERR(pltfm_host->clk);
index 5e4ca082cfcdb29845326adf3341d1dd3b461811..7a96d168efc41dce1510fbac29522ce06851a8db 100644 (file)
@@ -216,8 +216,8 @@ config GENEVE
 
 config GTP
        tristate "GPRS Tunneling Protocol datapath (GTP-U)"
-       depends on INET && NET_UDP_TUNNEL
-       select NET_IP_TUNNEL
+       depends on INET
+       select NET_UDP_TUNNEL
        ---help---
          This allows one to create gtp virtual interfaces that provide
          the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
index 576b37d12a63ca4ea5064cd568194ec25fa22ee5..c4fa400efdcc82643dcd4d2c762ed8079305adf7 100644 (file)
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
                qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
 }
 
+static u32
+qca8k_port_to_phy(int port)
+{
+       /* From Andrew Lunn:
+        * Port 0 has no internal PHY.
+        * Port 1 has an internal PHY at MDIO address 0.
+        * Port 2 has an internal PHY at MDIO address 1.
+        * ...
+        * Port 5 has an internal PHY at MDIO address 4.
+        * Port 6 has no internal PHY.
+        */
+
+       return port - 1;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
+{
+       u32 phy, val;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       /* The caller is responsible for not passing bad ports,
+        * but we still clamp the PHY address so it cannot go out of range.
+        */
+       phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+             QCA8K_MDIO_MASTER_DATA(data);
+
+       qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+       return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+               QCA8K_MDIO_MASTER_BUSY);
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
+{
+       u32 phy, val;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       /* The caller is responsible for not passing bad ports,
+        * but we still clamp the PHY address so it cannot go out of range.
+        */
+       phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+       qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+       if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+                           QCA8K_MDIO_MASTER_BUSY))
+               return -ETIMEDOUT;
+
+       val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
+               QCA8K_MDIO_MASTER_DATA_MASK);
+
+       return val;
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       return qca8k_mdio_write(priv, port, regnum, data);
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       ret = qca8k_mdio_read(priv, port, regnum);
+
+       if (ret < 0)
+               return 0xffff;
+
+       return ret;
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+       u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+       struct device_node *ports, *port;
+       int err;
+
+       ports = of_get_child_by_name(priv->dev->of_node, "ports");
+       if (!ports)
+               return -EINVAL;
+
+       for_each_available_child_of_node(ports, port) {
+               err = of_property_read_u32(port, "reg", &reg);
+               if (err)
+                       return err;
+
+               if (!dsa_is_user_port(priv->ds, reg))
+                       continue;
+
+               if (of_property_read_bool(port, "phy-handle"))
+                       external_mdio_mask |= BIT(reg);
+               else
+                       internal_mdio_mask |= BIT(reg);
+       }
+
+       if (!external_mdio_mask && !internal_mdio_mask) {
+               dev_err(priv->dev, "no PHYs are defined.\n");
+               return -EINVAL;
+       }
+
+       /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
+        * the MDIO_MASTER register also _disconnects_ the external MDC
+        * passthrough to the internal PHYs. It's not possible to use both
+        * configurations at the same time!
+        *
+        * Because this came up during the review process:
+        * If the external mdio-bus driver were capable of magically
+        * disabling QCA8K_MDIO_MASTER_EN and of mutex/spin-locking out the
+        * qca8k accessors for that period, it would be possible to pull
+        * this off.
+        */
+       if (!!external_mdio_mask && !!internal_mdio_mask) {
+               dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+               return -EINVAL;
+       }
+
+       if (external_mdio_mask) {
+               /* Make sure to disable the internal mdio bus in case
+                * a dt-overlay and driver reload changed the configuration.
+                */
+
+               qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
+                               QCA8K_MDIO_MASTER_EN);
+               return 0;
+       }
+
+       priv->ops.phy_read = qca8k_phy_read;
+       priv->ops.phy_write = qca8k_phy_write;
+       return 0;
+}
+
 static int
 qca8k_setup(struct dsa_switch *ds)
 {
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
        if (IS_ERR(priv->regmap))
                pr_warn("regmap initialization failed");
 
+       ret = qca8k_setup_mdio_bus(priv);
+       if (ret)
+               return ret;
+
        /* Initialize CPU port pad mode (xMII type, delays...) */
        phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
        if (phy_mode < 0) {
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
        qca8k_port_set_status(priv, port, 1);
 }
 
-static int
-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       return mdiobus_read(priv->bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       return mdiobus_write(priv->bus, phy, regnum, val);
-}
-
 static void
 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
 {
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
        .setup                  = qca8k_setup,
        .adjust_link            = qca8k_adjust_link,
        .get_strings            = qca8k_get_strings,
-       .phy_read               = qca8k_phy_read,
-       .phy_write              = qca8k_phy_write,
        .get_ethtool_stats      = qca8k_get_ethtool_stats,
        .get_sset_count         = qca8k_get_sset_count,
        .get_mac_eee            = qca8k_get_mac_eee,
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
                return -ENOMEM;
 
        priv->ds->priv = priv;
-       priv->ds->ops = &qca8k_switch_ops;
+       priv->ops = qca8k_switch_ops;
+       priv->ds->ops = &priv->ops;
        mutex_init(&priv->reg_mutex);
        dev_set_drvdata(&mdiodev->dev, priv);
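qca8k_setup_mdio_bus(), added above, walks the "ports" node to decide whether the switch PHYs are reached through the chip's internal MDIO master or through the external bus, and rejects mixed configurations because the two are mutually exclusive. A stripped-down sketch of that decision logic (plain C, with a fake port table standing in for the of_property_read_u32()/phy-handle lookups):

    #include <stdbool.h>
    #include <stdio.h>

    struct port_desc {
            int reg;             /* switch port number from the "reg" property  */
            bool user_port;      /* user-facing port (not the CPU port)          */
            bool has_phy_handle; /* "phy-handle" present => external MDIO bus    */
    };

    /* Returns 0 on success, <0 for an unsupported configuration. */
    static int pick_mdio_bus(const struct port_desc *ports, int n, bool *use_internal)
    {
            unsigned int internal_mask = 0, external_mask = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (!ports[i].user_port)
                            continue;
                    if (ports[i].has_phy_handle)
                            external_mask |= 1u << ports[i].reg;
                    else
                            internal_mask |= 1u << ports[i].reg;
            }

            if (!internal_mask && !external_mask)
                    return -1;      /* no PHYs described at all */
            if (internal_mask && external_mask)
                    return -1;      /* mixing the two buses is not supported */

            *use_internal = internal_mask != 0;
            return 0;
    }

    int main(void)
    {
            const struct port_desc ports[] = {
                    { .reg = 1, .user_port = true,  .has_phy_handle = false },
                    { .reg = 2, .user_port = true,  .has_phy_handle = false },
                    { .reg = 0, .user_port = false, .has_phy_handle = false }, /* CPU port */
            };
            bool internal;

            if (pick_mdio_bus(ports, 3, &internal) == 0)
                    printf("using %s MDIO bus\n", internal ? "internal" : "external");
            return 0;
    }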
 
index d146e54c8a6c615045ff18b31b413fba08365221..249fd62268e5450ff41fa33ef7d7ba055d35461c 100644 (file)
 #define   QCA8K_MIB_FLUSH                              BIT(24)
 #define   QCA8K_MIB_CPU_KEEP                           BIT(20)
 #define   QCA8K_MIB_BUSY                               BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL                         0x3c
+#define   QCA8K_MDIO_MASTER_BUSY                       BIT(31)
+#define   QCA8K_MDIO_MASTER_EN                         BIT(30)
+#define   QCA8K_MDIO_MASTER_READ                       BIT(27)
+#define   QCA8K_MDIO_MASTER_WRITE                      0
+#define   QCA8K_MDIO_MASTER_SUP_PRE                    BIT(26)
+#define   QCA8K_MDIO_MASTER_PHY_ADDR(x)                        ((x) << 21)
+#define   QCA8K_MDIO_MASTER_REG_ADDR(x)                        ((x) << 16)
+#define   QCA8K_MDIO_MASTER_DATA(x)                    (x)
+#define   QCA8K_MDIO_MASTER_DATA_MASK                  GENMASK(15, 0)
+#define   QCA8K_MDIO_MASTER_MAX_PORTS                  5
+#define   QCA8K_MDIO_MASTER_MAX_REG                    32
 #define QCA8K_GOL_MAC_ADDR0                            0x60
 #define QCA8K_GOL_MAC_ADDR1                            0x64
 #define QCA8K_REG_PORT_STATUS(_i)                      (0x07c + (_i) * 4)
@@ -169,6 +181,7 @@ struct qca8k_priv {
        struct dsa_switch *ds;
        struct mutex reg_mutex;
        struct device *dev;
+       struct dsa_switch_ops ops;
 };
 
 struct qca8k_mib_desc {
index 808abb6b367134e76a79dd2a9bf857d02559af65..b15752267c8dfde6d40b6296154e2cd350b185e4 100644 (file)
@@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
 static void set_rx_mode(struct net_device *dev)
 {
        int ioaddr = dev->base_addr;
-       short new_mode;
+       unsigned short new_mode;
 
        if (dev->flags & IFF_PROMISC) {
                if (corkscrew_debug > 3)
index 342ae08ec3c29832ae5be0da8d93e59d6441cab1..d60a86aa8aa8049e7c5216f15f64b6d8406ec115 100644 (file)
@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
 static void dayna_block_output(struct net_device *dev, int count,
                               const unsigned char *buf, int start_page);
 
-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
-
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
 static void slow_sane_get_8390_hdr(struct net_device *dev,
                                   struct e8390_pkt_hdr *hdr, int ring_page);
@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
 
 static enum mac8390_access mac8390_testio(unsigned long membase)
 {
-       unsigned long outdata = 0xA5A0B5B0;
-       unsigned long indata =  0x00000000;
+       u32 outdata = 0xA5A0B5B0;
+       u32 indata = 0;
+
        /* Try writing 32 bits */
-       memcpy_toio((void __iomem *)membase, &outdata, 4);
-       /* Now compare them */
-       if (memcmp_withio(&outdata, membase, 4) == 0)
+       nubus_writel(outdata, membase);
+       /* Now read it back */
+       indata = nubus_readl(membase);
+       if (outdata == indata)
                return ACCESS_32;
+
+       outdata = 0xC5C0D5D0;
+       indata = 0;
+
        /* Write 16 bit output */
        word_memcpy_tocard(membase, &outdata, 4);
        /* Now read it back */
        word_memcpy_fromcard(&indata, membase, 4);
        if (outdata == indata)
                return ACCESS_16;
+
        return ACCESS_UNKNOWN;
 }
 
index 74550ccc7a20ff8437463384e906b718027dc6ef..e2ffb159cbe2eeb5980a89aa688ebde8826fc7e6 100644 (file)
@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
        }
        if (buff->is_ip_cso) {
                __skb_incr_checksum_unnecessary(skb);
-               if (buff->is_udp_cso || buff->is_tcp_cso)
-                       __skb_incr_checksum_unnecessary(skb);
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }
+
+       if (buff->is_udp_cso || buff->is_tcp_cso)
+               __skb_incr_checksum_unnecessary(skb);
 }
 
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
index ad099fd01b45ae947492e828337c76df6d701587..1522aee81884bdf702b32e1cd8cbae4316e988b5 100644 (file)
@@ -3370,14 +3370,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
                *hclk = devm_clk_get(&pdev->dev, "hclk");
        }
 
-       if (IS_ERR(*pclk)) {
+       if (IS_ERR_OR_NULL(*pclk)) {
                err = PTR_ERR(*pclk);
+               if (!err)
+                       err = -ENODEV;
+
                dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
                return err;
        }
 
-       if (IS_ERR(*hclk)) {
+       if (IS_ERR_OR_NULL(*hclk)) {
                err = PTR_ERR(*hclk);
+               if (!err)
+                       err = -ENODEV;
+
                dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
                return err;
        }
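The macb change above guards against *pclk/*hclk being NULL as well as an ERR_PTR(). Since PTR_ERR(NULL) is 0, the error branch has to substitute -ENODEV, or a NULL clock would be reported as success. A tiny illustration of why that substitution is needed (plain C, with simplified IS_ERR()/PTR_ERR() stand-ins, not the kernel headers):

    #include <stdio.h>

    #define MAX_ERRNO       4095
    #define ENODEV          19

    /* Simplified versions of the kernel's ERR_PTR helpers. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    static inline int IS_ERR_OR_NULL(const void *ptr)
    {
            return !ptr || IS_ERR(ptr);
    }

    static int check_clk(const void *clk)
    {
            if (IS_ERR_OR_NULL(clk)) {
                    int err = PTR_ERR(clk);

                    if (!err)               /* NULL: PTR_ERR() is 0, not an errno */
                            err = -ENODEV;
                    return err;
            }
            return 0;
    }

    int main(void)
    {
            printf("valid: %d\n", check_clk((void *)0x1000));    /* 0   */
            printf("error: %d\n", check_clk(ERR_PTR(-ENODEV)));  /* -19 */
            printf("null:  %d\n", check_clk(NULL));              /* -19 */
            return 0;
    }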
index 3130b43bba52c9570e76223bf2779c3f3c076c34..02959035ed3f21287a3673f93c55f0e76b549de1 100644 (file)
@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
        }
 
        /* should never happen! */
-       BUG_ON(1);
+       BUG();
        return NULL;
 }
 
index 88773ca58e6b1fc45dce1eeea8064174b67407d9..b3da81e90132fd74d26b007ca5414a066547774f 100644 (file)
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
                break;
 
        default:
-               BUG_ON(1);
+               BUG();
        }
 
        return buf_size;
index 2ba49e959c3fd391115740988ae44b3c0698b4d5..dc339dc1adb21c30224fbce6eb0d60fd861c9388 100644 (file)
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
         */
        queue_mapping = skb_get_queue_mapping(skb);
        fq = &priv->fq[queue_mapping];
+
+       fd_len = dpaa2_fd_get_len(&fd);
+       nq = netdev_get_tx_queue(net_dev, queue_mapping);
+       netdev_tx_sent_queue(nq, fd_len);
+
+       /* Everything that happens after this enqueue might race with
+        * the Tx confirmation callback for this frame.
+        */
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
                err = priv->enqueue(priv, fq, &fd, 0);
                if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
                percpu_stats->tx_errors++;
                /* Clean up everything, including freeing the skb */
                free_tx_fd(priv, fq, &fd, false);
+               netdev_tx_completed_queue(nq, 1, fd_len);
        } else {
-               fd_len = dpaa2_fd_get_len(&fd);
                percpu_stats->tx_packets++;
                percpu_stats->tx_bytes += fd_len;
-
-               nq = netdev_get_tx_queue(net_dev, queue_mapping);
-               netdev_tx_sent_queue(nq, fd_len);
        }
 
        return NETDEV_TX_OK;
@@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
        dpaa2_fd_set_format(&fd, dpaa2_fd_single);
        dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
 
-       fq = &priv->fq[smp_processor_id()];
+       fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
                err = priv->enqueue(priv, fq, &fd, 0);
                if (err != -EBUSY)
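The Tx change above moves netdev_tx_sent_queue() to before the hardware enqueue: once the frame is enqueued, the Tx-confirmation path may already be completing it on another CPU, so the bytes must be accounted first, and the error path now has to undo the accounting itself. A minimal single-threaded sketch of that ordering invariant (plain C; the counters stand in for the BQL helpers):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long bql_sent, bql_completed;

    /* Stand-ins for netdev_tx_sent_queue()/netdev_tx_completed_queue(). */
    static void tx_sent(unsigned int bytes)      { bql_sent += bytes; }
    static void tx_completed(unsigned int bytes) { bql_completed += bytes; }

    /* Pretend hardware enqueue; flip 'ok' to exercise the error path. */
    static bool hw_enqueue(unsigned int bytes, bool ok)
    {
            (void)bytes;
            return ok;
    }

    static void xmit(unsigned int bytes, bool hw_ok)
    {
            /* Account the bytes *before* enqueueing: after the enqueue the
             * completion path may already be running and must see them counted.
             */
            tx_sent(bytes);

            if (!hw_enqueue(bytes, hw_ok))
                    tx_completed(bytes);    /* enqueue failed: undo the accounting */
    }

    int main(void)
    {
            xmit(1500, true);
            xmit(1500, false);
            printf("sent=%lu completed=%lu in-flight=%lu\n",
                   bql_sent, bql_completed, bql_sent - bql_completed);
            return 0;
    }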
index 1c1f17ec6be2dfa11cd1208b0b04273f07855c10..162cb9afa0e705d1e7d668c1cf34eae64f4d65f2 100644 (file)
@@ -22,6 +22,7 @@
 #include "hns3_enet.h"
 
 #define hns3_set_field(origin, shift, val)     ((origin) |= ((val) << (shift)))
+#define hns3_tx_bd_count(S)    DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
 
 static void hns3_clear_all_ring(struct hnae3_handle *h);
 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
        desc_cb->length = size;
 
-       frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+       frag_buf_num = hns3_tx_bd_count(size);
        sizeoflast = size & HNS3_TX_LAST_SIZE_M;
        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 
@@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
        int i;
 
        size = skb_headlen(skb);
-       buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+       buf_num = hns3_tx_bd_count(size);
 
        frag_num = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < frag_num; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                size = skb_frag_size(frag);
-               bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
-                                HNS3_MAX_BD_SIZE_OFFSET;
+               bdnum_for_frag = hns3_tx_bd_count(size);
                if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
                        return -ENOMEM;
 
@@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
        }
 
        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-               buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
-                         HNS3_MAX_BD_SIZE_OFFSET;
+               buf_num = hns3_tx_bd_count(skb->len);
                if (ring_space(ring) < buf_num)
                        return -EBUSY;
                /* manual split the send packet */
@@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
        buf_num = skb_shinfo(skb)->nr_frags + 1;
 
        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-               buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+               buf_num = hns3_tx_bd_count(skb->len);
                if (ring_space(ring) < buf_num)
                        return -EBUSY;
                /* manual split the send packet */
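The hns3 change above replaces an open-coded ">> HNS3_MAX_BD_SIZE_OFFSET" with DIV_ROUND_UP(): since HNS3_MAX_BD_SIZE is 65535, shifting by 16 actually divided by 65536 and could under-count the buffer descriptors by one (a 65536-byte fragment needs two BDs, not one). A tiny check of the two formulas (plain C; DIV_ROUND_UP spelled out the way the kernel macro expands):

    #include <stdio.h>

    #define MAX_BD_SIZE             65535u
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int size = 65536;      /* one byte more than a single BD holds */
            unsigned int old_cnt = (size + MAX_BD_SIZE - 1) >> 16;     /* old formula */
            unsigned int new_cnt = DIV_ROUND_UP(size, MAX_BD_SIZE);    /* new formula */

            printf("size=%u old=%u new=%u\n", size, old_cnt, new_cnt); /* old=1, new=2 */
            return 0;
    }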
index 1db0bd41d20961f931f464850b8e5f1395276f76..75669cd0c31145fd763959f226175452dbb399bf 100644 (file)
@@ -193,7 +193,6 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_INITED                     1
 
 #define HNS3_MAX_BD_SIZE                       65535
-#define HNS3_MAX_BD_SIZE_OFFSET                16
 #define HNS3_MAX_BD_PER_FRAG                   8
 #define HNS3_MAX_BD_PER_PKT                    MAX_SKB_FRAGS
 
index 3baabdc897262698ab23b4bc1dedec22edc89919..90b62c1412c8f4715eaf1ab3ca14a9128f1f9046 100644 (file)
@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
 
        if (ehea_add_adapter_mr(adapter)) {
                pr_err("creating MR failed\n");
+               of_node_put(eth_dn);
                return -EIO;
        }
 
index 370ca94b677586728541bec1099acd2ef6dc227a..b8ba74de95558f84c29b26c80fb1ccb30889b83f 100644 (file)
@@ -40,6 +40,9 @@
 #include "mlx5_core.h"
 #include "lib/eq.h"
 
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+                              struct mlx5_core_dct *dct);
+
 static struct mlx5_core_rsc_common *
 mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
 {
@@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
        wait_for_completion(&qp->common.free);
 }
 
+static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+                                 struct mlx5_core_dct *dct, bool need_cleanup)
+{
+       u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
+       struct mlx5_core_qp *qp = &dct->mqp;
+       int err;
+
+       err = mlx5_core_drain_dct(dev, dct);
+       if (err) {
+               if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+                       goto destroy;
+               } else {
+                       mlx5_core_warn(
+                               dev, "failed drain DCT 0x%x with error 0x%x\n",
+                               qp->qpn, err);
+                       return err;
+               }
+       }
+       wait_for_completion(&dct->drained);
+destroy:
+       if (need_cleanup)
+               destroy_resource_common(dev, &dct->mqp);
+       MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+       MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+       MLX5_SET(destroy_dct_in, in, uid, qp->uid);
+       err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+                           (void *)&out, sizeof(out));
+       return err;
+}
+
 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
                         struct mlx5_core_dct *dct,
-                        u32 *in, int inlen)
+                        u32 *in, int inlen,
+                        u32 *out, int outlen)
 {
-       u32 out[MLX5_ST_SZ_DW(create_dct_out)]   = {0};
-       u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
-       u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
        struct mlx5_core_qp *qp = &dct->mqp;
        int err;
 
        init_completion(&dct->drained);
        MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
 
-       err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+       err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
                return err;
@@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
 
        return 0;
 err_cmd:
-       MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
-       MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
-       MLX5_SET(destroy_dct_in, din, uid, qp->uid);
-       mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
-                     (void *)&out, sizeof(dout));
+       _mlx5_core_destroy_dct(dev, dct, false);
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
@@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
                          struct mlx5_core_dct *dct)
 {
-       u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
-       struct mlx5_core_qp *qp = &dct->mqp;
-       int err;
-
-       err = mlx5_core_drain_dct(dev, dct);
-       if (err) {
-               if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-                       goto destroy;
-               } else {
-                       mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
-                       return err;
-               }
-       }
-       wait_for_completion(&dct->drained);
-destroy:
-       destroy_resource_common(dev, &dct->mqp);
-       MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
-       MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
-       MLX5_SET(destroy_dct_in, in, uid, qp->uid);
-       err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
-                           (void *)&out, sizeof(out));
-       return err;
+       return _mlx5_core_destroy_dct(dev, dct, true);
 }
 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
 
index 7a15e932ed2f5c8ddaee26ab078c943786cac421..c1c1965d7accabca443888932c30090564433d7c 100644 (file)
@@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
                return 0;
        default:
                /* Do not consider thresholds for zero temperature. */
-               if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) {
+               if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
                        *temp = 0;
                        return 0;
                }
index bd6e9014bc74794b9a8a7e680f5b59ea7048382f..7849119d407aef1a7b92d6b0e047a8f74b4867f7 100644 (file)
@@ -142,6 +142,12 @@ struct ks8851_net {
 
 static int msg_enable;
 
+/* SPI frame opcodes */
+#define KS_SPIOP_RD    (0x00)
+#define KS_SPIOP_WR    (0x40)
+#define KS_SPIOP_RXFIFO        (0x80)
+#define KS_SPIOP_TXFIFO        (0xC0)
+
 /* shift for byte-enable data */
 #define BYTE_EN(_x)    ((_x) << 2)
 
@@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                /* set dma read address */
                ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
 
-               /* start the packet dma process, and set auto-dequeue rx */
-               ks8851_wrreg16(ks, KS_RXQCR,
-                              ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+               /* start DMA access */
+               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
 
                if (rxlen > 4) {
                        unsigned int rxalign;
@@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                        }
                }
 
-               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+               /* end DMA access and dequeue packet */
+               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
        }
 }
 
@@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
 static int ks8851_net_open(struct net_device *dev)
 {
        struct ks8851_net *ks = netdev_priv(dev);
+       int ret;
+
+       ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                  dev->name, ks);
+       if (ret < 0) {
+               netdev_err(dev, "failed to get irq\n");
+               return ret;
+       }
 
        /* lock the card, even if we may not actually be doing anything
         * else at the moment */
@@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
        netif_dbg(ks, ifup, ks->netdev, "network device up\n");
 
        mutex_unlock(&ks->lock);
+       mii_check_link(&ks->mii);
        return 0;
 }
 
@@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
                dev_kfree_skb(txb);
        }
 
+       free_irq(dev->irq, ks);
+
        return 0;
 }
 
@@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
 
        spi_set_drvdata(spi, ks);
 
+       netif_carrier_off(ks->netdev);
        ndev->if_port = IF_PORT_100BASET;
        ndev->netdev_ops = &ks8851_netdev_ops;
        ndev->irq = spi->irq;
@@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
        ks8851_read_selftest(ks);
        ks8851_init_mac(ks);
 
-       ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
-                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                  ndev->name, ks);
-       if (ret < 0) {
-               dev_err(&spi->dev, "failed to get irq\n");
-               goto err_irq;
-       }
-
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
 
        return 0;
 
-
 err_netdev:
-       free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
        if (gpio_is_valid(gpio))
                gpio_set_value(gpio, 0);
-err_id:
        regulator_disable(ks->vdd_reg);
 err_reg:
        regulator_disable(ks->vdd_io);
@@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
                dev_info(&spi->dev, "remove\n");
 
        unregister_netdev(priv->netdev);
-       free_irq(spi->irq, priv);
        if (gpio_is_valid(priv->gpio))
                gpio_set_value(priv->gpio, 0);
        regulator_disable(priv->vdd_reg);
index 852256ef1f2233b9d60efa0b01da45e128514a14..23da1e3ee429af922c603b5a53e70820e398f38a 100644 (file)
 */
 
 #define KS_CCR                                 0x08
+#define CCR_LE                                 (1 << 10)   /* KSZ8851-16MLL */
 #define CCR_EEPROM                             (1 << 9)
-#define CCR_SPI                                        (1 << 8)
-#define CCR_32PIN                              (1 << 0)
+#define CCR_SPI                                        (1 << 8)    /* KSZ8851SNL    */
+#define CCR_8BIT                               (1 << 7)    /* KSZ8851-16MLL */
+#define CCR_16BIT                              (1 << 6)    /* KSZ8851-16MLL */
+#define CCR_32BIT                              (1 << 5)    /* KSZ8851-16MLL */
+#define CCR_SHARED                             (1 << 4)    /* KSZ8851-16MLL */
+#define CCR_48PIN                              (1 << 1)    /* KSZ8851-16MLL */
+#define CCR_32PIN                              (1 << 0)    /* KSZ8851SNL    */
 
 /* MAC address registers */
 #define KS_MAR(_m)                             (0x15 - (_m))
 #define RXCR1_RXE                              (1 << 0)
 
 #define KS_RXCR2                               0x76
-#define RXCR2_SRDBL_MASK                       (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT                      (5)
-#define RXCR2_SRDBL_4B                         (0x0 << 5)
-#define RXCR2_SRDBL_8B                         (0x1 << 5)
-#define RXCR2_SRDBL_16B                                (0x2 << 5)
-#define RXCR2_SRDBL_32B                                (0x3 << 5)
-#define RXCR2_SRDBL_FRAME                      (0x4 << 5)
+#define RXCR2_SRDBL_MASK                       (0x7 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_SHIFT                      (5)         /* KSZ8851SNL    */
+#define RXCR2_SRDBL_4B                         (0x0 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_8B                         (0x1 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_16B                                (0x2 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_32B                                (0x3 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_FRAME                      (0x4 << 5)  /* KSZ8851SNL    */
 #define RXCR2_IUFFP                            (1 << 4)
 #define RXCR2_RXIUFCEZ                         (1 << 3)
 #define RXCR2_UDPLFE                           (1 << 2)
 #define RXFSHR_RXCE                            (1 << 0)
 
 #define KS_RXFHBCR                             0x7E
+#define RXFHBCR_CNT_MASK                       (0xfff << 0)
+
 #define KS_TXQCR                               0x80
-#define TXQCR_AETFE                            (1 << 2)
+#define TXQCR_AETFE                            (1 << 2)    /* KSZ8851SNL    */
 #define TXQCR_TXQMAM                           (1 << 1)
 #define TXQCR_METFE                            (1 << 0)
 
 
 #define KS_RXFDPR                              0x86
 #define RXFDPR_RXFPAI                          (1 << 14)
+#define RXFDPR_WST                             (1 << 12)   /* KSZ8851-16MLL */
+#define RXFDPR_EMS                             (1 << 11)   /* KSZ8851-16MLL */
+#define RXFDPR_RXFP_MASK                       (0x7ff << 0)
+#define RXFDPR_RXFP_SHIFT                      (0)
 
 #define KS_RXDTTR                              0x8C
 #define KS_RXDBCTR                             0x8E
 #define IRQ_RXMPDI                             (1 << 4)
 #define IRQ_LDI                                        (1 << 3)
 #define IRQ_EDI                                        (1 << 2)
-#define IRQ_SPIBEI                             (1 << 1)
+#define IRQ_SPIBEI                             (1 << 1)    /* KSZ8851SNL    */
 #define IRQ_DEDI                               (1 << 0)
 
 #define KS_RXFCTR                              0x9C
 #define KS_P1ANLPR                             0xEE
 
 #define KS_P1SCLMD                             0xF4
-#define P1SCLMD_LEDOFF                         (1 << 15)
-#define P1SCLMD_TXIDS                          (1 << 14)
-#define P1SCLMD_RESTARTAN                      (1 << 13)
-#define P1SCLMD_DISAUTOMDIX                    (1 << 10)
-#define P1SCLMD_FORCEMDIX                      (1 << 9)
-#define P1SCLMD_AUTONEGEN                      (1 << 7)
-#define P1SCLMD_FORCE100                       (1 << 6)
-#define P1SCLMD_FORCEFDX                       (1 << 5)
-#define P1SCLMD_ADV_FLOW                       (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX                  (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX                  (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX                   (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX                   (1 << 0)
 
 #define KS_P1CR                                        0xF6
-#define P1CR_HP_MDIX                           (1 << 15)
-#define P1CR_REV_POL                           (1 << 13)
-#define P1CR_OP_100M                           (1 << 10)
-#define P1CR_OP_FDX                            (1 << 9)
-#define P1CR_OP_MDI                            (1 << 7)
-#define P1CR_AN_DONE                           (1 << 6)
-#define P1CR_LINK_GOOD                         (1 << 5)
-#define P1CR_PNTR_FLOW                         (1 << 4)
-#define P1CR_PNTR_100BT_FDX                    (1 << 3)
-#define P1CR_PNTR_100BT_HDX                    (1 << 2)
-#define P1CR_PNTR_10BT_FDX                     (1 << 1)
-#define P1CR_PNTR_10BT_HDX                     (1 << 0)
+#define P1CR_LEDOFF                            (1 << 15)
+#define P1CR_TXIDS                             (1 << 14)
+#define P1CR_RESTARTAN                         (1 << 13)
+#define P1CR_DISAUTOMDIX                       (1 << 10)
+#define P1CR_FORCEMDIX                         (1 << 9)
+#define P1CR_AUTONEGEN                         (1 << 7)
+#define P1CR_FORCE100                          (1 << 6)
+#define P1CR_FORCEFDX                          (1 << 5)
+#define P1CR_ADV_FLOW                          (1 << 4)
+#define P1CR_ADV_100BT_FDX                     (1 << 3)
+#define P1CR_ADV_100BT_HDX                     (1 << 2)
+#define P1CR_ADV_10BT_FDX                      (1 << 1)
+#define P1CR_ADV_10BT_HDX                      (1 << 0)
+
+#define KS_P1SR                                        0xF8
+#define P1SR_HP_MDIX                           (1 << 15)
+#define P1SR_REV_POL                           (1 << 13)
+#define P1SR_OP_100M                           (1 << 10)
+#define P1SR_OP_FDX                            (1 << 9)
+#define P1SR_OP_MDI                            (1 << 7)
+#define P1SR_AN_DONE                           (1 << 6)
+#define P1SR_LINK_GOOD                         (1 << 5)
+#define P1SR_PNTR_FLOW                         (1 << 4)
+#define P1SR_PNTR_100BT_FDX                    (1 << 3)
+#define P1SR_PNTR_100BT_HDX                    (1 << 2)
+#define P1SR_PNTR_10BT_FDX                     (1 << 1)
+#define P1SR_PNTR_10BT_HDX                     (1 << 0)
 
 /* TX Frame control */
-
 #define TXFR_TXIC                              (1 << 15)
 #define TXFR_TXFID_MASK                                (0x3f << 0)
 #define TXFR_TXFID_SHIFT                       (0)
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD                            (0x00)
-#define KS_SPIOP_WR                            (0x40)
-#define KS_SPIOP_RXFIFO                                (0x80)
-#define KS_SPIOP_TXFIFO                                (0xC0)
index 35f8c9ef204d91cd4c17591d84ebab597cff33b4..c946841c0a066d2e7eabd059092ed6cbbb156b01 100644 (file)
@@ -40,6 +40,8 @@
 #include <linux/of_device.h>
 #include <linux/of_net.h>
 
+#include "ks8851.h"
+
 #define        DRV_NAME        "ks8851_mll"
 
 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
 #define TX_BUF_SIZE                    2000
 #define RX_BUF_SIZE                    2000
 
-#define KS_CCR                         0x08
-#define CCR_EEPROM                     (1 << 9)
-#define CCR_SPI                                (1 << 8)
-#define CCR_8BIT                       (1 << 7)
-#define CCR_16BIT                      (1 << 6)
-#define CCR_32BIT                      (1 << 5)
-#define CCR_SHARED                     (1 << 4)
-#define CCR_32PIN                      (1 << 0)
-
-/* MAC address registers */
-#define KS_MARL                                0x10
-#define KS_MARM                                0x12
-#define KS_MARH                                0x14
-
-#define KS_OBCR                                0x20
-#define OBCR_ODS_16MA                  (1 << 6)
-
-#define KS_EEPCR                       0x22
-#define EEPCR_EESA                     (1 << 4)
-#define EEPCR_EESB                     (1 << 3)
-#define EEPCR_EEDO                     (1 << 2)
-#define EEPCR_EESCK                    (1 << 1)
-#define EEPCR_EECS                     (1 << 0)
-
-#define KS_MBIR                                0x24
-#define MBIR_TXMBF                     (1 << 12)
-#define MBIR_TXMBFA                    (1 << 11)
-#define MBIR_RXMBF                     (1 << 4)
-#define MBIR_RXMBFA                    (1 << 3)
-
-#define KS_GRR                         0x26
-#define GRR_QMU                                (1 << 1)
-#define GRR_GSR                                (1 << 0)
-
-#define KS_WFCR                                0x2A
-#define WFCR_MPRXE                     (1 << 7)
-#define WFCR_WF3E                      (1 << 3)
-#define WFCR_WF2E                      (1 << 2)
-#define WFCR_WF1E                      (1 << 1)
-#define WFCR_WF0E                      (1 << 0)
-
-#define KS_WF0CRC0                     0x30
-#define KS_WF0CRC1                     0x32
-#define KS_WF0BM0                      0x34
-#define KS_WF0BM1                      0x36
-#define KS_WF0BM2                      0x38
-#define KS_WF0BM3                      0x3A
-
-#define KS_WF1CRC0                     0x40
-#define KS_WF1CRC1                     0x42
-#define KS_WF1BM0                      0x44
-#define KS_WF1BM1                      0x46
-#define KS_WF1BM2                      0x48
-#define KS_WF1BM3                      0x4A
-
-#define KS_WF2CRC0                     0x50
-#define KS_WF2CRC1                     0x52
-#define KS_WF2BM0                      0x54
-#define KS_WF2BM1                      0x56
-#define KS_WF2BM2                      0x58
-#define KS_WF2BM3                      0x5A
-
-#define KS_WF3CRC0                     0x60
-#define KS_WF3CRC1                     0x62
-#define KS_WF3BM0                      0x64
-#define KS_WF3BM1                      0x66
-#define KS_WF3BM2                      0x68
-#define KS_WF3BM3                      0x6A
-
-#define KS_TXCR                                0x70
-#define TXCR_TCGICMP                   (1 << 8)
-#define TXCR_TCGUDP                    (1 << 7)
-#define TXCR_TCGTCP                    (1 << 6)
-#define TXCR_TCGIP                     (1 << 5)
-#define TXCR_FTXQ                      (1 << 4)
-#define TXCR_TXFCE                     (1 << 3)
-#define TXCR_TXPE                      (1 << 2)
-#define TXCR_TXCRC                     (1 << 1)
-#define TXCR_TXE                       (1 << 0)
-
-#define KS_TXSR                                0x72
-#define TXSR_TXLC                      (1 << 13)
-#define TXSR_TXMC                      (1 << 12)
-#define TXSR_TXFID_MASK                        (0x3f << 0)
-#define TXSR_TXFID_SHIFT               (0)
-#define TXSR_TXFID_GET(_v)             (((_v) >> 0) & 0x3f)
-
-
-#define KS_RXCR1                       0x74
-#define RXCR1_FRXQ                     (1 << 15)
-#define RXCR1_RXUDPFCC                 (1 << 14)
-#define RXCR1_RXTCPFCC                 (1 << 13)
-#define RXCR1_RXIPFCC                  (1 << 12)
-#define RXCR1_RXPAFMA                  (1 << 11)
-#define RXCR1_RXFCE                    (1 << 10)
-#define RXCR1_RXEFE                    (1 << 9)
-#define RXCR1_RXMAFMA                  (1 << 8)
-#define RXCR1_RXBE                     (1 << 7)
-#define RXCR1_RXME                     (1 << 6)
-#define RXCR1_RXUE                     (1 << 5)
-#define RXCR1_RXAE                     (1 << 4)
-#define RXCR1_RXINVF                   (1 << 1)
-#define RXCR1_RXE                      (1 << 0)
 #define RXCR1_FILTER_MASK              (RXCR1_RXINVF | RXCR1_RXAE | \
                                         RXCR1_RXMAFMA | RXCR1_RXPAFMA)
-
-#define KS_RXCR2                       0x76
-#define RXCR2_SRDBL_MASK               (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT              (5)
-#define RXCR2_SRDBL_4B                 (0x0 << 5)
-#define RXCR2_SRDBL_8B                 (0x1 << 5)
-#define RXCR2_SRDBL_16B                        (0x2 << 5)
-#define RXCR2_SRDBL_32B                        (0x3 << 5)
-/* #define RXCR2_SRDBL_FRAME           (0x4 << 5) */
-#define RXCR2_IUFFP                    (1 << 4)
-#define RXCR2_RXIUFCEZ                 (1 << 3)
-#define RXCR2_UDPLFE                   (1 << 2)
-#define RXCR2_RXICMPFCC                        (1 << 1)
-#define RXCR2_RXSAF                    (1 << 0)
-
-#define KS_TXMIR                       0x78
-
-#define KS_RXFHSR                      0x7C
-#define RXFSHR_RXFV                    (1 << 15)
-#define RXFSHR_RXICMPFCS               (1 << 13)
-#define RXFSHR_RXIPFCS                 (1 << 12)
-#define RXFSHR_RXTCPFCS                        (1 << 11)
-#define RXFSHR_RXUDPFCS                        (1 << 10)
-#define RXFSHR_RXBF                    (1 << 7)
-#define RXFSHR_RXMF                    (1 << 6)
-#define RXFSHR_RXUF                    (1 << 5)
-#define RXFSHR_RXMR                    (1 << 4)
-#define RXFSHR_RXFT                    (1 << 3)
-#define RXFSHR_RXFTL                   (1 << 2)
-#define RXFSHR_RXRF                    (1 << 1)
-#define RXFSHR_RXCE                    (1 << 0)
-#define        RXFSHR_ERR                      (RXFSHR_RXCE | RXFSHR_RXRF |\
-                                       RXFSHR_RXFTL | RXFSHR_RXMR |\
-                                       RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
-                                       RXFSHR_RXTCPFCS)
-#define KS_RXFHBCR                     0x7E
-#define RXFHBCR_CNT_MASK               0x0FFF
-
-#define KS_TXQCR                       0x80
-#define TXQCR_AETFE                    (1 << 2)
-#define TXQCR_TXQMAM                   (1 << 1)
-#define TXQCR_METFE                    (1 << 0)
-
-#define KS_RXQCR                       0x82
-#define RXQCR_RXDTTS                   (1 << 12)
-#define RXQCR_RXDBCTS                  (1 << 11)
-#define RXQCR_RXFCTS                   (1 << 10)
-#define RXQCR_RXIPHTOE                 (1 << 9)
-#define RXQCR_RXDTTE                   (1 << 7)
-#define RXQCR_RXDBCTE                  (1 << 6)
-#define RXQCR_RXFCTE                   (1 << 5)
-#define RXQCR_ADRFE                    (1 << 4)
-#define RXQCR_SDA                      (1 << 3)
-#define RXQCR_RRXEF                    (1 << 0)
 #define RXQCR_CMD_CNTL                 (RXQCR_RXFCTE|RXQCR_ADRFE)
 
-#define KS_TXFDPR                      0x84
-#define TXFDPR_TXFPAI                  (1 << 14)
-#define TXFDPR_TXFP_MASK               (0x7ff << 0)
-#define TXFDPR_TXFP_SHIFT              (0)
-
-#define KS_RXFDPR                      0x86
-#define RXFDPR_RXFPAI                  (1 << 14)
-
-#define KS_RXDTTR                      0x8C
-#define KS_RXDBCTR                     0x8E
-
-#define KS_IER                         0x90
-#define KS_ISR                         0x92
-#define IRQ_LCI                                (1 << 15)
-#define IRQ_TXI                                (1 << 14)
-#define IRQ_RXI                                (1 << 13)
-#define IRQ_RXOI                       (1 << 11)
-#define IRQ_TXPSI                      (1 << 9)
-#define IRQ_RXPSI                      (1 << 8)
-#define IRQ_TXSAI                      (1 << 6)
-#define IRQ_RXWFDI                     (1 << 5)
-#define IRQ_RXMPDI                     (1 << 4)
-#define IRQ_LDI                                (1 << 3)
-#define IRQ_EDI                                (1 << 2)
-#define IRQ_SPIBEI                     (1 << 1)
-#define IRQ_DEDI                       (1 << 0)
-
-#define KS_RXFCTR                      0x9C
-#define RXFCTR_THRESHOLD_MASK          0x00FF
-
-#define KS_RXFC                                0x9D
-#define RXFCTR_RXFC_MASK               (0xff << 8)
-#define RXFCTR_RXFC_SHIFT              (8)
-#define RXFCTR_RXFC_GET(_v)            (((_v) >> 8) & 0xff)
-#define RXFCTR_RXFCT_MASK              (0xff << 0)
-#define RXFCTR_RXFCT_SHIFT             (0)
-
-#define KS_TXNTFSR                     0x9E
-
-#define KS_MAHTR0                      0xA0
-#define KS_MAHTR1                      0xA2
-#define KS_MAHTR2                      0xA4
-#define KS_MAHTR3                      0xA6
-
-#define KS_FCLWR                       0xB0
-#define KS_FCHWR                       0xB2
-#define KS_FCOWR                       0xB4
-
-#define KS_CIDER                       0xC0
-#define CIDER_ID                       0x8870
-#define CIDER_REV_MASK                 (0x7 << 1)
-#define CIDER_REV_SHIFT                        (1)
-#define CIDER_REV_GET(_v)              (((_v) >> 1) & 0x7)
-
-#define KS_CGCR                                0xC6
-#define KS_IACR                                0xC8
-#define IACR_RDEN                      (1 << 12)
-#define IACR_TSEL_MASK                 (0x3 << 10)
-#define IACR_TSEL_SHIFT                        (10)
-#define IACR_TSEL_MIB                  (0x3 << 10)
-#define IACR_ADDR_MASK                 (0x1f << 0)
-#define IACR_ADDR_SHIFT                        (0)
-
-#define KS_IADLR                       0xD0
-#define KS_IAHDR                       0xD2
-
-#define KS_PMECR                       0xD4
-#define PMECR_PME_DELAY                        (1 << 14)
-#define PMECR_PME_POL                  (1 << 12)
-#define PMECR_WOL_WAKEUP               (1 << 11)
-#define PMECR_WOL_MAGICPKT             (1 << 10)
-#define PMECR_WOL_LINKUP               (1 << 9)
-#define PMECR_WOL_ENERGY               (1 << 8)
-#define PMECR_AUTO_WAKE_EN             (1 << 7)
-#define PMECR_WAKEUP_NORMAL            (1 << 6)
-#define PMECR_WKEVT_MASK               (0xf << 2)
-#define PMECR_WKEVT_SHIFT              (2)
-#define PMECR_WKEVT_GET(_v)            (((_v) >> 2) & 0xf)
-#define PMECR_WKEVT_ENERGY             (0x1 << 2)
-#define PMECR_WKEVT_LINK               (0x2 << 2)
-#define PMECR_WKEVT_MAGICPKT           (0x4 << 2)
-#define PMECR_WKEVT_FRAME              (0x8 << 2)
-#define PMECR_PM_MASK                  (0x3 << 0)
-#define PMECR_PM_SHIFT                 (0)
-#define PMECR_PM_NORMAL                        (0x0 << 0)
-#define PMECR_PM_ENERGY                        (0x1 << 0)
-#define PMECR_PM_SOFTDOWN              (0x2 << 0)
-#define PMECR_PM_POWERSAVE             (0x3 << 0)
-
-/* Standard MII PHY data */
-#define KS_P1MBCR                      0xE4
-#define P1MBCR_FORCE_FDX               (1 << 8)
-
-#define KS_P1MBSR                      0xE6
-#define P1MBSR_AN_COMPLETE             (1 << 5)
-#define P1MBSR_AN_CAPABLE              (1 << 3)
-#define P1MBSR_LINK_UP                 (1 << 2)
-
-#define KS_PHY1ILR                     0xE8
-#define KS_PHY1IHR                     0xEA
-#define KS_P1ANAR                      0xEC
-#define KS_P1ANLPR                     0xEE
-
-#define KS_P1SCLMD                     0xF4
-#define P1SCLMD_LEDOFF                 (1 << 15)
-#define P1SCLMD_TXIDS                  (1 << 14)
-#define P1SCLMD_RESTARTAN              (1 << 13)
-#define P1SCLMD_DISAUTOMDIX            (1 << 10)
-#define P1SCLMD_FORCEMDIX              (1 << 9)
-#define P1SCLMD_AUTONEGEN              (1 << 7)
-#define P1SCLMD_FORCE100               (1 << 6)
-#define P1SCLMD_FORCEFDX               (1 << 5)
-#define P1SCLMD_ADV_FLOW               (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX          (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX          (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX           (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX           (1 << 0)
-
-#define KS_P1CR                                0xF6
-#define P1CR_HP_MDIX                   (1 << 15)
-#define P1CR_REV_POL                   (1 << 13)
-#define P1CR_OP_100M                   (1 << 10)
-#define P1CR_OP_FDX                    (1 << 9)
-#define P1CR_OP_MDI                    (1 << 7)
-#define P1CR_AN_DONE                   (1 << 6)
-#define P1CR_LINK_GOOD                 (1 << 5)
-#define P1CR_PNTR_FLOW                 (1 << 4)
-#define P1CR_PNTR_100BT_FDX            (1 << 3)
-#define P1CR_PNTR_100BT_HDX            (1 << 2)
-#define P1CR_PNTR_10BT_FDX             (1 << 1)
-#define P1CR_PNTR_10BT_HDX             (1 << 0)
-
-/* TX Frame control */
-
-#define TXFR_TXIC                      (1 << 15)
-#define TXFR_TXFID_MASK                        (0x3f << 0)
-#define TXFR_TXFID_SHIFT               (0)
-
-#define KS_P1SR                                0xF8
-#define P1SR_HP_MDIX                   (1 << 15)
-#define P1SR_REV_POL                   (1 << 13)
-#define P1SR_OP_100M                   (1 << 10)
-#define P1SR_OP_FDX                    (1 << 9)
-#define P1SR_OP_MDI                    (1 << 7)
-#define P1SR_AN_DONE                   (1 << 6)
-#define P1SR_LINK_GOOD                 (1 << 5)
-#define P1SR_PNTR_FLOW                 (1 << 4)
-#define P1SR_PNTR_100BT_FDX            (1 << 3)
-#define P1SR_PNTR_100BT_HDX            (1 << 2)
-#define P1SR_PNTR_10BT_FDX             (1 << 1)
-#define P1SR_PNTR_10BT_HDX             (1 << 0)
-
 #define        ENUM_BUS_NONE                   0
 #define        ENUM_BUS_8BIT                   1
 #define        ENUM_BUS_16BIT                  2
@@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
        ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
 
        /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
-       ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+       ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
 
        /* Setup RxQ Command Control (RXQCR) */
        ks->rc_rxqcr = RXQCR_CMD_CNTL;
@@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
         */
 
        w = ks_rdreg16(ks, KS_P1MBCR);
-       w &= ~P1MBCR_FORCE_FDX;
+       w &= ~BMCR_FULLDPLX;
        ks_wrreg16(ks, KS_P1MBCR, w);
 
        w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
        ks_setup_int(ks);
 
        data = ks_rdreg16(ks, KS_OBCR);
-       ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+       ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
 
        /* overwriting the default MAC address */
        if (pdev->dev.of_node) {
index 3b0adda7cc9c66769f84a1047a91aaa33c7939c8..a4cd6f2cfb862cb25315823d155b5497e59f5c2f 100644 (file)
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 
        for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
                skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+               if (!skb)
+                       break;
                qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
                skb_put(skb, QLCNIC_ILB_PKT_SIZE);
                adapter->ahw->diag_cnt = 0;
index cfb67b7465958ec4eb6ae8aa68003a777052310b..58e0ca9093d3d9b4f08fe4bd2a8b3c3db23267a0 100644 (file)
@@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
        write_reg_high(ioaddr, IMR, ISRh_RxErr);
 
        lp->tx_unit_busy = 0;
-    lp->pac_cnt_in_tx_buf = 0;
+       lp->pac_cnt_in_tx_buf = 0;
        lp->saved_tx_size = 0;
 }
 
index c29dde0640784b57a687888c605fb2e52c1b5117..7562ccbbb39af59a2ba0e4078b2f43b2a7376809 100644 (file)
@@ -678,6 +678,7 @@ struct rtl8169_private {
                struct work_struct work;
        } wk;
 
+       unsigned irq_enabled:1;
        unsigned supports_gmii:1;
        dma_addr_t counters_phys_addr;
        struct rtl8169_counters *counters;
@@ -1293,6 +1294,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
 static void rtl_irq_disable(struct rtl8169_private *tp)
 {
        RTL_W16(tp, IntrMask, 0);
+       tp->irq_enabled = 0;
 }
 
 #define RTL_EVENT_NAPI_RX      (RxOK | RxErr)
@@ -1301,6 +1303,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
 
 static void rtl_irq_enable(struct rtl8169_private *tp)
 {
+       tp->irq_enabled = 1;
        RTL_W16(tp, IntrMask, tp->irq_mask);
 }
 
@@ -6520,9 +6523,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 {
        struct rtl8169_private *tp = dev_instance;
        u16 status = RTL_R16(tp, IntrStatus);
-       u16 irq_mask = RTL_R16(tp, IntrMask);
 
-       if (status == 0xffff || !(status & irq_mask))
+       if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
                return IRQ_NONE;
 
        if (unlikely(status & SYSErr)) {
@@ -6540,7 +6542,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
        }
 
-       if (status & RTL_EVENT_NAPI) {
+       if (status & (RTL_EVENT_NAPI | LinkChg)) {
                rtl_irq_disable(tp);
                napi_schedule_irqoff(&tp->napi);
        }
index 6073387511f887e4b0cf069041a0b04b759572d1..67f9bb6e941b7ed2467dcfd78a49a0e6c2c8165d 100644 (file)
@@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
                status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
 
                /* Link ON & Not select default PHY & not ghost PHY */
-                if ((status & MII_STAT_LINK) && !default_phy &&
-                                       (phy->phy_types != UNKNOWN))
-                       default_phy = phy;
-                else {
+               if ((status & MII_STAT_LINK) && !default_phy &&
+                   (phy->phy_types != UNKNOWN)) {
+                       default_phy = phy;
+               } else {
                        status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
                        mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
                                status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
                                phy_home = phy;
                        else if(phy->phy_types == LAN)
                                phy_lan = phy;
-                }
+               }
        }
 
        if (!default_phy && phy_home)
index d8c5bc4122195d73f7150f2775797cc6ba9a3393..4d9bcb4d0378319d2d71d61a6e751ab9d141083d 100644 (file)
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
-                               STMMAC_RING_MODE, 1, false, skb->len);
+                               STMMAC_RING_MODE, 0, false, skb->len);
                tx_q->tx_skbuff[entry] = NULL;
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
-                               STMMAC_RING_MODE, 1, true, skb->len);
+                               STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+                               skb->len);
        } else {
                des2 = dma_map_single(priv->device, skb->data,
                                      nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
                tx_q->tx_skbuff_dma[entry].is_jumbo = true;
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
-                               STMMAC_RING_MODE, 1, true, skb->len);
+                               STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+                               skb->len);
        }
 
        tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
 
 static void refill_desc3(void *priv_ptr, struct dma_desc *p)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+       struct stmmac_rx_queue *rx_q = priv_ptr;
+       struct stmmac_priv *priv = rx_q->priv_data;
 
        /* Fill DES3 in case of RING mode */
-       if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+       if (priv->dma_buf_sz == BUF_SIZE_16KiB)
                p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
 }
 
index 97c5e1aad88f979208c80efe30d19cfcc5ba05e6..6a2e1031a62ae3c4d16f7f09f4d09481ccfa325d 100644 (file)
@@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
                                csum_insertion, priv->mode, 1, last_segment,
                                skb->len);
-
-               /* The own bit must be the latest setting done when prepare the
-                * descriptor and then barrier is needed to make sure that
-                * all is coherent before granting the DMA engine.
-                */
-               wmb();
+       } else {
+               stmmac_set_tx_owner(priv, first);
        }
 
+       /* The own bit must be the latest setting done when prepare the
+        * descriptor and then barrier is needed to make sure that
+        * all is coherent before granting the DMA engine.
+        */
+       wmb();
+
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
        stmmac_enable_dma_transmission(priv, priv->ioaddr);
index 5174d318901e0f74aa6fa6c7e12b29bfe2b362c7..0a920c5936b24e1a14a8625a63f9bf2eed019ccb 100644 (file)
@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 
        ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
                                gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
-       if (ret)
+       if (ret) {
+               of_node_put(interfaces);
                return ret;
+       }
 
        ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
-       if (ret)
+       if (ret) {
+               of_node_put(interfaces);
                return ret;
+       }
 
        /* Create network interfaces */
        INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
index ec7e7ec24ff910f7db36f9da1bbcd8c04ccf949a..4041c75997ba5ed52803b69b4c2e19bf41dee8e8 100644 (file)
@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
        ret = of_address_to_resource(np, 0, &dmares);
        if (ret) {
                dev_err(&pdev->dev, "unable to get DMA resource\n");
+               of_node_put(np);
                goto free_netdev;
        }
        lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
        if (IS_ERR(lp->dma_regs)) {
                dev_err(&pdev->dev, "could not map DMA regs\n");
                ret = PTR_ERR(lp->dma_regs);
+               of_node_put(np);
                goto free_netdev;
        }
        lp->rx_irq = irq_of_parse_and_map(np, 1);
index cd1d8faccca5fb36b488312d734d5e42cebb7b1a..cd6b95e673a58319a2f0ea0ed15445cc1782435f 100644 (file)
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
        INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
        lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
                                             WQ_MEM_RECLAIM);
+       if (unlikely(!lp->wqueue)) {
+               ret = -ENOMEM;
+               goto err_hw_init;
+       }
 
        ret = adf7242_hw_init(lp);
        if (ret)
index b6743f03dce000578b65bf9a8afddd3c2613d628..3b88846de31b18236e423c3b941f69537ab3bcfa 100644 (file)
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        goto out_err;
                }
 
-               genlmsg_reply(skb, info);
+               res = genlmsg_reply(skb, info);
                break;
        }
 
index 071869db44cf3e0b33cc75b3b4fd285212bf1c9d..520657945b8279debe7583d33c31c095899ddb7a 100644 (file)
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
        help
          MDIO devices and driver infrastructure code.
 
+if MDIO_DEVICE
+
 config MDIO_BUS
        tristate
        default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
          APM X-Gene SoC's.
 
 endif
+endif
 
 config PHYLINK
        tristate
index 9605d4fe540b1e4ed894d4ff5ab4f40e5d9caf16..cb86a3e90c7de3ff41a7d821c135aec8dad9eef0 100644 (file)
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
 
        bcm54xx_phydsp_config(phydev);
 
+       /* Encode link speed into LED1 and LED3 pair (green/amber).
+        * Also flash these two LEDs on activity. This means configuring
+        * them for MULTICOLOR and encoding link/activity into them.
+        */
+       val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
+               BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
+       bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
+
+       val = BCM_LED_MULTICOLOR_IN_PHASE |
+               BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
+               BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
+       bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+
        return 0;
 }
 
index bbd8c22067f3d2c4975757febf0658ddb1c3e8f7..97d45bd5b38e382b678dc3ce814f813cf045d7d6 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/netdevice.h>
 
 #define DP83822_PHY_ID         0x2000a240
+#define DP83825I_PHY_ID                0x2000a150
+
 #define DP83822_DEVADDR                0x1f
 
 #define MII_DP83822_PHYSCR     0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
        return 0;
 }
 
+#define DP83822_PHY_DRIVER(_id, _name)                         \
+       {                                                       \
+               PHY_ID_MATCH_MODEL(_id),                        \
+               .name           = (_name),                      \
+               .features       = PHY_BASIC_FEATURES,           \
+               .soft_reset     = dp83822_phy_reset,            \
+               .config_init    = dp83822_config_init,          \
+               .get_wol = dp83822_get_wol,                     \
+               .set_wol = dp83822_set_wol,                     \
+               .ack_interrupt = dp83822_ack_interrupt,         \
+               .config_intr = dp83822_config_intr,             \
+               .suspend = dp83822_suspend,                     \
+               .resume = dp83822_resume,                       \
+       }
+
 static struct phy_driver dp83822_driver[] = {
-       {
-               .phy_id = DP83822_PHY_ID,
-               .phy_id_mask = 0xfffffff0,
-               .name = "TI DP83822",
-               .features = PHY_BASIC_FEATURES,
-               .config_init = dp83822_config_init,
-               .soft_reset = dp83822_phy_reset,
-               .get_wol = dp83822_get_wol,
-               .set_wol = dp83822_set_wol,
-               .ack_interrupt = dp83822_ack_interrupt,
-               .config_intr = dp83822_config_intr,
-               .suspend = dp83822_suspend,
-               .resume = dp83822_resume,
-        },
+       DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
+       DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
 };
 module_phy_driver(dp83822_driver);
 
 static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
        { DP83822_PHY_ID, 0xfffffff0 },
+       { DP83825I_PHY_ID, 0xfffffff0 },
        { },
 };
 MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
index a238388eb1a5e09f138f5a63d627cc25f076da29..0eec2913c289b83a77a238aca2da64e558378336 100644 (file)
@@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
 static int meson_gxl_config_intr(struct phy_device *phydev)
 {
        u16 val;
+       int ret;
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                val = INTSRC_ANEG_PR
@@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
                val = 0;
        }
 
+       /* Ack any pending IRQ */
+       ret = meson_gxl_ack_interrupt(phydev);
+       if (ret)
+               return ret;
+
        return phy_write(phydev, INTSRC_MASK, val);
 }
 
index 49fdd1ee798e4418f5145b00ad6573525eed0f82..77068c545de0d33607981e7a94a32bf7ed1ff34c 100644 (file)
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
 {
        int ret;
 
-       ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+       ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
        if (ret < 0)
                return ret;
 
index 1d68921723dc08532b3f5321a52865076ad66336..e9ca1c088d0b11611e4d80268ced3806db05cffb 100644 (file)
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        int skb_xdp = 1;
        bool frags = tun_napi_frags_enabled(tfile);
 
-       if (!(tun->dev->flags & IFF_UP))
-               return -EIO;
-
        if (!(tun->flags & IFF_NO_PI)) {
                if (len < sizeof(pi))
                        return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        err = skb_copy_datagram_from_iter(skb, 0, from, len);
 
                if (err) {
+                       err = -EFAULT;
+drop:
                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
                        kfree_skb(skb);
                        if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                                mutex_unlock(&tfile->napi_mutex);
                        }
 
-                       return -EFAULT;
+                       return err;
                }
        }
 
@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
            !tfile->detached)
                rxhash = __skb_get_hash_symmetric(skb);
 
+       rcu_read_lock();
+       if (unlikely(!(tun->dev->flags & IFF_UP))) {
+               err = -EIO;
+               rcu_read_unlock();
+               goto drop;
+       }
+
        if (frags) {
                /* Exercise flow dissector code path. */
                u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                if (unlikely(headlen > skb_headlen(skb))) {
                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
                        napi_free_frags(&tfile->napi);
+                       rcu_read_unlock();
                        mutex_unlock(&tfile->napi_mutex);
                        WARN_ON(1);
                        return -ENOMEM;
@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        } else {
                netif_rx_ni(skb);
        }
+       rcu_read_unlock();
 
        stats = get_cpu_ptr(tun->pcpu_stats);
        u64_stats_update_begin(&stats->syncp);
index 820a2fe7d027733eb1c9ccc54aa504a4088a6600..aff995be2a318796a832e19c5c3c3e3cfc5c9efd 100644 (file)
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
        .tx_fixup       = aqc111_tx_fixup,
 };
 
+static const struct driver_info qnap_info = {
+       .description    = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
+       .bind           = aqc111_bind,
+       .unbind         = aqc111_unbind,
+       .status         = aqc111_status,
+       .link_reset     = aqc111_link_reset,
+       .reset          = aqc111_reset,
+       .stop           = aqc111_stop,
+       .flags          = FLAG_ETHER | FLAG_FRAMING_AX |
+                         FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+       .rx_fixup       = aqc111_rx_fixup,
+       .tx_fixup       = aqc111_tx_fixup,
+};
+
 static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
        {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
        {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
        {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
+       {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
        { },/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
index 5512a1038721459a727326bb8823e16c0886b7f1..3e9b2c319e45256865415da43386031c556d9e2b 100644 (file)
@@ -851,6 +851,14 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index 077f1b9f27616d34e966fc3760b40fd1f6a5915a..d76dfed8d9bbef1d1ae8470686e417af2e531ac8 100644 (file)
@@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
                /* If vxlan->dev is in the same netns, it has already been added
                 * to the list by the previous loop.
                 */
-               if (!net_eq(dev_net(vxlan->dev), net)) {
-                       gro_cells_destroy(&vxlan->gro_cells);
+               if (!net_eq(dev_net(vxlan->dev), net))
                        unregister_netdevice_queue(vxlan->dev, head);
-               }
        }
 
        for (h = 0; h < PORT_HASH_SIZE; ++h)
index e9822a3ec373929ff520c9dea90c51194adcb69b..94132cfd1f56241b7e9548466c7050541c87e4c5 100644 (file)
@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
 static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
                                     struct cfg80211_pmsr_result *res)
 {
-       s64 rtt_avg = res->ftm.rtt_avg * 100;
-
-       do_div(rtt_avg, 6666);
+       s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
 
        IWL_DEBUG_INFO(mvm, "entry %d\n", index);
        IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
index 6eedc0ec76616cc55afec8b98f55db1ec27e540b..76629b98c78d78d81d7c61e0cd4771ba8d5f3c39 100644 (file)
@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
+       iowrite32(q->desc_dma, &q->regs->desc_base);
+       iowrite32(q->ndesc, &q->regs->ring_size);
        q->head = ioread32(&q->regs->dma_idx);
        q->tail = q->head;
        iowrite32(q->head, &q->regs->cpu_idx);
@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
        else
                mt76_dma_sync_idx(dev, q);
 
-       wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+       wake = wake && q->stopped &&
+              qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+       if (wake)
+               q->stopped = false;
 
        if (!q->queued)
                wake_up(&dev->tx_wait);
index a033745adb2f7a738ac576aafd41aa931fcc5ea6..316167404729fdcd8322c71bd5626785822ce90d 100644 (file)
@@ -679,19 +679,15 @@ out:
        return ret;
 }
 
-static void
-mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
-               struct ieee80211_sta *sta)
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
 {
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-       int idx = wcid->idx;
-       int i;
+       int i, idx = wcid->idx;
 
        rcu_assign_pointer(dev->wcid[idx], NULL);
        synchronize_rcu();
 
-       mutex_lock(&dev->mutex);
-
        if (dev->drv->sta_remove)
                dev->drv->sta_remove(dev, vif, sta);
 
@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
        for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
                mt76_txq_remove(dev, sta->txq[i]);
        mt76_wcid_free(dev->wcid_mask, idx);
+}
+EXPORT_SYMBOL_GPL(__mt76_sta_remove);
 
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+               struct ieee80211_sta *sta)
+{
+       mutex_lock(&dev->mutex);
+       __mt76_sta_remove(dev, vif, sta);
        mutex_unlock(&dev->mutex);
 }
 
index 5dfb0601f1015c01251d409070ba64bda516db26..bcbfd3c4a44b68199dbc53e0e5ea498cc9e78e89 100644 (file)
@@ -126,6 +126,7 @@ struct mt76_queue {
        int ndesc;
        int queued;
        int buf_size;
+       bool stopped;
 
        u8 buf_offset;
        u8 hw_idx;
@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
                         const struct mt76_reg_pair *rp, int len);
        int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
                         struct mt76_reg_pair *rp, int len);
+       int (*mcu_restart)(struct mt76_dev *dev);
 };
 
 struct mt76_queue_ops {
@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta,
                   enum ieee80211_sta_state old_state,
                   enum ieee80211_sta_state new_state);
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
 
 struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
 
index afcd86f735b40e1a508d7fba5c4d38429906c9af..4dcb465095d19e9a0fe88c15a757a59fec802d87 100644 (file)
@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
 
 out:
        mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
-       if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
-           __sw_hweight8(dev->beacon_mask))
+       if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
                dev->beacon_check++;
 }
 
index d69e82c66ab29fb8e8c645de9270ab47a066dd49..b3ae0aaea62a15b51b1ed2c23bc8857f88dec8c9 100644 (file)
@@ -27,12 +27,16 @@ static void
 mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
 {
        __le32 *txd = (__le32 *)skb->data;
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_sta *sta;
        struct mt7603_sta *msta;
        struct mt76_wcid *wcid;
+       void *priv;
        int idx;
        u32 val;
+       u8 tid;
 
-       if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
+       if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
                goto free;
 
        val = le32_to_cpu(txd[1]);
@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
        if (!wcid)
                goto free;
 
-       msta = container_of(wcid, struct mt7603_sta, wcid);
+       priv = msta = container_of(wcid, struct mt7603_sta, wcid);
        val = le32_to_cpu(txd[0]);
        skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
 
+       val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+       val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+       txd[0] = cpu_to_le32(val);
+
+       sta = container_of(priv, struct ieee80211_sta, drv_priv);
+       hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+       tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+       ieee80211_sta_set_buffered(sta, tid, true);
+
        spin_lock_bh(&dev->ps_lock);
        __skb_queue_tail(&msta->psq, skb);
        if (skb_queue_len(&msta->psq) >= 64) {
index 15cc8f33b34d656d86c745cd10950d2253e864fc..d54dda67d036c19cffce6bc30765c39dc93ee326 100644 (file)
@@ -112,7 +112,7 @@ static void
 mt7603_phy_init(struct mt7603_dev *dev)
 {
        int rx_chains = dev->mt76.antenna_mask;
-       int tx_chains = __sw_hweight8(rx_chains) - 1;
+       int tx_chains = hweight8(rx_chains) - 1;
 
        mt76_rmw(dev, MT_WF_RMAC_RMCR,
                 (MT_WF_RMAC_RMCR_SMPS_MODE |
index 0a0115861b51e500c777daeb805685286ebb4f5a..5e31d7da96fc88e5fab246c61ec1d37a328a8700 100644 (file)
@@ -1072,7 +1072,7 @@ out:
        case MT_PHY_TYPE_HT:
                final_rate_flags |= IEEE80211_TX_RC_MCS;
                final_rate &= GENMASK(5, 0);
-               if (i > 15)
+               if (final_rate > 15)
                        return false;
                break;
        default:
index b10775ed92e65ff72036dc314c061437315c009c..cc0fe0933b2d8043e622f1b513817b6528bbcaae 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include "mt7603.h"
+#include "mac.h"
 #include "eeprom.h"
 
 static int
@@ -385,6 +386,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
        mt7603_ps_tx_list(dev, &list);
 }
 
+static void
+mt7603_ps_set_more_data(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+
+       hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+       hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
 static void
 mt7603_release_buffered_frames(struct ieee80211_hw *hw,
                               struct ieee80211_sta *sta,
@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
 
        __skb_queue_head_init(&list);
 
+       mt7603_wtbl_set_ps(dev, msta, false);
+
        spin_lock_bh(&dev->ps_lock);
        skb_queue_walk_safe(&msta->psq, skb, tmp) {
                if (!nframes)
@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
 
                skb_set_queue_mapping(skb, MT_TXQ_PSD);
                __skb_unlink(skb, &msta->psq);
+               mt7603_ps_set_more_data(skb);
                __skb_queue_tail(&list, skb);
                nframes--;
        }
        spin_unlock_bh(&dev->ps_lock);
 
+       if (!skb_queue_empty(&list))
+               ieee80211_sta_eosp(sta);
+
        mt7603_ps_tx_list(dev, &list);
 
        if (nframes)
index 4b0713f1fd5e3da78c83efa05b8f14aabd2fbfc4..d06905ea8cc63f2b9851eb679655007c1e8f7486 100644 (file)
@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
 {
        struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
        struct ieee80211_hw *hw = mt76_hw(dev);
-       int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
+       int n_chains = hweight8(dev->mt76.antenna_mask);
        struct {
                u8 control_chan;
                u8 center_chan;
index e13fea80d970d228ef7b99faa69e489f017ffb49..b920be1f5718b75d2f7374b6bbe6a533ebdec9c2 100644 (file)
@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
        }
 
        mem_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!mem_base) {
+       if (IS_ERR(mem_base)) {
                dev_err(&pdev->dev, "Failed to get memory resource\n");
-               return -EINVAL;
+               return PTR_ERR(mem_base);
        }
 
        mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
index 0290ba5869a5182ca283db62629d38a2e37b21ef..736f81752b5b488518e1393e6200b67be7bfa87c 100644 (file)
@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
        { MT_MM20_PROT_CFG,             0x01742004 },
        { MT_MM40_PROT_CFG,             0x03f42084 },
        { MT_TXOP_CTRL_CFG,             0x0000583f },
-       { MT_TX_RTS_CFG,                0x00092b20 },
+       { MT_TX_RTS_CFG,                0x00ffff20 },
        { MT_EXP_ACK_TIME,              0x002400ca },
        { MT_TXOP_HLDR_ET,              0x00000002 },
        { MT_XIFS_TIME_CFG,             0x33a41010 },
index 91718647da0285e40eda86aa97dc98559592732d..e5a06f74a6f701419f703844f4e73fba6307a627 100644 (file)
@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
        struct mt76x02_dev *dev;
        struct mt76_dev *mdev;
-       u32 asic_rev, mac_rev;
+       u32 mac_rev;
        int ret;
 
        mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
                goto err;
        }
 
-       asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+       mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
        mac_rev = mt76_rr(dev, MT_MAC_CSR0);
        dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
-                asic_rev, mac_rev);
+                mdev->rev, mac_rev);
+       if (!is_mt76x0(dev)) {
+               ret = -ENODEV;
+               goto err;
+       }
 
        /* Note: vendor driver skips this check for MT76X0U */
        if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
index 6915cce5def9342935784c888c477417e26b5c69..07061eb4d1e1b3ef97b7131791af79a0962b6360 100644 (file)
@@ -51,6 +51,7 @@ struct mt76x02_calibration {
        u16 false_cca;
        s8 avg_rssi_all;
        s8 agc_gain_adjust;
+       s8 agc_lowest_gain;
        s8 low_gain;
 
        s8 temp_vco;
@@ -114,8 +115,11 @@ struct mt76x02_dev {
        struct mt76x02_dfs_pattern_detector dfs_pd;
 
        /* edcca monitor */
+       unsigned long ed_trigger_timeout;
        bool ed_tx_blocked;
        bool ed_monitor;
+       u8 ed_monitor_enabled;
+       u8 ed_monitor_learning;
        u8 ed_trigger;
        u8 ed_silent;
        ktime_t ed_time;
@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
 
 void mt76x02_init_debugfs(struct mt76x02_dev *dev);
 
+static inline bool is_mt76x0(struct mt76x02_dev *dev)
+{
+       return mt76_chip(&dev->mt76) == 0x7610 ||
+              mt76_chip(&dev->mt76) == 0x7630 ||
+              mt76_chip(&dev->mt76) == 0x7650;
+}
+
 static inline bool is_mt76x2(struct mt76x02_dev *dev)
 {
        return mt76_chip(&dev->mt76) == 0x7612 ||
index 7580c5c986ffe5226f4c91feccaf73cd445aabc0..b1d6fd4861e3236b9cb255277c01b9a2f01b1fca 100644 (file)
@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
        return 0;
 }
 
+static int
+mt76_edcca_set(void *data, u64 val)
+{
+       struct mt76x02_dev *dev = data;
+       enum nl80211_dfs_regions region = dev->dfs_pd.region;
+
+       dev->ed_monitor_enabled = !!val;
+       dev->ed_monitor = dev->ed_monitor_enabled &&
+                         region == NL80211_DFS_ETSI;
+       mt76x02_edcca_init(dev, true);
+
+       return 0;
+}
+
+static int
+mt76_edcca_get(void *data, u64 *val)
+{
+       struct mt76x02_dev *dev = data;
+
+       *val = dev->ed_monitor_enabled;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
+                        "%lld\n");
+
 void mt76x02_init_debugfs(struct mt76x02_dev *dev)
 {
        struct dentry *dir;
@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
        debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
        debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
 
+       debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
        debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
        debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
        debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
index e4649103efd49ab5d77c28f3f01d1aac1c81fc3b..17d12d212d1ba1d0a3eb8f65b9e0ecef42908406 100644 (file)
@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
        if (dfs_pd->region != region) {
                tasklet_disable(&dfs_pd->dfs_tasklet);
 
-               dev->ed_monitor = region == NL80211_DFS_ETSI;
+               dev->ed_monitor = dev->ed_monitor_enabled &&
+                                 region == NL80211_DFS_ETSI;
                mt76x02_edcca_init(dev, true);
 
                dfs_pd->region = region;
index 91ff6598eccfb55dfb0fe8e96b4edf6e7941f59f..9ed231abe91676119d751b06cfa995a7f5dd716c 100644 (file)
@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
 }
 EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
 
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+                             struct ieee80211_key_conf *key)
+{
+       enum mt76x02_cipher_type cipher;
+       u8 key_data[32];
+       u32 iv, eiv;
+       u64 pn;
+
+       cipher = mt76x02_mac_get_key_info(key, key_data);
+       iv = mt76_rr(dev, MT_WCID_IV(idx));
+       eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+
+       pn = (u64)eiv << 16;
+       if (cipher == MT_CIPHER_TKIP) {
+               pn |= (iv >> 16) & 0xff;
+               pn |= (iv & 0xff) << 8;
+       } else if (cipher >= MT_CIPHER_AES_CCMP) {
+               pn |= iv & 0xffff;
+       } else {
+               return;
+       }
+
+       atomic64_set(&key->tx_pn, pn);
+}
+
+
 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                             struct ieee80211_key_conf *key)
 {
        enum mt76x02_cipher_type cipher;
        u8 key_data[32];
        u8 iv_data[8];
+       u64 pn;
 
        cipher = mt76x02_mac_get_key_info(key, key_data);
        if (cipher == MT_CIPHER_NONE && key)
@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
        if (key) {
                mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
                               !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+
+               pn = atomic64_read(&key->tx_pn);
+
                iv_data[3] = key->keyidx << 6;
-               if (cipher >= MT_CIPHER_TKIP)
+               if (cipher >= MT_CIPHER_TKIP) {
                        iv_data[3] |= 0x20;
+                       put_unaligned_le32(pn >> 16, &iv_data[4]);
+               }
+
+               if (cipher == MT_CIPHER_TKIP) {
+                       iv_data[0] = (pn >> 8) & 0xff;
+                       iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+                       iv_data[2] = pn & 0xff;
+               } else if (cipher >= MT_CIPHER_AES_CCMP) {
+                       put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+               }
        }
 
        mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
@@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
                }
        }
        mt76x02_edcca_tx_enable(dev, true);
+       dev->ed_monitor_learning = true;
 
        /* clear previous CCA timer value */
        mt76_rr(dev, MT_ED_CCA_TIMER);
@@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
 
 #define MT_EDCCA_TH            92
 #define MT_EDCCA_BLOCK_TH      2
+#define MT_EDCCA_LEARN_TH      50
+#define MT_EDCCA_LEARN_CCA     180
+#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
+
 static void mt76x02_edcca_check(struct mt76x02_dev *dev)
 {
        ktime_t cur_time;
@@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
                dev->ed_trigger = 0;
        }
 
-       if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
-           !dev->ed_tx_blocked)
+       if (dev->cal.agc_lowest_gain &&
+           dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
+           dev->ed_trigger > MT_EDCCA_LEARN_TH) {
+               dev->ed_monitor_learning = false;
+               dev->ed_trigger_timeout = jiffies + 20 * HZ;
+       } else if (!dev->ed_monitor_learning &&
+                  time_is_after_jiffies(dev->ed_trigger_timeout)) {
+               dev->ed_monitor_learning = true;
+               mt76x02_edcca_tx_enable(dev, true);
+       }
+
+       if (dev->ed_monitor_learning)
+               return;
+
+       if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
                mt76x02_edcca_tx_enable(dev, false);
-       else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
-                dev->ed_tx_blocked)
+       else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
                mt76x02_edcca_tx_enable(dev, true);
 }
 
index 6b1f25d2f64c3a931fbf1bd4c695984f9d5bf4ff..caeeef96c42faf74ccf9bb15cc60f4ffe865e9f4 100644 (file)
@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
                                 u8 key_idx, struct ieee80211_key_conf *key);
 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                             struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+                             struct ieee80211_key_conf *key);
 void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
                            u8 *mac);
 void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
index 1229f19f2b02c68b4144662e8097333d5133f3ac..daaed1220147ea914c32f13f32f04bd5a360d7fb 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/irq.h>
 
 #include "mt76x02.h"
+#include "mt76x02_mcu.h"
 #include "mt76x02_trace.h"
 
 struct beacon_bc_data {
@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
        return i < 4;
 }
 
+static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            struct ieee80211_key_conf *key, void *data)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       struct mt76_wcid *wcid;
+
+       if (!sta)
+           return;
+
+       wcid = (struct mt76_wcid *) sta->drv_priv;
+
+       if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
+           return;
+
+       mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
+}
+
+static void mt76x02_reset_state(struct mt76x02_dev *dev)
+{
+       int i;
+
+       lockdep_assert_held(&dev->mt76.mutex);
+
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+       rcu_read_lock();
+       ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
+       rcu_read_unlock();
+
+       for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+               struct ieee80211_sta *sta;
+               struct ieee80211_vif *vif;
+               struct mt76x02_sta *msta;
+               struct mt76_wcid *wcid;
+               void *priv;
+
+               wcid = rcu_dereference_protected(dev->mt76.wcid[i],
+                                       lockdep_is_held(&dev->mt76.mutex));
+               if (!wcid)
+                       continue;
+
+               priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+               sta = container_of(priv, struct ieee80211_sta, drv_priv);
+
+               priv = msta->vif;
+               vif = container_of(priv, struct ieee80211_vif, drv_priv);
+
+               __mt76_sta_remove(&dev->mt76, vif, sta);
+               memset(msta, 0, sizeof(*msta));
+       }
+
+       dev->vif_mask = 0;
+       dev->beacon_mask = 0;
+}
+
 static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 {
        u32 mask = dev->mt76.mmio.irqmask;
+       bool restart = dev->mt76.mcu_ops->mcu_restart;
        int i;
 
        ieee80211_stop_queues(dev->mt76.hw);
@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 
        mutex_lock(&dev->mt76.mutex);
 
+       if (restart)
+               mt76x02_reset_state(dev);
+
        if (dev->beacon_mask)
                mt76_clear(dev, MT_BEACON_TIME_CFG,
                           MT_BEACON_TIME_CFG_BEACON_TX |
@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
        /* let fw reset DMA */
        mt76_set(dev, 0x734, 0x3);
 
+       if (restart)
+               dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
+
        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
                mt76_queue_tx_cleanup(dev, i, true);
 
        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
                mt76_queue_rx_reset(dev, i);
 
-       mt76_wr(dev, MT_MAC_SYS_CTRL,
-               MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
-       mt76_set(dev, MT_WPDMA_GLO_CFG,
-                MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+       mt76x02_mac_start(dev);
+
        if (dev->ed_monitor)
                mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
 
-       if (dev->beacon_mask)
+       if (dev->beacon_mask && !restart)
                mt76_set(dev, MT_BEACON_TIME_CFG,
                         MT_BEACON_TIME_CFG_BEACON_TX |
                         MT_BEACON_TIME_CFG_TBTT_EN);
@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
                napi_schedule(&dev->mt76.napi[i]);
        }
 
-       ieee80211_wake_queues(dev->mt76.hw);
-
-       mt76_txq_schedule_all(&dev->mt76);
+       if (restart) {
+               mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+               ieee80211_restart_hw(dev->mt76.hw);
+       } else {
+               ieee80211_wake_queues(dev->mt76.hw);
+               mt76_txq_schedule_all(&dev->mt76);
+       }
 }
 
 static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
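
The new mt76x02_reset_state() above only holds pointers to the driver-private areas embedded in ieee80211_sta and ieee80211_vif, so it walks back to the enclosing structures with container_of() before handing them to __mt76_sta_remove(). A self-contained sketch of that pointer arithmetic, with made-up structures in place of the mac80211 ones:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: given a pointer to a member, recover
 * the address of the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sta_priv { int wcid_idx; };          /* stand-in for mt76x02_sta */

struct sta {                                /* stand-in for ieee80211_sta */
	unsigned char addr[6];
	struct sta_priv drv_priv;           /* driver-private trailer */
};

int main(void)
{
	struct sta s = { .addr = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
			 .drv_priv = { .wcid_idx = 7 } };
	struct sta_priv *priv = &s.drv_priv;

	/* Walk back from the private area to the containing structure,
	 * as the reset path does for both the station and the vif. */
	struct sta *back = container_of(priv, struct sta, drv_priv);

	printf("recovered %p (expected %p), wcid %d\n",
	       (void *)back, (void *)&s, back->drv_priv.wcid_idx);
	return 0;
}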
index a020c757ba5c6c59fba0463d01f5774a916e4339..a54b63a96eaefa24268f28573f1123c30ee6b9d5 100644 (file)
@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
                ret = true;
        }
 
+       dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
index 43f07461c8d39b6045388c8bbe59b1d2b0fcd6e8..6fb52b596d421753ff24f247225136b30c15150b 100644 (file)
@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
 
        mt76x02_insert_hdr_pad(skb);
 
-       txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+       txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
        mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+       skb_push(skb, sizeof(struct mt76x02_txwi));
 
        pid = mt76_tx_status_skb_add(mdev, wcid, skb);
        txwi->pktid = pid;
index a48c261b0c634bca601f8d9fcd496b99a5bc2fa7..cd072ac614f76847b86618a4f40c87af171f5147 100644 (file)
@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
        int idx = 0;
 
+       memset(msta, 0, sizeof(*msta));
+
        idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
        if (idx < 0)
                return -ENOSPC;
@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
        struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
        struct mt76_txq *mtxq;
 
+       memset(mvif, 0, sizeof(*mvif));
+
        mvif->idx = idx;
        mvif->group_wcid.idx = MT_VIF_WCID(idx);
        mvif->group_wcid.hw_key_idx = -1;
@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct mt76x02_dev *dev = hw->priv;
        unsigned int idx = 0;
 
+       /* Allow changing the address in HW when creating the first interface. */
+       if (!dev->vif_mask &&
+           (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
+            memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
+               mt76x02_mac_setaddr(dev, vif->addr);
+
        if (vif->addr[0] & BIT(1))
                idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
 
@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        if (dev->vif_mask & BIT(idx))
                return -EBUSY;
 
-       /* Allow to change address in HW if we create first interface. */
-       if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
-                mt76x02_mac_setaddr(dev, vif->addr);
-
        dev->vif_mask |= BIT(idx);
 
        mt76x02_vif_init(dev, vif, idx);
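
The relocated check above programs the hardware address only when the first interface is created, and only when vif->addr differs from the stored address in a way the existing address cannot cover: any mismatch in the first octet outside bits 1-4 (masked off with ~GENMASK(4, 1), the bits the code below uses to derive the per-interface index) or any mismatch in the remaining five octets. A small standalone sketch of that comparison, with invented helper names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN     6
#define VIF_IDX_BITS 0x1eu   /* GENMASK(4, 1): bits 1..4 of the first octet */

/* True when 'want' cannot be reached from 'cur' by flipping only the
 * per-interface index bits of the first octet (illustrative only). */
static bool needs_hw_addr_update(const uint8_t *cur, const uint8_t *want)
{
	if ((cur[0] ^ want[0]) & (uint8_t)~VIF_IDX_BITS)
		return true;
	return memcmp(cur + 1, want + 1, ETH_ALEN - 1) != 0;
}

int main(void)
{
	const uint8_t cur[ETH_ALEN]   = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t idx[ETH_ALEN]   = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t other[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	printf("index-bit change only: %d\n", needs_hw_addr_update(cur, idx));
	printf("different address:     %d\n", needs_hw_addr_update(cur, other));
	return 0;
}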
index f8534362e2c8cdf7edf9b03d5e2118d885d5ca6f..a30ef2c5a9db0433cbb696f6ab6f3e8b1811cb5d 100644 (file)
@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
                { MT_TX_SW_CFG1,                0x00010000 },
                { MT_TX_SW_CFG2,                0x00000000 },
                { MT_TXOP_CTRL_CFG,             0x0400583f },
-               { MT_TX_RTS_CFG,                0x00100020 },
+               { MT_TX_RTS_CFG,                0x00ffff20 },
                { MT_TX_TIMEOUT_CFG,            0x000a2290 },
                { MT_TX_RETRY_CFG,              0x47f01f0f },
                { MT_EXP_ACK_TIME,              0x002c00dc },
index 6c619f1c65c9cbfbbdf9241dc340c448826ab228..d7abe3d73badbbce7100781ece4b5c91407bc0dd 100644 (file)
@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
 
 void mt76x2_cleanup(struct mt76x02_dev *dev);
 
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
 void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
 void mt76x2_init_txpower(struct mt76x02_dev *dev,
                         struct ieee80211_supported_band *sband);
index 984d9c4c2e1a8ac9bfb435ef8de97898cb720b4d..d3927a13e92e91068344e431176a3b42ef6ff444 100644 (file)
@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
        }
 }
 
-static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 {
        const u8 *macaddr = dev->mt76.macaddr;
        u32 val;
index 03e24ae7f66c7c8953e7308a1bac304ff294fb1b..605dc66ae83be45d956c0483e9688a8482f442c8 100644 (file)
@@ -165,9 +165,30 @@ error:
        return -ENOENT;
 }
 
+static int
+mt76pci_mcu_restart(struct mt76_dev *mdev)
+{
+       struct mt76x02_dev *dev;
+       int ret;
+
+       dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+       mt76x02_mcu_cleanup(dev);
+       mt76x2_mac_reset(dev, true);
+
+       ret = mt76pci_load_firmware(dev);
+       if (ret)
+               return ret;
+
+       mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+       return 0;
+}
+
 int mt76x2_mcu_init(struct mt76x02_dev *dev)
 {
        static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+               .mcu_restart = mt76pci_mcu_restart,
                .mcu_send_msg = mt76x02_mcu_msg_send,
        };
        int ret;
index 1848e8ab2e21cfb6332fd17259986d6b932dbff6..769a9b9720442c5d303e9a65519a8fe1de166089 100644 (file)
@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
        gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
        gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
 
-       if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+       val = 0x1836 << 16;
+       if (!mt76x2_has_ext_lna(dev) &&
+           dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
                val = 0x1e42 << 16;
-       else
-               val = 0x1836 << 16;
+
+       if (mt76x2_has_ext_lna(dev) &&
+           dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
+           dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+               val = 0x0f36 << 16;
 
        val |= 0xf8;
 
@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
 {
        u8 *gain = dev->cal.agc_gain_init;
        u8 low_gain_delta, gain_delta;
+       u32 agc_35, agc_37;
        bool gain_change;
        int low_gain;
        u32 val;
@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
        else
                low_gain_delta = 14;
 
+       agc_37 = 0x2121262c;
+       if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+               agc_35 = 0x11111516;
+       else if (low_gain == 2)
+               agc_35 = agc_37 = 0x08080808;
+       else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+               agc_35 = 0x10101014;
+       else
+               agc_35 = 0x11111116;
+
        if (low_gain == 2) {
                mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
                mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
                dev->cal.agc_gain_adjust = 0;
        } else {
                mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
-               if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
-               else
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
-               mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
                gain_delta = 0;
                dev->cal.agc_gain_adjust = low_gain_delta;
        }
 
+       mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
+       mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
+
        dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
        dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
        mt76x2_phy_set_gain_val(dev);
index ddb6b2c48e01283a8041620fdbc46542de8d04c7..ac0f13d4629963cea77e5ab113caf3e11a33275a 100644 (file)
 #include "mt76x2u.h"
 
 static const struct usb_device_id mt76x2u_device_table[] = {
-       { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
        { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
        { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
        { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
-       { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+       { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
        { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
        { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
        { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
 
        mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
        dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+       if (!is_mt76x2(dev)) {
+               err = -ENODEV;
+               goto err;
+       }
 
        err = mt76x2u_register_device(dev);
        if (err < 0)
index 5e84b4535cb1456c22bbf883d79c9d44c62da438..3b82345756ea90d3cc93f71946c2ec6cb7e81110 100644 (file)
@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
        mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
        mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
        mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
-       mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
 
        mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
        mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
index 5a349fe3e576f606ec2fb1997ed8a394d54d7b3f..2585df5123350ba8adf52cf14ef6133de7e03f77 100644 (file)
@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);
 
-       if (q->queued > q->ndesc - 8)
+       if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+               q->stopped = true;
+       }
+
        spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);
@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
+       } else {
+               ieee80211_sta_eosp(sta);
        }
+
        spin_unlock_bh(&hwq->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
        struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
        struct mt76_queue *hwq = mtxq->hwq;
 
+       if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+               return;
+
        spin_lock_bh(&hwq->lock);
        if (list_empty(&mtxq->list))
                list_add_tail(&mtxq->list, &hwq->swq);
index ae6ada370597a6f0ebfd3b7d69bb583ecf8759ef..4c1abd4924054c6f377ca062ad98ca5bb8446dc7 100644 (file)
@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
                        spin_lock_bh(&q->lock);
                }
                mt76_txq_schedule(dev, q);
-               wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+
+               wake = q->stopped && q->queued < q->ndesc - 8;
+               if (wake)
+                       q->stopped = false;
+
                if (!q->queued)
                        wake_up(&dev->tx_wait);
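
Together with the mt76_tx() hunk further up, the tasklet change above replaces the old "queue index below IEEE80211_NUM_ACS" heuristic with an explicit per-queue stopped flag: a queue is stopped at most once when fewer than eight descriptors remain free, and it is woken only if it was actually stopped and has drained back below the same watermark. A minimal plain-C sketch of that bookkeeping (invented types, locking omitted):

#include <stdbool.h>
#include <stdio.h>

struct txq {
	int ndesc;      /* ring size */
	int queued;     /* descriptors currently in flight */
	bool stopped;   /* mirrors q->stopped in the hunks above */
};

/* After queueing a frame: stop the queue once when it is nearly full. */
static void after_enqueue(struct txq *q)
{
	if (q->queued > q->ndesc - 8 && !q->stopped) {
		q->stopped = true;
		printf("stop queue at queued=%d\n", q->queued);
	}
}

/* After completions: wake the queue only if we were the ones to stop it. */
static void after_cleanup(struct txq *q)
{
	if (q->stopped && q->queued < q->ndesc - 8) {
		q->stopped = false;
		printf("wake queue at queued=%d\n", q->queued);
	}
}

int main(void)
{
	struct txq q = { .ndesc = 32 };

	for (q.queued = 0; q.queued <= 30; q.queued++)
		after_enqueue(&q);
	for (; q.queued > 0; q.queued--)
		after_cleanup(&q);
	return 0;
}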
 
index d8b7863f79261a3275b6641ffdf7607e23fdd206..6ae7f14dc9bf936ec3ae34f9f4d4f1ee67de6646 100644 (file)
@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
        mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
        dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
                 asic_rev, mac_rev);
+       if ((asic_rev >> 16) != 0x7601) {
+               ret = -ENODEV;
+               goto err;
+       }
 
        /* Note: vendor driver skips this check for MT7601U */
        if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
index 2839bb70badfbcb8284bc5bbbc1f457bd3b58c63..f0716f6ce41fa2a1ad993e45adba9148d7f0c120 100644 (file)
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                struct nvme_ns *ns)
 {
-       enum nvme_ana_state old;
-
        mutex_lock(&ns->head->lock);
-       old = ns->ana_state;
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
-       if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+       if (nvme_state_is_live(ns->ana_state))
                nvme_mpath_set_live(ns);
        mutex_unlock(&ns->head->lock);
 }
index e7e08889865e732d503a6ac2af5d38cac4dd9672..68c49dd672104d82ea768a6e9bf4354df731422b 100644 (file)
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
        return ret;
 }
 
-static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
        union nvme_result res = {};
 
index 2d73b66e368627cdee268a74d30fb3c5d6a34235..b3e765a95af8ee7447c536ff48095504c8100d67 100644 (file)
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 
        ret = nvmet_p2pmem_ns_enable(ns);
        if (ret)
-               goto out_unlock;
+               goto out_dev_disable;
 
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
 out_dev_put:
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
        nvmet_ns_dev_disable(ns);
        goto out_unlock;
 }
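
The nvmet_ns_enable() change above is a goto-unwind fix: when the peer-to-peer memory setup fails, the error path now lands on a new out_dev_disable label that also undoes the earlier device enable, instead of jumping straight to the unlock and leaking it. A self-contained sketch of the general pattern, with invented step names rather than the nvmet ones:

#include <stdio.h>

static int step_ok(const char *name)   { printf("  %s\n", name); return 0; }
static int step_fail(const char *name) { printf("  %s -> fail\n", name); return -1; }
static void undo(const char *name)     { printf("  undo %s\n", name); }

/* Each successful step gains a matching rung on the unwind ladder; a
 * failure jumps to the rung that undoes everything done so far. */
static int enable(void)
{
	int ret;

	ret = step_ok("open backing device");
	if (ret)
		goto out;

	ret = step_fail("enable peer-to-peer memory");
	if (ret)
		goto out_dev_disable;   /* must undo the device open */

	ret = step_ok("add namespace to subsystem");
	if (ret)
		goto out_p2p_del;       /* must undo p2p, then the open */

	return 0;

out_p2p_del:
	undo("enable peer-to-peer memory");
out_dev_disable:
	undo("open backing device");
out:
	return ret;
}

int main(void)
{
	printf("enable() returned %d\n", enable());
	return 0;
}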
index 3e43212d3c1c6bba5a6d553dc2a965188c5ccbf5..bc6ebb51b0bf7c5310940fca19450fd115ea7788 100644 (file)
@@ -75,11 +75,11 @@ err:
        return ret;
 }
 
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
 {
-       bv->bv_page = sg_page_iter_page(iter);
-       bv->bv_offset = iter->sg->offset;
-       bv->bv_len = PAGE_SIZE - iter->sg->offset;
+       bv->bv_page = sg_page(sg);
+       bv->bv_offset = sg->offset;
+       bv->bv_len = sg->length;
 }
 
 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 
 static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
-       struct sg_page_iter sg_pg_iter;
+       ssize_t nr_bvec = req->sg_cnt;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;
-
+       int i;
+       struct scatterlist *sg;
 
        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
        }
 
        memset(&req->f.iocb, 0, sizeof(struct kiocb));
-       for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
-               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+       for_each_sg(req->sg, sg, req->sg_cnt, i) {
+               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
 
 static void nvmet_file_execute_rw(struct nvmet_req *req)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+       ssize_t nr_bvec = req->sg_cnt;
 
        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
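
The io-cmd-file hunks above stop iterating the request's scatterlist page by page: nr_bvec is now simply req->sg_cnt, and each bio_vec is filled straight from a scatterlist element using its real offset and length, where the old code sized every vector from the element's offset to the end of a page. A rough standalone sketch of the difference, with mock segment structures in place of struct scatterlist and struct bio_vec:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct seg  { unsigned int offset, length; };   /* stand-in for an sg element */
struct bvec { unsigned int offset, len; };      /* stand-in for a bio_vec */

int main(void)
{
	/* A 2560-byte transfer described by two short segments. */
	const struct seg sg[] = { { 512, 1024 }, { 0, 1536 } };
	const size_t n = sizeof(sg) / sizeof(sg[0]);
	unsigned int per_page = 0, per_element = 0;
	struct bvec bv;
	size_t i;

	for (i = 0; i < n; i++) {
		/* Old sizing: offset to the end of the page, per page. */
		per_page += PAGE_SIZE - sg[i].offset;

		/* New sizing (as in the hunk above): one vector per
		 * element, using the element's actual length. */
		bv.offset = sg[i].offset;
		bv.len = sg[i].length;
		per_element += bv.len;
	}

	printf("page-based sizing: %u bytes, element-based sizing: %u bytes\n",
	       per_page, per_element);
	return 0;
}

Only the element-based total matches the 2560 bytes actually described by the segments.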
index 56dd83a45e55dc21360f729c488a213a2a258241..5484a46dafda857a7e64207ccac4a8249cf1512c 100644 (file)
@@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port)
 struct pardevice *parport_open(int devnum, const char *name)
 {
        struct daisydev *p = topology;
-       struct pardev_cb par_cb;
        struct parport *port;
        struct pardevice *dev;
        int daisy;
 
-       memset(&par_cb, 0, sizeof(par_cb));
        spin_lock(&topology_lock);
        while (p && p->devnum != devnum)
                p = p->next;
@@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name)
        port = parport_get_port(p->port);
        spin_unlock(&topology_lock);
 
-       dev = parport_register_dev_model(port, name, &par_cb, devnum);
+       dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
        parport_put_port(port);
        if (!dev)
                return NULL;
@@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port)
        kfree(deviceid);
        return detected;
 }
-
-static int daisy_drv_probe(struct pardevice *par_dev)
-{
-       struct device_driver *drv = par_dev->dev.driver;
-
-       if (strcmp(drv->name, "daisy_drv"))
-               return -ENODEV;
-       if (strcmp(par_dev->name, daisy_dev_name))
-               return -ENODEV;
-
-       return 0;
-}
-
-static struct parport_driver daisy_driver = {
-       .name = "daisy_drv",
-       .probe = daisy_drv_probe,
-       .devmodel = true,
-};
-
-int daisy_drv_init(void)
-{
-       return parport_register_driver(&daisy_driver);
-}
-
-void daisy_drv_exit(void)
-{
-       parport_unregister_driver(&daisy_driver);
-}
index e5e6a463a9412e167a9e2b2c34f4a6cfb3a1cb2a..e035174ba205d12dbc6e529c6ec85c8bda9e5d21 100644 (file)
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
 ssize_t parport_device_id (int devnum, char *buffer, size_t count)
 {
        ssize_t retval = -ENXIO;
-       struct pardevice *dev = parport_open(devnum, daisy_dev_name);
+       struct pardevice *dev = parport_open (devnum, "Device ID probe");
        if (!dev)
                return -ENXIO;
 
index 0171b8dbcdcd5f57c54eeee19eed23d65dbd8897..5dc53d420ca8ca805c0c036c23e3c1a3fc42ac00 100644 (file)
@@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = {
 
 int parport_bus_init(void)
 {
-       int retval;
-
-       retval = bus_register(&parport_bus_type);
-       if (retval)
-               return retval;
-       daisy_drv_init();
-
-       return 0;
+       return bus_register(&parport_bus_type);
 }
 
 void parport_bus_exit(void)
 {
-       daisy_drv_exit();
        bus_unregister(&parport_bus_type);
 }
 
index 224d886341158ba55494da1c766a933b0cfaeefc..d994839a3e24b5ec8c1452f3489c47d8fa7aba20 100644 (file)
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
                           enum pcie_link_width *width);
 void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
+void pcie_report_downtraining(struct pci_dev *dev);
 
 /* Single Root I/O Virtualization */
 struct pci_sriov {
index d2eae3b7cc0f74d5c8fdec80fa6ffffd68dd8501..4fa9e3523ee1a22bc763aa5ea0f162dc00ab09dd 100644 (file)
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
 {
        u16 lnk_ctl;
 
+       pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
        lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
 }
 
-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
 {
        struct pcie_device *srv = context;
        struct pci_dev *port = srv->port;
-       struct pci_dev *dev;
        u16 link_status, events;
        int ret;
 
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
        if (ret != PCIBIOS_SUCCESSFUL || !events)
                return IRQ_NONE;
 
+       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+       pcie_update_link_speed(port->subordinate, link_status);
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+{
+       struct pcie_device *srv = context;
+       struct pci_dev *port = srv->port;
+       struct pci_dev *dev;
+
        /*
         * Print status from downstream devices, not this root port or
         * downstream switch port.
         */
        down_read(&pci_bus_sem);
        list_for_each_entry(dev, &port->subordinate->devices, bus_list)
-               __pcie_print_link_status(dev, false);
+               pcie_report_downtraining(dev);
        up_read(&pci_bus_sem);
 
-       pcie_update_link_speed(port->subordinate, link_status);
-       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
        return IRQ_HANDLED;
 }
 
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
        if (!pcie_link_bandwidth_notification_supported(srv->port))
                return -ENODEV;
 
-       ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler,
+       ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+                                  pcie_bw_notification_handler,
                                   IRQF_SHARED, "PCIe BW notif", srv);
        if (ret)
                return ret;
index 2ec0df04e0dca15ce1f56b3f9049280f199e0928..7e12d016386394ab9b401f3e5dcb8da8b917484c 100644 (file)
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
        return dev;
 }
 
-static void pcie_report_downtraining(struct pci_dev *dev)
+void pcie_report_downtraining(struct pci_dev *dev)
 {
        if (!pci_is_pcie(dev))
                return;
index 900c7073c46f4e4ca530463fcfa91e79b38b10b4..71308766e89199f443047c715f5ce3206375df70 100644 (file)
@@ -440,7 +440,7 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
 
        ret = cros_ec_create_pdinfo(debug_info);
        if (ret)
-               goto remove_debugfs;
+               goto remove_log;
 
        ec->debug_info = debug_info;
 
@@ -448,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
 
        return 0;
 
+remove_log:
+       cros_ec_cleanup_console_log(debug_info);
 remove_debugfs:
        debugfs_remove_recursive(debug_info->dir);
        return ret;
@@ -467,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
 {
        struct cros_ec_dev *ec = dev_get_drvdata(dev);
 
-       cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
+       if (ec->debug_info->log_buffer.buf)
+               cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
 
        return 0;
 }
@@ -476,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
 {
        struct cros_ec_dev *ec = dev_get_drvdata(dev);
 
-       schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
+       if (ec->debug_info->log_buffer.buf)
+               schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
 
        return 0;
 }
index f6ff29a11f1ace1bf1234d4617cc84671162eb2c..14355668ddfa3146e88074132737a757a075090a 100644 (file)
@@ -223,11 +223,11 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
                msg->command, msg->type, msg->flags, msg->response_size,
                msg->request_size);
 
+       mutex_lock(&ec->mailbox_lock);
        /* Prepare request packet */
        rq = ec->data_buffer;
        wilco_ec_prepare(msg, rq);
 
-       mutex_lock(&ec->mailbox_lock);
        ret = wilco_ec_transfer(ec, msg, rq);
        mutex_unlock(&ec->mailbox_lock);
 
index 4159c63a5fd2bbba9b9c2949fde8c56ba9030a89..a835b31aad999dcbc90847455b0c75f612aba563 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/crw.h>
 #include <asm/isc.h>
 #include <asm/ebcdic.h>
+#include <asm/ap.h>
 
 #include "css.h"
 #include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
                              " failed (rc=%d).\n", ret);
 }
 
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+       CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+       if (sei_area->rs != 5)
+               return;
+
+       ap_bus_cfg_chg();
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
        switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
+       case 3: /* ap config changed */
+               chsc_process_sei_ap_cfg_chg(sei_area);
+               break;
        case 7: /* channel-path-availability information */
                chsc_process_sei_chp_avail(sei_area);
                break;
index a10cec0e86eb495ffd45f3854a09e1a76bf3e598..0b3b9de45c602042384751921379b0d903e5be79 100644 (file)
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 {
        struct vfio_ccw_private *private;
        struct irb *irb;
+       bool is_final;
 
        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;
 
+       is_final = !(scsw_actl(&irb->scsw) &
+                    (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
-               cp_free(&private->cp);
+               if (is_final)
+                       cp_free(&private->cp);
        }
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
 
-       if (private->mdev)
+       if (private->mdev && is_final)
                private->state = VFIO_CCW_STATE_IDLE;
 }
 
index e15816ff126582f933c66add86bb45e7b0606e0f..1546389d71dbca7ebc1f2f103780182742226376 100644 (file)
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
 
+       /* prepare ap queue device removal */
        if (is_queue_dev(dev))
-               ap_queue_remove(to_ap_queue(dev));
+               ap_queue_prepare_remove(to_ap_queue(dev));
+
+       /* driver's chance to clean up gracefully */
        if (ap_drv->remove)
                ap_drv->remove(ap_dev);
 
+       /* now do the ap queue device remove */
+       if (is_queue_dev(dev))
+               ap_queue_remove(to_ap_queue(dev));
+
        /* Remove queue/card from list of active queues/cards */
        spin_lock_bh(&ap_list_lock);
        if (is_card_dev(dev))
@@ -860,6 +867,16 @@ void ap_bus_force_rescan(void)
 }
 EXPORT_SYMBOL(ap_bus_force_rescan);
 
+/*
+ * A config change has happened, force an ap bus rescan.
+ */
+void ap_bus_cfg_chg(void)
+{
+       AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+       ap_bus_force_rescan();
+}
+
 /*
  * hex2bitmap() - parse hex mask string and set bitmap.
  * Valid strings are "0x012345678" with at least one valid hex number.
index d0059eae5d94bd51a5c677c28162ed63c9f0d437..15a98a673c5cc3323980f15e95d3418b1c65e028 100644 (file)
@@ -91,6 +91,7 @@ enum ap_state {
        AP_STATE_WORKING,
        AP_STATE_QUEUE_FULL,
        AP_STATE_SUSPEND_WAIT,
+       AP_STATE_REMOVE,        /* about to be removed from driver */
        AP_STATE_UNBOUND,       /* momentary not bound to a driver */
        AP_STATE_BORKED,        /* broken */
        NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
 
 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
index ba261210c6da0518fe7f8f4cb8f702b0503464b9..6a340f2c355693170776992c6a1d018e78d6ee96 100644 (file)
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
                [AP_EVENT_POLL] = ap_sm_suspend_read,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
        },
+       [AP_STATE_REMOVE] = {
+               [AP_EVENT_POLL] = ap_sm_nop,
+               [AP_EVENT_TIMEOUT] = ap_sm_nop,
+       },
        [AP_STATE_UNBOUND] = {
                [AP_EVENT_POLL] = ap_sm_nop,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
 }
 EXPORT_SYMBOL(ap_flush_queue);
 
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
 {
-       ap_flush_queue(aq);
+       spin_lock_bh(&aq->lock);
+       /* flush queue */
+       __ap_flush_queue(aq);
+       /* set REMOVE state to prevent new messages from being queued */
+       aq->state = AP_STATE_REMOVE;
        del_timer_sync(&aq->timeout);
+       spin_unlock_bh(&aq->lock);
+}
 
-       /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+       /*
+        * All messages have been flushed and the state is
+        * AP_STATE_REMOVE. Now reset with zero, which also
+        * clears the irq registration, and move the state
+        * to AP_STATE_UNBOUND to signal that this queue is
+        * currently not used by any driver.
+        */
        spin_lock_bh(&aq->lock);
        ap_zapq(aq->qid);
        aq->state = AP_STATE_UNBOUND;
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_remove);
 
 void ap_queue_reinit_state(struct ap_queue *aq)
 {
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
        ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_reinit_state);
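
The ap_queue changes above split teardown into two phases: ap_queue_prepare_remove() flushes pending messages, moves the state machine to the new AP_STATE_REMOVE so nothing further is enqueued, and stops the timeout timer, while ap_queue_remove() later performs the final reset and marks the queue AP_STATE_UNBOUND; the driver's own remove callback runs in between (see the ap_device_remove() hunk earlier). A stripped-down userspace sketch of that two-phase pattern, with a pthread mutex standing in for the bh spinlock and all names invented:

#include <pthread.h>
#include <stdio.h>

enum q_state { Q_WORKING, Q_REMOVE, Q_UNBOUND };

struct queue {
	pthread_mutex_t lock;
	enum q_state state;
	int pending;
};

/* Phase 1: flush and fence off new work; the driver may still inspect
 * the now-quiescent queue after this. */
static void queue_prepare_remove(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->pending = 0;           /* flush queued messages */
	q->state = Q_REMOVE;      /* reject anything new from here on */
	pthread_mutex_unlock(&q->lock);
}

static int queue_enqueue(struct queue *q)
{
	int ret = -1;

	pthread_mutex_lock(&q->lock);
	if (q->state == Q_WORKING) {
		q->pending++;
		ret = 0;
	}
	pthread_mutex_unlock(&q->lock);
	return ret;
}

/* Phase 2: final reset; the queue is no longer bound to any driver. */
static void queue_remove(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->state = Q_UNBOUND;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, Q_WORKING, 0 };

	queue_enqueue(&q);
	queue_prepare_remove(&q);
	printf("enqueue after prepare: %d (rejected)\n", queue_enqueue(&q));
	queue_remove(&q);
	return 0;
}

Build with -pthread; the point is only the ordering, not the locking primitive.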
index eb93c2d27d0ad142c4d977d74df3e415468336af..689c2af7026a3adcf08e2e6eb019d9352e6de9d4 100644 (file)
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
 
 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct zcrypt_queue *zq,
+                                                    struct module **pmod,
                                                     unsigned int weight)
 {
        if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
        atomic_add(weight, &zc->load);
        atomic_add(weight, &zq->load);
        zq->request_count++;
+       *pmod = zq->queue->ap_dev.drv->driver.owner;
        return zq;
 }
 
 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
                                     struct zcrypt_queue *zq,
+                                    struct module *mod,
                                     unsigned int weight)
 {
-       struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
        zq->request_count--;
        atomic_sub(weight, &zc->load);
        atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        unsigned int func_code;
        unsigned short *domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        unsigned int func_code;
        struct ap_message ap_msg;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
 
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
        struct ap_message ap_msg;
        unsigned int domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
 
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
        rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
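
The zcrypt hunks above make zcrypt_pick_queue() record the owning module while the queue is still known to be bound (returning it through the new pmod parameter), and zcrypt_drop_queue() then releases against that remembered module instead of re-deriving it from zq->queue->ap_dev.drv, which is presumably no longer safe to chase if the queue was unbound while the request was in flight. A small userspace sketch of that "capture the owner up front" idea, with invented miniature types:

#include <stdio.h>

struct module { int refcount; };
struct driver { struct module *owner; };
struct queue  { struct driver *drv; int load; };

/* Pick: account the work and remember whose reference we took. */
static struct queue *pick_queue(struct queue *q, struct module **pmod)
{
	q->drv->owner->refcount++;
	q->load++;
	*pmod = q->drv->owner;
	return q;
}

/* Drop: use the remembered module rather than q->drv->owner, which may
 * no longer be reachable if the queue lost its driver in the meantime. */
static void drop_queue(struct queue *q, struct module *mod)
{
	q->load--;
	mod->refcount--;
}

int main(void)
{
	struct module m = { 0 };
	struct driver d = { &m };
	struct queue q = { &d, 0 };
	struct module *mod;

	pick_queue(&q, &mod);
	q.drv = NULL;            /* queue unbound while the request ran */
	drop_queue(&q, mod);     /* still balanced: the owner was captured */
	printf("module refcount back to %d\n", m.refcount);
	return 0;
}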
index 197b0f5b63e7183473b91a0d8d8f728bb3e0c16e..44bd6f04c145da55b1aef66ad147983d74cedf9a 100644 (file)
@@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 {
+       struct sk_buff *skb;
+
        /* release may never happen from within CQ tasklet scope */
        WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
        if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
                qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
 
-       __skb_queue_purge(&buf->skb_list);
+       while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
+               consume_skb(skb);
 }
 
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
index 8efb2e8ff8f460adacd53376d5cddd3fc6953b03..c3067fd3bd9ee47ad79d106cd3b17067ea91fbf3 100644 (file)
@@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
        } /* else fall through */
 
        QETH_TXQ_STAT_INC(queue, tx_dropped);
-       QETH_TXQ_STAT_INC(queue, tx_errors);
-       dev_kfree_skb_any(skb);
+       kfree_skb(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
@@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc;
 
+       qeth_l2_vnicc_set_defaults(card);
+
        if (gdev->dev.type == &qeth_generic_devtype) {
                rc = qeth_l2_create_device_attributes(&gdev->dev);
                if (rc)
@@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
        }
 
        hash_init(card->mac_htable);
-       card->info.hwtrap = 0;
-       qeth_l2_vnicc_set_defaults(card);
        return 0;
 }
 
index 7e68d9d16859d24eaae38d3f0a8079f40cbabdf2..53712cf2640659cb0da642ba1dc05bb03e6567ec 100644 (file)
@@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 
 tx_drop:
        QETH_TXQ_STAT_INC(queue, tx_dropped);
-       QETH_TXQ_STAT_INC(queue, tx_errors);
-       dev_kfree_skb_any(skb);
+       kfree_skb(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
@@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc;
 
+       hash_init(card->ip_htable);
+
        if (gdev->dev.type == &qeth_generic_devtype) {
                rc = qeth_l3_create_device_attributes(&gdev->dev);
                if (rc)
                        return rc;
        }
-       hash_init(card->ip_htable);
+
        hash_init(card->ip_mc_htable);
-       card->info.hwtrap = 0;
        return 0;
 }
 
index 744a64680d5b0d16c982012bfe2b351becd54a9a..e8fc28dba8dfc3521532c3c87d26d199b8ed9b6c 100644 (file)
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
        add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                    int clear, char *dbftag)
+{
+       unsigned long flags;
+       struct zfcp_port *port;
+
+       write_lock_irqsave(&adapter->erp_lock, flags);
+       read_lock(&adapter->port_list_lock);
+       list_for_each_entry(port, &adapter->port_list, list)
+               _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+       read_unlock(&adapter->port_list_lock);
+       write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
                                      int clear, char *dbftag)
 {
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
                struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
                int lun_status;
 
+               if (sdev->sdev_state == SDEV_DEL ||
+                   sdev->sdev_state == SDEV_CANCEL)
+                       continue;
                if (zsdev->port != port)
                        continue;
                /* LUN under port of interest */
index 3fce47b0b21b55142a64bb3b838bf28168ddd89e..c6acca521ffec71ee7b3f7e7231a32b18fdceff7 100644 (file)
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
                                 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                           int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
index db00b5e3abbe361143c83dc5d6becfaa0e62aac0..33eddb02ee300238897f0f9018119717b387fd58 100644 (file)
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
        list_for_each_entry(port, &adapter->port_list, list) {
                if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
                        zfcp_fc_test_link(port);
-               if (!port->d_id)
-                       zfcp_erp_port_reopen(port,
-                                            ZFCP_STATUS_COMMON_ERP_FAILED,
-                                            "fcrscn1");
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
        struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+       struct zfcp_adapter *adapter = fsf_req->adapter;
        struct fc_els_rscn *head;
        struct fc_els_rscn_page *page;
        u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
        no_entries = be16_to_cpu(head->rscn_plen) /
                sizeof(struct fc_els_rscn_page);
 
+       if (no_entries > 1) {
+               /* handle failed ports */
+               unsigned long flags;
+               struct zfcp_port *port;
+
+               read_lock_irqsave(&adapter->port_list_lock, flags);
+               list_for_each_entry(port, &adapter->port_list, list) {
+                       if (port->d_id)
+                               continue;
+                       zfcp_erp_port_reopen(port,
+                                            ZFCP_STATUS_COMMON_ERP_FAILED,
+                                            "fcrscn1");
+               }
+               read_unlock_irqrestore(&adapter->port_list_lock, flags);
+       }
+
        for (i = 1; i < no_entries; i++) {
                /* skip head and start with 1st element */
                page++;
index f4f6a07c52220234fb0e865ca3f0d87a2d2fdbe0..221d0dfb849329eb5ebf1758004628301b500ba8 100644 (file)
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
        struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
        int ret = SUCCESS, fc_ret;
 
+       if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+               zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+               zfcp_erp_wait(adapter);
+       }
        zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
        zfcp_erp_wait(adapter);
        fc_ret = fc_block_scsi_eh(scpnt);
index 1df5171594b89dc70087def629200c30b4149d1f..11fb68d7e60de6ed5ab388250691b647cbc030bc 100644 (file)
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
        return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+       return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-       if (unlikely(pci_channel_offline(dev->pdev)))
+       if (unlikely(aac_pci_offline(dev)))
                return -1;
 
        return (dev)->a_ops.adapter_check_health(dev);
index e67e032936ef015b66c242eaf9c3111cfb3812c2..78430a7b294c6e651024300d86aaec5eecbe53c4 100644 (file)
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        return -ETIMEDOUT;
                                }
 
-                               if (unlikely(pci_channel_offline(dev->pdev)))
+                               if (unlikely(aac_pci_offline(dev)))
                                        return -EFAULT;
 
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-               if (unlikely(pci_channel_offline(dev->pdev)))
+               if (unlikely(aac_pci_offline(dev)))
                        return -EFAULT;
 
                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
index 3c3cf89f713fbfaf8a7c15fbca17120af609fe87..14bac4966c87bf0e8bc601fc3c4e7afb0eb1cec8 100644 (file)
@@ -1801,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
        }
        hisi_sas_dereg_device(hisi_hba, device);
 
+       if (dev_is_sata(device)) {
+               rc = hisi_sas_softreset_ata_disk(device);
+               if (rc)
+                       return TMF_RESP_FUNC_FAILED;
+       }
+
        rc = hisi_sas_debug_I_T_nexus_reset(device);
 
        if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
index dbaa4f131433abde497c843c17e2b0e677b4544e..3ad997ac351034bd2e556a17c6f9d14addf2921a 100644 (file)
@@ -139,6 +139,7 @@ static const struct {
        { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
 
        { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+       { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
 };
 
 static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
        if (rsp->flags & FCP_RSP_LEN_VALID)
                rsp_code = rsp->data.info.rsp_code;
 
-       scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+       scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
                    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
-                   cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+                   cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
                    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
                sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n",
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
                if (crq->format == IBMVFC_PARTITION_MIGRATED) {
                        /* We need to re-setup the interpartition connection */
-                       dev_info(vhost->dev, "Re-enabling adapter\n");
+                       dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
                        vhost->client_migrated = 1;
                        ibmvfc_purge_requests(vhost, DID_REQUEUE);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
-               } else {
-                       dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+               } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+                       dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
                        ibmvfc_purge_requests(vhost, DID_ERROR);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+               } else {
+                       dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
                }
                return;
        case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
                break;
        }
 
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 
                tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
-                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
-                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+                                            be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
                break;
        }
 
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
                fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
                tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                         ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
-                        mad->iu.status, mad->iu.error,
+                        be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
                         ibmvfc_get_fc_type(fc_reason), fc_reason,
                         ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
                break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
-                       rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
-                       rsp->fc_explain, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+                       status);
                break;
        }
 
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
                level += ibmvfc_retry_host_init(vhost);
                ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                          rsp->status, rsp->error);
+                          be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                break;
        case IBMVFC_MAD_DRIVER_FAILED:
                break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
                ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                                               rsp->status, rsp->error);
+                                               be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                ibmvfc_free_event(evt);
                return;
        case IBMVFC_MAD_CRQ_ERROR:
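
Several of the ibmvfc hunks above wrap the firmware's big-endian status and error words in be16_to_cpu() before logging them, where the raw wire-order values were printed before. A tiny standalone sketch of why that matters on a little-endian host, using the POSIX htons/ntohs pair as the byte-swapping stand-in:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 16-bit status as it arrives on the wire, big-endian. */
	uint16_t wire = htons(0x0004);

	/* On a little-endian host the raw value prints as 0x400, while
	 * converting first (as the hunks now do with be16_to_cpu())
	 * prints the intended 0x4; on big-endian both agree. */
	printf("raw: %#x  converted: %#x\n", wire, ntohs(wire));
	return 0;
}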
index b81a53c4a9a8b1020a96a85fd5e84cde2adb9900..459cc288ba1d01abe63c28454bf73c7190bb64a4 100644 (file)
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
        IBMVFC_CRQ_XPORT_EVENT          = 0xFF,
 };
 
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
        IBMVFC_CRQ_INIT                 = 0x01,
        IBMVFC_CRQ_INIT_COMPLETE        = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+       IBMVFC_PARTNER_FAILED           = 0x01,
+       IBMVFC_PARTNER_DEREGISTER       = 0x02,
        IBMVFC_PARTITION_MIGRATED       = 0x06,
 };
 
index 1135e74646e21c6657e5bdd0ad16582f9aca9139..8cec5230fe313fd53557af39b361a044c3eca5f6 100644 (file)
@@ -96,6 +96,7 @@ static int client_reserve = 1;
 static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 static LIST_HEAD(ibmvscsi_head);
+static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        }
 
        dev_set_drvdata(&vdev->dev, hostdata);
+       spin_lock(&ibmvscsi_driver_lock);
        list_add_tail(&hostdata->host_list, &ibmvscsi_head);
+       spin_unlock(&ibmvscsi_driver_lock);
        return 0;
 
       add_srp_port_failed:
@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
-       list_del(&hostdata->host_list);
-       unmap_persist_bufs(hostdata);
+       unsigned long flags;
+
+       srp_remove_host(hostdata->host);
+       scsi_remove_host(hostdata->host);
+
+       purge_requests(hostdata, DID_ERROR);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
        release_event_pool(&hostdata->pool, hostdata);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
                                        max_events);
 
        kthread_stop(hostdata->work_thread);
-       srp_remove_host(hostdata->host);
-       scsi_remove_host(hostdata->host);
+       unmap_persist_bufs(hostdata);
+
+       spin_lock(&ibmvscsi_driver_lock);
+       list_del(&hostdata->host_list);
+       spin_unlock(&ibmvscsi_driver_lock);
+
        scsi_host_put(hostdata->host);
 
        return 0;
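The ibmvscsi changes above do two things: a new driver-global spinlock protects the shared ibmvscsi_head list (probe and remove for different adapters may run concurrently), and ibmvscsi_remove() now tears the adapter down in a safer order, detaching from SRP/SCSI and purging outstanding requests before the event pool and CRQ are released, and only then dropping the host from the list. A minimal, hedged sketch of the list-locking part using stand-in names:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(demo_host_list);
static DEFINE_SPINLOCK(demo_driver_lock);

static void demo_track(struct list_head *entry)
{
	spin_lock(&demo_driver_lock);
	list_add_tail(entry, &demo_host_list);
	spin_unlock(&demo_driver_lock);
}

static void demo_untrack(struct list_head *entry)
{
	spin_lock(&demo_driver_lock);
	list_del(entry);
	spin_unlock(&demo_driver_lock);
}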
index e57774472e752013ce762912a6ceec512905fc2a..1d8c584ec1e9197595acf2baa61bccae4305b646 100644 (file)
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 
        if (smid < ioc->hi_priority_smid) {
                struct scsiio_tracker *st;
+               void *request;
 
                st = _get_st_from_smid(ioc, smid);
                if (!st) {
                        _base_recovery_check(ioc);
                        return;
                }
+
+               /* Clear MPI request frame */
+               request = mpt3sas_base_get_msg_frame(ioc, smid);
+               memset(request, 0, ioc->request_sz);
+
                mpt3sas_base_clear_st(ioc, st);
                _base_recovery_check(ioc);
                return;
index 8bb5b8f9f4d2cdbbc127c73cda4d9672b4adcf77..1ccfbc7eebe0323ce88b1c450e52bb87aba3c45e 100644 (file)
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
        struct scsi_cmnd *scmd = NULL;
        struct scsiio_tracker *st;
+       Mpi25SCSIIORequest_t *mpi_request;
 
        if (smid > 0  &&
            smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
                u32 unique_tag = smid - 1;
 
+               mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+               /*
+                * If SCSI IO request is outstanding at driver level then
+                * DevHandle field must be non-zero. If DevHandle is zero
+                * then it means that this smid is free at driver level,
+                * so return NULL.
+                */
+               if (!mpi_request->DevHandle)
+                       return scmd;
+
                scmd = scsi_host_find_tag(ioc->shost, unique_tag);
                if (scmd) {
                        st = scsi_cmd_priv(scmd);
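Taken together, the two mpt3sas hunks make a zeroed request frame the marker for a free smid: mpt3sas_base_free_smid() clears the MPI request frame, so a later lookup that finds DevHandle == 0 knows no SCSI IO is outstanding for that smid and returns NULL instead of a stale scsi_cmnd. A standalone toy model of that invariant (not driver code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct frame { uint16_t dev_handle; };

static struct frame frames[8];

static void free_smid(unsigned int smid)
{
	/* Freeing a slot zeroes its frame, so handle == 0 means "free". */
	memset(&frames[smid], 0, sizeof(frames[smid]));
}

static const struct frame *lookup(unsigned int smid)
{
	return frames[smid].dev_handle ? &frames[smid] : NULL;
}

int main(void)
{
	frames[3].dev_handle = 0x0011;
	free_smid(3);
	printf("smid 3 outstanding: %s\n", lookup(3) ? "yes" : "no");
	return 0;
}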
index 420045155ba042fbf316463fe6c087f0ba032412..0c700b140ce7d943e4df404a493ba2257c886e56 100644 (file)
@@ -4991,6 +4991,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
                if ((domain & 0xf0) == 0xf0)
                        continue;
 
+               /* Bypass if not same domain and area of adapter. */
+               if (area && domain && ((area != vha->d_id.b.area) ||
+                   (domain != vha->d_id.b.domain)) &&
+                   (ha->current_topology == ISP_CFG_NL))
+                       continue;
+
+
                /* Bypass invalid local loop ID. */
                if (loop_id > LAST_LOCAL_LOOP_ID)
                        continue;
index 677f82fdf56fd174c2c033b852431a91ab5b0fe6..91f576d743fe6fa9d2cbd71ba88f80459832d914 100644 (file)
@@ -1517,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                goto eh_reset_failed;
        }
        err = 2;
-       if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
+       if (do_reset(fcport, cmd->device->lun, 1)
                != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x800c,
                    "do_reset failed for cmd=%p.\n", cmd);
index 16a18d5d856f91725b33e25df042eb21bba8c20a..6e4f4931ae175f806731d2fcb1fbb4ba655cc885 100644 (file)
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
        if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
                return -EINVAL;
        ep = iscsi_lookup_endpoint(transport_fd);
+       if (!ep)
+               return -EINVAL;
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
        qla_conn->qla_ep = ep->dd_data;
index 20189675677a000325b8185b43b2743413c5171d..601b9f1de26758a1d078a69de36469ad4318d39d 100644 (file)
@@ -585,9 +585,16 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
        if (!blk_rq_is_scsi(req)) {
                WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
                cmd->flags &= ~SCMD_INITIALIZED;
-               destroy_rcu_head(&cmd->rcu);
        }
 
+       /*
+        * Calling rcu_barrier() is not necessary here because the
+        * SCSI error handler guarantees that the function called by
+        * call_rcu() has been called before scsi_end_request() is
+        * called.
+        */
+       destroy_rcu_head(&cmd->rcu);
+
        /*
         * In the MQ case the command gets freed by __blk_mq_end_request,
         * so we have to do all cleanup that depends on it earlier.
@@ -2541,8 +2548,10 @@ void scsi_device_resume(struct scsi_device *sdev)
         * device deleted during suspend)
         */
        mutex_lock(&sdev->state_mutex);
-       sdev->quiesced_by = NULL;
-       blk_clear_pm_only(sdev->request_queue);
+       if (sdev->quiesced_by) {
+               sdev->quiesced_by = NULL;
+               blk_clear_pm_only(sdev->request_queue);
+       }
        if (sdev->sdev_state == SDEV_QUIESCE)
                scsi_device_set_state(sdev, SDEV_RUNNING);
        mutex_unlock(&sdev->state_mutex);
index 6a9040faed00c93ba5beeda77ffb5b2cbcb6c07e..3b119ca0cc0ce9ba2cfcc95cf78307a96b1d264b 100644 (file)
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, state);
+       /*
+        * If the device state changes to SDEV_RUNNING, we need to run
+        * the queue to avoid I/O hang.
+        */
+       if (ret == 0 && state == SDEV_RUNNING)
+               blk_mq_run_hw_queues(sdev->request_queue, true);
        mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
index 0508831d6fb9cb6d5ff3b90210dcfef71ef465da..0a82e93566dc8516fc0775a8975da3c25453699a 100644 (file)
@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
        scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
        /* flush running scans then delete devices */
        flush_work(&session->scan_work);
+       /* flush running unbind operations */
+       flush_work(&session->unbind_work);
        __iscsi_unbind_session(&session->unbind_work);
 
        /* hw iscsi may not have removed all connections from session */
index 251db30d0882dc83556a688798c4f277072edefe..2b2bc4b49d78a36c737cd9e70666b900ec0fc2b2 100644 (file)
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
        }
 
-       /*
-        * XXX and what if there are packets in flight and this close()
-        * XXX is followed by a "rmmod sd_mod"?
-        */
-
        scsi_disk_put(sdkp);
 }
 
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
        unsigned int opt_xfer_bytes =
                logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 
+       if (sdkp->opt_xfer_blocks == 0)
+               return false;
+
        if (sdkp->opt_xfer_blocks > dev_max) {
                sd_first_printk(KERN_WARNING, sdkp,
                                "Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
 {
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;
-       
+       struct request_queue *q = disk->queue;
+
        ida_free(&sd_index_ida, sdkp->index);
 
+       /*
+        * Wait until all requests that are in progress have completed.
+        * This is necessary to prevent e.g. scsi_end_request() from crashing
+        * after the disk->private_data pointer has been cleared. Wait from
+        * inside scsi_disk_release() instead of from sd_release() so that
+        * freezing and unfreezing the request queue does not affect user
+        * space I/O when multiple processes open a /dev/sd... node concurrently.
+        */
+       blk_mq_freeze_queue(q);
+       blk_mq_unfreeze_queue(q);
+
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
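The scsi_disk_release() hunk uses a freeze/unfreeze pair purely as a drain barrier: blk_mq_freeze_queue() waits until every request that already entered the queue has completed, so clearing disk->private_data right afterwards cannot race with scsi_end_request(). A hedged sketch of the idiom (the surrounding sd context is assumed):

#include <linux/blk-mq.h>
#include <linux/genhd.h>

static void demo_drain_then_clear(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	/* Freezing waits for all in-flight requests; unfreezing right away
	 * means the pair is only being used as a completion barrier.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_unfreeze_queue(q);

	disk->private_data = NULL;
}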
index 9351349cf0a930cd5c25dedd6bb747970e455e96..1e0041ec813238cbfa7ab52c3fdc9799961169d1 100644 (file)
@@ -150,7 +150,12 @@ struct bcm2835_power {
 
 static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Enable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 
 static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Disable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
        }
 }
 
-static void
+static int
 bcm2835_init_power_domain(struct bcm2835_power *power,
                          int pd_xlate_index, const char *name)
 {
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
 
        dom->clk = devm_clk_get(dev->parent, name);
+       if (IS_ERR(dom->clk)) {
+               int ret = PTR_ERR(dom->clk);
+
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
+               /* Some domains don't have a clk, so make sure that we
+                * don't deref an error pointer later.
+                */
+               dom->clk = NULL;
+       }
 
        dom->base.name = name;
        dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        pm_genpd_init(&dom->base, NULL, true);
 
        power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+       return 0;
 }
 
 /** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
        };
-       int ret, i;
+       int ret = 0, i;
        u32 id;
 
        power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
 
-       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
-               bcm2835_init_power_domain(power, i, power_domain_names[i]);
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+               if (ret)
+                       goto fail;
+       }
 
        for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
                pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        ret = devm_reset_controller_register(dev, &power->reset);
        if (ret)
-               return ret;
+               goto fail;
 
        of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
 
        dev_info(dev, "Broadcom BCM2835 power domains driver");
        return 0;
+
+fail:
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               struct generic_pm_domain *dom = &power->domains[i].base;
+
+               if (dom->name)
+                       pm_genpd_remove(dom);
+       }
+       return ret;
 }
 
 static int bcm2835_power_remove(struct platform_device *pdev)
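bcm2835_init_power_domain() now returns an error so the probe loop can unwind, and its devm_clk_get() handling follows a common idiom: propagate -EPROBE_DEFER, but treat any other failure as "this domain simply has no clock" by replacing the error pointer with NULL, which the clk API accepts as a no-op. A hedged sketch of that idiom:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_get_optional_clk(struct device *dev, const char *name,
				 struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* clock provider not ready yet */
		/* No clock for this domain; clk_*() calls treat NULL as a no-op. */
		clk = NULL;
	}

	*out = clk;
	return 0;
}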
index 720760cd493feb4583bffde64edd5253dd35e380..ba39647a690c3e37ed06b1ff3a9395302277fa76 100644 (file)
@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
 
 static void bcm2835_thermal_debugfs(struct platform_device *pdev)
 {
-       struct thermal_zone_device *tz = platform_get_drvdata(pdev);
-       struct bcm2835_thermal_data *data = tz->devdata;
+       struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
        struct debugfs_regset32 *regset;
 
        data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
 
        data->tz = tz;
 
-       platform_set_drvdata(pdev, tz);
+       platform_set_drvdata(pdev, data);
 
        /*
         * Thermal_zone doesn't enable hwmon as default,
@@ -290,8 +289,8 @@ err_clk:
 
 static int bcm2835_thermal_remove(struct platform_device *pdev)
 {
-       struct thermal_zone_device *tz = platform_get_drvdata(pdev);
-       struct bcm2835_thermal_data *data = tz->devdata;
+       struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tz;
 
        debugfs_remove_recursive(data->debugfsdir);
        thermal_zone_of_sensor_unregister(&pdev->dev, tz);
index 6fff16113628743ae9a6b006799fb376abb39198..f7c1f49ec87f2a397d882ca595421e26d71df2b3 100644 (file)
@@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
                               struct thermal_zone_device *tz, u32 power,
                               unsigned long *state)
 {
-       unsigned int cur_freq, target_freq;
+       unsigned int target_freq;
        u32 last_load, normalised_power;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
        struct cpufreq_policy *policy = cpufreq_cdev->policy;
 
-       cur_freq = cpufreq_quick_get(policy->cpu);
        power = power > 0 ? power : 0;
        last_load = cpufreq_cdev->last_load ?: 1;
        normalised_power = (power * 100) / last_load;
index 61ca7ce3624ed4298c6b0b99ee2d5e63f0f534cc..5f3ed24e26ec78fbf3bd3a93b1f91d1845d67f2a 100644 (file)
@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
        INT3400_THERMAL_PASSIVE_1,
        INT3400_THERMAL_ACTIVE,
        INT3400_THERMAL_CRITICAL,
+       INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
+       INT3400_THERMAL_EMERGENCY_CALL_MODE,
+       INT3400_THERMAL_PASSIVE_2,
+       INT3400_THERMAL_POWER_BOSS,
+       INT3400_THERMAL_VIRTUAL_SENSOR,
+       INT3400_THERMAL_COOLING_MODE,
+       INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
        INT3400_THERMAL_MAXIMUM_UUID,
 };
 
@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
        "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
        "3A95C389-E4B8-4629-A526-C52C88626BAE",
        "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
+       "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
+       "5349962F-71E6-431D-9AE8-0A635B710AEE",
+       "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
+       "F5A35014-C209-46A4-993A-EB56DE7530A1",
+       "6ED722A7-9240-48A5-B479-31EEF723D7CF",
+       "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
+       "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
 };
 
 struct int3400_thermal_priv {
@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
-       if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
-               int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
-               int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
-       }
+       int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+       int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+
        priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
                                                priv, &int3400_thermal_ops,
                                                &int3400_thermal_params, 0, 0);
index 7571f7c2e7c9abd488cf9ee3a6cdb223257f80ab..ac7256b5f020519608a0ec3bd4a2cb8fd652e489 100644 (file)
@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
        bool clamping;
 };
 
-static struct powerclamp_worker_data * __percpu worker_data;
+static struct powerclamp_worker_data __percpu *worker_data;
 static struct thermal_cooling_device *cooling_dev;
 static unsigned long *cpu_clamping_mask;  /* bit map for tracking per cpu
                                           * clamping kthread worker
@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
        struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
        struct kthread_worker *worker;
 
-       worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
+       worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
        if (IS_ERR(worker))
                return;
 
index 5c07a61447d3fc1bb469ab531a876a27fae44739..e4ea7f6aef20e335e160ad11605bc4f318e17739 100644 (file)
@@ -199,6 +199,9 @@ enum {
 #define MT7622_TS1     0
 #define MT7622_NUM_CONTROLLER          1
 
+/* The maximum number of banks */
+#define MAX_NUM_ZONES          8
+
 /* The calibration coefficient of sensor  */
 #define MT7622_CALIBRATION     165
 
@@ -249,7 +252,7 @@ struct mtk_thermal_data {
        const int num_controller;
        const int *controller_offset;
        bool need_switch_bank;
-       struct thermal_bank_cfg bank_data[];
+       struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
 };
 
 struct mtk_thermal {
@@ -268,7 +271,7 @@ struct mtk_thermal {
        s32 vts[MAX_NUM_VTS];
 
        const struct mtk_thermal_data *conf;
-       struct mtk_thermal_bank banks[];
+       struct mtk_thermal_bank banks[MAX_NUM_ZONES];
 };
 
 /* MT8183 thermal sensor data */
index 48eef552cba48edb5d60f7a75b715f9536e1b5ae..fc9399d9c0820d59a4321520fd5dcf9242620867 100644 (file)
@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
        struct exynos_tmu_data *data = p;
        int value, ret = 0;
 
-       if (!data || !data->tmu_read || !data->enabled)
+       if (!data || !data->tmu_read)
                return -EINVAL;
        else if (!data->enabled)
                /*
index df7d09409efe3a9512495b6c718ba0bbbebb39b3..8ca333f21292ee7dcb611591aed0e6f03421341b 100644 (file)
 
 #define GUEST_MAPPINGS_TRIES   5
 
+#define VBG_KERNEL_REQUEST \
+       (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+        VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
 /**
  * Reserves memory in which the VMM can relocate any guest mappings
  * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
        int i, rc;
 
        /* Query the required space. */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        struct vmmdev_guest_info2 *req2 = NULL;
        int rc, ret = -ENOMEM;
 
-       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
-       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+                            VBG_KERNEL_REQUEST);
+       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+                            VBG_KERNEL_REQUEST);
        if (!req1 || !req2)
                goto out_free;
 
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        req2->additions_minor = VBG_VERSION_MINOR;
        req2->additions_build = VBG_VERSION_BUILD;
        req2->additions_revision = VBG_SVN_REV;
-       /* (no features defined yet) */
-       req2->additions_features = 0;
+       req2->additions_features =
+               VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
        strlcpy(req2->name, VBG_VERSION_STRING,
                sizeof(req2->name));
 
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
        struct vmmdev_guest_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
        struct vmmdev_heartbeat *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
 
        gdev->guest_heartbeat_req = vbg_req_alloc(
                                        sizeof(*gdev->guest_heartbeat_req),
-                                       VMMDEVREQ_GUEST_HEARTBEAT);
+                                       VMMDEVREQ_GUEST_HEARTBEAT,
+                                       VBG_KERNEL_REQUEST);
        if (!gdev->guest_heartbeat_req)
                return -ENOMEM;
 
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       /*
+        * Allocate a request buffer before taking the spinlock. When the
+        * session is being terminated the requestor is the kernel, as
+        * we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       /*
+        * Allocate a request buffer before taking the spinlock. When the
+        * session is being terminated the requestor is the kernel, as
+        * we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
        struct vmmdev_host_version *req;
        int rc, ret;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 
        gdev->mem_balloon.get_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
-                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+                             VBG_KERNEL_REQUEST);
        gdev->mem_balloon.change_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
-                             VMMDEVREQ_CHANGE_MEMBALLOON);
+                             VMMDEVREQ_CHANGE_MEMBALLOON,
+                             VBG_KERNEL_REQUEST);
        gdev->cancel_req =
                vbg_req_alloc(sizeof(*(gdev->cancel_req)),
-                             VMMDEVREQ_HGCM_CANCEL2);
+                             VMMDEVREQ_HGCM_CANCEL2,
+                             VBG_KERNEL_REQUEST);
        gdev->ack_events_req =
                vbg_req_alloc(sizeof(*gdev->ack_events_req),
-                             VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+                             VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+                             VBG_KERNEL_REQUEST);
        gdev->mouse_status_req =
                vbg_req_alloc(sizeof(*gdev->mouse_status_req),
-                             VMMDEVREQ_GET_MOUSE_STATUS);
+                             VMMDEVREQ_GET_MOUSE_STATUS,
+                             VBG_KERNEL_REQUEST);
 
        if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
            !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
  * vboxguest_linux.c calls this when userspace opens the char-device.
  * Return: A pointer to the new session or an ERR_PTR on error.
  * @gdev:              The Guest extension device.
- * @user:              Set if this is a session for the vboxuser device.
+ * @requestor:         VMMDEV_REQUESTOR_* flags
  */
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
        struct vbg_session *session;
 
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
                return ERR_PTR(-ENOMEM);
 
        session->gdev = gdev;
-       session->user_session = user;
+       session->requestor = requestor;
 
        return session;
 }
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
                if (!session->hgcm_client_ids[i])
                        continue;
 
-               vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+               /* requestor is kernel here, as we're cleaning up. */
+               vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+                                   session->hgcm_client_ids[i], &rc);
        }
 
        kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
                return -EPERM;
        }
 
-       if (trusted_apps_only && session->user_session) {
+       if (trusted_apps_only &&
+           (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
                vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
                        req->request_type);
                return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EMFILE;
 
-       ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
-                              &conn->hdr.rc);
+       ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+                              &client_id, &conn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EINVAL;
 
-       ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+       ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+                                 &disconn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
        }
 
        if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
-               ret = vbg_hgcm_call32(gdev, client_id,
+               ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
                                      call->function, call->timeout_ms,
                                      VBG_IOCTL_HGCM_CALL_PARMS32(call),
                                      call->parm_count, &call->hdr.rc);
        else
-               ret = vbg_hgcm_call(gdev, client_id,
+               ret = vbg_hgcm_call(gdev, session->requestor, client_id,
                                    call->function, call->timeout_ms,
                                    VBG_IOCTL_HGCM_CALL_PARMS(call),
                                    call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
 }
 
 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+                                    struct vbg_session *session,
                                     struct vbg_ioctl_write_coredump *dump)
 {
        struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
        if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
                return -EINVAL;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+                           session->requestor);
        if (!req)
                return -ENOMEM;
 
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
        case VBG_IOCTL_CHECK_BALLOON:
                return vbg_ioctl_check_balloon(gdev, data);
        case VBG_IOCTL_WRITE_CORE_DUMP:
-               return vbg_ioctl_write_core_dump(gdev, data);
+               return vbg_ioctl_write_core_dump(gdev, session, data);
        }
 
        /* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
        struct vmmdev_mouse_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
index 7ad9ec45bfa9d649627f45e9410aebff43cd22c7..4188c12b839f7e74f845cc9524c1917b188dae95 100644 (file)
@@ -154,15 +154,15 @@ struct vbg_session {
         * host. Protected by vbg_gdev.session_mutex.
         */
        u32 guest_caps;
-       /** Does this session belong to a root process or a user one? */
-       bool user_session;
+       /** VMMDEV_REQUESTOR_* flags */
+       u32 requestor;
        /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
        bool cancel_waiters;
 };
 
 int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
 void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
 void vbg_core_close_session(struct vbg_session *session);
 int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
 int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
 /* Private (non exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor);
 void vbg_req_free(void *req, size_t len);
 int vbg_req_perform(struct vbg_dev *gdev, void *req);
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status);
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status);
 
 #endif
index 6e2a9619192d2317f8f449fbb5f9c24d0699e3f8..6e8c0f1c1056296e983fd70af5de7c405392c3ee 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2006-2016 Oracle Corporation
  */
 
+#include <linux/cred.h>
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
 /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
 static struct vbg_dev *vbg_gdev;
 
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+       u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+                       VMMDEV_REQUESTOR_CON_DONT_KNOW |
+                       VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+       if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+               requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+       else
+               requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+       if (in_egroup_p(inode->i_gid))
+               requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+       return requestor;
+}
+
 static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 {
        struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
        /* misc_open sets filp->private_data to our misc device */
        gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
        gdev = container_of(filp->private_data, struct vbg_dev,
                            misc_device_user);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+                                             VMMDEV_REQUESTOR_USER_DEVICE);
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
                         req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
 
        if (is_vmmdev_req)
-               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+                                   session->requestor);
        else
                buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
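The vboxguest series threads a "requestor" value through every VMMDev request: housekeeping requests issued by the driver itself use the VBG_KERNEL_REQUEST mask, while requests triggered from a file descriptor reuse the flags captured at open() time (root vs. ordinary user, and whether the unprivileged /dev/vboxuser node was used). A userspace analogy of that flag composition; the flag values below are invented and are not the VMMDEV_REQUESTOR_* constants:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define REQ_USERMODE	0x01
#define REQ_USR_ROOT	0x02
#define REQ_USR_USER	0x04
#define REQ_USER_DEVICE	0x08	/* came in via the unprivileged node */

static uint32_t build_requestor(int via_user_node)
{
	uint32_t r = REQ_USERMODE;

	/* Decide "who is asking" once at open time, then stamp every request. */
	r |= (getuid() == 0) ? REQ_USR_ROOT : REQ_USR_USER;
	if (via_user_node)
		r |= REQ_USER_DEVICE;
	return r;
}

int main(void)
{
	printf("requestor flags: %#x\n", build_requestor(1));
	return 0;
}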
index bf4474214b4d31bb708c3d9c302d6ce415e17c7a..75fd140b02ff8aa41816a1f0284a4926c214df7e 100644 (file)
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
 VBG_LOG(vbg_debug, pr_debug);
 #endif
 
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor)
 {
        struct vmmdev_request_header *req;
        int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
        req->request_type = req_type;
        req->rc = VERR_GENERAL_FAILURE;
        req->reserved1 = 0;
-       req->reserved2 = 0;
+       req->requestor = requestor;
 
        return req;
 }
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
        return done;
 }
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status)
 {
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
        int rc;
 
        hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
-                                    VMMDEVREQ_HGCM_CONNECT);
+                                    VMMDEVREQ_HGCM_CONNECT, requestor);
        if (!hgcm_connect)
                return -ENOMEM;
 
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 }
 EXPORT_SYMBOL(vbg_hgcm_connect);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status)
 {
        struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
        int rc;
 
        hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
-                                       VMMDEVREQ_HGCM_DISCONNECT);
+                                       VMMDEVREQ_HGCM_DISCONNECT,
+                                       requestor);
        if (!hgcm_disconnect)
                return -ENOMEM;
 
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
        return 0;
 }
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status)
 {
        struct vmmdev_hgcm_call *call;
        void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
                goto free_bounce_bufs;
        }
 
-       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
        if (!call) {
                ret = -ENOMEM;
                goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
 
 #ifdef CONFIG_COMPAT
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status)
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status)
 {
        struct vmmdev_hgcm_function_parameter *parm64 = NULL;
        u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
                        goto out_free;
        }
 
-       ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+       ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
                            parm64, parm_count, vbox_status);
        if (ret < 0)
                goto out_free;
index 77f0c8f8a23112f1d3c16b237514237767bab398..84834dad38d5c431d161607989080e758bb62b7b 100644 (file)
@@ -9,11 +9,10 @@
 #ifndef __VBOX_VERSION_H__
 #define __VBOX_VERSION_H__
 
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
 #define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
 
 #endif
index 5e2ae978935de3630cfda2184534bb838620ea6f..6337b8d75d960bdefc5c8119a187d185ff918dfb 100644 (file)
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
        s32 rc;
        /** Reserved field no.1. MBZ. */
        u32 reserved1;
-       /** Reserved field no.2. MBZ. */
-       u32 reserved2;
+       /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+       u32 requestor;
 };
 VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
 
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
 };
 VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
 
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO   BIT(0)
+
 /** struct vmmdev_guestinfo2 - Guest information report, version 2. */
 struct vmmdev_guest_info2 {
        /** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
        u32 additions_build;
        /** SVN revision. */
        u32 additions_revision;
-       /** Feature mask, currently unused. */
+       /** Feature mask. */
        u32 additions_features;
        /**
         * The intentional meaning of this field was:
index ca08c83168f5fbf1f7f6b52c8c3ff769bf70cf04..0b37867b5c202332b66ba5bede2a31e4287a23e0 100644 (file)
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
-       *bp++ = 0;
+       *bp++ = htonl(attr->ia_size >> 32);     /* position of start of write */
+       *bp++ = htonl((u32) attr->ia_size);
        *bp++ = 0;                              /* size of write */
        *bp++ = 0;
        *bp++ = htonl(attr->ia_size >> 32);     /* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
+       *bp++ = htonl(attr->ia_size);           /* position of start of write */
        *bp++ = 0;                              /* size of write */
        *bp++ = htonl(attr->ia_size);           /* new file length */
 
index 5aa57929e8c23559c41b8a875f3ea2db43a364dc..6e97a42d24d130471a97a28510ec3712605c50cd 100644 (file)
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
        bp = xdr_encode_u32(bp, 0); /* RPC flags */
        bp = xdr_encode_YFSFid(bp, &vnode->fid);
        bp = xdr_encode_YFS_StoreStatus(bp, attr);
-       bp = xdr_encode_u64(bp, 0);             /* position of start of write */
+       bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
        bp = xdr_encode_u64(bp, 0);             /* size of write */
        bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
        yfs_check_req(call, bp);
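The afs and yfs setattr hunks fix the truncate marshalling: the "position of start of write" field is now the new file length (with a zero write size) rather than 0, and for the 64-bit variant that value has to be split into two big-endian 32-bit words, high word first. A small standalone illustration of that split; only the field layout shown in the hunk is implied:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static void encode_u64_be(uint32_t *bp, uint64_t pos)
{
	/* High 32 bits first, then low 32 bits, each in network byte order. */
	bp[0] = htonl((uint32_t)(pos >> 32));
	bp[1] = htonl((uint32_t)pos);
}

int main(void)
{
	uint32_t buf[2];

	encode_u64_be(buf, 0x123456789abcdef0ULL);
	printf("%08x %08x\n", ntohl(buf[0]), ntohl(buf[1]));
	return 0;
}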
index e9faa52bb489c424775af69ccf9362c1c4b07c2f..78d3257435c00b76633ee6168a2586ccfa70118c 100644 (file)
@@ -336,12 +336,14 @@ static void blkdev_bio_end_io(struct bio *bio)
        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
-               struct bio_vec *bvec;
-               int i;
-               struct bvec_iter_all iter_all;
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+                       struct bvec_iter_all iter_all;
+                       struct bio_vec *bvec;
+                       int i;
 
-               bio_for_each_segment_all(bvec, bio, i, iter_all)
-                       put_page(bvec->bv_page);
+                       bio_for_each_segment_all(bvec, bio, i, iter_all)
+                               put_page(bvec->bv_page);
+               }
                bio_put(bio);
        }
 }
index 1d49694e6ae3226044c8d9464cca09f78889d335..c5880329ae37c661b4e87b3cafc0599e776f242d 100644 (file)
@@ -6174,7 +6174,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
         *
         * This is overestimating in most cases.
         */
-       qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
+       qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
 
        spin_lock(&block_rsv->lock);
        block_rsv->size = reserve_size;
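The one-character btrfs cast above fixes a 32-bit overflow: outstanding_extents and fs_info->nodesize are both 32-bit, so their product wraps before it is stored in the 64-bit qgroup_rsv_size unless one operand is widened first. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t outstanding_extents = 70000;	/* a large but plausible count */
	uint32_t nodesize = 65536;		/* 64K metadata nodes */

	uint64_t wraps   = outstanding_extents * nodesize;	       /* 32-bit multiply */
	uint64_t widened = (uint64_t)outstanding_extents * nodesize;  /* 64-bit multiply */

	printf("without cast: %llu\nwith cast:    %llu\n",
	       (unsigned long long)wraps, (unsigned long long)widened);
	return 0;
}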
index eb680b715dd6b2f50e7d98026a8dcc256797eacc..e659d9d6110733845b35309703d9dff73cb78689 100644 (file)
@@ -1922,8 +1922,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
        int i;
 
        /* Level sanity check */
-       if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
-           root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
+       if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
+           root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
            root_level < cur_level) {
                btrfs_err_rl(fs_info,
                        "%s: bad levels, cur_level=%d root_level=%d",
index 1869ba8e5981c948c435bf26e3c2f9d8016770b0..67a6f7d4740230aaa24b1ecad5e91ec5b94b5f70 100644 (file)
@@ -2430,8 +2430,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                        bitmap_clear(rbio->dbitmap, pagenr, 1);
                kunmap(p);
 
-               for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+               for (stripe = 0; stripe < nr_data; stripe++)
                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+               kunmap(p_page);
        }
 
        __free_page(p_page);
index acdad6d658f54bda7cf9c379867d212a41d1c24b..e4e665f422fc4c87b05181211b73ca2097e3b7c2 100644 (file)
@@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
        }
 }
 
-static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
 {
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+
        /*
         * We use writeback_inodes_sb here because if we used
         * btrfs_start_delalloc_roots we would deadlock with fs freeze.
@@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
         * from already being in a transaction and our join_transaction doesn't
         * have to re-take the fs freeze lock.
         */
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
                writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+       } else {
+               struct btrfs_pending_snapshot *pending;
+               struct list_head *head = &trans->transaction->pending_snapshots;
+
+               /*
+                * Flush delalloc for any root that is going to be snapshotted.
+                * This is done to avoid a corrupted version of files, in the
+                * snapshots, that had both buffered and direct IO writes (even
+                * if they were done sequentially) due to an unordered update of
+                * the inode's size on disk.
+                */
+               list_for_each_entry(pending, head, list) {
+                       int ret;
+
+                       ret = btrfs_start_delalloc_snapshot(pending->root);
+                       if (ret)
+                               return ret;
+               }
+       }
        return 0;
 }
 
-static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
 {
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
                btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       } else {
+               struct btrfs_pending_snapshot *pending;
+               struct list_head *head = &trans->transaction->pending_snapshots;
+
+               /*
+                * Wait for any delalloc that we started previously for the roots
+                * that are going to be snapshotted. This is to avoid a corrupted
+                * version of files in the snapshots that had both buffered and
+                * direct IO writes (even if they were done sequentially).
+                */
+               list_for_each_entry(pending, head, list)
+                       btrfs_wait_ordered_extents(pending->root,
+                                                  U64_MAX, 0, U64_MAX);
+       }
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
@@ -2023,7 +2060,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 
        extwriter_counter_dec(cur_trans, trans->type);
 
-       ret = btrfs_start_delalloc_flush(fs_info);
+       ret = btrfs_start_delalloc_flush(trans);
        if (ret)
                goto cleanup_transaction;
 
@@ -2039,7 +2076,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        if (ret)
                goto cleanup_transaction;
 
-       btrfs_wait_delalloc_flush(fs_info);
+       btrfs_wait_delalloc_flush(trans);
 
        btrfs_scrub_pause(fs_info);
        /*
index f06454a55e00cb4df0f71f03eb0013adbae1e4f4..561884f60d35c36e11928e28e5007901fc695198 100644 (file)
@@ -3578,9 +3578,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        }
        btrfs_release_path(path);
 
-       /* find the first key from this transaction again */
+       /*
+        * Find the first key from this transaction again.  See the note for
+        * log_new_dir_dentries, if we're logging a directory recursively we
+        * won't be holding its i_mutex, which means we can modify the directory
+        * while we're logging it.  If we remove an entry between our first
+        * search and this search we'll not find the key again and can just
+        * bail.
+        */
        ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
-       if (WARN_ON(ret != 0))
+       if (ret != 0)
                goto done;
 
        /*
@@ -4544,6 +4551,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_inode_item);
                *size_ret = btrfs_inode_size(path->nodes[0], item);
+               /*
+                * If the in-memory inode's i_size is smaller than the inode
+                * size stored in the btree, return the inode's i_size, so
+                * that we get a correct inode size after replaying the log
+                * when before a power failure we had a shrinking truncate
+                * followed by addition of a new name (rename / new hard link).
+                * Otherwise return the inode size from the btree, to avoid
+                * data loss when replaying a log due to previously doing a
+                * write that expands the inode's size and logging a new name
+                * immediately after.
+                */
+               if (*size_ret > inode->vfs_inode.i_size)
+                       *size_ret = inode->vfs_inode.i_size;
        }
 
        btrfs_release_path(path);
@@ -4705,15 +4725,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
                                        struct btrfs_file_extent_item);
 
                if (btrfs_file_extent_type(leaf, extent) ==
-                   BTRFS_FILE_EXTENT_INLINE) {
-                       len = btrfs_file_extent_ram_bytes(leaf, extent);
-                       ASSERT(len == i_size ||
-                              (len == fs_info->sectorsize &&
-                               btrfs_file_extent_compression(leaf, extent) !=
-                               BTRFS_COMPRESS_NONE) ||
-                              (len < i_size && i_size < fs_info->sectorsize));
+                   BTRFS_FILE_EXTENT_INLINE)
                        return 0;
-               }
 
                len = btrfs_file_extent_num_bytes(leaf, extent);
                /* Last extent goes beyond i_size, no need to log a hole. */
index 9024eee889b9838caa2799ca51f439106a955ff0..db934ceae9c109f39eade85457a25b8ce99604ae 100644 (file)
@@ -6407,7 +6407,7 @@ static void btrfs_end_bio(struct bio *bio)
                                if (bio_op(bio) == REQ_OP_WRITE)
                                        btrfs_dev_stat_inc_and_print(dev,
                                                BTRFS_DEV_STAT_WRITE_ERRS);
-                               else
+                               else if (!(bio->bi_opf & REQ_RAHEAD))
                                        btrfs_dev_stat_inc_and_print(dev,
                                                BTRFS_DEV_STAT_READ_ERRS);
                                if (bio->bi_opf & REQ_PREFLUSH)
index e3346628efe2e221c844db3af6b4336d07b4f7f1..2d61ddda9bf5653fb559fb320422fd84ec470419 100644 (file)
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
+       kfree(ci->i_symlink);
        kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode)
                }
        }
 
-       kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
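Moving kfree(ci->i_symlink) from ceph_destroy_inode() into the RCU callback matters because RCU-walk path lookup may still be following the symlink body until a grace period has elapsed; anything such a reader can dereference has to be freed from the call_rcu() callback, not from the synchronous destroy path. A hedged sketch of the pattern with stand-in types (not ceph code):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_inode {
	char *symlink;			/* may still be read under rcu_read_lock() */
	struct rcu_head rcu;
};

static void demo_i_callback(struct rcu_head *head)
{
	struct demo_inode *di = container_of(head, struct demo_inode, rcu);

	kfree(di->symlink);		/* safe: a grace period has elapsed */
	kfree(di);			/* di itself was kmalloc()ed in this sketch */
}

static void demo_destroy(struct demo_inode *di)
{
	call_rcu(&di->rcu, demo_i_callback);
}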
index 217276b8b942f59d96fa74edc9d119402d3c7031..f9b71c12cc9f6d46267eaf73a801dd00715a9cf2 100644 (file)
@@ -1008,7 +1008,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
        unsigned int xid;
        int rc;
 
-       if (remap_flags & ~REMAP_FILE_ADVISORY)
+       if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
                return -EINVAL;
 
        cifs_dbg(FYI, "clone range\n");
index 142164ef1f05fe7befc2ad5df61409336b422fb7..5c0298b9998fc326795c435f6050c3563bdf09f4 100644 (file)
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.18"
+#define CIFS_VERSION   "2.19"
 #endif                         /* _CIFSFS_H */
index 2a6d20c0ce0288d37ad2405424200be3fefaa9f4..89006e044973ec2d97ad784236f66cb447431693 100644 (file)
@@ -2632,43 +2632,56 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
        struct TCP_Server_Info *server =
                tlink_tcon(wdata->cfile->tlink)->ses->server;
 
-       /*
-        * Wait for credits to resend this wdata.
-        * Note: we are attempting to resend the whole wdata not in segments
-        */
        do {
-               rc = server->ops->wait_mtu_credits(server, wdata->bytes, &wsize,
-                                                  &credits);
+               if (wdata->cfile->invalidHandle) {
+                       rc = cifs_reopen_file(wdata->cfile, false);
+                       if (rc == -EAGAIN)
+                               continue;
+                       else if (rc)
+                               break;
+               }
 
-               if (rc)
-                       goto out;
 
-               if (wsize < wdata->bytes) {
-                       add_credits_and_wake_if(server, &credits, 0);
-                       msleep(1000);
-               }
-       } while (wsize < wdata->bytes);
+               /*
+                * Wait for credits to resend this wdata.
+                * Note: we are attempting to resend the whole wdata not in
+                * segments
+                */
+               do {
+                       rc = server->ops->wait_mtu_credits(server, wdata->bytes,
+                                               &wsize, &credits);
+                       if (rc)
+                               goto fail;
+
+                       if (wsize < wdata->bytes) {
+                               add_credits_and_wake_if(server, &credits, 0);
+                               msleep(1000);
+                       }
+               } while (wsize < wdata->bytes);
+               wdata->credits = credits;
 
-       wdata->credits = credits;
-       rc = -EAGAIN;
-       while (rc == -EAGAIN) {
-               rc = 0;
-               if (wdata->cfile->invalidHandle)
-                       rc = cifs_reopen_file(wdata->cfile, false);
-               if (!rc)
-                       rc = server->ops->async_writev(wdata,
+               rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+
+               if (!rc) {
+                       if (wdata->cfile->invalidHandle)
+                               rc = -EAGAIN;
+                       else
+                               rc = server->ops->async_writev(wdata,
                                        cifs_uncached_writedata_release);
-       }
+               }
 
-       if (!rc) {
-               list_add_tail(&wdata->list, wdata_list);
-               return 0;
-       }
+               /* If the write was successfully sent, we are done */
+               if (!rc) {
+                       list_add_tail(&wdata->list, wdata_list);
+                       return 0;
+               }
 
-       add_credits_and_wake_if(server, &wdata->credits, 0);
-out:
-       kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+               /* Roll back credits and retry if needed */
+               add_credits_and_wake_if(server, &wdata->credits, 0);
+       } while (rc == -EAGAIN);
 
+fail:
+       kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        return rc;
 }
 
@@ -2896,12 +2909,12 @@ restart_loop:
                                                wdata->bytes, &tmp_from,
                                                ctx->cfile, cifs_sb, &tmp_list,
                                                ctx);
+
+                                       kref_put(&wdata->refcount,
+                                               cifs_uncached_writedata_release);
                                }
 
                                list_splice(&tmp_list, &ctx->list);
-
-                               kref_put(&wdata->refcount,
-                                        cifs_uncached_writedata_release);
                                goto restart_loop;
                        }
                }
@@ -3348,44 +3361,55 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
        struct TCP_Server_Info *server =
                tlink_tcon(rdata->cfile->tlink)->ses->server;
 
-       /*
-        * Wait for credits to resend this rdata.
-        * Note: we are attempting to resend the whole rdata not in segments
-        */
        do {
-               rc = server->ops->wait_mtu_credits(server, rdata->bytes,
+               if (rdata->cfile->invalidHandle) {
+                       rc = cifs_reopen_file(rdata->cfile, true);
+                       if (rc == -EAGAIN)
+                               continue;
+                       else if (rc)
+                               break;
+               }
+
+               /*
+                * Wait for credits to resend this rdata.
+                * Note: we are attempting to resend the whole rdata not in
+                * segments
+                */
+               do {
+                       rc = server->ops->wait_mtu_credits(server, rdata->bytes,
                                                &rsize, &credits);
 
-               if (rc)
-                       goto out;
+                       if (rc)
+                               goto fail;
 
-               if (rsize < rdata->bytes) {
-                       add_credits_and_wake_if(server, &credits, 0);
-                       msleep(1000);
-               }
-       } while (rsize < rdata->bytes);
+                       if (rsize < rdata->bytes) {
+                               add_credits_and_wake_if(server, &credits, 0);
+                               msleep(1000);
+                       }
+               } while (rsize < rdata->bytes);
+               rdata->credits = credits;
 
-       rdata->credits = credits;
-       rc = -EAGAIN;
-       while (rc == -EAGAIN) {
-               rc = 0;
-               if (rdata->cfile->invalidHandle)
-                       rc = cifs_reopen_file(rdata->cfile, true);
-               if (!rc)
-                       rc = server->ops->async_readv(rdata);
-       }
+               rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+               if (!rc) {
+                       if (rdata->cfile->invalidHandle)
+                               rc = -EAGAIN;
+                       else
+                               rc = server->ops->async_readv(rdata);
+               }
 
-       if (!rc) {
-               /* Add to aio pending list */
-               list_add_tail(&rdata->list, rdata_list);
-               return 0;
-       }
+               /* If the read was successfully sent, we are done */
+               if (!rc) {
+                       /* Add to aio pending list */
+                       list_add_tail(&rdata->list, rdata_list);
+                       return 0;
+               }
 
-       add_credits_and_wake_if(server, &rdata->credits, 0);
-out:
-       kref_put(&rdata->refcount,
-               cifs_uncached_readdata_release);
+               /* Roll back credits and retry if needed */
+               add_credits_and_wake_if(server, &rdata->credits, 0);
+       } while (rc == -EAGAIN);
 
+fail:
+       kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        return rc;
 }
 
index 924269cec1352f75a592ba81e9865bbb030caafe..e32c264e3adbb847653911499ef635c586032c8a 100644 (file)
@@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
        "STATUS_UNFINISHED_CONTEXT_DELETED"},
        {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
-       {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
+       /* Note that ENOATTR and ENODATA are the same errno */
+       {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
        {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
        {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
        "STATUS_WRONG_CREDENTIAL_HANDLE"},
index c399e09b76e62a7c733857075ff348b039e49a58..21ac19ff19cb2c3257f524f4aef90f0de2e8d342 100644 (file)
@@ -1628,9 +1628,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        iov[1].iov_base = unc_path;
        iov[1].iov_len = unc_path_len;
 
-       /* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */
+       /*
+        * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
+        * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
+        * (Samba servers don't always set the flag so also check if null user)
+        */
        if ((ses->server->dialect == SMB311_PROT_ID) &&
-           !smb3_encryption_required(tcon))
+           !smb3_encryption_required(tcon) &&
+           !(ses->session_flags &
+                   (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
+           ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
        memset(&rqst, 0, sizeof(struct smb_rqst));
index fa226de48ef38c0c0696aca767c69cc2ac8dbdd8..99c4d799c24b63c1f16efba4cf5ab1341c9ae90b 100644 (file)
@@ -549,19 +549,19 @@ DECLARE_EVENT_CLASS(smb3_tcon_class,
                __field(unsigned int, xid)
                __field(__u32, tid)
                __field(__u64, sesid)
-               __field(const char *,  unc_name)
+               __string(name, unc_name)
                __field(int, rc)
        ),
        TP_fast_assign(
                __entry->xid = xid;
                __entry->tid = tid;
                __entry->sesid = sesid;
-               __entry->unc_name = unc_name;
+               __assign_str(name, unc_name);
                __entry->rc = rc;
        ),
        TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
                __entry->xid, __entry->sesid, __entry->tid,
-               __entry->unc_name, __entry->rc)
+               __get_str(name), __entry->rc)
 )
 
 #define DEFINE_SMB3_TCON_EVENT(name)          \
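
The tracepoint previously stored only the unc_name pointer in the ring buffer, so the string could be freed or change before the event was read; __string()/__assign_str()/__get_str() reserve space in the event record and copy the characters at trace time. A minimal sketch of the same pattern on a hypothetical event:

TRACE_EVENT(example_tcon,
        TP_PROTO(unsigned int xid, const char *unc_name, int rc),
        TP_ARGS(xid, unc_name, rc),
        TP_STRUCT__entry(
                __field(unsigned int, xid)
                __string(name, unc_name)        /* reserves strlen+1 bytes */
                __field(int, rc)
        ),
        TP_fast_assign(
                __entry->xid = xid;
                __assign_str(name, unc_name);   /* copies into the event */
                __entry->rc = rc;
        ),
        TP_printk("xid=%u unc_name=%s rc=%d",
                  __entry->xid, __get_str(name), __entry->rc)
);
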
index a1ac7e9245ecc46efaceda18901f513922937376..75a5309f223151767aa951336dbd370e6776ec14 100644 (file)
@@ -384,7 +384,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
 
-       if (ext4_handle_valid(handle)) {
+       if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
                ei->i_sync_tid = handle->h_transaction->t_tid;
                if (datasync)
                        ei->i_datasync_tid = handle->h_transaction->t_tid;
index 69d65d49837bb65b10e47895f33ce6acc2ee696f..98ec11f69cd4d0d50abbaf14b6fd82224a10e6d0 100644 (file)
@@ -125,7 +125,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
 
-       if (pos >= i_size_read(inode))
+       if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
                return 0;
 
        if ((pos | iov_iter_alignment(from)) & blockmask)
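
The ext4_unaligned_aio() change rounds i_size up to a block boundary before deciding that a write is "past EOF" and therefore safe. Worked example: with a 4096-byte block size and i_size of 6000, ALIGN(6000, 4096) is 8192, so a direct AIO write at offset 7000 still counts as unaligned; it lands in the block that contains EOF and can race with concurrent zeroing of that partial block, whereas the old "pos >= i_size" test would have let it bypass the serialization.
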
index c2225f0d31b511bbb0c62b176324e4f483437638..2024d3fa55044f734b961f3c8818ba3d243260a3 100644 (file)
@@ -1222,6 +1222,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
        ext4_lblk_t offsets[4], offsets2[4];
        Indirect chain[4], chain2[4];
        Indirect *partial, *partial2;
+       Indirect *p = NULL, *p2 = NULL;
        ext4_lblk_t max_block;
        __le32 nr = 0, nr2 = 0;
        int n = 0, n2 = 0;
@@ -1263,7 +1264,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
                }
 
 
-               partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+               partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
                if (nr) {
                        if (partial == chain) {
                                /* Shared branch grows from the inode */
@@ -1288,13 +1289,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
                                partial->p + 1,
                                (__le32 *)partial->bh->b_data+addr_per_block,
                                (chain+n-1) - partial);
-                       BUFFER_TRACE(partial->bh, "call brelse");
-                       brelse(partial->bh);
                        partial--;
                }
 
 end_range:
-               partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+               partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
                if (nr2) {
                        if (partial2 == chain2) {
                                /*
@@ -1324,16 +1323,14 @@ end_range:
                                           (__le32 *)partial2->bh->b_data,
                                           partial2->p,
                                           (chain2+n2-1) - partial2);
-                       BUFFER_TRACE(partial2->bh, "call brelse");
-                       brelse(partial2->bh);
                        partial2--;
                }
                goto do_indirects;
        }
 
        /* Punch happened within the same level (n == n2) */
-       partial = ext4_find_shared(inode, n, offsets, chain, &nr);
-       partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+       partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
+       partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
 
        /* Free top, but only if partial2 isn't its subtree. */
        if (nr) {
@@ -1390,11 +1387,7 @@ end_range:
                                           partial->p + 1,
                                           partial2->p,
                                           (chain+n-1) - partial);
-                       BUFFER_TRACE(partial->bh, "call brelse");
-                       brelse(partial->bh);
-                       BUFFER_TRACE(partial2->bh, "call brelse");
-                       brelse(partial2->bh);
-                       return 0;
+                       goto cleanup;
                }
 
                /*
@@ -1409,8 +1402,6 @@ end_range:
                                           partial->p + 1,
                                           (__le32 *)partial->bh->b_data+addr_per_block,
                                           (chain+n-1) - partial);
-                       BUFFER_TRACE(partial->bh, "call brelse");
-                       brelse(partial->bh);
                        partial--;
                }
                if (partial2 > chain2 && depth2 <= depth) {
@@ -1418,11 +1409,21 @@ end_range:
                                           (__le32 *)partial2->bh->b_data,
                                           partial2->p,
                                           (chain2+n2-1) - partial2);
-                       BUFFER_TRACE(partial2->bh, "call brelse");
-                       brelse(partial2->bh);
                        partial2--;
                }
        }
+
+cleanup:
+       while (p && p > chain) {
+               BUFFER_TRACE(p->bh, "call brelse");
+               brelse(p->bh);
+               p--;
+       }
+       while (p2 && p2 > chain2) {
+               BUFFER_TRACE(p2->bh, "call brelse");
+               brelse(p2->bh);
+               p2--;
+       }
        return 0;
 
 do_indirects:
@@ -1430,7 +1431,7 @@ do_indirects:
        switch (offsets[0]) {
        default:
                if (++n >= n2)
-                       return 0;
+                       break;
                nr = i_data[EXT4_IND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
@@ -1439,7 +1440,7 @@ do_indirects:
                /* fall through */
        case EXT4_IND_BLOCK:
                if (++n >= n2)
-                       return 0;
+                       break;
                nr = i_data[EXT4_DIND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
@@ -1448,7 +1449,7 @@ do_indirects:
                /* fall through */
        case EXT4_DIND_BLOCK:
                if (++n >= n2)
-                       return 0;
+                       break;
                nr = i_data[EXT4_TIND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
@@ -1458,5 +1459,5 @@ do_indirects:
        case EXT4_TIND_BLOCK:
                ;
        }
-       return 0;
+       goto cleanup;
 }
index b54b261ded36f92076d95197e6c456e5bd17698a..b32a57bc5d5d602ffcb8536ef28ae2684317ed7d 100644 (file)
@@ -6080,36 +6080,6 @@ out:
        return;
 }
 
-#if 0
-/*
- * Bind an inode's backing buffer_head into this transaction, to prevent
- * it from being flushed to disk early.  Unlike
- * ext4_reserve_inode_write, this leaves behind no bh reference and
- * returns no iloc structure, so the caller needs to repeat the iloc
- * lookup to mark the inode dirty later.
- */
-static int ext4_pin_inode(handle_t *handle, struct inode *inode)
-{
-       struct ext4_iloc iloc;
-
-       int err = 0;
-       if (handle) {
-               err = ext4_get_inode_loc(inode, &iloc);
-               if (!err) {
-                       BUFFER_TRACE(iloc.bh, "get_write_access");
-                       err = jbd2_journal_get_write_access(handle, iloc.bh);
-                       if (!err)
-                               err = ext4_handle_dirty_metadata(handle,
-                                                                NULL,
-                                                                iloc.bh);
-                       brelse(iloc.bh);
-               }
-       }
-       ext4_std_error(inode->i_sb, err);
-       return err;
-}
-#endif
-
 int ext4_change_inode_journal_flag(struct inode *inode, int val)
 {
        journal_t *journal;
index 3c4f8bb59f8abfd23ceaf36f93c7fceffac0134e..bab3da4f1e0d36692fa172b6724379baa47bb2d6 100644 (file)
@@ -1000,6 +1000,13 @@ resizefs_out:
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
 
+               /*
+                * We haven't replayed the journal, so we cannot use our
+                * block-bitmap-guided storage zapping commands.
+                */
+               if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
+                       return -EROFS;
+
                if (copy_from_user(&range, (struct fstrim_range __user *)arg,
                    sizeof(range)))
                        return -EFAULT;
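
The new check makes FITRIM fail with -EROFS when the filesystem was mounted with -o noload and has a journal: without replay, the on-disk block bitmaps may be stale, so trimming "free" ranges could discard blocks the unreplayed journal still accounts as in use. A userspace sketch of the ioctl that hits this path (illustrative; assumes the caller has the required privilege):

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int trim_mount(const char *mountpoint)
{
        struct fstrim_range range = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
        int fd = open(mountpoint, O_RDONLY | O_DIRECTORY);

        if (fd < 0)
                return -1;
        if (ioctl(fd, FITRIM, &range) < 0)
                fprintf(stderr, "FITRIM: %s\n", strerror(errno)); /* EROFS with noload */
        else
                printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
}
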
index 3d9b18505c0c799b272553d0adf81fa26e6a6833..e7ae26e36c9c119a0b8b025e57914ba411b38ac5 100644 (file)
@@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
        memcpy(n_group_desc, o_group_desc,
               EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
        n_group_desc[gdb_num] = gdb_bh;
+
+       BUFFER_TRACE(gdb_bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, gdb_bh);
+       if (err) {
+               kvfree(n_group_desc);
+               brelse(gdb_bh);
+               return err;
+       }
+
        EXT4_SB(sb)->s_group_desc = n_group_desc;
        EXT4_SB(sb)->s_gdb_count++;
        kvfree(o_group_desc);
-       BUFFER_TRACE(gdb_bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, gdb_bh);
        return err;
 }
 
@@ -2073,6 +2080,10 @@ out:
                free_flex_gd(flex_gd);
        if (resize_inode != NULL)
                iput(resize_inode);
-       ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
+       if (err)
+               ext4_warning(sb, "error (%d) occurred during "
+                            "file system resize", err);
+       ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
+                ext4_blocks_count(es));
        return err;
 }
index f5b828bf1299f1998d7b8ac2696b979c8f303079..6ed4eb81e67437dc2dddbbce8ae9f7f2b59681d1 100644 (file)
@@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
        spin_unlock(&sbi->s_md_lock);
 }
 
+static bool system_going_down(void)
+{
+       return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+               || system_state == SYSTEM_RESTART;
+}
+
 /* Deal with the reporting of failure conditions on a filesystem such as
  * inconsistencies detected or read IO failures.
  *
@@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
        }
-       if (test_opt(sb, ERRORS_RO)) {
+       /*
+        * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+        * could panic during 'reboot -f' as the underlying device got already
+        * disabled.
+        */
+       if (test_opt(sb, ERRORS_RO) || system_going_down()) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
@@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
                 */
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
-       }
-       if (test_opt(sb, ERRORS_PANIC)) {
+       } else if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
index c88088d92613595eecf6f1119a2c53f0b63bab27..bbdbd56cf2ac9384b83e78945b2c6c0031cc346d 100644 (file)
@@ -189,17 +189,28 @@ struct sqe_submit {
        bool                            needs_fixed_file;
 };
 
+/*
+ * First field must be the file pointer in all the
+ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
+ */
 struct io_poll_iocb {
        struct file                     *file;
        struct wait_queue_head          *head;
        __poll_t                        events;
-       bool                            woken;
+       bool                            done;
        bool                            canceled;
        struct wait_queue_entry         wait;
 };
 
+/*
+ * NOTE! Each of the iocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'ki_filp' in this struct.
+ */
 struct io_kiocb {
        union {
+               struct file             *file;
                struct kiocb            rw;
                struct io_poll_iocb     poll;
        };
@@ -214,6 +225,7 @@ struct io_kiocb {
 #define REQ_F_IOPOLL_COMPLETED 2       /* polled IO has completed */
 #define REQ_F_FIXED_FILE       4       /* ctx owns file */
 #define REQ_F_SEQ_PREV         8       /* sequential with previous */
+#define REQ_F_PREPPED          16      /* prep already done */
        u64                     user_data;
        u64                     error;
 
@@ -355,20 +367,25 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
        }
 }
 
-static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 ki_user_data,
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+       if (waitqueue_active(&ctx->wait))
+               wake_up(&ctx->wait);
+       if (waitqueue_active(&ctx->sqo_wait))
+               wake_up(&ctx->sqo_wait);
+}
+
+static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
                                long res, unsigned ev_flags)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&ctx->completion_lock, flags);
-       io_cqring_fill_event(ctx, ki_user_data, res, ev_flags);
+       io_cqring_fill_event(ctx, user_data, res, ev_flags);
        io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-       if (waitqueue_active(&ctx->wait))
-               wake_up(&ctx->wait);
-       if (waitqueue_active(&ctx->sqo_wait))
-               wake_up(&ctx->sqo_wait);
+       io_cqring_ev_posted(ctx);
 }
 
 static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
@@ -382,13 +399,14 @@ static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                                   struct io_submit_state *state)
 {
+       gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct io_kiocb *req;
 
        if (!percpu_ref_tryget(&ctx->refs))
                return NULL;
 
        if (!state) {
-               req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
+               req = kmem_cache_alloc(req_cachep, gfp);
                if (unlikely(!req))
                        goto out;
        } else if (!state->free_reqs) {
@@ -396,10 +414,18 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                int ret;
 
                sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
-               ret = kmem_cache_alloc_bulk(req_cachep, __GFP_NOWARN, sz,
-                                               state->reqs);
-               if (unlikely(ret <= 0))
-                       goto out;
+               ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
+
+               /*
+                * Bulk alloc is all-or-nothing. If we fail to get a batch,
+                * retry single alloc to be on the safe side.
+                */
+               if (unlikely(ret <= 0)) {
+                       state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+                       if (!state->reqs[0])
+                               goto out;
+                       ret = 1;
+               }
                state->free_reqs = ret - 1;
                state->cur_req = 1;
                req = state->reqs[0];
@@ -411,7 +437,8 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 
        req->ctx = ctx;
        req->flags = 0;
-       refcount_set(&req->refs, 0);
+       /* one is dropped after submission, the other at completion */
+       refcount_set(&req->refs, 2);
        return req;
 out:
        io_ring_drop_ctx_refs(ctx, 1);
@@ -429,10 +456,16 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 
 static void io_free_req(struct io_kiocb *req)
 {
-       if (!refcount_read(&req->refs) || refcount_dec_and_test(&req->refs)) {
-               io_ring_drop_ctx_refs(req->ctx, 1);
-               kmem_cache_free(req_cachep, req);
-       }
+       if (req->file && !(req->flags & REQ_F_FIXED_FILE))
+               fput(req->file);
+       io_ring_drop_ctx_refs(req->ctx, 1);
+       kmem_cache_free(req_cachep, req);
+}
+
+static void io_put_req(struct io_kiocb *req)
+{
+       if (refcount_dec_and_test(&req->refs))
+               io_free_req(req);
 }
 
 /*
@@ -442,44 +475,34 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                               struct list_head *done)
 {
        void *reqs[IO_IOPOLL_BATCH];
-       int file_count, to_free;
-       struct file *file = NULL;
        struct io_kiocb *req;
+       int to_free;
 
-       file_count = to_free = 0;
+       to_free = 0;
        while (!list_empty(done)) {
                req = list_first_entry(done, struct io_kiocb, list);
                list_del(&req->list);
 
                io_cqring_fill_event(ctx, req->user_data, req->error, 0);
-
-               reqs[to_free++] = req;
                (*nr_events)++;
 
-               /*
-                * Batched puts of the same file, to avoid dirtying the
-                * file usage count multiple times, if avoidable.
-                */
-               if (!(req->flags & REQ_F_FIXED_FILE)) {
-                       if (!file) {
-                               file = req->rw.ki_filp;
-                               file_count = 1;
-                       } else if (file == req->rw.ki_filp) {
-                               file_count++;
+               if (refcount_dec_and_test(&req->refs)) {
+                       /* If we're not using fixed files, we have to pair the
+                        * completion part with the file put. Use regular
+                        * completions for those, only batch free for fixed
+                        * file.
+                        */
+                       if (req->flags & REQ_F_FIXED_FILE) {
+                               reqs[to_free++] = req;
+                               if (to_free == ARRAY_SIZE(reqs))
+                                       io_free_req_many(ctx, reqs, &to_free);
                        } else {
-                               fput_many(file, file_count);
-                               file = req->rw.ki_filp;
-                               file_count = 1;
+                               io_free_req(req);
                        }
                }
-
-               if (to_free == ARRAY_SIZE(reqs))
-                       io_free_req_many(ctx, reqs, &to_free);
        }
-       io_commit_cqring(ctx);
 
-       if (file)
-               fput_many(file, file_count);
+       io_commit_cqring(ctx);
        io_free_req_many(ctx, reqs, &to_free);
 }
 
@@ -602,21 +625,14 @@ static void kiocb_end_write(struct kiocb *kiocb)
        }
 }
 
-static void io_fput(struct io_kiocb *req)
-{
-       if (!(req->flags & REQ_F_FIXED_FILE))
-               fput(req->rw.ki_filp);
-}
-
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
        kiocb_end_write(kiocb);
 
-       io_fput(req);
        io_cqring_add_event(req->ctx, req->user_data, res, 0);
-       io_free_req(req);
+       io_put_req(req);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -731,31 +747,18 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
        const struct io_uring_sqe *sqe = s->sqe;
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw;
-       unsigned ioprio, flags;
-       int fd, ret;
+       unsigned ioprio;
+       int ret;
 
+       if (!req->file)
+               return -EBADF;
        /* For -EAGAIN retry, everything is already prepped */
-       if (kiocb->ki_filp)
+       if (req->flags & REQ_F_PREPPED)
                return 0;
 
-       flags = READ_ONCE(sqe->flags);
-       fd = READ_ONCE(sqe->fd);
+       if (force_nonblock && !io_file_supports_async(req->file))
+               force_nonblock = false;
 
-       if (flags & IOSQE_FIXED_FILE) {
-               if (unlikely(!ctx->user_files ||
-                   (unsigned) fd >= ctx->nr_user_files))
-                       return -EBADF;
-               kiocb->ki_filp = ctx->user_files[fd];
-               req->flags |= REQ_F_FIXED_FILE;
-       } else {
-               if (s->needs_fixed_file)
-                       return -EBADF;
-               kiocb->ki_filp = io_file_get(state, fd);
-               if (unlikely(!kiocb->ki_filp))
-                       return -EBADF;
-               if (force_nonblock && !io_file_supports_async(kiocb->ki_filp))
-                       force_nonblock = false;
-       }
        kiocb->ki_pos = READ_ONCE(sqe->off);
        kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
        kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
@@ -764,7 +767,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
-                       goto out_fput;
+                       return ret;
 
                kiocb->ki_ioprio = ioprio;
        } else
@@ -772,38 +775,26 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 
        ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
        if (unlikely(ret))
-               goto out_fput;
+               return ret;
        if (force_nonblock) {
                kiocb->ki_flags |= IOCB_NOWAIT;
                req->flags |= REQ_F_FORCE_NONBLOCK;
        }
        if (ctx->flags & IORING_SETUP_IOPOLL) {
-               ret = -EOPNOTSUPP;
                if (!(kiocb->ki_flags & IOCB_DIRECT) ||
                    !kiocb->ki_filp->f_op->iopoll)
-                       goto out_fput;
+                       return -EOPNOTSUPP;
 
                req->error = 0;
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
        } else {
-               if (kiocb->ki_flags & IOCB_HIPRI) {
-                       ret = -EINVAL;
-                       goto out_fput;
-               }
+               if (kiocb->ki_flags & IOCB_HIPRI)
+                       return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }
+       req->flags |= REQ_F_PREPPED;
        return 0;
-out_fput:
-       if (!(flags & IOSQE_FIXED_FILE)) {
-               /*
-                * in case of error, we didn't use this file reference. drop it.
-                */
-               if (state)
-                       state->used_refs--;
-               io_file_put(state, kiocb->ki_filp);
-       }
-       return ret;
 }
 
 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
@@ -864,6 +855,9 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
        iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
        if (offset)
                iov_iter_advance(iter, offset);
+
+       /* don't drop a reference to these pages */
+       iter->type |= ITER_BVEC_FLAG_NO_REF;
        return 0;
 }
 
@@ -887,7 +881,7 @@ static int io_import_iovec(struct io_ring_ctx *ctx, int rw,
        opcode = READ_ONCE(sqe->opcode);
        if (opcode == IORING_OP_READ_FIXED ||
            opcode == IORING_OP_WRITE_FIXED) {
-               ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
+               int ret = io_import_fixed(ctx, rw, sqe, iter);
                *iovec = NULL;
                return ret;
        }
@@ -945,31 +939,29 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
        async_list->io_end = io_end;
 }
 
-static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
-                      bool force_nonblock, struct io_submit_state *state)
+static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+                  bool force_nonblock, struct io_submit_state *state)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw;
        struct iov_iter iter;
        struct file *file;
        size_t iov_count;
-       ssize_t ret;
+       int ret;
 
        ret = io_prep_rw(req, s, force_nonblock, state);
        if (ret)
                return ret;
        file = kiocb->ki_filp;
 
-       ret = -EBADF;
        if (unlikely(!(file->f_mode & FMODE_READ)))
-               goto out_fput;
-       ret = -EINVAL;
+               return -EBADF;
        if (unlikely(!file->f_op->read_iter))
-               goto out_fput;
+               return -EINVAL;
 
        ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
        if (ret)
-               goto out_fput;
+               return ret;
 
        iov_count = iov_iter_count(&iter);
        ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
@@ -991,38 +983,32 @@ static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
                }
        }
        kfree(iovec);
-out_fput:
-       /* Hold on to the file for -EAGAIN */
-       if (unlikely(ret && ret != -EAGAIN))
-               io_fput(req);
        return ret;
 }
 
-static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
-                       bool force_nonblock, struct io_submit_state *state)
+static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+                   bool force_nonblock, struct io_submit_state *state)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw;
        struct iov_iter iter;
        struct file *file;
        size_t iov_count;
-       ssize_t ret;
+       int ret;
 
        ret = io_prep_rw(req, s, force_nonblock, state);
        if (ret)
                return ret;
 
-       ret = -EBADF;
        file = kiocb->ki_filp;
        if (unlikely(!(file->f_mode & FMODE_WRITE)))
-               goto out_fput;
-       ret = -EINVAL;
+               return -EBADF;
        if (unlikely(!file->f_op->write_iter))
-               goto out_fput;
+               return -EINVAL;
 
        ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
        if (ret)
-               goto out_fput;
+               return ret;
 
        iov_count = iov_iter_count(&iter);
 
@@ -1036,6 +1022,8 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
 
        ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
        if (!ret) {
+               ssize_t ret2;
+
                /*
                 * Open-code file_start_write here to grab freeze protection,
                 * which will be released by another thread in
@@ -1050,14 +1038,22 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
-               io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
+
+               ret2 = call_write_iter(file, kiocb, &iter);
+               if (!force_nonblock || ret2 != -EAGAIN) {
+                       io_rw_done(kiocb, ret2);
+               } else {
+                       /*
+                        * If ->needs_lock is true, we're already in async
+                        * context.
+                        */
+                       if (!s->needs_lock)
+                               io_async_list_note(WRITE, req, iov_count);
+                       ret = -EAGAIN;
+               }
        }
 out_free:
        kfree(iovec);
-out_fput:
-       /* Hold on to the file for -EAGAIN */
-       if (unlikely(ret && ret != -EAGAIN))
-               io_fput(req);
        return ret;
 }
 
@@ -1072,29 +1068,19 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
-       /*
-        * Twilight zone - it's possible that someone issued an opcode that
-        * has a file attached, then got -EAGAIN on submission, and changed
-        * the sqe before we retried it from async context. Avoid dropping
-        * a file reference for this malicious case, and flag the error.
-        */
-       if (req->rw.ki_filp) {
-               err = -EBADF;
-               io_fput(req);
-       }
        io_cqring_add_event(ctx, user_data, err, 0);
-       io_free_req(req);
+       io_put_req(req);
        return 0;
 }
 
 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       unsigned flags;
-       int fd;
 
-       /* Prep already done */
-       if (req->rw.ki_filp)
+       if (!req->file)
+               return -EBADF;
+       /* Prep already done (EAGAIN retry) */
+       if (req->flags & REQ_F_PREPPED)
                return 0;
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
@@ -1102,20 +1088,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
                return -EINVAL;
 
-       fd = READ_ONCE(sqe->fd);
-       flags = READ_ONCE(sqe->flags);
-
-       if (flags & IOSQE_FIXED_FILE) {
-               if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
-                       return -EBADF;
-               req->rw.ki_filp = ctx->user_files[fd];
-               req->flags |= REQ_F_FIXED_FILE;
-       } else {
-               req->rw.ki_filp = fget(fd);
-               if (unlikely(!req->rw.ki_filp))
-                       return -EBADF;
-       }
-
+       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -1144,9 +1117,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                                end > 0 ? end : LLONG_MAX,
                                fsync_flags & IORING_FSYNC_DATASYNC);
 
-       io_fput(req);
        io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-       io_free_req(req);
+       io_put_req(req);
        return 0;
 }
 
@@ -1204,15 +1176,16 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        spin_unlock_irq(&ctx->completion_lock);
 
        io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-       io_free_req(req);
+       io_put_req(req);
        return 0;
 }
 
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
+static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                            __poll_t mask)
 {
-       io_cqring_add_event(req->ctx, req->user_data, mangle_poll(mask), 0);
-       io_fput(req);
-       io_free_req(req);
+       req->poll.done = true;
+       io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask), 0);
+       io_commit_cqring(ctx);
 }
 
 static void io_poll_complete_work(struct work_struct *work)
@@ -1240,9 +1213,11 @@ static void io_poll_complete_work(struct work_struct *work)
                return;
        }
        list_del_init(&req->list);
+       io_poll_complete(ctx, req, mask);
        spin_unlock_irq(&ctx->completion_lock);
 
-       io_poll_complete(req, mask);
+       io_cqring_ev_posted(ctx);
+       io_put_req(req);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -1253,29 +1228,25 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
        struct io_ring_ctx *ctx = req->ctx;
        __poll_t mask = key_to_poll(key);
-
-       poll->woken = true;
+       unsigned long flags;
 
        /* for instances that support it check for an event match first: */
-       if (mask) {
-               unsigned long flags;
+       if (mask && !(mask & poll->events))
+               return 0;
 
-               if (!(mask & poll->events))
-                       return 0;
+       list_del_init(&poll->wait.entry);
 
-               /* try to complete the iocb inline if we can: */
-               if (spin_trylock_irqsave(&ctx->completion_lock, flags)) {
-                       list_del(&req->list);
-                       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
+               list_del(&req->list);
+               io_poll_complete(ctx, req, mask);
+               spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-                       list_del_init(&poll->wait.entry);
-                       io_poll_complete(req, mask);
-                       return 1;
-               }
+               io_cqring_ev_posted(ctx);
+               io_put_req(req);
+       } else {
+               queue_work(ctx->sqo_wq, &req->work);
        }
 
-       list_del_init(&poll->wait.entry);
-       queue_work(ctx->sqo_wq, &req->work);
        return 1;
 }
 
@@ -1305,36 +1276,23 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_poll_iocb *poll = &req->poll;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_poll_table ipt;
-       unsigned flags;
+       bool cancel = false;
        __poll_t mask;
        u16 events;
-       int fd;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
                return -EINVAL;
+       if (!poll->file)
+               return -EBADF;
 
        INIT_WORK(&req->work, io_poll_complete_work);
        events = READ_ONCE(sqe->poll_events);
        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
 
-       flags = READ_ONCE(sqe->flags);
-       fd = READ_ONCE(sqe->fd);
-
-       if (flags & IOSQE_FIXED_FILE) {
-               if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
-                       return -EBADF;
-               poll->file = ctx->user_files[fd];
-               req->flags |= REQ_F_FIXED_FILE;
-       } else {
-               poll->file = fget(fd);
-       }
-       if (unlikely(!poll->file))
-               return -EBADF;
-
        poll->head = NULL;
-       poll->woken = false;
+       poll->done = false;
        poll->canceled = false;
 
        ipt.pt._qproc = io_poll_queue_proc;
@@ -1346,56 +1304,44 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&req->refs, 2);
-
        mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
-       if (unlikely(!poll->head)) {
-               /* we did not manage to set up a waitqueue, done */
-               goto out;
-       }
 
        spin_lock_irq(&ctx->completion_lock);
-       spin_lock(&poll->head->lock);
-       if (poll->woken) {
-               /* wake_up context handles the rest */
-               mask = 0;
+       if (likely(poll->head)) {
+               spin_lock(&poll->head->lock);
+               if (unlikely(list_empty(&poll->wait.entry))) {
+                       if (ipt.error)
+                               cancel = true;
+                       ipt.error = 0;
+                       mask = 0;
+               }
+               if (mask || ipt.error)
+                       list_del_init(&poll->wait.entry);
+               else if (cancel)
+                       WRITE_ONCE(poll->canceled, true);
+               else if (!poll->done) /* actually waiting for an event */
+                       list_add_tail(&req->list, &ctx->cancel_list);
+               spin_unlock(&poll->head->lock);
+       }
+       if (mask) { /* no async, we'd stolen it */
+               req->error = mangle_poll(mask);
                ipt.error = 0;
-       } else if (mask || ipt.error) {
-               /* if we get an error or a mask we are done */
-               WARN_ON_ONCE(list_empty(&poll->wait.entry));
-               list_del_init(&poll->wait.entry);
-       } else {
-               /* actually waiting for an event */
-               list_add_tail(&req->list, &ctx->cancel_list);
+               io_poll_complete(ctx, req, mask);
        }
-       spin_unlock(&poll->head->lock);
        spin_unlock_irq(&ctx->completion_lock);
 
-out:
-       if (unlikely(ipt.error)) {
-               if (!(flags & IOSQE_FIXED_FILE))
-                       fput(poll->file);
-               /*
-                * Drop one of our refs to this req, __io_submit_sqe() will
-                * drop the other one since we're returning an error.
-                */
-               io_free_req(req);
-               return ipt.error;
+       if (mask) {
+               io_cqring_ev_posted(ctx);
+               io_put_req(req);
        }
-
-       if (mask)
-               io_poll_complete(req, mask);
-       io_free_req(req);
-       return 0;
+       return ipt.error;
 }
 
 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
                           const struct sqe_submit *s, bool force_nonblock,
                           struct io_submit_state *state)
 {
-       ssize_t ret;
-       int opcode;
+       int ret, opcode;
 
        if (unlikely(s->index >= ctx->sq_entries))
                return -EINVAL;
@@ -1524,10 +1470,13 @@ restart:
                                        break;
                                cond_resched();
                        } while (1);
+
+                       /* drop submission reference */
+                       io_put_req(req);
                }
                if (ret) {
                        io_cqring_add_event(ctx, sqe->user_data, ret, 0);
-                       io_free_req(req);
+                       io_put_req(req);
                }
 
                /* async context always use a copy of the sqe */
@@ -1614,11 +1563,55 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
        return ret;
 }
 
+static bool io_op_needs_file(const struct io_uring_sqe *sqe)
+{
+       int op = READ_ONCE(sqe->opcode);
+
+       switch (op) {
+       case IORING_OP_NOP:
+       case IORING_OP_POLL_REMOVE:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+                          struct io_submit_state *state, struct io_kiocb *req)
+{
+       unsigned flags;
+       int fd;
+
+       flags = READ_ONCE(s->sqe->flags);
+       fd = READ_ONCE(s->sqe->fd);
+
+       if (!io_op_needs_file(s->sqe)) {
+               req->file = NULL;
+               return 0;
+       }
+
+       if (flags & IOSQE_FIXED_FILE) {
+               if (unlikely(!ctx->user_files ||
+                   (unsigned) fd >= ctx->nr_user_files))
+                       return -EBADF;
+               req->file = ctx->user_files[fd];
+               req->flags |= REQ_F_FIXED_FILE;
+       } else {
+               if (s->needs_fixed_file)
+                       return -EBADF;
+               req->file = io_file_get(state, fd);
+               if (unlikely(!req->file))
+                       return -EBADF;
+       }
+
+       return 0;
+}
+
 static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
                         struct io_submit_state *state)
 {
        struct io_kiocb *req;
-       ssize_t ret;
+       int ret;
 
        /* enforce forwards compatibility on users */
        if (unlikely(s->sqe->flags & ~IOSQE_FIXED_FILE))
@@ -1628,7 +1621,9 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
        if (unlikely(!req))
                return -EAGAIN;
 
-       req->rw.ki_filp = NULL;
+       ret = io_req_set_file(ctx, s, state, req);
+       if (unlikely(ret))
+               goto out;
 
        ret = __io_submit_sqe(ctx, req, s, true, state);
        if (ret == -EAGAIN) {
@@ -1649,11 +1644,23 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
                                INIT_WORK(&req->work, io_sq_wq_submit_work);
                                queue_work(ctx->sqo_wq, &req->work);
                        }
-                       ret = 0;
+
+                       /*
+                        * Queued up for async execution, worker will release
+                        * submit reference when the iocb is actually
+                        * submitted.
+                        */
+                       return 0;
                }
        }
+
+out:
+       /* drop submission reference */
+       io_put_req(req);
+
+       /* and drop final reference, if we failed */
        if (ret)
-               io_free_req(req);
+               io_put_req(req);
 
        return ret;
 }
@@ -1975,7 +1982,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                return 0;
 
        if (sig) {
-               ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+                                                     &ksigmask, &sigsaved, sigsz);
+               else
+#endif
+                       ret = set_user_sigmask(sig, &ksigmask,
+                                              &sigsaved, sigsz);
+
                if (ret)
                        return ret;
        }
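
The core of the io_uring rework above is the reference model: io_get_req() now starts every request with two references, one owned by the submission path (dropped once the sqe has been issued or queued for async execution) and one owned by the completion path (dropped when the result is posted), and only when both are gone does io_free_req() release the non-fixed file and the request. A stripped-down sketch of that lifetime with simplified, hypothetical names:

struct example_req {
        refcount_t      refs;
        struct file     *file;
        bool            file_is_fixed;
};

static void example_free(struct example_req *req)
{
        if (req->file && !req->file_is_fixed)
                fput(req->file);
        kfree(req);
}

static void example_put(struct example_req *req)
{
        if (refcount_dec_and_test(&req->refs))
                example_free(req);
}

static int example_submit(struct example_req *req)
{
        /* one reference for submission, one for completion */
        refcount_set(&req->refs, 2);

        /* ... issue the I/O; the completion handler calls example_put() ... */

        example_put(req);       /* drop the submission reference */
        return 0;
}
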
index 97cb9d486a7da38527aa8bc7349692fedcce9e70..abdd18e404f8cdd4f85d4e014af1d4a5edf8bbd2 100644 (file)
@@ -1589,12 +1589,14 @@ static void iomap_dio_bio_end_io(struct bio *bio)
        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
-               struct bio_vec *bvec;
-               int i;
-               struct bvec_iter_all iter_all;
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+                       struct bvec_iter_all iter_all;
+                       struct bio_vec *bvec;
+                       int i;
 
-               bio_for_each_segment_all(bvec, bio, i, iter_all)
-                       put_page(bvec->bv_page);
+                       bio_for_each_segment_all(bvec, bio, i, iter_all)
+                               put_page(bvec->bv_page);
+               }
                bio_put(bio);
        }
 }
index 93fb7cf0b92b631358cf36eab60d5947cc0312a7..f0b5c987d6ae14cc39a281668d5daf4d658cbe67 100644 (file)
@@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
 
        WARN_ON_ONCE(host->h_server);
 
-       if (refcount_dec_and_test(&host->h_count)) {
+       if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
                WARN_ON_ONCE(!list_empty(&host->h_lockowners));
                WARN_ON_ONCE(!list_empty(&host->h_granted));
                WARN_ON_ONCE(!list_empty(&host->h_reclaim));
 
-               mutex_lock(&nlm_host_mutex);
                nlm_destroy_host_locked(host);
                mutex_unlock(&nlm_host_mutex);
        }
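
Dropping the last reference to the nlm_host and only then taking nlm_host_mutex left a window in which another task could look the host up from the global list and take a new reference to an object about to be destroyed; refcount_dec_and_mutex_lock() returns true, with the mutex already held, only when the count really reaches zero under the lock. A generic sketch of the pattern on a hypothetical object:

struct example_obj {
        refcount_t              count;
        struct list_head        node;   /* linked into a mutex-protected list */
};

static DEFINE_MUTEX(example_list_mutex);

static void example_release(struct example_obj *obj)
{
        if (refcount_dec_and_mutex_lock(&obj->count, &example_list_mutex)) {
                list_del(&obj->node);   /* lookups can no longer revive it */
                mutex_unlock(&example_list_mutex);
                kfree(obj);
        }
}
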
index eaa1cfaf73b08c8fda256ea57ab816dcc4985217..71d0c6c2aac5ccde4d69cdf7396fbdbdfb40c3e6 100644 (file)
@@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
                         */
                        error = -EDEADLK;
                        spin_lock(&blocked_lock_lock);
+                       /*
+                        * Ensure that we don't find any locks blocked on this
+                        * request during deadlock detection.
+                        */
+                       __locks_wake_up_blocks(request);
                        if (likely(!posix_locks_deadlock(request, fl))) {
                                error = FILE_LOCK_DEFERRED;
                                __locks_insert_block(fl, request,
index fb1cf1a4bda2a105e60cb23d95dea4b3abc09f70..90d71fda65cecfb3958cc4391240e2a09bac783e 100644 (file)
@@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
        case XPRT_TRANSPORT_RDMA:
                if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_TCP_RETRANS;
-               if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
                        to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
                        to->to_initval = NFS_MAX_TCP_TIMEOUT;
index f9264e1922a28b836367b145c215d9ceb8883843..6673d4ff5a2a846c01e2de3e909e167da30156cb 100644 (file)
@@ -1289,6 +1289,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 static int ff_layout_read_done_cb(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
 {
+       int new_idx = hdr->pgio_mirror_idx;
        int err;
 
        trace_nfs4_pnfs_read(hdr, task->tk_status);
@@ -1307,7 +1308,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
        case -NFS4ERR_RESET_TO_PNFS:
                if (ff_layout_choose_best_ds_for_read(hdr->lseg,
                                        hdr->pgio_mirror_idx + 1,
-                                       &hdr->pgio_mirror_idx))
+                                       &new_idx))
                        goto out_layouterror;
                set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
@@ -1320,7 +1321,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
 
        return 0;
 out_layouterror:
+       ff_layout_read_record_layoutstats_done(task, hdr);
        ff_layout_send_layouterror(hdr->lseg);
+       hdr->pgio_mirror_idx = new_idx;
 out_eagain:
        rpc_restart_call_prepare(task);
        return -EAGAIN;
index 4dbb0ee234324db3275de7c7a26fc3bcd040171a..741ff8c9c6ed3f7cda214ec0157eb6d9461ebdca 100644 (file)
@@ -2933,7 +2933,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
        }
 
 out:
-       nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+       if (!opendata->cancelled)
+               nfs4_sequence_free_slot(&opendata->o_res.seq_res);
        return ret;
 }
 
@@ -6301,7 +6302,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
        p->arg.seqid = seqid;
        p->res.seqid = seqid;
        p->lsp = lsp;
-       refcount_inc(&lsp->ls_count);
        /* Ensure we don't close file until we're done freeing locks! */
        p->ctx = get_nfs_open_context(ctx);
        p->l_ctx = nfs_get_lock_context(ctx);
@@ -6526,7 +6526,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
        p->res.lock_seqid = p->arg.lock_seqid;
        p->lsp = lsp;
        p->server = server;
-       refcount_inc(&lsp->ls_count);
        p->ctx = get_nfs_open_context(ctx);
        locks_init_lock(&p->fl);
        locks_copy_lock(&p->fl, fl);
index 56992b32c6bbb63839bbc95a50cb83a57398d8bb..a90bb19dcfa287c34234b2954524d7047481fe40 100644 (file)
@@ -208,6 +208,7 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
 {
        struct fanotify_event_info_fid info = { };
        struct file_handle handle = { };
+       unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh;
        size_t fh_len = event->fh_len;
        size_t len = fanotify_event_info_len(event);
 
@@ -233,7 +234,16 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
 
        buf += sizeof(handle);
        len -= sizeof(handle);
-       if (copy_to_user(buf, fanotify_event_fh(event), fh_len))
+       /*
+        * For an inline fh, copy through stack to exclude the copy from
+        * usercopy hardening protections.
+        */
+       fh = fanotify_event_fh(event);
+       if (fh_len <= FANOTIFY_INLINE_FH_LEN) {
+               memcpy(bounce, fh, fh_len);
+               fh = bounce;
+       }
+       if (copy_to_user(buf, fh, fh_len))
                return -EFAULT;
 
        /* Pad with 0's */
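The new comment in the fanotify hunk above explains the trick: a small, inline file handle is first copied into a fixed-size stack array so that the buffer handed to copy_to_user() is a plain local array rather than a slice of a larger object that usercopy hardening would flag. A minimal userspace sketch of the same bounce-buffer pattern follows; copy_out() and INLINE_FH_LEN are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <string.h>

#define INLINE_FH_LEN 12

/* stand-in for copy_to_user(): returns 0 on success */
static int copy_out(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

static int send_fh(void *userbuf, const unsigned char *fh, size_t fh_len)
{
        unsigned char bounce[INLINE_FH_LEN];

        if (fh_len <= INLINE_FH_LEN) {
                /* route small handles through the stack so the exported
                 * buffer is a plain local array */
                memcpy(bounce, fh, fh_len);
                fh = bounce;
        }
        return copy_out(userbuf, fh, fh_len);
}

int main(void)
{
        unsigned char fh[4] = { 1, 2, 3, 4 };
        unsigned char out[INLINE_FH_LEN];

        return send_fh(out, fh, sizeof(fh));
}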
index e2901fbb9f76c97a7abe1607f5aa51f53a842966..7b53598c88046f4eab660817ae165620bd48a48b 100644 (file)
@@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
        fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
        if (!fsn_mark)
                return -ENOENT;
-       else if (create)
-               return -EEXIST;
+       else if (create) {
+               ret = -EEXIST;
+               goto out;
+       }
 
        i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
 
@@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
        /* return the wd */
        ret = i_mark->wd;
 
+out:
        /* match the get from fsnotify_find_mark() */
        fsnotify_put_mark(fsn_mark);
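The inotify fix above converts the early "return -EEXIST" into a jump to the shared out: label so the reference taken by fsnotify_find_mark() is always dropped. Below is a self-contained sketch of that single-exit cleanup pattern; get()/put() are illustrative stand-ins for the find/put pair, and the error value is hard-coded only to keep the example standalone.

#include <stdio.h>

struct obj { int refs; int wd; };

static struct obj *get(struct obj *o) { o->refs++; return o; }
static void put(struct obj *o) { o->refs--; }

static int update_existing(struct obj *o, int create)
{
        struct obj *m = get(o);         /* takes a reference */
        int ret;

        if (create) {
                ret = -17;              /* -EEXIST, but still fall through */
                goto out;
        }

        ret = m->wd;                    /* normal result */
out:
        put(m);                         /* every path drops the reference */
        return ret;
}

int main(void)
{
        struct obj o = { .refs = 1, .wd = 3 };
        int ret = update_existing(&o, 1);

        printf("ret=%d refs=%d\n", ret, o.refs);  /* ret=-17 refs=1 */
        return 0;
}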
 
index bbcc185062bb5c8198f7cefbc85520c5adeac0e2..d29d869abec17c3b6d4b56d6500e9b3d510cd385 100644 (file)
@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
 static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
 
+/*
+ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
+ * Same as oldmem_pfn_is_ram in vmcore
+ */
+static int (*mem_pfn_is_ram)(unsigned long pfn);
+
+int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
+{
+       if (mem_pfn_is_ram)
+               return -EBUSY;
+       mem_pfn_is_ram = fn;
+       return 0;
+}
+
+static int pfn_is_ram(unsigned long pfn)
+{
+       if (mem_pfn_is_ram)
+               return mem_pfn_is_ram(pfn);
+       else
+               return 1;
+}
+
 /* This doesn't grab kclist_lock, so it should only be used at init time. */
 void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
                       int type)
@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                                goto out;
                        }
                        m = NULL;       /* skip the list anchor */
+               } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
+                       if (clear_user(buffer, tsz)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
                } else if (m->type == KCORE_VMALLOC) {
                        vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
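register_mem_pfn_is_ram() gives exactly one boot-time caller a chance to tell /proc/kcore which pfns are backed by real RAM; with nothing registered, pfn_is_ram() assumes everything is RAM, and non-RAM ranges are now zero-filled instead of read. The sketch below shows how a built-in driver might hook in; only the register_mem_pfn_is_ram() prototype comes from this series (it is also declared in the kcore.h hunk further down), while the callback and init function are hypothetical and this is not a complete, buildable module.

#include <linux/kcore.h>
#include <linux/init.h>

/* > 0: RAM, 0: not RAM, < 0: error -- same convention as oldmem_pfn_is_ram */
static int my_pfn_is_ram(unsigned long pfn)
{
        /* e.g. consult a hypervisor-provided map of present pages here */
        return 1;
}

static int __init my_driver_init(void)
{
        /* only one callback may be installed; -EBUSY if we are too late */
        return register_mem_pfn_is_ram(my_pfn_is_ram);
}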
index ae796e10f68b2524423dae6d3840c7a9d05cf4c3..e7276932e433c9cdc1bdd10f9a35a3d0ef09ece7 100644 (file)
@@ -1242,8 +1242,10 @@ set_size:
                truncate_setsize(inode, newsize);
                down_write(&iinfo->i_data_sem);
                udf_clear_extent_cache(inode);
-               udf_truncate_extents(inode);
+               err = udf_truncate_extents(inode);
                up_write(&iinfo->i_data_sem);
+               if (err)
+                       return err;
        }
 update_time:
        inode->i_mtime = inode->i_ctime = current_time(inode);
index b647f0bd150c46ba6b202989052b1849bb60c4c5..63a47f1e1d529c1c98a1f25c86cd25880c1737ac 100644 (file)
@@ -199,7 +199,7 @@ static void udf_update_alloc_ext_desc(struct inode *inode,
  * for making file shorter. For making file longer, udf_extend_file() has to
  * be used.
  */
-void udf_truncate_extents(struct inode *inode)
+int udf_truncate_extents(struct inode *inode)
 {
        struct extent_position epos;
        struct kernel_lb_addr eloc, neloc = {};
@@ -224,7 +224,7 @@ void udf_truncate_extents(struct inode *inode)
        if (etype == -1) {
                /* We should extend the file? */
                WARN_ON(byte_offset);
-               return;
+               return 0;
        }
        epos.offset -= adsize;
        extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
@@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
                        epos.block = eloc;
                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, &eloc, 0));
+                       /* Error reading indirect block? */
+                       if (!epos.bh)
+                               return -EIO;
                        if (elen)
                                indirect_ext_len =
                                        (elen + sb->s_blocksize - 1) >>
@@ -283,4 +286,5 @@ void udf_truncate_extents(struct inode *inode)
        iinfo->i_lenExtents = inode->i_size;
 
        brelse(epos.bh);
+       return 0;
 }
index ee246769dee4a2c54f31b56c5237f77f7db3a040..d89ef71887fcfa1aed13bacc77a1c42ee3a55c1a 100644 (file)
@@ -235,7 +235,7 @@ extern struct inode *udf_new_inode(struct inode *, umode_t);
 /* truncate.c */
 extern void udf_truncate_tail_extent(struct inode *);
 extern void udf_discard_prealloc(struct inode *);
-extern void udf_truncate_extents(struct inode *);
+extern int udf_truncate_extents(struct inode *);
 
 /* balloc.c */
 extern void udf_free_blocks(struct super_block *, struct inode *,
index 48502cb9990f184a55b780372adaef3bda406509..4637ae1ae91ca8ef6007c05ba060dd9fb208fdf1 100644 (file)
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
-       ASSERT(level > 0);
+       if (unlikely(level == 0)) {
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
 
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
        struct xfs_bmbt_irec    *mval,          /* output: map values */
        int                     *nmap)          /* i/o: mval size/count */
 {
+       struct xfs_bmalloca     bma = {
+               .tp             = tp,
+               .ip             = ip,
+               .total          = total,
+       };
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp;
-       struct xfs_bmalloca     bma = { NULL }; /* args for xfs_bmap_alloc */
        xfs_fileoff_t           end;            /* end of mapped file region */
        bool                    eof = false;    /* after the end of extents */
        int                     error;          /* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
                eof = true;
        if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
                bma.prev.br_startoff = NULLFILEOFF;
-       bma.tp = tp;
-       bma.ip = ip;
-       bma.total = total;
-       bma.datatype = 0;
        bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
 
        n = 0;
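The xfs_bmapi_write() hunk replaces piecemeal assignments (bma.tp = tp; bma.datatype = 0; ...) with a designated initializer at the declaration. The standalone example below demonstrates the C rule the change relies on: members omitted from a designated initializer are zero-initialized, so nothing is left undefined. The struct here is a simplified stand-in, not the real xfs_bmalloca.

#include <assert.h>

struct alloc_args {
        void *tp;
        void *ip;
        int   total;
        int   datatype;
        int   minleft;
};

int main(void)
{
        int token = 7;
        struct alloc_args bma = {
                .tp    = &token,
                .total = 16,
        };

        /* everything not named above starts out as zero */
        assert(bma.ip == 0 && bma.datatype == 0 && bma.minleft == 0);
        return 0;
}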
index 6f94d1f7322d0a33bd00134c8136ac3831b4fe2c..117910db51b809ebeea0196182e05f0dd0c54611 100644 (file)
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
        struct xfs_btree_cur    *cur = bs->cur;
        struct check_owner      *co;
 
-       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+       /*
+        * In theory, xfs_btree_get_block should only give us a null buffer
+        * pointer for the root of a root-in-inode btree type, but we need
+        * to check defensively here in case the cursor state is also screwed
+        * up.
+        */
+       if (bp == NULL) {
+               if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
+                       xchk_btree_set_corrupt(bs->sc, bs->cur, level);
                return 0;
+       }
 
        /*
         * We want to cross-reference each btree block with the bnobt
index f1260b4bfdeed62440cc238138e7fd405c2dccf1..90527b094878971f831c78daafe2483dd99e83d2 100644 (file)
@@ -574,6 +574,11 @@ xchk_da_btree(
                /* Drill another level deeper. */
                blkno = be32_to_cpu(key->before);
                level++;
+               if (level >= XFS_DA_NODE_MAXDEPTH) {
+                       /* Too deep! */
+                       xchk_da_set_corrupt(&ds, level - 1);
+                       break;
+               }
                ds.tree_level--;
                error = xchk_da_btree_block(&ds, level, blkno);
                if (error)
index 93f07edafd8183a14ca55fae7ffdfad0370ccc89..9ee2a7d02e7059f29c103da1088b7c854ca023bb 100644 (file)
@@ -161,6 +161,14 @@ xfs_ioc_trim(
                return -EPERM;
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
+
+       /*
+        * We haven't recovered the log, so we cannot use our bnobt-guided
+        * storage zapping commands.
+        */
+       if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+               return -EROFS;
+
        if (copy_from_user(&range, urange, sizeof(range)))
                return -EFAULT;
 
index 1f2e2845eb76c2c78a932c913057e1028cec2f05..a7ceae90110eded646f13acf4314131574d46a69 100644 (file)
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
        count = iov_iter_count(from);
 
        /*
-        * If we are doing unaligned IO, wait for all other IO to drain,
-        * otherwise demote the lock if we had to take the exclusive lock
-        * for other reasons in xfs_file_aio_write_checks.
+        * If we are doing unaligned IO, we can't allow any other overlapping IO
+        * in-flight at the same time or we risk data corruption. Wait for all
+        * other IO to drain before we submit. If the IO is aligned, demote the
+        * iolock if we had to take the exclusive lock in
+        * xfs_file_aio_write_checks() for other reasons.
         */
        if (unaligned_io) {
-               /* If we are going to wait for other DIO to finish, bail */
-               if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (atomic_read(&inode->i_dio_count))
-                               return -EAGAIN;
-               } else {
-                       inode_dio_wait(inode);
-               }
+               /* unaligned dio always waits, bail */
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               inode_dio_wait(inode);
        } else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
        ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+       /*
+        * If unaligned, this is the only IO in-flight. If it has not yet
+        * completed, wait on it before we release the iolock to prevent
+        * subsequent overlapping IO.
+        */
+       if (ret == -EIOCBQUEUED && unaligned_io)
+               inode_dio_wait(inode);
 out:
        xfs_iunlock(ip, iolock);
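The rewritten comments in this hunk spell out the rule: an unaligned direct write must be the only I/O in flight, so it refuses IOCB_NOWAIT, drains other DIO before submitting, and, if it went asynchronous, is waited for before the iolock is dropped. What counts as "unaligned" is decided outside this hunk; the small userspace sketch below shows the usual test (offset or length not a multiple of a block granularity), with the 4096-byte granularity being an assumption for illustration rather than the exact mask XFS uses.

#include <stdbool.h>
#include <stdio.h>

static bool dio_is_unaligned(long long pos, unsigned long count,
                             unsigned int granularity)
{
        return (pos % granularity) != 0 || (count % granularity) != 0;
}

int main(void)
{
        /* sub-block write: needs exclusive treatment */
        printf("%d\n", dio_is_unaligned(4096, 512, 4096));   /* prints 1 */
        /* fully aligned write: can run alongside other DIO */
        printf("%d\n", dio_is_unaligned(8192, 4096, 4096));  /* prints 0 */
        return 0;
}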
 
index d5cfc0b15b7640e6c4b99fc05b479f25f5df7f44..f6034ba774be313a86f006ee2e0fbe96403de6f6 100644 (file)
@@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
 #define AARP_RESOLVE_TIME      (10 * HZ)
 
 extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
 
 /* Inter module exports */
 
index b0c814bcc7e3ed903f6b78ca24885ce9c67cba08..cb2aa7ecafff5cb772772db11d5ee06314f60359 100644 (file)
@@ -57,7 +57,6 @@ struct blk_mq_hw_ctx {
        unsigned int            queue_num;
 
        atomic_t                nr_active;
-       unsigned int            nr_expired;
 
        struct hlist_node       cpuhp_dead;
        struct kobject          kobj;
@@ -300,8 +299,6 @@ void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-                               bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
index d66bf5f32610adce133e522b7f3852bd08817ff7..791fee35df8886d11580d4c54f4b55dae64de3db 100644 (file)
@@ -215,6 +215,7 @@ struct bio {
 /*
  * bio flags
  */
+#define BIO_NO_PAGE_REF        0       /* don't put release vec pages */
 #define BIO_SEG_VALID  1       /* bi_phys_segments valid */
 #define BIO_CLONED     2       /* doesn't own data */
 #define BIO_BOUNCED    3       /* bio is a bounce bio */
index 0de92b29f589c949307ef716a750fb7f7a671ae7..5c58a3b2bf0038083b9dc2b88293349fa8afb22b 100644 (file)
@@ -50,6 +50,9 @@ struct blk_stat_callback;
 /* Must be consistent with blk_mq_poll_stats_bkt() */
 #define BLK_MQ_POLL_STATS_BKTS 16
 
+/* Doing classic polling */
+#define BLK_MQ_POLL_CLASSIC -1
+
 /*
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
index a2132e09dc1c422731e9533a2a2efa2f5b0132dd..f02367faa58dbe44171454de6af50777ddc0ebf3 100644 (file)
@@ -193,7 +193,6 @@ enum bpf_arg_type {
 
        ARG_PTR_TO_CTX,         /* pointer to context */
        ARG_ANYTHING,           /* any (initialized) argument is ok */
-       ARG_PTR_TO_SOCKET,      /* pointer to bpf_sock */
        ARG_PTR_TO_SPIN_LOCK,   /* pointer to bpf_spin_lock */
        ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
 };
index 69f7a3449eda83a8a25fd1f5ac3dedc36108deba..7d8228d1c8981d9b73fb72a8953687c1f550eb19 100644 (file)
@@ -66,6 +66,46 @@ struct bpf_reg_state {
         * same reference to the socket, to determine proper reference freeing.
         */
        u32 id;
+       /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+        * from a pointer-cast helper, bpf_sk_fullsock() and
+        * bpf_tcp_sock().
+        *
+        * Consider the following where "sk" is a reference counted
+        * pointer returned from "sk = bpf_sk_lookup_tcp();":
+        *
+        * 1: sk = bpf_sk_lookup_tcp();
+        * 2: if (!sk) { return 0; }
+        * 3: fullsock = bpf_sk_fullsock(sk);
+        * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+        * 5: tp = bpf_tcp_sock(fullsock);
+        * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+        * 7: bpf_sk_release(sk);
+        * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
+        *
+        * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
+        * "tp" ptr should be invalidated also.  In order to do that,
+        * the reg holding "fullsock" and "sk" need to remember
+        * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
+        * such that the verifier can reset all regs which have
+        * ref_obj_id matching the sk_reg->id.
+        *
+        * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+        * sk_reg->id will stay as NULL-marking purpose only.
+        * After NULL-marking is done, sk_reg->id can be reset to 0.
+        *
+        * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+        * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+        *
+        * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+        * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+        * which is the same as sk_reg->ref_obj_id.
+        *
+        * From the verifier perspective, if sk, fullsock and tp
+        * are not NULL, they are the same ptr with different
+        * reg->type.  In particular, bpf_sk_release(tp) is also
+        * allowed and has the same effect as bpf_sk_release(sk).
+        */
+       u32 ref_obj_id;
        /* For scalar types (SCALAR_VALUE), this represents our knowledge of
         * the actual value.
         * For pointer types, this represents the variable part of the offset
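The ref_obj_id comment above walks through an eight-step pseudo-program. The sketch below writes the same sequence as a restricted-C BPF program so the invalidation rule is concrete: after bpf_sk_release(sk), every pointer carrying sk's ref_obj_id (fullsock and tp) is dead to the verifier. The helper prototypes match the ones documented in the uapi/linux/bpf.h hunk later in this diff; the section name, tuple handling and libbpf-style includes are assumptions, so treat this as a sketch rather than a ready-to-load program.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int track_cwnd(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {};       /* fill from the packet in real code */
        struct bpf_sock *sk, *fullsock;
        struct bpf_tcp_sock *tp;

        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), -1, 0); /* 1 */
        if (!sk)                                                        /* 2 */
                return 0;
        fullsock = bpf_sk_fullsock(sk);                                 /* 3 */
        if (!fullsock) {                                                /* 4 */
                bpf_sk_release(sk);
                return 0;
        }
        tp = bpf_tcp_sock(fullsock);                                    /* 5 */
        if (!tp) {                                                      /* 6 */
                bpf_sk_release(sk);
                return 0;
        }

        __u32 snd_cwnd = tp->snd_cwnd;  /* read while tp is still valid */

        bpf_sk_release(sk);             /* 7: invalidates sk, fullsock and tp */
        /* 8: reading tp->snd_cwnd here would now be rejected, because tp
         * shares sk's ref_obj_id and the release reset all such regs */
        return snd_cwnd > 0;            /* value only used so the read survives */
}

char _license[] SEC("license") = "GPL";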
index 9cd00a37b8d32b83e3539ed5ffce694c63efc8f0..6db2d9a6e503106261e042e5d65fdc68aa92194b 100644 (file)
 #define BCM_LED_SRC_OFF                0xe     /* Tied high */
 #define BCM_LED_SRC_ON         0xf     /* Tied low */
 
+/*
+ * Broadcom Multicolor LED configurations (expansion register 4)
+ */
+#define BCM_EXP_MULTICOLOR             (MII_BCM54XX_EXP_SEL_ER + 0x04)
+#define BCM_LED_MULTICOLOR_IN_PHASE    BIT(8)
+#define BCM_LED_MULTICOLOR_LINK_ACT    0x0
+#define BCM_LED_MULTICOLOR_SPEED       0x1
+#define BCM_LED_MULTICOLOR_ACT_FLASH   0x2
+#define BCM_LED_MULTICOLOR_FDX         0x3
+#define BCM_LED_MULTICOLOR_OFF         0x4
+#define BCM_LED_MULTICOLOR_ON          0x5
+#define BCM_LED_MULTICOLOR_ALT         0x6
+#define BCM_LED_MULTICOLOR_FLASH       0x7
+#define BCM_LED_MULTICOLOR_LINK                0x8
+#define BCM_LED_MULTICOLOR_ACT         0x9
+#define BCM_LED_MULTICOLOR_PROGRAM     0xa
 
 /*
  * BCM5482: Shadow registers
index a420c07904bcd7f6e7a082ebe0e78d913f94674e..337d5049ff93b5849b925086f33d509f29da5efa 100644 (file)
@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
 extern int __ceph_open_session(struct ceph_client *client,
                               unsigned long started);
 extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+                               unsigned long timeout);
 
 /* pagevec.c */
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
index d6160d479b14520485b3d2ba2c1bafb56491d2ee..7ae8de5ad0f2fae4a5c50ad30903ecdf631bb0fd 100644 (file)
@@ -195,7 +195,7 @@ struct irq_data {
  * IRQD_LEVEL                  - Interrupt is level triggered
  * IRQD_WAKEUP_STATE           - Interrupt is configured for wakeup
  *                               from suspend
- * IRDQ_MOVE_PCNTXT            - Interrupt can be moved in process
+ * IRQD_MOVE_PCNTXT            - Interrupt can be moved in process
  *                               context
  * IRQD_IRQ_DISABLED           - Disabled state of the interrupt
  * IRQD_IRQ_MASKED             - Masked state of the interrupt
index 626179077bb03d1b97ea7e2f42c95b2a4e79b1a0..0f049b384ccddf7325eb5449c82eda9d8dab63cb 100644 (file)
@@ -158,8 +158,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
  * Legacy platforms not converted to DT yet must use this to init
  * their GIC
  */
-void gic_init(unsigned int nr, int start,
-             void __iomem *dist , void __iomem *cpu);
+void gic_init(void __iomem *dist , void __iomem *cpu);
 
 int gicv2m_init(struct fwnode_handle *parent_handle,
                struct irq_domain *parent);
index 8c3f8c14eeaafd5b1035f4ef2fec81384f94f77c..c843f4a9c512588edc333075cdc3856dc9a582d3 100644 (file)
@@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
        m->vaddr = (unsigned long)vaddr;
        kclist_add(m, addr, sz, KCORE_REMAP);
 }
+
+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
 #else
 static inline
 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
index b26ea90773840eb9a6b127abc56b9334c67fb9a6..0343c81d4c5f5d7d994718c1f645014241314b2a 100644 (file)
@@ -557,7 +557,8 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
 
 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
                         struct mlx5_core_dct *qp,
-                        u32 *in, int inlen);
+                        u32 *in, int inlen,
+                        u32 *out, int outlen);
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        u32 *in,
index 651fca72286c4838307b8fe83d78bbda2dc81015..c606c72311d0e08564b274fba3aaa2d2cb3a6e7c 100644 (file)
@@ -83,6 +83,12 @@ enum sock_type {
 
 #endif /* ARCH_HAS_SOCKET_TYPES */
 
+/**
+ * enum sock_shutdown_cmd - Shutdown types
+ * @SHUT_RD: shutdown receptions
+ * @SHUT_WR: shutdown transmissions
+ * @SHUT_RDWR: shutdown receptions/transmissions
+ */
 enum sock_shutdown_cmd {
        SHUT_RD,
        SHUT_WR,
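The new kernel-doc on enum sock_shutdown_cmd mirrors the userspace shutdown(2) commands of the same names. For reference, the short runnable example below half-closes the write side of one end of a socketpair with SHUT_WR while leaving its receive side usable; this is ordinary userspace C, not kernel code.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int sv[2];
        char buf[16] = { 0 };

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
                return 1;

        send(sv[0], "ping", 4, 0);
        shutdown(sv[0], SHUT_WR);       /* no more transmissions on sv[0] */

        recv(sv[1], buf, sizeof(buf) - 1, 0);
        printf("peer sent: %s; further reads on sv[1] now return 0 (EOF)\n", buf);

        close(sv[0]);
        close(sv[1]);
        return 0;
}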
index f41f1d041e2c5e325e4d817781e13ed60695276c..397607a0c0ebef2969a3c4ea81893be860c8aacc 100644 (file)
@@ -460,7 +460,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
                                              void *, size_t, int);
 
 /* IEEE1284.3 functions */
-#define daisy_dev_name "Device ID probe"
 extern int parport_daisy_init (struct parport *port);
 extern void parport_daisy_fini (struct parport *port);
 extern struct pardevice *parport_open (int devnum, const char *name);
@@ -469,18 +468,6 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
 extern void parport_daisy_deselect_all (struct parport *port);
 extern int parport_daisy_select (struct parport *port, int daisy, int mode);
 
-#ifdef CONFIG_PARPORT_1284
-extern int daisy_drv_init(void);
-extern void daisy_drv_exit(void);
-#else
-static inline int daisy_drv_init(void)
-{
-       return 0;
-}
-
-static inline void daisy_drv_exit(void) {}
-#endif
-
 /* Lowlevel drivers _can_ call this support function to handle irqs.  */
 static inline void parport_generic_irq(struct parport *port)
 {
index 14d558146aea20e9ccc5267bd6a30856bce61406..20f3e3f029b9cadde3586aa725632871ee4d4a3e 100644 (file)
@@ -330,7 +330,7 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
 /*
  * This one is special, since it doesn't actually clear the bit, rather it
  * sets the corresponding bit in the ->cleared mask instead. Paired with
- * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * the caller doing sbitmap_deferred_clear() if a given index is full, which
  * will clear the previously freed entries in the corresponding ->word.
  */
 static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
index 6016daeecee41f28511f46c8618fbf05a16aff8a..b57cd8bf96e2b67c6588716cdfbd92dba58d7b0f 100644 (file)
@@ -26,7 +26,7 @@ typedef __kernel_sa_family_t  sa_family_t;
 /*
  *     1003.1g requires sa_family_t and that sa_data is char.
  */
+
 struct sockaddr {
        sa_family_t     sa_family;      /* address family, AF_xxx       */
        char            sa_data[14];    /* 14 bytes of protocol address */
@@ -44,7 +44,7 @@ struct linger {
  *     system, not 4.3. Thus msg_accrights(len) are now missing. They
  *     belong in an obscure libc emulation or the bin.
  */
+
 struct msghdr {
        void            *msg_name;      /* ptr to socket address structure */
        int             msg_namelen;    /* size of socket address structure */
@@ -54,7 +54,7 @@ struct msghdr {
        unsigned int    msg_flags;      /* flags on received message */
        struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
 };
+
 struct user_msghdr {
        void            __user *msg_name;       /* ptr to socket address structure */
        int             msg_namelen;            /* size of socket address structure */
@@ -122,7 +122,7 @@ struct cmsghdr {
  *     inside range, given by msg->msg_controllen before using
  *     ancillary object DATA.                          --ANK (980731)
  */
+
 static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
                                               struct cmsghdr *__cmsg)
 {
@@ -264,10 +264,10 @@ struct ucred {
 /* Maximum queue length specifiable by listen.  */
 #define SOMAXCONN      128
 
-/* Flags we can use with send/ and recv. 
+/* Flags we can use with send/ and recv.
    Added those for 1003.1g not all are supported yet
  */
+
 #define MSG_OOB                1
 #define MSG_PEEK       2
 #define MSG_DONTROUTE  4
index 87477e1640f9217223f7cbcde6b3fa416ef58ac5..f184af1999a8e8c9f8216eb7aa64a689889c66a6 100644 (file)
@@ -23,14 +23,23 @@ struct kvec {
 };
 
 enum iter_type {
-       ITER_IOVEC = 0,
-       ITER_KVEC = 2,
-       ITER_BVEC = 4,
-       ITER_PIPE = 8,
-       ITER_DISCARD = 16,
+       /* set if ITER_BVEC doesn't hold a bv_page ref */
+       ITER_BVEC_FLAG_NO_REF = 2,
+
+       /* iter types */
+       ITER_IOVEC = 4,
+       ITER_KVEC = 8,
+       ITER_BVEC = 16,
+       ITER_PIPE = 32,
+       ITER_DISCARD = 64,
 };
 
 struct iov_iter {
+       /*
+        * Bit 0 is the read/write bit, set if we're writing.
+        * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
+        * the caller isn't expecting to drop a page reference when done.
+        */
        unsigned int type;
        size_t iov_offset;
        size_t count;
@@ -84,6 +93,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
        return i->type & (READ | WRITE);
 }
 
+static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
+{
+       return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
+}
+
 /*
  * Total number of bytes covered by an iovec.
  *
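ITER_BVEC_FLAG_NO_REF lets whoever built a bvec iterator declare that no page references were taken, and iov_iter_bvec_no_ref() (added in this hunk) is how a consumer reads that back, typically to set the new BIO_NO_PAGE_REF flag from the blk_types.h hunk above. The fragment below sketches that hand-off; it reuses bio_set_flag()/bio_flagged() from the existing bio API plus a hypothetical release_bio_pages() stand-in for the per-segment put_page() loop, and is not meant to compile on its own.

/* submission side: propagate "no refs were taken" from the iter to the bio */
if (iov_iter_bvec_no_ref(iter))
        bio_set_flag(bio, BIO_NO_PAGE_REF);

/* completion side: only drop references the submitter actually took */
if (!bio_flagged(bio, BIO_NO_PAGE_REF))
        release_bio_pages(bio);         /* stand-in for put_page() per segment */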
index a240ed2a0372c20281e03a45fe49dea6a2fd60a3..ff56c443180cd6d35ec6f354ea2d519ce5443b83 100644 (file)
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
 #define vbg_debug pr_debug
 #endif
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status);
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status);
 
 /**
  * Convert a VirtualBox status code to a standard Linux kernel return value.
index 23f61850f3639ae1686f9f83e0bac6e6a5015ba5..1832402324cef7a00cc1ca97327aa21dd65e836b 100644 (file)
@@ -35,6 +35,7 @@ struct charlcd_ops {
 };
 
 struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+void charlcd_free(struct charlcd *lcd);
 
 int charlcd_register(struct charlcd *lcd);
 int charlcd_unregister(struct charlcd *lcd);
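charlcd_alloc() existed already; the new charlcd_free() gives callers a counterpart for the error path, so an LCD that was allocated but never successfully registered can be released. A hedged probe-style sketch follows; the surrounding driver, its includes and its error handling are hypothetical, and only the charlcd_* calls come from this header.

static int my_lcd_probe(void)
{
        struct charlcd *lcd;
        int err;

        lcd = charlcd_alloc(0);         /* no private driver data here */
        if (!lcd)
                return -ENOMEM;

        err = charlcd_register(lcd);
        if (err) {
                charlcd_free(lcd);      /* new helper: undo the allocation */
                return err;
        }
        return 0;
}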
index c745e9ccfab2d6f86a58c1a038e416fe29a63d67..c61a1bf4e3de544dd41886e9126b7ff20eb829c3 100644 (file)
@@ -39,7 +39,7 @@ struct tc_action {
        struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct tc_cookie        __rcu *act_cookie;
-       struct tcf_chain        *goto_chain;
+       struct tcf_chain        __rcu *goto_chain;
 };
 #define tcf_index      common.tcfa_index
 #define tcf_refcnt     common.tcfa_refcnt
@@ -90,7 +90,7 @@ struct tc_action_ops {
        int     (*lookup)(struct net *net, struct tc_action **a, u32 index);
        int     (*init)(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act, int ovr,
-                       int bind, bool rtnl_held,
+                       int bind, bool rtnl_held, struct tcf_proto *tp,
                        struct netlink_ext_ack *extack);
        int     (*walk)(struct net *, struct sk_buff *,
                        struct netlink_callback *, int,
@@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+                            struct tcf_chain **handle,
+                            struct netlink_ext_ack *newchain);
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+                                        struct tcf_chain *newchain);
 #endif /* CONFIG_NET_CLS_ACT */
 
 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
index 31284c078d06b6bb426fec08e4776a1e67ada41b..7d1a0483a17ba01b94643fd78acd72095a2e0adb 100644 (file)
@@ -378,6 +378,7 @@ struct tcf_chain {
        bool flushing;
        const struct tcf_proto_ops *tmplt_ops;
        void *tmplt_priv;
+       struct rcu_head rcu;
 };
 
 struct tcf_block {
index 32ee65a30aff1146dcafcc533e73833e190cb887..1c6e6c0766ca09b771d865883c7c4daf390215c6 100644 (file)
@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
                                        unsigned int offset)
 {
-       struct sctphdr *sh = sctp_hdr(skb);
+       struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
        const struct skb_checksum_ops ops = {
                .update  = sctp_csum_update,
                .combine = sctp_csum_combine,
index 328cb7cb7b0bb93f1eb3ec2708c43b29b56efb15..8de5ee258b93a50b2fdcde796bae3a5b53ce4d6a 100644 (file)
@@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
                hlist_add_head_rcu(&sk->sk_node, list);
 }
 
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+       sock_hold(sk);
+       hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
        hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
index ee8d005f56fcddb0e3dc5d68b10a5dd1efdeb8f7..eb8f01c819e636aca55019cbe791b0e40b612baf 100644 (file)
@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
 
 static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
 {
-       return a->goto_chain->index;
+       return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
 }
 
 #endif /* __NET_TC_GACT_H */
index 61cf7dbb678298559ccf9ea432adef04507bc597..d074b6d60f8af77a355b6755ef3d096237ff55bc 100644 (file)
@@ -36,7 +36,6 @@ struct xdp_umem {
        u32 headroom;
        u32 chunk_size_nohr;
        struct user_struct *user;
-       struct pid *pid;
        unsigned long address;
        refcount_t users;
        struct work_struct work;
index 3c38ac9a92a7c4b18cbb4ac49ac60bf887b03d20..929c8e537a14a517c0a3c7ca5b6b15353d622c30 100644 (file)
@@ -502,16 +502,6 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- *     Description
- *             Push an element *value* in *map*. *flags* is one of:
- *
- *             **BPF_EXIST**
- *             If the queue/stack is full, the oldest element is removed to
- *             make room for this.
- *     Return
- *             0 on success, or a negative error in case of failure.
- *
  * int bpf_probe_read(void *dst, u32 size, const void *src)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_addr** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_ops** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
@@ -2098,52 +2088,52 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded key press with *scancode*,
- *             *toggle* value in the given *protocol*. The scancode will be
- *             translated to a keycode using the rc keymap, and reported as
- *             an input key down event. After a period a key up event is
- *             generated. This period can be extended by calling either
- *             **bpf_rc_keydown**\ () again with the same values, or calling
- *             **bpf_rc_repeat**\ ().
+ *             report a successfully decoded repeat key message. This delays
+ *             the generation of a key up event for previously generated
+ *             key down event.
  *
- *             Some protocols include a toggle bit, in case the button was
- *             released and pressed again between consecutive scancodes.
+ *             Some IR protocols like NEC have a special IR message for
+ *             repeating last button, for when a button is held down.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
- *             The *protocol* is the decoded protocol number (see
- *             **enum rc_proto** for some predefined values).
- *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded repeat key message. This delays
- *             the generation of a key up event for previously generated
- *             key down event.
+ *             report a successfully decoded key press with *scancode*,
+ *             *toggle* value in the given *protocol*. The scancode will be
+ *             translated to a keycode using the rc keymap, and reported as
+ *             an input key down event. After a period a key up event is
+ *             generated. This period can be extended by calling either
+ *             **bpf_rc_keydown**\ () again with the same values, or calling
+ *             **bpf_rc_repeat**\ ().
  *
- *             Some IR protocols like NEC have a special IR message for
- *             repeating last button, for when a button is held down.
+ *             Some protocols include a toggle bit, in case the button was
+ *             released and pressed again between consecutive scancodes.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
+ *             The *protocol* is the decoded protocol number (see
+ *             **enum rc_proto** for some predefined values).
+ *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
  *     Description
  *             Return the cgroup v2 id of the socket associated with the *skb*.
  *             This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- *     Description
- *             Return id of cgroup v2 that is ancestor of cgroup associated
- *             with the *skb* at the *ancestor_level*.  The root cgroup is at
- *             *ancestor_level* zero and each step down the hierarchy
- *             increments the level. If *ancestor_level* == level of cgroup
- *             associated with *skb*, then return value will be same as that
- *             of **bpf_skb_cgroup_id**\ ().
- *
- *             The helper is useful to implement policies based on cgroups
- *             that are upper in hierarchy than immediate cgroup associated
- *             with *skb*.
- *
- *             The format of returned id and helper limitations are same as in
- *             **bpf_skb_cgroup_id**\ ().
- *     Return
- *             The id is returned or 0 in case the id could not be retrieved.
- *
  * u64 bpf_get_current_cgroup_id(void)
  *     Return
  *             A 64-bit integer containing the current cgroup id based
  *             on the cgroup within which the current task is running.
  *
- * voidget_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
  *     Description
  *             Get the pointer to the local storage area.
  *             The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ *     Description
+ *             Return id of cgroup v2 that is ancestor of cgroup associated
+ *             with the *skb* at the *ancestor_level*.  The root cgroup is at
+ *             *ancestor_level* zero and each step down the hierarchy
+ *             increments the level. If *ancestor_level* == level of cgroup
+ *             associated with *skb*, then return value will be same as that
+ *             of **bpf_skb_cgroup_id**\ ().
+ *
+ *             The helper is useful to implement policies based on cgroups
+ *             that are upper in hierarchy than immediate cgroup associated
+ *             with *skb*.
+ *
+ *             The format of returned id and helper limitations are same as in
+ *             **bpf_skb_cgroup_id**\ ().
+ *     Return
+ *             The id is returned or 0 in case the id could not be retrieved.
+ *
  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ *     Description
+ *             Push an element *value* in *map*. *flags* is one of:
+ *
+ *             **BPF_EXIST**
+ *                     If the queue/stack is full, the oldest element is
+ *                     removed to make room for this.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
  *     Return
  *             0
  *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Acquire a spinlock represented by the pointer *lock*, which is
+ *             stored as part of a value of a map. Taking the lock allows to
+ *             safely update the rest of the fields in that value. The
+ *             spinlock can (and must) later be released with a call to
+ *             **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ *             Spinlocks in BPF programs come with a number of restrictions
+ *             and constraints:
+ *
+ *             * **bpf_spin_lock** objects are only allowed inside maps of
+ *               types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ *               list could be extended in the future).
+ *             * BTF description of the map is mandatory.
+ *             * The BPF program can take ONE lock at a time, since taking two
+ *               or more could cause dead locks.
+ *             * Only one **struct bpf_spin_lock** is allowed per map element.
+ *             * When the lock is taken, calls (either BPF to BPF or helpers)
+ *               are not allowed.
+ *             * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ *               allowed inside a spinlock-ed region.
+ *             * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ *               the lock, on all execution paths, before it returns.
+ *             * The BPF program can access **struct bpf_spin_lock** only via
+ *               the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ *               helpers. Loading or storing data into the **struct
+ *               bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ *             * To use the **bpf_spin_lock**\ () helper, the BTF description
+ *               of the map value must be a struct and have **struct
+ *               bpf_spin_lock** *anyname*\ **;** field at the top level.
+ *               Nested lock inside another struct is not allowed.
+ *             * The **struct bpf_spin_lock** *lock* field in a map value must
+ *               be aligned on a multiple of 4 bytes in that value.
+ *             * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ *               the **bpf_spin_lock** field to user space.
+ *             * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ *               a BPF program, do not update the **bpf_spin_lock** field.
+ *             * **bpf_spin_lock** cannot be on the stack or inside a
+ *               networking packet (it can only be inside of a map values).
+ *             * **bpf_spin_lock** is available to root only.
+ *             * Tracing programs and socket filter programs cannot use
+ *               **bpf_spin_lock**\ () due to insufficient preemption checks
+ *               (but this may change in the future).
+ *             * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ *     Return
+ *             0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Release the *lock* previously locked by a call to
+ *             **bpf_spin_lock**\ (\ *lock*\ ).
+ *     Return
+ *             0
+ *
  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_sock** pointer such
- *             that all the fields in bpf_sock can be accessed.
+ *             that all the fields in this **bpf_sock** can be accessed.
  *     Return
- *             A **struct bpf_sock** pointer on success, or NULL in
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_tcp_sock** pointer from a
  *             **struct bpf_sock** pointer.
- *
  *     Return
- *             A **struct bpf_tcp_sock** pointer on success, or NULL in
+ *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
- *     Description
- *             Sets ECN of IP header to ce (congestion encountered) if
- *             current value is ect (ECN capable). Works with IPv6 and IPv4.
- *     Return
- *             1 if set, 0 if not set.
+ *     Description
+ *             Set ECN (Explicit Congestion Notification) field of IP header
+ *             to **CE** (Congestion Encountered) if current value is **ECT**
+ *             (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ *             and IPv4.
+ *     Return
+ *             1 if the **CE** flag is set (either by the current helper call
+ *             or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ *     Description
+ *             Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ *             **bpf_sk_release**\ () is unnecessary and not allowed.
+ *     Return
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
+ *             case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -2465,7 +2530,8 @@ union bpf_attr {
        FN(spin_unlock),                \
        FN(sk_fullsock),                \
        FN(tcp_sock),                   \
-       FN(skb_ecn_set_ce),
+       FN(skb_ecn_set_ce),             \
+       FN(get_listener_sock),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
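The bpf_spin_lock() documentation above is a list of constraints; the sketch below shows the shape it describes in one place: a BTF-described array map whose value embeds exactly one top-level struct bpf_spin_lock, with the lock taken and released on the single path between lookup and return, and no helper calls while it is held. The libbpf-style map declaration, includes and section name are assumptions, so this is a sketch rather than a drop-in program.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct counter {
        struct bpf_spin_lock lock;      /* exactly one, at the top level */
        __u64 packets;
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);       /* HASH or ARRAY only */
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct counter);
} counters SEC(".maps");

SEC("tc")
int count_packets(struct __sk_buff *skb)
{
        __u32 key = 0;
        struct counter *val = bpf_map_lookup_elem(&counters, &key);

        if (!val)
                return 0;

        bpf_spin_lock(&val->lock);      /* no helper calls allowed while held */
        val->packets++;
        bpf_spin_unlock(&val->lock);    /* must be released on every path */
        return 0;
}

char _license[] SEC("license") = "GPL";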
index 0e68024f36c712dcb295f6214914ea5dc3371a25..26f39816af14c149ab1d8be5842112f4bf36c18c 100644 (file)
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
 #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
 #endif
 
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN                      0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV                            0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER                      0x00000002
+/* The root or a admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT                           0x00000003
+/* Regular joe user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER                           0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK                           0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL                             0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE                           0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK                          0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW                      0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO                             0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES                            0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK                           0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX                           0x00000080
+
+/* Note: trust level is for windows guests only, linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN                    0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED                    0x00001000
+/* Requestor trust level: Untrusted (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW                          0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM                       0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS                  0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH                         0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM                       0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED                    0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK                         0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE                        0x00008000
+
 /** HGCM service location types. */
 enum vmmdev_hgcm_service_location_type {
        VMMDEV_HGCM_LOC_INVALID    = 0,
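The requestor word defined above packs several independent fields (user class, kernel/user mode, console association, trust level, device node) into one 32-bit value, each with its own mask. The standalone example below composes and decodes such a value; the constants are copied from the hunk so the example compiles on its own, and the chosen combination is just an illustration.

#include <stdio.h>

#define VMMDEV_REQUESTOR_USR_USER       0x00000006
#define VMMDEV_REQUESTOR_USR_MASK       0x00000007
#define VMMDEV_REQUESTOR_USERMODE       0x00000008
#define VMMDEV_REQUESTOR_MODE_MASK      0x00000008
#define VMMDEV_REQUESTOR_CON_DONT_KNOW  0x00000000
#define VMMDEV_REQUESTOR_CON_MASK       0x00000030
#define VMMDEV_REQUESTOR_USER_DEVICE    0x00008000

int main(void)
{
        unsigned int requestor = VMMDEV_REQUESTOR_USR_USER |
                                 VMMDEV_REQUESTOR_USERMODE |
                                 VMMDEV_REQUESTOR_CON_DONT_KNOW |
                                 VMMDEV_REQUESTOR_USER_DEVICE;

        printf("user class %u, usermode %d, via /dev/vboxuser %d\n",
               requestor & VMMDEV_REQUESTOR_USR_MASK,
               !!(requestor & VMMDEV_REQUESTOR_MODE_MASK),
               !!(requestor & VMMDEV_REQUESTOR_USER_DEVICE));
        return 0;
}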
index 62f6bced3a3c486732dd871693d5d44cf19ab8c2..afca36f53c492718820ecacdb588af585dbb50e4 100644 (file)
@@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 
 void *bpf_map_area_alloc(size_t size, int numa_node)
 {
-       /* We definitely need __GFP_NORETRY, so OOM killer doesn't
-        * trigger under memory pressure as we really just want to
-        * fail instead.
+       /* We really just want to fail instead of triggering OOM killer
+        * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
+        * which is used for lower order allocation requests.
+        *
+        * It has been observed that higher order allocation requests done by
+        * vmalloc with __GFP_NORETRY being set might fail due to not trying
+        * to reclaim memory from the page cache, thus we set
+        * __GFP_RETRY_MAYFAIL to avoid such situations.
         */
-       const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+
+       const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
        void *area;
 
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-               area = kmalloc_node(size, GFP_USER | flags, numa_node);
+               area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
+                                   numa_node);
                if (area != NULL)
                        return area;
        }
 
-       return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
-                                          __builtin_return_address(0));
+       return __vmalloc_node_flags_caller(size, numa_node,
+                                          GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+                                          flags, __builtin_return_address(0));
 }
 
 void bpf_map_area_free(void *area)
index ce166a002d161a08eff6bdbb77158886dbba8012..fd502c1f71eb003e5975ec58e33ccc8f8e1c0586 100644 (file)
@@ -212,7 +212,7 @@ struct bpf_call_arg_meta {
        int access_size;
        s64 msize_smax_value;
        u64 msize_umax_value;
-       int ptr_id;
+       int ref_obj_id;
        int func_id;
 };
 
@@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
               type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool type_is_refcounted(enum bpf_reg_type type)
-{
-       return type == PTR_TO_SOCKET;
-}
-
-static bool type_is_refcounted_or_null(enum bpf_reg_type type)
-{
-       return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
-}
-
-static bool reg_is_refcounted(const struct bpf_reg_state *reg)
-{
-       return type_is_refcounted(reg->type);
-}
-
 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 {
        return reg->type == PTR_TO_MAP_VALUE &&
                map_value_has_spin_lock(reg->map_ptr);
 }
 
-static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
+static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
 {
-       return type_is_refcounted_or_null(reg->type);
+       return type == PTR_TO_SOCKET ||
+               type == PTR_TO_SOCKET_OR_NULL ||
+               type == PTR_TO_TCP_SOCK ||
+               type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool arg_type_is_refcounted(enum bpf_arg_type type)
+static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
 {
-       return type == ARG_PTR_TO_SOCKET;
+       return type == ARG_PTR_TO_SOCK_COMMON;
 }
 
 /* Determine whether the function releases some resources allocated by another
@@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id)
                func_id == BPF_FUNC_sk_lookup_udp;
 }
 
+static bool is_ptr_cast_function(enum bpf_func_id func_id)
+{
+       return func_id == BPF_FUNC_tcp_sock ||
+               func_id == BPF_FUNC_sk_fullsock;
+}
+
 /* string representation of 'enum bpf_reg_type' */
 static const char * const reg_type_str[] = {
        [NOT_INIT]              = "?",
@@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                                verbose(env, ",call_%d", func(env, reg)->callsite);
                } else {
                        verbose(env, "(id=%d", reg->id);
+                       if (reg_type_may_be_refcounted_or_null(t))
+                               verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
                        if (t != SCALAR_VALUE)
                                verbose(env, ",off=%d", reg->off);
                        if (type_is_pkt_pointer(t))
@@ -2414,16 +2410,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
                if (!type_is_sk_pointer(type))
                        goto err_type;
-       } else if (arg_type == ARG_PTR_TO_SOCKET) {
-               expected_type = PTR_TO_SOCKET;
-               if (type != expected_type)
-                       goto err_type;
-               if (meta->ptr_id || !reg->id) {
-                       verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
-                               meta->ptr_id, reg->id);
-                       return -EFAULT;
+               if (reg->ref_obj_id) {
+                       if (meta->ref_obj_id) {
+                               verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
+                                       regno, reg->ref_obj_id,
+                                       meta->ref_obj_id);
+                               return -EFAULT;
+                       }
+                       meta->ref_obj_id = reg->ref_obj_id;
                }
-               meta->ptr_id = reg->id;
        } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
                if (meta->func_id == BPF_FUNC_spin_lock) {
                        if (process_spin_lock(env, regno, true))
@@ -2740,32 +2735,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
        return true;
 }
 
-static bool check_refcount_ok(const struct bpf_func_proto *fn)
+static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
 {
        int count = 0;
 
-       if (arg_type_is_refcounted(fn->arg1_type))
+       if (arg_type_may_be_refcounted(fn->arg1_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg2_type))
+       if (arg_type_may_be_refcounted(fn->arg2_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg3_type))
+       if (arg_type_may_be_refcounted(fn->arg3_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg4_type))
+       if (arg_type_may_be_refcounted(fn->arg4_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg5_type))
+       if (arg_type_may_be_refcounted(fn->arg5_type))
                count++;
 
+       /* A reference acquiring function cannot acquire
+        * another refcounted ptr.
+        */
+       if (is_acquire_function(func_id) && count)
+               return false;
+
        /* We only support one arg being unreferenced at the moment,
         * which is sufficient for the helper functions we have right now.
         */
        return count <= 1;
 }
 
-static int check_func_proto(const struct bpf_func_proto *fn)
+static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
 {
        return check_raw_mode_ok(fn) &&
               check_arg_pair_ok(fn) &&
-              check_refcount_ok(fn) ? 0 : -EINVAL;
+              check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
 }
 
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2799,19 +2800,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 }
 
 static void release_reg_references(struct bpf_verifier_env *env,
-                                  struct bpf_func_state *state, int id)
+                                  struct bpf_func_state *state,
+                                  int ref_obj_id)
 {
        struct bpf_reg_state *regs = state->regs, *reg;
        int i;
 
        for (i = 0; i < MAX_BPF_REG; i++)
-               if (regs[i].id == id)
+               if (regs[i].ref_obj_id == ref_obj_id)
                        mark_reg_unknown(env, regs, i);
 
        bpf_for_each_spilled_reg(i, state, reg) {
                if (!reg)
                        continue;
-               if (reg_is_refcounted(reg) && reg->id == id)
+               if (reg->ref_obj_id == ref_obj_id)
                        __mark_reg_unknown(reg);
        }
 }
@@ -2820,15 +2822,20 @@ static void release_reg_references(struct bpf_verifier_env *env,
  * resources. Identify all copies of the same pointer and clear the reference.
  */
 static int release_reference(struct bpf_verifier_env *env,
-                            struct bpf_call_arg_meta *meta)
+                            int ref_obj_id)
 {
        struct bpf_verifier_state *vstate = env->cur_state;
+       int err;
        int i;
 
+       err = release_reference_state(cur_func(env), ref_obj_id);
+       if (err)
+               return err;
+
        for (i = 0; i <= vstate->curframe; i++)
-               release_reg_references(env, vstate->frame[i], meta->ptr_id);
+               release_reg_references(env, vstate->frame[i], ref_obj_id);
 
-       return release_reference_state(cur_func(env), meta->ptr_id);
+       return 0;
 }
 
 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -3047,7 +3054,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
 
-       err = check_func_proto(fn);
+       err = check_func_proto(fn, func_id);
        if (err) {
                verbose(env, "kernel subsystem misconfigured func %s#%d\n",
                        func_id_name(func_id), func_id);
@@ -3093,7 +3100,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                        return err;
                }
        } else if (is_release_function(func_id)) {
-               err = release_reference(env, &meta);
+               err = release_reference(env, meta.ref_obj_id);
                if (err) {
                        verbose(env, "func %s#%d reference has not been acquired before\n",
                                func_id_name(func_id), func_id);
@@ -3154,8 +3161,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 
                        if (id < 0)
                                return id;
-                       /* For release_reference() */
+                       /* For mark_ptr_or_null_reg() */
                        regs[BPF_REG_0].id = id;
+                       /* For release_reference() */
+                       regs[BPF_REG_0].ref_obj_id = id;
                } else {
                        /* For mark_ptr_or_null_reg() */
                        regs[BPF_REG_0].id = ++env->id_gen;
@@ -3170,6 +3179,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                return -EINVAL;
        }
 
+       if (is_ptr_cast_function(func_id))
+               /* For release_reference() */
+               regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+
        do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
 
        err = check_map_func_compatibility(env, meta.map_ptr, func_id);
@@ -3368,7 +3381,7 @@ do_sim:
                *dst_reg = *ptr_reg;
        }
        ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
-       if (!ptr_is_dst_reg)
+       if (!ptr_is_dst_reg && ret)
                *dst_reg = tmp;
        return !ret ? -EFAULT : 0;
 }
@@ -4665,11 +4678,19 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
                } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
                        reg->type = PTR_TO_TCP_SOCK;
                }
-               if (is_null || !(reg_is_refcounted(reg) ||
-                                reg_may_point_to_spin_lock(reg))) {
-                       /* We don't need id from this point onwards anymore,
-                        * thus we should better reset it, so that state
-                        * pruning has chances to take effect.
+               if (is_null) {
+                       /* We don't need id and ref_obj_id from this point
+                        * onwards anymore, so reset them to give state
+                        * pruning a chance to take effect.
+                        */
+                       reg->id = 0;
+                       reg->ref_obj_id = 0;
+               } else if (!reg_may_point_to_spin_lock(reg)) {
+                       /* For not-NULL ptr, reg->ref_obj_id will be reset
+                        * in release_reg_references().
+                        *
+                        * reg->id is still used by spin_lock ptr. Other
+                        * than spin_lock ptr type, reg->id can be reset.
                         */
                        reg->id = 0;
                }
@@ -4684,11 +4705,16 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 {
        struct bpf_func_state *state = vstate->frame[vstate->curframe];
        struct bpf_reg_state *reg, *regs = state->regs;
+       u32 ref_obj_id = regs[regno].ref_obj_id;
        u32 id = regs[regno].id;
        int i, j;
 
-       if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
-               release_reference_state(state, id);
+       if (ref_obj_id && ref_obj_id == id && is_null)
+               /* regs[regno] is in the " == NULL" branch.
+                * No one could have freed the reference state before
+                * doing the NULL check.
+                */
+               WARN_ON_ONCE(release_reference_state(state, id));
 
        for (i = 0; i < MAX_BPF_REG; i++)
                mark_ptr_or_null_reg(state, &regs[i], id, is_null);
@@ -6052,15 +6078,17 @@ static int propagate_liveness(struct bpf_verifier_env *env,
        }
        /* Propagate read liveness of registers... */
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
-       /* We don't need to worry about FP liveness because it's read-only */
-       for (i = 0; i < BPF_REG_FP; i++) {
-               if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
-                       continue;
-               if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
-                       err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
-                                           &vparent->frame[vstate->curframe]->regs[i]);
-                       if (err)
-                               return err;
+       for (frame = 0; frame <= vstate->curframe; frame++) {
+               /* We don't need to worry about FP liveness, it's read-only */
+               for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
+                       if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
+                               continue;
+                       if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
+                               err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
+                                                   &vparent->frame[frame]->regs[i]);
+                               if (err)
+                                       return err;
+                       }
                }
        }
 
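The verifier hunks above separate reg->id (used for the NULL-branch tracking in mark_ptr_or_null_reg()) from the new reg->ref_obj_id (used for reference tracking), and check_refcount_ok() now refuses helpers that would both take and return a referenced pointer. A minimal sketch of the acquire/release contract this machinery enforces, written as a tc classifier and assuming a libbpf-style bpf_helpers.h that declares the helpers:

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch only: shows the pairing the verifier tracks via ref_obj_id. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int track_refs(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {};       /* zeroed lookup key; illustrative only */
        struct bpf_sock *sk;

        /* Acquire: returns a referenced socket (or NULL); the verifier
         * records ref_obj_id on the returned register.
         */
        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return TC_ACT_OK;

        /* ... use sk ... */

        /* Release: without this call the program is rejected with an
         * "Unreleased reference" error.
         */
        bpf_sk_release(sk);
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";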
index 1032a16bd1866d228623f4ef1f572fcd5cac64f8..72d06e302e9938dcaee6dbf57324d3ed31397035 100644 (file)
@@ -7189,6 +7189,7 @@ static void perf_event_mmap_output(struct perf_event *event,
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = mmap_event->event_id.header.size;
+       u32 type = mmap_event->event_id.header.type;
        int ret;
 
        if (!perf_event_mmap_match(event, data))
@@ -7232,6 +7233,7 @@ static void perf_event_mmap_output(struct perf_event *event,
        perf_output_end(&handle);
 out:
        mmap_event->event_id.header.size = size;
+       mmap_event->event_id.header.type = type;
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
index c3b73b0311bc7c79d4b8a34d344cc943d6511b32..9e40cf7be60662f0fb8f6c61a6066532954b3fb1 100644 (file)
@@ -3436,6 +3436,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int p
 {
        u32 uval, uninitialized_var(nval), mval;
 
+       /* Futex address must be 32-bit aligned */
+       if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
+               return -1;
+
 retry:
        if (get_user(uval, uaddr))
                return -1;
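The new check rejects a robust-futex address that is not 32-bit aligned before any user access is attempted. The same test in ordinary user-space C, as a hedged sketch:

#include <stdint.h>
#include <stdio.h>

/* A u32 futex word must sit on a 4-byte boundary; anything else is
 * rejected up front, exactly like the kernel check above.
 */
static int futex_addr_ok(const void *uaddr)
{
        return ((uintptr_t)uaddr % sizeof(uint32_t)) == 0;
}

int main(void)
{
        uint32_t word;
        char *misaligned = (char *)&word + 1;

        printf("&word: %d, &word+1: %d\n",
               futex_addr_ok(&word), futex_addr_ok(misaligned));
        return 0;
}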
index 5d5378ea0afe316e3d5d569cc541f1ef23b0600a..f808c6a97dccc79865a97afa53fd01d1c89bacc3 100644 (file)
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(devm_request_threaded_irq);
  *     @dev: device to request interrupt for
  *     @irq: Interrupt line to allocate
  *     @handler: Function to be called when the IRQ occurs
- *     @thread_fn: function to be called in a threaded interrupt context. NULL
- *                 for devices which handle everything in @handler
  *     @irqflags: Interrupt type flags
  *     @devname: An ascii name for the claiming device, dev_name(dev) if NULL
  *     @dev_id: A cookie passed back to the handler function
index 9ec34a2a6638d4a242be9002e85e3ec87fc878a9..1401afa0d58a4774348129bc14bd3292187c5156 100644 (file)
@@ -196,6 +196,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
                cpumask_copy(desc->irq_common_data.affinity, mask);
+               /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_validate_effective_affinity(data);
                irq_set_thread_affinity(desc);
index ead464a0f2e5dfc71a0c0df16a9823f3677f847a..4778c48a7fda4d78cd1dbff0afa658f82da38ba1 100644 (file)
@@ -6998,7 +6998,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
 {
        char tok[21];   /* U64_MAX */
 
-       if (!sscanf(buf, "%s %llu", tok, periodp))
+       if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
                return -EINVAL;
 
        *periodp *= NSEC_PER_USEC;
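Two things change in the parser above: the %20s width keeps sscanf() from overflowing the 21-byte token buffer, and comparing the return value against 1 also rejects empty input, where sscanf() returns EOF (-1) and the old !sscanf() test did not trip. A small stand-alone illustration (names here are illustrative only):

#include <stdio.h>

/* User-space analogue of the fix: "%20s" caps what sscanf() may write into
 * tok[21] (20 chars + NUL), and "< 1" treats both "no token" and EOF as
 * errors.
 */
static int parse_period(const char *buf, unsigned long long *periodp)
{
        char tok[21];   /* room for "max" or a decimal u64 */

        if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
                return -1;
        return 0;
}

int main(void)
{
        unsigned long long period = 0;

        printf("%d\n", parse_period("max 100000", &period));   /* 0 */
        printf("%d\n", parse_period("", &period));             /* -1 */
        return 0;
}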
index 2efe629425be2491918d6331f093e76b3d806948..5c41ea3674223616b0fb4887681d895748fca4f3 100644 (file)
@@ -48,10 +48,10 @@ struct sugov_cpu {
 
        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
-       unsigned int            iowait_boost_max;
        u64                     last_update;
 
        unsigned long           bw_dl;
+       unsigned long           min;
        unsigned long           max;
 
        /* The field below is for single-CPU policies only: */
@@ -303,8 +303,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
        if (delta_ns <= TICK_NSEC)
                return false;
 
-       sg_cpu->iowait_boost = set_iowait_boost
-               ? sg_cpu->sg_policy->policy->min : 0;
+       sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;
 
        return true;
@@ -344,14 +343,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 
        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
-               sg_cpu->iowait_boost <<= 1;
-               if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
-                       sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+               sg_cpu->iowait_boost =
+                       min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }
 
        /* First wakeup after IO: start with minimum boost */
-       sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+       sg_cpu->iowait_boost = sg_cpu->min;
 }
 
 /**
@@ -373,47 +371,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  * This mechanism is designed to boost tasks that frequently wait on IO,
  * while being more conservative on tasks that do only sporadic IO operations.
  */
-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
-                              unsigned long *util, unsigned long *max)
+static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+                                       unsigned long util, unsigned long max)
 {
-       unsigned int boost_util, boost_max;
+       unsigned long boost;
 
        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
-               return;
+               return util;
 
        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
-               return;
+               return util;
 
-       /*
-        * An IO waiting task has just woken up:
-        * allow to further double the boost value
-        */
-       if (sg_cpu->iowait_boost_pending) {
-               sg_cpu->iowait_boost_pending = false;
-       } else {
+       if (!sg_cpu->iowait_boost_pending) {
                /*
-                * Otherwise: reduce the boost value and disable it when we
-                * reach the minimum.
+                * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
-               if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+               if (sg_cpu->iowait_boost < sg_cpu->min) {
                        sg_cpu->iowait_boost = 0;
-                       return;
+                       return util;
                }
        }
 
+       sg_cpu->iowait_boost_pending = false;
+
        /*
-        * Apply the current boost value: a CPU is boosted only if its current
-        * utilization is smaller then the current IO boost level.
+        * @util is already in capacity scale; convert iowait_boost
+        * into the same scale so we can compare.
         */
-       boost_util = sg_cpu->iowait_boost;
-       boost_max = sg_cpu->iowait_boost_max;
-       if (*util * boost_max < *max * boost_util) {
-               *util = boost_util;
-               *max = boost_max;
-       }
+       boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
+       return max(boost, util);
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -460,7 +449,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
-       sugov_iowait_apply(sg_cpu, time, &util, &max);
+       util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
@@ -500,7 +489,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 
                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
-               sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
+               j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
 
                if (j_util * max > j_max * util) {
                        util = j_util;
@@ -837,7 +826,9 @@ static int sugov_start(struct cpufreq_policy *policy)
                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu                     = cpu;
                sg_cpu->sg_policy               = sg_policy;
-               sg_cpu->iowait_boost_max        = policy->cpuinfo.max_freq;
+               sg_cpu->min                     =
+                       (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
+                       policy->cpuinfo.max_freq;
        }
 
        for_each_cpu(cpu, policy->cpus) {
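With this patch iowait_boost is kept in capacity units rather than frequency units, so applying it becomes a multiply-and-shift followed by a max() against the current utilization. A hedged stand-alone sketch of the arithmetic, assuming the kernel's usual SCHED_CAPACITY_SCALE of 1024 (the real constants live in the scheduler headers):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

/* Mirror of the new sugov_iowait_apply() math: scale the boost by the CPU's
 * max capacity and take the larger of boost and util.
 */
static unsigned long apply_boost(unsigned long util, unsigned long max,
                                 unsigned long iowait_boost)
{
        unsigned long boost = (iowait_boost * max) >> SCHED_CAPACITY_SHIFT;

        return boost > util ? boost : util;
}

int main(void)
{
        /* min boost for a 500 MHz..2 GHz CPU: 1024 * 500000 / 2000000 = 256 */
        unsigned long min = SCHED_CAPACITY_SCALE * 500000 / 2000000;

        printf("min boost = %lu\n", min);
        printf("boosted util = %lu\n", apply_boost(100, 1024, min));
        return 0;
}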
index ea74d43924b25f7ae98788532d2070152b6cb5fc..fdab7eb6f3517af0ca9581a730b0771c28092fa0 100644 (file)
@@ -8059,6 +8059,18 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
                                (rq->cpu_capacity_orig * 100));
 }
 
+/*
+ * Check whether a rq has a misfit task and if it looks like we can actually
+ * help that task: we can migrate the task to a CPU of higher capacity, or
+ * the task's current CPU is heavily pressured.
+ */
+static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
+{
+       return rq->misfit_task_load &&
+               (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
+                check_cpu_capacity(rq, sd));
+}
+
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
  * groups is inadequate due to ->cpus_allowed constraints.
@@ -9586,35 +9598,21 @@ static void nohz_balancer_kick(struct rq *rq)
        if (time_before(now, nohz.next_balance))
                goto out;
 
-       if (rq->nr_running >= 2 || rq->misfit_task_load) {
+       if (rq->nr_running >= 2) {
                flags = NOHZ_KICK_MASK;
                goto out;
        }
 
        rcu_read_lock();
-       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-       if (sds) {
-               /*
-                * If there is an imbalance between LLC domains (IOW we could
-                * increase the overall cache use), we need some less-loaded LLC
-                * domain to pull some load. Likewise, we may need to spread
-                * load within the current LLC domain (e.g. packed SMT cores but
-                * other CPUs are idle). We can't really know from here how busy
-                * the others are - so just get a nohz balance going if it looks
-                * like this LLC domain has tasks we could move.
-                */
-               nr_busy = atomic_read(&sds->nr_busy_cpus);
-               if (nr_busy > 1) {
-                       flags = NOHZ_KICK_MASK;
-                       goto unlock;
-               }
-
-       }
 
        sd = rcu_dereference(rq->sd);
        if (sd) {
-               if ((rq->cfs.h_nr_running >= 1) &&
-                   check_cpu_capacity(rq, sd)) {
+               /*
+                * If there's a CFS task and the current CPU has reduced
+                * capacity, kick the ILB to see if there's a better CPU to run
+                * on.
+                */
+               if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
                        flags = NOHZ_KICK_MASK;
                        goto unlock;
                }
@@ -9622,6 +9620,11 @@ static void nohz_balancer_kick(struct rq *rq)
 
        sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
        if (sd) {
+               /*
+                * When ASYM_PACKING, see if a more preferred CPU is
+                * currently idle; if so, kick the ILB to move tasks
+                * around.
+                */
                for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
                        if (sched_asym_prefer(i, cpu)) {
                                flags = NOHZ_KICK_MASK;
@@ -9629,6 +9632,45 @@ static void nohz_balancer_kick(struct rq *rq)
                        }
                }
        }
+
+       sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
+       if (sd) {
+               /*
+                * When ASYM_CPUCAPACITY, see if there's a higher-capacity CPU
+                * to run the misfit task on.
+                */
+               if (check_misfit_status(rq, sd)) {
+                       flags = NOHZ_KICK_MASK;
+                       goto unlock;
+               }
+
+               /*
+                * For asymmetric systems, we do not want to nicely balance
+                * cache use, instead we want to embrace asymmetry and only
+                * ensure tasks have enough CPU capacity.
+                *
+                * Skip the LLC logic because it's not relevant in that case.
+                */
+               goto unlock;
+       }
+
+       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+       if (sds) {
+               /*
+                * If there is an imbalance between LLC domains (IOW we could
+                * increase the overall cache use), we need some less-loaded LLC
+                * domain to pull some load. Likewise, we may need to spread
+                * load within the current LLC domain (e.g. packed SMT cores but
+                * other CPUs are idle). We can't really know from here how busy
+                * the others are - so just get a nohz balance going if it looks
+                * like this LLC domain has tasks we could move.
+                */
+               nr_busy = atomic_read(&sds->nr_busy_cpus);
+               if (nr_busy > 1) {
+                       flags = NOHZ_KICK_MASK;
+                       goto unlock;
+               }
+       }
 unlock:
        rcu_read_unlock();
 out:
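check_misfit_status() above kicks the idle load balancer when a CPU is carrying a task it cannot serve well: either a bigger CPU exists in the root domain, or the CPU's own capacity is being squeezed below capacity_orig/imbalance_pct (check_cpu_capacity()). A rough user-space sketch of those two tests, with field names only loosely modelled on the kernel structs:

#include <stdbool.h>
#include <stdio.h>

struct cpu {
        unsigned long capacity;         /* capacity left after RT/IRQ pressure */
        unsigned long capacity_orig;    /* full capacity of this CPU */
        unsigned long max_cpu_capacity; /* biggest capacity_orig in the domain */
        unsigned long misfit_task_load; /* non-zero if a task doesn't fit here */
        unsigned int imbalance_pct;     /* e.g. 117 */
};

/* Same inequality as check_cpu_capacity(): capacity * pct < cap_orig * 100 */
static bool cpu_capacity_reduced(const struct cpu *c)
{
        return c->capacity * c->imbalance_pct < c->capacity_orig * 100;
}

/* Same shape as check_misfit_status(): a misfit task plus either a bigger
 * CPU elsewhere or local capacity pressure.
 */
static bool misfit_needs_kick(const struct cpu *c)
{
        return c->misfit_task_load &&
               (c->capacity_orig < c->max_cpu_capacity ||
                cpu_capacity_reduced(c));
}

int main(void)
{
        struct cpu little = { 400, 446, 1024, 1, 117 };

        printf("kick ILB: %d\n", misfit_needs_kick(&little));
        return 0;
}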
index dc1b6f1929f9b00db9e47c1bc97c82a33feaa728..ac9c03dd6c7d3134c1901c52b8b45ea1eedffad2 100644 (file)
@@ -89,7 +89,7 @@ struct clocksource * __init __weak clocksource_default_clock(void)
        return &clocksource_jiffies;
 }
 
-struct clocksource refined_jiffies;
+static struct clocksource refined_jiffies;
 
 int register_refined_jiffies(long cycles_per_second)
 {
index fa79323331b22f327872ae41e185a17562f6d78a..26c8ca9bd06b6725b84f42d6b635c0d256118ebb 100644 (file)
@@ -1992,7 +1992,7 @@ static void print_bug_type(void)
  * modifying the code. @failed should be one of either:
  * EFAULT - if the problem happens on reading the @ip address
  * EINVAL - if what is read at @ip is not what was expected
- * EPERM - if the problem happens on writting to the @ip address
+ * EPERM - if the problem happens on writing to the @ip address
  */
 void ftrace_bug(int failed, struct dyn_ftrace *rec)
 {
@@ -2391,7 +2391,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
        }
 
-       return -1; /* unknow ftrace bug */
+       return -1; /* unknown ftrace bug */
 }
 
 void __weak ftrace_replace_code(int mod_flags)
@@ -3004,7 +3004,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
        int cnt;
 
        if (!num_to_init)
-               return 0;
+               return NULL;
 
        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
        if (!pg)
@@ -4755,7 +4755,7 @@ static int
 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
                int reset, int enable)
 {
-       return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
+       return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
 }
 
 /**
@@ -5463,7 +5463,7 @@ void ftrace_create_filter_files(struct ftrace_ops *ops,
 
 /*
  * The name "destroy_filter_files" is really a misnomer. Although
- * in the future, it may actualy delete the files, but this is
+ * in the future, it may actually delete the files, but this is
  * really intended to make sure the ops passed in are disabled
  * and that when this function returns, the caller is free to
  * free the ops.
@@ -5786,7 +5786,7 @@ void ftrace_module_enable(struct module *mod)
        /*
         * If the tracing is enabled, go ahead and enable the record.
         *
-        * The reason not to enable the record immediatelly is the
+        * The reason not to enable the record immediately is the
         * inherent check of ftrace_make_nop/ftrace_make_call for
         * correct previous instructions.  Making first the NOP
         * conversion puts the module to the correct state, thus
index dd1f43588d7097a62a84966d8a90b425fd0f2f64..fa100ed3b4de9d128cd05215c972828ee13fbb26 100644 (file)
@@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
 static int create_dyn_event(int argc, char **argv)
 {
        struct dyn_event_operations *ops;
-       int ret;
+       int ret = -ENODEV;
 
        if (argv[0][0] == '-' || argv[0][0] == '!')
                return dyn_event_release(argc, argv, NULL);
index ca46339f30090d84d72667b4207a8272d7fa1e9f..795aa203837733f6968f26ae1f8f4ca7b399f695 100644 (file)
@@ -3713,7 +3713,6 @@ static void track_data_destroy(struct hist_trigger_data *hist_data,
        struct trace_event_file *file = hist_data->event_file;
 
        destroy_hist_field(data->track_data.track_var, 0);
-       destroy_hist_field(data->track_data.var_ref, 0);
 
        if (data->action == ACTION_SNAPSHOT) {
                struct track_data *track_data;
index 8fbfda94a67be8e6fe587e5425d1773e351b7431..403c9bd9041395a1d7919977acc7ed68d8c746ca 100644 (file)
@@ -42,9 +42,9 @@ int __read_mostly watchdog_user_enabled = 1;
 int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
 int __read_mostly soft_watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-int __read_mostly nmi_watchdog_available;
+static int __read_mostly nmi_watchdog_available;
 
-struct cpumask watchdog_allowed_mask __read_mostly;
+static struct cpumask watchdog_allowed_mask __read_mostly;
 
 struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
index 4026d1871407ecbc78dbeaff6c88b2b41aa56647..ddee541ea97aa63863ebcdbee26c437de6b63e42 100644 (file)
@@ -4266,7 +4266,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
        INIT_LIST_HEAD(&wq->list);
 
        if (alloc_and_link_pwqs(wq) < 0)
-               goto err_free_wq;
+               goto err_unreg_lockdep;
 
        if (wq_online && init_rescuer(wq) < 0)
                goto err_destroy;
@@ -4292,9 +4292,10 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
        return wq;
 
-err_free_wq:
+err_unreg_lockdep:
        wq_unregister_lockdep(wq);
        wq_free_lockdep(wq);
+err_free_wq:
        free_workqueue_attrs(wq->unbound_attrs);
        kfree(wq);
        return NULL;
index 0a105d4af16644bcbdf8a4f62f295d3ac534e31d..97f59abc3e92583917769f232bdd9f7d5e67520f 100644 (file)
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
 
-       if (!err)
-               err = rhashtable_rehash_table(ht);
+       if (!err || err == -EEXIST) {
+               int nerr;
+
+               nerr = rhashtable_rehash_table(ht);
+               err = err ?: nerr;
+       }
 
        mutex_unlock(&ht->mutex);
 
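The err = err ?: nerr; line uses the GNU conditional with an omitted middle operand: an earlier error (here a possible -EEXIST) is preserved, and the rehash result is only taken when err was still zero. A tiny stand-alone illustration:

#include <stdio.h>

int main(void)
{
        int err = 0, nerr = -17;        /* -EEXIST-style second error */

        err = err ?: nerr;
        printf("err=%d\n", err);        /* prints -17: no earlier error */

        err = -12;                      /* an earlier -ENOMEM-style error */
        err = err ?: nerr;
        printf("err=%d\n", err);        /* prints -12: first error wins */
        return 0;
}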
index 5b382c1244ede33c14016142ac2d7fec4d0608da..155fe38756ecfda251f26fa8616a325dddd8d455 100644 (file)
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu)
 {
+       /*
+        * Once the clear bit is set, the bit may be handed out again.
+        *
+        * Orders READ/WRITE on the associated instance (such as a blk_mq
+        * request) by this bit, to avoid racing with re-allocation; its
+        * pair is the memory barrier implied in __sbitmap_get_word.
+        *
+        * One invariant is that the clear bit has to be zero when the bit
+        * is in use.
+        */
+       smp_mb__before_atomic();
        sbitmap_deferred_clear_bit(&sbq->sb, nr);
 
        /*
index 49a16cee2aae97f013ef79401feb0b4de3c4a44e..420a98bf79b536d11f862026ff71747a67301cf4 100644 (file)
@@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = {
 
 static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
 
-void __init aarp_proto_init(void)
+int __init aarp_proto_init(void)
 {
+       int rc;
+
        aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
-       if (!aarp_dl)
+       if (!aarp_dl) {
                printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
+               return -ENOMEM;
+       }
        timer_setup(&aarp_timer, aarp_expire_timeout, 0);
        aarp_timer.expires  = jiffies + sysctl_aarp_expiry_time;
        add_timer(&aarp_timer);
-       register_netdevice_notifier(&aarp_notifier);
+       rc = register_netdevice_notifier(&aarp_notifier);
+       if (rc) {
+               del_timer_sync(&aarp_timer);
+               unregister_snap_client(aarp_dl);
+       }
+       return rc;
 }
 
 /* Remove the AARP entries associated with a device. */
index 795fbc6c06aa7a9e7078aafafad97024373afeb3..709d2542f7295ee71a5ddb201f81fed09ac669cb 100644 (file)
@@ -1904,9 +1904,6 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
 EXPORT_SYMBOL(atrtr_get_dev);
 EXPORT_SYMBOL(atalk_find_dev_addr);
 
-static const char atalk_err_snap[] __initconst =
-       KERN_CRIT "Unable to register DDP with SNAP.\n";
-
 /* Called by proto.c on kernel start up */
 static int __init atalk_init(void)
 {
@@ -1921,17 +1918,22 @@ static int __init atalk_init(void)
                goto out_proto;
 
        ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
-       if (!ddp_dl)
-               printk(atalk_err_snap);
+       if (!ddp_dl) {
+               pr_crit("Unable to register DDP with SNAP.\n");
+               goto out_sock;
+       }
 
        dev_add_pack(&ltalk_packet_type);
        dev_add_pack(&ppptalk_packet_type);
 
        rc = register_netdevice_notifier(&ddp_notifier);
        if (rc)
-               goto out_sock;
+               goto out_snap;
+
+       rc = aarp_proto_init();
+       if (rc)
+               goto out_dev;
 
-       aarp_proto_init();
        rc = atalk_proc_init();
        if (rc)
                goto out_aarp;
@@ -1945,11 +1947,13 @@ out_proc:
        atalk_proc_exit();
 out_aarp:
        aarp_cleanup_module();
+out_dev:
        unregister_netdevice_notifier(&ddp_notifier);
-out_sock:
+out_snap:
        dev_remove_pack(&ppptalk_packet_type);
        dev_remove_pack(&ltalk_packet_type);
        unregister_snap_client(ddp_dl);
+out_sock:
        sock_unregister(PF_APPLETALK);
 out_proto:
        proto_unregister(&ddp_proto);
index 9d34de68571be8969ee7a57d9f2eb680777b1cea..22afa566cbce9cd6d58abe3ead14ffd7199fd18d 100644 (file)
@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv,
        nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
 
        skb->protocol = htons(ETH_P_IP);
+       skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
 
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
index 564710f88f938cb314f2f59177289ecb127454f4..e88d6641647bab45397f5206737b367ea60cb9b0 100644 (file)
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
        nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
 
        skb->protocol = htons(ETH_P_IPV6);
+       skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);
index 9cab80207ced6346b5b5b97b56e98674f28def73..79eac465ec65e99cf22e475f3edb0609ea4787e5 100644 (file)
@@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
 }
 EXPORT_SYMBOL(__ceph_open_session);
 
-
 int ceph_open_session(struct ceph_client *client)
 {
        int ret;
@@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client)
 }
 EXPORT_SYMBOL(ceph_open_session);
 
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+                               unsigned long timeout)
+{
+       u64 newest_epoch;
+       int ret;
+
+       ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
+       if (ret)
+               return ret;
+
+       if (client->osdc.osdmap->epoch >= newest_epoch)
+               return 0;
+
+       ceph_osdc_maybe_request_map(&client->osdc);
+       return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
+}
+EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
 
 static int __init init_ceph_lib(void)
 {
index 7e71b0df1fbc9185b192a43427c7cb281b778ca1..3083988ce729dbe01771e9433b7de72e484394f9 100644 (file)
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct ceph_bio_iter *it = &cursor->bio_iter;
+       struct page *page = bio_iter_page(it->bio, it->iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
+       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
+                      page == bio_iter_page(it->bio, it->iter)))
                return false;   /* more bytes to process in this segment */
 
        if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
+       struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || cursor->bvec_iter.bi_bvec_done)
+       if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
+                      page == bvec_iter_page(bvecs, cursor->bvec_iter)))
                return false;   /* more bytes to process in this segment */
 
        BUG_ON(cursor->last_piece);
index 18deb3d889c4ae94a10417457fa0275a77568b48..a53e4fbb631918ccf94536849e5e25acad2dfdc6 100644 (file)
@@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
        mutex_unlock(&monc->mutex);
 
        ret = wait_generic_request(req);
+       if (!ret)
+               /*
+                * Make sure we have the osdmap that includes the blacklist
+                * entry.  This is needed to ensure that the OSDs pick up the
+                * new blacklist before processing any future requests from
+                * this client.
+                */
+               ret = ceph_wait_for_latest_osdmap(monc->client, 0);
+
 out:
        put_generic_request(req);
        return ret;
index 78e22cea4cc79589e22781849a843397904e220c..da0a29f30885d1f54b975ecfa4f83b47c68be543 100644 (file)
@@ -3897,6 +3897,11 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
                        continue;
                }
 
+               if (!devlink->ops->info_get) {
+                       idx++;
+                       continue;
+               }
+
                mutex_lock(&devlink->lock);
                err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
                                           NETLINK_CB(cb->skb).portid,
index f274620945ff06085beaf411d7d6f9912ec4ba66..647c63a7b25b6745e75a812b65a4052f3c72b690 100644 (file)
@@ -1796,8 +1796,6 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
 
 BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
 {
-       sk = sk_to_full_sk(sk);
-
        return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
 }
 
@@ -5266,7 +5264,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
        .func           = bpf_sk_release,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_SOCKET,
+       .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
 };
 
 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
@@ -5407,8 +5405,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 
 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
 {
-       sk = sk_to_full_sk(sk);
-
        if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
                return (unsigned long)sk;
 
@@ -5422,6 +5418,23 @@ static const struct bpf_func_proto bpf_tcp_sock_proto = {
        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
 };
 
+BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
+{
+       sk = sk_to_full_sk(sk);
+
+       if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
+               return (unsigned long)sk;
+
+       return (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_get_listener_sock_proto = {
+       .func           = bpf_get_listener_sock,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+       .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
+};
+
 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
 {
        unsigned int iphdr_len;
@@ -5607,6 +5620,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #ifdef CONFIG_INET
        case BPF_FUNC_tcp_sock:
                return &bpf_tcp_sock_proto;
+       case BPF_FUNC_get_listener_sock:
+               return &bpf_get_listener_sock_proto;
        case BPF_FUNC_skb_ecn_set_ce:
                return &bpf_skb_ecn_set_ce_proto;
 #endif
@@ -5702,6 +5717,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_sk_release_proto;
        case BPF_FUNC_tcp_sock:
                return &bpf_tcp_sock_proto;
+       case BPF_FUNC_get_listener_sock:
+               return &bpf_get_listener_sock_proto;
 #endif
        default:
                return bpf_base_func_proto(func_id);
index 4ff661f6f989ae10ca49a1e81c825be56683d026..f8f94303a1f57203eaa28b5ea459ac28c89e1b12 100644 (file)
@@ -928,6 +928,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
        if (error)
                return error;
 
+       dev_hold(queue->dev);
+
        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error) {
@@ -937,7 +939,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
        }
 
        kobject_uevent(kobj, KOBJ_ADD);
-       dev_hold(queue->dev);
 
        return error;
 }
@@ -1464,6 +1465,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
        if (error)
                return error;
 
+       dev_hold(queue->dev);
+
 #ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error) {
@@ -1473,7 +1476,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
 #endif
 
        kobject_uevent(kobj, KOBJ_ADD);
-       dev_hold(queue->dev);
 
        return 0;
 }
@@ -1745,16 +1747,20 @@ int netdev_register_kobject(struct net_device *ndev)
 
        error = device_add(dev);
        if (error)
-               return error;
+               goto error_put_device;
 
        error = register_queue_kobjects(ndev);
-       if (error) {
-               device_del(dev);
-               return error;
-       }
+       if (error)
+               goto error_device_del;
 
        pm_runtime_set_memalloc_noio(dev, true);
 
+       return 0;
+
+error_device_del:
+       device_del(dev);
+error_put_device:
+       put_device(dev);
        return error;
 }
 
index d5740bad5b1811cd42e44fb3b0da6edabbf18095..57d84e9b7b6fc820a4616e3ac326633f14e2fd95 100644 (file)
@@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
                newnp->ipv6_mc_list = NULL;
                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
-               newnp->mcast_oif   = inet6_iif(skb);
-               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
+               newnp->mcast_oif   = inet_iif(skb);
+               newnp->mcast_hops  = ip_hdr(skb)->ttl;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
index 1059894a6f4c3f009a92b30fb257e6b35f3a4a26..4cb83fb69844354d6d5d6ad18d552060e9e28b84 100644 (file)
@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
                          ((srh->segments_left + 1) * sizeof(struct in6_addr));
                psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
+               if (!psid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
                                ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
                                                     &srhinfo->psid_addr)))
@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
                          ((srh->segments_left - 1) * sizeof(struct in6_addr));
                nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
+               if (!nsid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
                                ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
                                                     &srhinfo->nsid_addr)))
@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
        if (srhinfo->mt_flags & IP6T_SRH_LSID) {
                lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
                lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
+               if (!lsid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
                                ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
                                                     &srhinfo->lsid_addr)))
index 4ef4bbdb49d4b203974bf95de47534921baed3f6..0302e0eb07af1d270a615bcadfcb9bc08ca61d6c 100644 (file)
@@ -1040,14 +1040,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
        struct rt6_info *nrt;
 
        if (!fib6_info_hold_safe(rt))
-               return NULL;
+               goto fallback;
 
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
-       if (nrt)
-               ip6_rt_copy_init(nrt, rt);
-       else
+       if (!nrt) {
                fib6_info_release(rt);
+               goto fallback;
+       }
 
+       ip6_rt_copy_init(nrt, rt);
+       return nrt;
+
+fallback:
+       nrt = dev_net(dev)->ipv6.ip6_null_entry;
+       dst_hold(&nrt->dst);
        return nrt;
 }
 
@@ -1096,10 +1102,6 @@ restart:
                dst_hold(&rt->dst);
        } else {
                rt = ip6_create_rt_rcu(f6i);
-               if (!rt) {
-                       rt = net->ipv6.ip6_null_entry;
-                       dst_hold(&rt->dst);
-               }
        }
 
        rcu_read_unlock();
index 57ef69a1088908fc624ecfca99a728fa296ae0bf..44d431849d391d6903d263ae547fc9bed1e67aa7 100644 (file)
@@ -1110,11 +1110,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
-               newnp->mcast_oif   = tcp_v6_iif(skb);
-               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
-               newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
+               newnp->mcast_oif   = inet_iif(skb);
+               newnp->mcast_hops  = ip_hdr(skb)->ttl;
+               newnp->rcv_flowinfo = 0;
                if (np->repflow)
-                       newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+                       newnp->flow_label = 0;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
index dda8930f20e790c77c808674f6e35b133bb5657a..f3a8557494d60e4d1ffe1f89fa32ea00c13eabab 100644 (file)
@@ -140,9 +140,15 @@ static int mpls_xmit(struct sk_buff *skb)
        if (rt)
                err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
                                 skb);
-       else if (rt6)
-               err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
-                                skb);
+       else if (rt6) {
+               if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
+                       /* 6PE (RFC 4798) */
+                       err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3],
+                                        skb);
+               } else
+                       err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+                                        skb);
+       }
        if (err)
                net_dbg_ratelimited("%s: packet transmission failed: %d\n",
                                    __func__, err);
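The 6PE case above arises when the IPv6 route's gateway is an IPv4-mapped address (::ffff:a.b.c.d, RFC 4798): the next hop is really an IPv4 neighbour, so it must be resolved via ARP on the embedded address, i.e. the low 32 bits that the patch passes as rt6i_gateway.s6_addr32[3]. A small user-space sketch of spotting and extracting that address:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct in6_addr gw;
        struct in_addr v4;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET6, "::ffff:192.0.2.1", &gw);

        if (IN6_IS_ADDR_V4MAPPED(&gw)) {
                /* last 4 bytes hold the IPv4 next hop */
                memcpy(&v4, &gw.s6_addr[12], sizeof(v4));
                inet_ntop(AF_INET, &v4, buf, sizeof(buf));
                printf("resolve via ARP for %s\n", buf);
        }
        return 0;
}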
index 5d782445d2fcf629367777f415e000eb326eab2a..bad17bba8ba786f589212a2575d346850bab6300 100644 (file)
@@ -251,6 +251,10 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
        }
 
        attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST);
+       if (!attr) {
+               rc = -EMSGSIZE;
+               goto err;
+       }
        rc = ncsi_write_package_info(skb, ndp, package->id);
        if (rc) {
                nla_nest_cancel(skb, attr);
index d43ffb09939bd3641b213b826a3e0229bcdbb550..6548271209a05c2fce99628c9b23d2cedbf8a087 100644 (file)
@@ -1007,6 +1007,7 @@ config NETFILTER_XT_TARGET_TEE
        depends on NETFILTER_ADVANCED
        depends on IPV6 || IPV6=n
        depends on !NF_CONNTRACK || NF_CONNTRACK
+       depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
        select NF_DUP_IPV4
        select NF_DUP_IPV6 if IP6_NF_IPTABLES
        ---help---
index f067c6b508572a9ab31bc4b3b6281e1482ffdc6d..39fcc1ed18f3501b3120fc9aeffbe44e27fda933 100644 (file)
@@ -20,9 +20,9 @@
 #include <linux/udp.h>
 #include <linux/tcp.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
 
-#include <net/route.h>
-#include <net/ip6_route.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_expect.h>
@@ -871,38 +871,33 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
        } else if (sip_external_media) {
                struct net_device *dev = skb_dst(skb)->dev;
                struct net *net = dev_net(dev);
-               struct rtable *rt;
-               struct flowi4 fl4 = {};
-#if IS_ENABLED(CONFIG_IPV6)
-               struct flowi6 fl6 = {};
-#endif
+               struct flowi fl;
                struct dst_entry *dst = NULL;
 
+               memset(&fl, 0, sizeof(fl));
+
                switch (nf_ct_l3num(ct)) {
                        case NFPROTO_IPV4:
-                               fl4.daddr = daddr->ip;
-                               rt = ip_route_output_key(net, &fl4);
-                               if (!IS_ERR(rt))
-                                       dst = &rt->dst;
+                               fl.u.ip4.daddr = daddr->ip;
+                               nf_ip_route(net, &dst, &fl, false);
                                break;
 
-#if IS_ENABLED(CONFIG_IPV6)
                        case NFPROTO_IPV6:
-                               fl6.daddr = daddr->in6;
-                               dst = ip6_route_output(net, NULL, &fl6);
-                               if (dst->error) {
-                                       dst_release(dst);
-                                       dst = NULL;
-                               }
+                               fl.u.ip6.daddr = daddr->in6;
+                               nf_ip6_route(net, &dst, &fl, false);
                                break;
-#endif
                }
 
                /* Don't predict any conntracks when media endpoint is reachable
                 * through the same interface as the signalling peer.
                 */
-               if (dst && dst->dev == dev)
-                       return NF_ACCEPT;
+               if (dst) {
+                       bool external_media = (dst->dev == dev);
+
+                       dst_release(dst);
+                       if (external_media)
+                               return NF_ACCEPT;
+               }
        }
 
        /* We need to check whether the registration exists before attempting
index 513f931186043f2ded3f1844374decd79768c44b..ef7772e976cc802afc64ea25d28f1fbecde773be 100644 (file)
@@ -2806,8 +2806,11 @@ err2:
        nf_tables_rule_release(&ctx, rule);
 err1:
        for (i = 0; i < n; i++) {
-               if (info[i].ops != NULL)
+               if (info[i].ops) {
                        module_put(info[i].ops->type->owner);
+                       if (info[i].ops->type->release_ops)
+                               info[i].ops->type->release_ops(info[i].ops);
+               }
        }
        kvfree(info);
        return err;
index 457a9ceb46af2061546da95f46d05c3578c1826a..8dfa798ea68330645c1dea590d05e98539ca2aa7 100644 (file)
@@ -65,21 +65,34 @@ nla_put_failure:
        return -1;
 }
 
-static void nft_objref_destroy(const struct nft_ctx *ctx,
-                              const struct nft_expr *expr)
+static void nft_objref_deactivate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr,
+                                 enum nft_trans_phase phase)
 {
        struct nft_object *obj = nft_objref_priv(expr);
 
+       if (phase == NFT_TRANS_COMMIT)
+               return;
+
        obj->use--;
 }
 
+static void nft_objref_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_object *obj = nft_objref_priv(expr);
+
+       obj->use++;
+}
+
 static struct nft_expr_type nft_objref_type;
 static const struct nft_expr_ops nft_objref_ops = {
        .type           = &nft_objref_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
        .eval           = nft_objref_eval,
        .init           = nft_objref_init,
-       .destroy        = nft_objref_destroy,
+       .activate       = nft_objref_activate,
+       .deactivate     = nft_objref_deactivate,
        .dump           = nft_objref_dump,
 };
 
index f8092926f704add7a7cc6843b89d9a509a5db046..a340cd8a751b483766e4ed7274ce0fb2c2b193e2 100644 (file)
@@ -233,5 +233,5 @@ module_exit(nft_redir_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET4, "redir");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
index fa61208371f8b222ceb43388773f5c19c691d764..321a0036fdf5b95cc8d356b63ad47fc76826498e 100644 (file)
@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
                else if (d > 0)
                        parent = parent->rb_right;
                else {
-                       if (!nft_set_elem_active(&rbe->ext, genmask)) {
-                               parent = parent->rb_left;
-                               continue;
-                       }
                        if (nft_rbtree_interval_end(rbe) &&
                            !nft_rbtree_interval_end(this)) {
                                parent = parent->rb_left;
@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
+                       } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+                               parent = parent->rb_left;
+                               continue;
                        }
                        nft_rbtree_flush(net, set, rbe);
                        return rbe;
index 25eeb6d2a75a69059f387be103345e844284f743..f0ec068e1d02fc0ebd39c2b1ecd935a301647ab1 100644 (file)
@@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
                               start, end + 1, GFP_KERNEL);
        if (family->id < 0) {
                err = family->id;
-               goto errout_locked;
+               goto errout_free;
        }
 
        err = genl_validate_assign_mc_groups(family);
@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
 
 errout_remove:
        idr_remove(&genl_fam_idr, family->id);
+errout_free:
        kfree(family->attrbuf);
 errout_locked:
        genl_unlock_all();
index ae296273ce3db96cdaeafba66a7ff460d8a59794..17dcd0b5eb3287989d5a72a19194bc5674f3cb1e 100644 (file)
@@ -726,6 +726,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        llcp_sock->service_name = kmemdup(addr->service_name,
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
+       if (!llcp_sock->service_name) {
+               ret = -ENOMEM;
+               goto sock_llcp_release;
+       }
 
        nfc_llcp_sock_link(&local->connecting_sockets, sk);
 
@@ -745,10 +749,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        return ret;
 
 sock_unlink:
-       nfc_llcp_put_ssap(local, llcp_sock->ssap);
-
        nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
 
+sock_llcp_release:
+       nfc_llcp_put_ssap(local, llcp_sock->ssap);
+
 put_dev:
        nfc_put_device(dev);
 
index 6679e96ab1dcdf8761845b863c39e1b6aac20d2e..9dd158ab51b310e28354237ff3bcd101a8b829f5 100644 (file)
@@ -448,6 +448,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
+       if (!upcall) {
+               err = -EINVAL;
+               goto out;
+       }
        upcall->dp_ifindex = dp_ifindex;
 
        err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
@@ -460,6 +464,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
+               if (!nla) {
+                       err = -EMSGSIZE;
+                       goto out;
+               }
                err = ovs_nla_put_tunnel_info(user_skb,
                                              upcall_info->egress_tun_info);
                BUG_ON(err);
@@ -468,6 +476,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->actions_len) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
+               if (!nla) {
+                       err = -EMSGSIZE;
+                       goto out;
+               }
                err = ovs_nla_put_actions(upcall_info->actions,
                                          upcall_info->actions_len,
                                          user_skb);
index 8376bc1c1508170aa333f8feaa8a154908e0eb3c..9419c5cf4de5e8443fd760c0f73612ce691483a9 100644 (file)
@@ -1852,7 +1852,8 @@ oom:
 
 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
 {
-       if (!skb->protocol && sock->type == SOCK_RAW) {
+       if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
+           sock->type == SOCK_RAW) {
                skb_reset_mac_header(skb);
                skb->protocol = dev_parse_header_protocol(skb);
        }
@@ -3243,7 +3244,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
        }
 
        mutex_lock(&net->packet.sklist_lock);
-       sk_add_node_rcu(sk, &net->packet.sklist);
+       sk_add_node_tail_rcu(sk, &net->packet.sklist);
        mutex_unlock(&net->packet.sklist_lock);
 
        preempt_disable();
@@ -4209,7 +4210,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
        struct pgv *pg_vec;
        int i;
 
-       pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
+       pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
        if (unlikely(!pg_vec))
                goto out;
 
index 7ca57741b2fbbbc8f5ccf139f5ffbe56b969c458..7849f286bb9331dbfce00e58cbe0c325a36894f5 100644 (file)
@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
        struct sk_buff *skb;
        unsigned char  *dptr;
        unsigned char  lci1, lci2;
-       char buffer[100];
-       int len, faclen = 0;
+       int maxfaclen = 0;
+       int len, faclen;
+       int reserve;
 
-       len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
+       reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
+       len = ROSE_MIN_LEN;
 
        switch (frametype) {
        case ROSE_CALL_REQUEST:
                len   += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
-               faclen = rose_create_facilities(buffer, rose);
-               len   += faclen;
+               maxfaclen = 256;
                break;
        case ROSE_CALL_ACCEPTED:
        case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
                break;
        }
 
-       if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+       skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
+       if (!skb)
                return;
 
        /*
         *      Space for AX.25 header and PID.
         */
-       skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
+       skb_reserve(skb, reserve);
 
-       dptr = skb_put(skb, skb_tailroom(skb));
+       dptr = skb_put(skb, len);
 
        lci1 = (rose->lci >> 8) & 0x0F;
        lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
                dptr   += ROSE_ADDR_LEN;
                memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
                dptr   += ROSE_ADDR_LEN;
-               memcpy(dptr, buffer, faclen);
+               faclen = rose_create_facilities(dptr, rose);
+               skb_put(skb, faclen);
                dptr   += faclen;
                break;
 
index 736aa92811004cfe5d157abd4827710783f8d57c..004c762c2e8d063cfda32c0f93325fb779f08737 100644 (file)
@@ -335,7 +335,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        struct kvec iov[2];
        rxrpc_serial_t serial;
        size_t len;
-       bool lost = false;
        int ret, opt;
 
        _enter(",{%d}", skb->len);
@@ -393,14 +392,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                static int lose;
                if ((lose++ & 7) == 7) {
                        ret = 0;
-                       lost = true;
+                       trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
+                                           whdr.flags, retrans, true);
+                       goto done;
                }
        }
 
-       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
-                           retrans, lost);
-       if (lost)
-               goto done;
+       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
+                           false);
 
        /* send the packet with the don't fragment bit set if we currently
         * think it's small enough */
index 1b9afdee5ba976ba64200d8f85050cf053b7d65c..5c02ad97ef239a5eb22df8b22be80010a77b0151 100644 (file)
@@ -358,8 +358,7 @@ config NET_SCH_PIE
        help
          Say Y here if you want to use the Proportional Integral controller
          Enhanced scheduler packet scheduling algorithm.
-         For more information, please see
-         http://tools.ietf.org/html/draft-pan-tsvwg-pie-00
+         For more information, please see https://tools.ietf.org/html/rfc8033
 
          To compile this driver as a module, choose M here: the module
          will be called sch_pie.
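
For orientation only: RFC 8033, referenced above, drives its drop probability from how far the measured queueing delay sits from a target and from its recent trend. A rough userspace sketch of that control law follows; the state layout, parameter values and use of floating point are assumptions, not how sch_pie.c implements it:

/* Illustrative sketch of the PIE control law from RFC 8033; names, parameter
 * values and the use of floating point are assumptions, not sch_pie.c.
 */
#include <stdio.h>

struct pie_demo {
	double prob;		/* current drop probability, 0.0 .. 1.0 */
	double qdelay_old;	/* queueing delay at the previous update */
};

static void pie_update(struct pie_demo *st, double qdelay,
		       double alpha, double beta, double target)
{
	/* p += alpha * (qdelay - target) + beta * (qdelay - qdelay_old) */
	st->prob += alpha * (qdelay - target) + beta * (qdelay - st->qdelay_old);
	if (st->prob < 0.0)
		st->prob = 0.0;
	if (st->prob > 1.0)
		st->prob = 1.0;
	st->qdelay_old = qdelay;
}

int main(void)
{
	struct pie_demo st = { 0.0, 0.0 };

	pie_update(&st, 0.030, 0.125, 1.25, 0.015);	/* 30 ms delay vs 15 ms target */
	printf("drop probability now %.4f\n", st.prob);
	return 0;
}
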
index aecf1bf233c8362673812b5ab212f32e5f868a5b..5a87e271d35a2416b3589888bcfadee0c31b2142 100644 (file)
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
-{
-       u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
-
-       if (!tp)
-               return -EINVAL;
-       a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
-       if (!a->goto_chain)
-               return -ENOMEM;
-       return 0;
-}
-
-static void tcf_action_goto_chain_fini(struct tc_action *a)
-{
-       tcf_chain_put_by_act(a->goto_chain);
-}
-
 static void tcf_action_goto_chain_exec(const struct tc_action *a,
                                       struct tcf_result *res)
 {
-       const struct tcf_chain *chain = a->goto_chain;
+       const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
 
        res->goto_tp = rcu_dereference_bh(chain->filter_chain);
 }
@@ -71,6 +54,51 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
                call_rcu(&old->rcu, tcf_free_cookie_rcu);
 }
 
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+                            struct tcf_chain **newchain,
+                            struct netlink_ext_ack *extack)
+{
+       int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
+       u32 chain_index;
+
+       if (!opcode)
+               ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
+       else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
+               ret = 0;
+       if (ret) {
+               NL_SET_ERR_MSG(extack, "invalid control action");
+               goto end;
+       }
+
+       if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
+               chain_index = action & TC_ACT_EXT_VAL_MASK;
+               if (!tp || !newchain) {
+                       ret = -EINVAL;
+                       NL_SET_ERR_MSG(extack,
+                                      "can't goto NULL proto/chain");
+                       goto end;
+               }
+               *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
+               if (!*newchain) {
+                       ret = -ENOMEM;
+                       NL_SET_ERR_MSG(extack,
+                                      "can't allocate goto_chain");
+               }
+       }
+end:
+       return ret;
+}
+EXPORT_SYMBOL(tcf_action_check_ctrlact);
+
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+                                        struct tcf_chain *goto_chain)
+{
+       a->tcfa_action = action;
+       rcu_swap_protected(a->goto_chain, goto_chain, 1);
+       return goto_chain;
+}
+EXPORT_SYMBOL(tcf_action_set_ctrlact);
+
 /* XXX: For standalone actions, we don't need a RCU grace period either, because
  * actions are always connected to filters and filters are already destroyed in
  * RCU callbacks, so after a RCU grace period actions are already disconnected
@@ -78,13 +106,15 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
  */
 static void free_tcf(struct tc_action *p)
 {
+       struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
+
        free_percpu(p->cpu_bstats);
        free_percpu(p->cpu_bstats_hw);
        free_percpu(p->cpu_qstats);
 
        tcf_set_action_cookie(&p->act_cookie, NULL);
-       if (p->goto_chain)
-               tcf_action_goto_chain_fini(p);
+       if (chain)
+               tcf_chain_put_by_act(chain);
 
        kfree(p);
 }
@@ -654,6 +684,10 @@ repeat:
                                        return TC_ACT_OK;
                        }
                } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
+                       if (unlikely(!rcu_access_pointer(a->goto_chain))) {
+                               net_warn_ratelimited("can't go to NULL chain!\n");
+                               return TC_ACT_SHOT;
+                       }
                        tcf_action_goto_chain_exec(a, res);
                }
 
@@ -800,15 +834,6 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
        return c;
 }
 
-static bool tcf_action_valid(int action)
-{
-       int opcode = TC_ACT_EXT_OPCODE(action);
-
-       if (!opcode)
-               return action <= TC_ACT_VALUE_MAX;
-       return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
-}
-
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
@@ -890,10 +915,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        /* backward compatibility for policer */
        if (name == NULL)
                err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
-                               rtnl_held, extack);
+                               rtnl_held, tp, extack);
        else
                err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
-                               extack);
+                               tp, extack);
        if (err < 0)
                goto err_mod;
 
@@ -907,18 +932,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);
 
-       if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
-               err = tcf_action_goto_chain_init(a, tp);
-               if (err) {
-                       tcf_action_destroy_1(a, bind);
-                       NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
-                       return ERR_PTR(err);
-               }
-       }
-
-       if (!tcf_action_valid(a->tcfa_action)) {
+       if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
+           !rcu_access_pointer(a->goto_chain)) {
                tcf_action_destroy_1(a, bind);
-               NL_SET_ERR_MSG(extack, "Invalid control action value");
+               NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
                return ERR_PTR(-EINVAL);
        }
 
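The two exported helpers above, tcf_action_check_ctrlact() and tcf_action_set_ctrlact(), give the per-action init paths a common shape: validate the control action (taking a reference on the goto chain when needed), swap the new action and chain in under the action lock, then drop the reference on whichever chain is no longer in use. A condensed sketch of that calling pattern, with a made-up helper name standing in for the real ->init() callbacks patched below:

/* Illustrative only: tcf_foo_replace_ctrlact() is a made-up helper; the calls
 * mirror the pattern applied inside each ->init() in the act_*.c hunks below.
 */
static int tcf_foo_replace_ctrlact(struct tc_action *a, int action,
				   struct tcf_proto *tp, spinlock_t *lock,
				   struct netlink_ext_ack *extack)
{
	struct tcf_chain *goto_ch = NULL;
	int err;

	/* Validate the control action; for TC_ACT_GOTO_CHAIN this also takes
	 * a reference on the destination chain.
	 */
	err = tcf_action_check_ctrlact(action, tp, &goto_ch, extack);
	if (err < 0)
		return err;

	spin_lock_bh(lock);
	/* Publish the new action and chain; the previous chain, if any, is
	 * handed back so it can be released outside the lock.
	 */
	goto_ch = tcf_action_set_ctrlact(a, action, goto_ch);
	spin_unlock_bh(lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	return 0;
}
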
index aa5c38d11a3079644d36329c8b4637e0bdfaa5c6..3841156aa09f778c285765b51342cd5d218a34ac 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_bpf.h>
 #include <net/tc_act/tc_bpf.h>
@@ -278,10 +279,11 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act,
                        int replace, int bind, bool rtnl_held,
-                       struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_bpf_cfg cfg, old;
        struct tc_act_bpf *parm;
        struct tcf_bpf *prog;
@@ -323,12 +325,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                return ret;
        }
 
+       ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (ret < 0)
+               goto release_idr;
+
        is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
        is_ebpf = tb[TCA_ACT_BPF_FD];
 
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
                ret = -EINVAL;
-               goto out;
+               goto put_chain;
        }
 
        memset(&cfg, 0, sizeof(cfg));
@@ -336,7 +342,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
                       tcf_bpf_init_from_efd(tb, &cfg);
        if (ret < 0)
-               goto out;
+               goto put_chain;
 
        prog = to_bpf(*act);
 
@@ -350,10 +356,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        if (cfg.bpf_num_ops)
                prog->bpf_num_ops = cfg.bpf_num_ops;
 
-       prog->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
        rcu_assign_pointer(prog->filter, cfg.filter);
        spin_unlock_bh(&prog->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
        if (res == ACT_P_CREATED) {
                tcf_idr_insert(tn, *act);
        } else {
@@ -363,9 +372,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        }
 
        return res;
-out:
-       tcf_idr_release(*act, bind);
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
+release_idr:
+       tcf_idr_release(*act, bind);
        return ret;
 }
 
index 5d24993cccfebead613c7dd15bb41a1087c8024e..32ae0cd6e31c67e36793081ac11371c2250eb0f1 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 #include <uapi/linux/tc_act/tc_connmark.h>
 #include <net/tc_act/tc_connmark.h>
 
@@ -97,13 +98,15 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
 static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                             struct nlattr *est, struct tc_action **a,
                             int ovr, int bind, bool rtnl_held,
+                            struct tcf_proto *tp,
                             struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, connmark_net_id);
        struct nlattr *tb[TCA_CONNMARK_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_connmark_info *ci;
        struct tc_connmark *parm;
-       int ret = 0;
+       int ret = 0, err;
 
        if (!nla)
                return -EINVAL;
@@ -128,7 +131,11 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                }
 
                ci = to_connmark(*a);
-               ci->tcf_action = parm->action;
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
+               tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ci->net = net;
                ci->zone = parm->zone;
 
@@ -142,15 +149,24 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
                /* replacing action and zone */
                spin_lock_bh(&ci->tcf_lock);
-               ci->tcf_action = parm->action;
+               goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ci->zone = parm->zone;
                spin_unlock_bh(&ci->tcf_lock);
+               if (goto_ch)
+                       tcf_chain_put_by_act(goto_ch);
                ret = 0;
        }
 
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
index c79aca29505e33a44b7402b3ccc1ccaf8827b280..0c77e7bdf6d5d282965eb3970f3b8934e570e0d4 100644 (file)
@@ -33,6 +33,7 @@
 #include <net/sctp/checksum.h>
 
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_csum.h>
 #include <net/tc_act/tc_csum.h>
@@ -46,12 +47,13 @@ static struct tc_action_ops act_csum_ops;
 
 static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
-                        int bind, bool rtnl_held,
+                        int bind, bool rtnl_held, struct tcf_proto *tp,
                         struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct tcf_csum_params *params_new;
        struct nlattr *tb[TCA_CSUM_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_csum *parm;
        struct tcf_csum *p;
        int ret = 0, err;
@@ -87,21 +89,27 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
                return err;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        p = to_tcf_csum(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
        params_new->update_flags = parm->update_flags;
 
        spin_lock_bh(&p->tcf_lock);
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(p->params, params_new,
                           lockdep_is_held(&p->tcf_lock));
        spin_unlock_bh(&p->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (params_new)
                kfree_rcu(params_new, rcu);
 
@@ -109,6 +117,12 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
                tcf_idr_insert(tn, *a);
 
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 /**
index 93da0004e9f415e9eb8439eeacdc0ae73f5783fd..e540e31069d746106eb82c5ea3c2f99f9438cbf4 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <linux/tc_act/tc_gact.h>
 #include <net/tc_act/tc_gact.h>
 
@@ -57,10 +58,11 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
 static int tcf_gact_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, gact_net_id);
        struct nlattr *tb[TCA_GACT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_gact *parm;
        struct tcf_gact *gact;
        int ret = 0;
@@ -116,10 +118,13 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
                return err;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
        gact = to_gact(*a);
 
        spin_lock_bh(&gact->tcf_lock);
-       gact->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 #ifdef CONFIG_GACT_PROB
        if (p_parm) {
                gact->tcfg_paction = p_parm->paction;
@@ -133,9 +138,15 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 #endif
        spin_unlock_bh(&gact->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
index 9b1f2b3990eedeeca44ba6ad5c7e334043727dd1..31c6ffb6abe7c607972de7b80fe77f03f8782723 100644 (file)
@@ -29,6 +29,7 @@
 #include <net/net_namespace.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
 #include <linux/etherdevice.h>
@@ -469,11 +470,12 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind, bool rtnl_held,
-                       struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
        struct nlattr *tb2[IFE_META_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_ife_params *p;
        struct tcf_ife_info *ife;
        u16 ife_type = ETH_P_IFE;
@@ -531,6 +533,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        }
 
        ife = to_ife(*a);
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        p->flags = parm->flags;
 
        if (parm->flags & IFE_ENCODE) {
@@ -563,13 +569,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
                                       NULL, NULL);
-               if (err) {
-metadata_parse_err:
-                       tcf_idr_release(*a, bind);
-                       kfree(p);
-                       return err;
-               }
-
+               if (err)
+                       goto metadata_parse_err;
                err = populate_metalist(ife, tb2, exists, rtnl_held);
                if (err)
                        goto metadata_parse_err;
@@ -581,21 +582,20 @@ metadata_parse_err:
                 * going to bail out
                 */
                err = use_all_metadata(ife, exists);
-               if (err) {
-                       tcf_idr_release(*a, bind);
-                       kfree(p);
-                       return err;
-               }
+               if (err)
+                       goto metadata_parse_err;
        }
 
        if (exists)
                spin_lock_bh(&ife->tcf_lock);
-       ife->tcf_action = parm->action;
        /* protected by tcf_lock when modifying existing action */
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(ife->params, p, 1);
 
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);
 
@@ -603,6 +603,13 @@ metadata_parse_err:
                tcf_idr_insert(tn, *a);
 
        return ret;
+metadata_parse_err:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       kfree(p);
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
index 98f5b6ea77b46ea7a55e1c325fd60542020b2165..04a0b5c611943a4e10bfa0855f1c3928e4e141de 100644 (file)
@@ -97,7 +97,8 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
 
 static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
-                         const struct tc_action_ops *ops, int ovr, int bind)
+                         const struct tc_action_ops *ops, int ovr, int bind,
+                         struct tcf_proto *tp)
 {
        struct tc_action_net *tn = net_generic(net, id);
        struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -205,20 +206,20 @@ err1:
 
 static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a, int ovr,
-                       int bind, bool rtnl_held,
+                       int bind, bool rtnl_held, struct tcf_proto *tp,
                        struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
-                             bind);
+                             bind, tp);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a, int ovr,
-                      int bind, bool unlocked,
+                      int bind, bool unlocked, struct tcf_proto *tp,
                       struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
-                             bind);
+                             bind, tp);
 }
 
 static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
index 6692fd0546177347a70123a959156d1120eace90..17cc6bd4c57c3a6f12786c3d1109c6e48af185dd 100644 (file)
@@ -94,10 +94,12 @@ static struct tc_action_ops act_mirred_ops;
 static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        bool mac_header_xmit = false;
        struct tc_mirred *parm;
        struct tcf_mirred *m;
@@ -157,18 +159,23 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+
        m = to_mirred(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&m->tcfm_list);
+
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        spin_lock_bh(&m->tcf_lock);
-       m->tcf_action = parm->action;
-       m->tcfm_eaction = parm->eaction;
 
        if (parm->ifindex) {
                dev = dev_get_by_index(net, parm->ifindex);
                if (!dev) {
                        spin_unlock_bh(&m->tcf_lock);
-                       tcf_idr_release(*a, bind);
-                       return -ENODEV;
+                       err = -ENODEV;
+                       goto put_chain;
                }
                mac_header_xmit = dev_is_mac_header_xmit(dev);
                rcu_swap_protected(m->tcfm_dev, dev,
@@ -177,7 +184,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        dev_put(dev);
                m->tcfm_mac_header_xmit = mac_header_xmit;
        }
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+       m->tcfm_eaction = parm->eaction;
        spin_unlock_bh(&m->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED) {
                spin_lock(&mirred_list_lock);
@@ -188,6 +199,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
 
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
index 543eab9193f17ca94756bff060fee937078a1e21..e91bb8eb81ec5e2e7dfe86d2832cdb6530f8e327 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/tc_act/tc_nat.h>
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 #include <net/icmp.h>
 #include <net/ip.h>
 #include <net/netlink.h>
@@ -38,10 +39,12 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
 
 static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        struct tc_action **a, int ovr, int bind,
-                       bool rtnl_held, struct netlink_ext_ack *extack)
+                       bool rtnl_held, struct tcf_proto *tp,
+                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, nat_net_id);
        struct nlattr *tb[TCA_NAT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_nat *parm;
        int ret = 0, err;
        struct tcf_nat *p;
@@ -76,6 +79,9 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        } else {
                return err;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
        p = to_tcf_nat(*a);
 
        spin_lock_bh(&p->tcf_lock);
@@ -84,13 +90,18 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        p->mask = parm->mask;
        p->flags = parm->flags;
 
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        spin_unlock_bh(&p->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
index a80373878df769d180a6c34a9c6fdda4e846efd8..287793abfaf9bae9aba9c4f23552890c795010c1 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_pedit.h>
 #include <uapi/linux/tc_act/tc_pedit.h>
+#include <net/pkt_cls.h>
 
 static unsigned int pedit_net_id;
 static struct tc_action_ops act_pedit_ops;
@@ -138,10 +139,11 @@ nla_failure:
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          int ovr, int bind, bool rtnl_held,
-                         struct netlink_ext_ack *extack)
+                         struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
        struct nlattr *tb[TCA_PEDIT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_pedit_key *keys = NULL;
        struct tcf_pedit_key_ex *keys_ex;
        struct tc_pedit *parm;
@@ -205,6 +207,11 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                goto out_free;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0) {
+               ret = err;
+               goto out_release;
+       }
        p = to_pedit(*a);
        spin_lock_bh(&p->tcf_lock);
 
@@ -214,7 +221,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                if (!keys) {
                        spin_unlock_bh(&p->tcf_lock);
                        ret = -ENOMEM;
-                       goto out_release;
+                       goto put_chain;
                }
                kfree(p->tcfp_keys);
                p->tcfp_keys = keys;
@@ -223,16 +230,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        memcpy(p->tcfp_keys, parm->keys, ksize);
 
        p->tcfp_flags = parm->flags;
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 
        kfree(p->tcfp_keys_ex);
        p->tcfp_keys_ex = keys_ex;
 
        spin_unlock_bh(&p->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 out_release:
        tcf_idr_release(*a, bind);
 out_free:
index 8271a6263824bf53aaa92f97a35916d6d31aa244..2b8581f6ab510100e66fc3e2445ed6460ff65323 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <net/act_api.h>
 #include <net/netlink.h>
+#include <net/pkt_cls.h>
 
 struct tcf_police_params {
        int                     tcfp_result;
@@ -83,10 +84,12 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
 static int tcf_police_init(struct net *net, struct nlattr *nla,
                               struct nlattr *est, struct tc_action **a,
                               int ovr, int bind, bool rtnl_held,
+                              struct tcf_proto *tp,
                               struct netlink_ext_ack *extack)
 {
        int ret = 0, tcfp_result = TC_ACT_OK, err, size;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_police *parm;
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
@@ -128,6 +131,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        police = to_police(*a);
        if (parm->rate.rate) {
@@ -213,12 +219,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        if (new->peak_present)
                police->tcfp_ptoks = new->tcfp_mtu_ptoks;
        spin_unlock_bh(&police->tcfp_lock);
-       police->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(police->params,
                           new,
                           lockdep_is_held(&police->tcf_lock));
        spin_unlock_bh(&police->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (new)
                kfree_rcu(new, rcu);
 
@@ -229,6 +237,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
        tcf_idr_release(*a, bind);
        return err;
 }
index 203e399e5c85a293b29d52a84f31bff929827cf4..4060b0955c97db68872a88d6bd05d5143fdc2e7c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/tc_act/tc_sample.h>
 #include <net/tc_act/tc_sample.h>
 #include <net/psample.h>
+#include <net/pkt_cls.h>
 
 #include <linux/if_arp.h>
 
@@ -37,12 +38,13 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
 
 static int tcf_sample_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a, int ovr,
-                          int bind, bool rtnl_held,
+                          int bind, bool rtnl_held, struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, sample_net_id);
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_sample *parm;
        u32 psample_group_num;
        struct tcf_sample *s;
@@ -79,18 +81,21 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, psample_group_num);
        if (!psample_group) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        s = to_sample(*a);
 
        spin_lock_bh(&s->tcf_lock);
-       s->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
        s->psample_group_num = psample_group_num;
        RCU_INIT_POINTER(s->psample_group, psample_group);
@@ -100,10 +105,18 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
        }
        spin_unlock_bh(&s->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_sample_cleanup(struct tc_action *a)
index d54cb608dbafae7ea9a333bd6a117824f2a1caac..23c8ca5615e59b85d22bacda25a22cc02e54d9f6 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/rtnetlink.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_defact.h>
 #include <net/tc_act/tc_defact.h>
@@ -60,14 +61,26 @@ static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
        return 0;
 }
 
-static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
-                        struct tc_defact *p)
+static int reset_policy(struct tc_action *a, const struct nlattr *defdata,
+                       struct tc_defact *p, struct tcf_proto *tp,
+                       struct netlink_ext_ack *extack)
 {
+       struct tcf_chain *goto_ch = NULL;
+       struct tcf_defact *d;
+       int err;
+
+       err = tcf_action_check_ctrlact(p->action, tp, &goto_ch, extack);
+       if (err < 0)
+               return err;
+       d = to_defact(a);
        spin_lock_bh(&d->tcf_lock);
-       d->tcf_action = p->action;
+       goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
        memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
        nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
        spin_unlock_bh(&d->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+       return 0;
 }
 
 static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
@@ -78,10 +91,11 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
 static int tcf_simp_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, simp_net_id);
        struct nlattr *tb[TCA_DEF_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_defact *parm;
        struct tcf_defact *d;
        bool exists = false;
@@ -122,27 +136,37 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                }
 
                d = to_defact(*a);
-               ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
-               if (ret < 0) {
-                       tcf_idr_release(*a, bind);
-                       return ret;
-               }
-               d->tcf_action = parm->action;
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
+
+               err = alloc_defdata(d, tb[TCA_DEF_DATA]);
+               if (err < 0)
+                       goto put_chain;
+
+               tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ret = ACT_P_CREATED;
        } else {
-               d = to_defact(*a);
-
                if (!ovr) {
-                       tcf_idr_release(*a, bind);
-                       return -EEXIST;
+                       err = -EEXIST;
+                       goto release_idr;
                }
 
-               reset_policy(d, tb[TCA_DEF_DATA], parm);
+               err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack);
+               if (err)
+                       goto release_idr;
        }
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
index 65879500b688bca58c451b017928f22994fa9d82..7e1d261a31d2e73460f5d8b2d1d7869f18a1ea24 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/dsfield.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_skbedit.h>
 #include <net/tc_act/tc_skbedit.h>
@@ -96,11 +97,13 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
 static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
                            int ovr, int bind, bool rtnl_held,
+                           struct tcf_proto *tp,
                            struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
        struct tcf_skbedit_params *params_new;
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
@@ -186,11 +189,14 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                        return -EEXIST;
                }
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        params_new->flags = flags;
@@ -208,16 +214,24 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                params_new->mask = *mask;
 
        spin_lock_bh(&d->tcf_lock);
-       d->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(d->params, params_new,
                           lockdep_is_held(&d->tcf_lock));
        spin_unlock_bh(&d->tcf_lock);
        if (params_new)
                kfree_rcu(params_new, rcu);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
index 7bac1d78e7a39994ccb9bea33fd3aa0fd7030a7e..1d4c324d0a42bd2cd707a987a5595f3119f66155 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/rtnetlink.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_skbmod.h>
 #include <net/tc_act/tc_skbmod.h>
@@ -82,11 +83,13 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
 static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
        struct nlattr *tb[TCA_SKBMOD_MAX + 1];
        struct tcf_skbmod_params *p, *p_old;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_skbmod *parm;
        struct tcf_skbmod *d;
        bool exists = false;
@@ -153,21 +156,24 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        d = to_skbmod(*a);
 
        p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
        if (unlikely(!p)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        p->flags = lflags;
-       d->tcf_action = parm->action;
 
        if (ovr)
                spin_lock_bh(&d->tcf_lock);
        /* Protected by tcf_lock if overwriting existing action. */
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        p_old = rcu_dereference_protected(d->skbmod_p, 1);
 
        if (lflags & SKBMOD_F_DMAC)
@@ -183,10 +189,18 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
        if (p_old)
                kfree_rcu(p_old, rcu);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_skbmod_cleanup(struct tc_action *a)
index 7c6591b991d510318f0eba8ec8036f62d8019a1d..d5aaf90a39712982685cbed5f60d16576324f17b 100644 (file)
@@ -17,6 +17,7 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_tunnel_key.h>
@@ -210,12 +211,14 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
        struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
        struct tcf_tunnel_key_params *params_new;
        struct metadata_dst *metadata = NULL;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_tunnel_key *parm;
        struct tcf_tunnel_key *t;
        bool exists = false;
@@ -359,6 +362,12 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                goto release_tun_meta;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0) {
+               ret = err;
+               exists = true;
+               goto release_tun_meta;
+       }
        t = to_tunnel_key(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
@@ -366,23 +375,29 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
                ret = -ENOMEM;
                exists = true;
-               goto release_tun_meta;
+               goto put_chain;
        }
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
 
        spin_lock_bh(&t->tcf_lock);
-       t->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(t->params, params_new,
                           lockdep_is_held(&t->tcf_lock));
        spin_unlock_bh(&t->tcf_lock);
        tunnel_key_release_params(params_new);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
        return ret;
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
 release_tun_meta:
        if (metadata)
                dst_release(&metadata->dst);
index ac0061599225b6871d846dce30dd83ae37abff25..0f40d0a74423b8d91bf8bb8838eb382846710851 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_vlan.h>
@@ -105,10 +106,11 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
 static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
        struct nlattr *tb[TCA_VLAN_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_vlan_params *p;
        struct tc_vlan *parm;
        struct tcf_vlan *v;
@@ -200,12 +202,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                return -EEXIST;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        v = to_vlan(*a);
 
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        p->tcfv_action = action;
@@ -214,16 +220,24 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        p->tcfv_push_proto = push_proto;
 
        spin_lock_bh(&v->tcf_lock);
-       v->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
        spin_unlock_bh(&v->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_vlan_cleanup(struct tc_action *a)
index dc10525e90e7073563f9a3b220fc092323946a19..99ae30c177c76783dae71bf7955f4d4d0bb3b639 100644 (file)
@@ -367,7 +367,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
        struct tcf_block *block = chain->block;
 
        mutex_destroy(&chain->filter_chain_lock);
-       kfree(chain);
+       kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
 }
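
kfree_rcu() as used above only works if the object embeds a struct rcu_head on which the deferred free can be queued. A minimal illustration with made-up types (not the real struct tcf_chain layout):

/* Illustrative only; demo_chain is not the real struct tcf_chain layout. */
struct demo_chain {
	struct list_head list;
	struct rcu_head rcu;	/* required so kfree_rcu() can queue the free */
};

static void demo_chain_free(struct demo_chain *chain)
{
	/* Defer the kfree() until after an RCU grace period, so readers that
	 * looked the object up under rcu_read_lock() are not left with a
	 * dangling pointer.
	 */
	kfree_rcu(chain, rcu);
}
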
index 1d2a12132abcccdea4a237a9b06bb9296ff28f0b..acc9b9da985f81ffd9b485e082cf1781e6731ba2 100644 (file)
@@ -211,6 +211,9 @@ struct cake_sched_data {
        u8              ack_filter;
        u8              atm_mode;
 
+       u32             fwmark_mask;
+       u16             fwmark_shft;
+
        /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
        u16             rate_shft;
        ktime_t         time_next_packet;
@@ -258,8 +261,7 @@ enum {
        CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
        CAKE_FLAG_INGRESS          = BIT(2),
        CAKE_FLAG_WASH             = BIT(3),
-       CAKE_FLAG_SPLIT_GSO        = BIT(4),
-       CAKE_FLAG_FWMARK           = BIT(5)
+       CAKE_FLAG_SPLIT_GSO        = BIT(4)
 };
 
 /* COBALT operates the Codel and BLUE algorithms in parallel, in order to
@@ -1543,7 +1545,7 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
                                             struct sk_buff *skb)
 {
        struct cake_sched_data *q = qdisc_priv(sch);
-       u32 tin;
+       u32 tin, mark;
        u8 dscp;
 
        /* Tin selection: Default to diffserv-based selection, allow overriding
@@ -1551,14 +1553,13 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
         */
        dscp = cake_handle_diffserv(skb,
                                    q->rate_flags & CAKE_FLAG_WASH);
+       mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
 
        if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
                tin = 0;
 
-       else if (q->rate_flags & CAKE_FLAG_FWMARK && /* use fw mark */
-                skb->mark &&
-                skb->mark <= q->tin_cnt)
-               tin = q->tin_order[skb->mark - 1];
+       else if (mark && mark <= q->tin_cnt)
+               tin = q->tin_order[mark - 1];
 
        else if (TC_H_MAJ(skb->priority) == sch->handle &&
                 TC_H_MIN(skb->priority) > 0 &&
@@ -2172,6 +2173,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
        [TCA_CAKE_MPU]           = { .type = NLA_U32 },
        [TCA_CAKE_INGRESS]       = { .type = NLA_U32 },
        [TCA_CAKE_ACK_FILTER]    = { .type = NLA_U32 },
+       [TCA_CAKE_FWMARK]        = { .type = NLA_U32 },
 };
 
 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
@@ -2619,10 +2621,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
        }
 
        if (tb[TCA_CAKE_FWMARK]) {
-               if (!!nla_get_u32(tb[TCA_CAKE_FWMARK]))
-                       q->rate_flags |= CAKE_FLAG_FWMARK;
-               else
-                       q->rate_flags &= ~CAKE_FLAG_FWMARK;
+               q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
+               q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
        }
 
        if (q->tins) {
@@ -2784,8 +2784,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
                        !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
                goto nla_put_failure;
 
-       if (nla_put_u32(skb, TCA_CAKE_FWMARK,
-                       !!(q->rate_flags & CAKE_FLAG_FWMARK)))
+       if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
                goto nla_put_failure;
 
        return nla_nest_end(skb, opts);
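
With the boolean CAKE_FLAG_FWMARK replaced by a mask, tin selection extracts a field from skb->mark: AND with fwmark_mask, shift right by the position of the mask's lowest set bit, and treat a non-zero result as a 1-based tin index. A small standalone example with illustrative numbers:

/* Worked example of the fwmark mask/shift logic above; the numbers are
 * illustrative, not defaults.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fwmark_mask = 0x0000ff00;	/* as set via TCA_CAKE_FWMARK */
	uint32_t fwmark_shft = __builtin_ctz(fwmark_mask);	/* __ffs(): 8 */
	uint32_t skb_mark = 0x00000300;		/* firewall mark on the packet */

	uint32_t mark = (skb_mark & fwmark_mask) >> fwmark_shft;

	/* mark == 3: non-zero and within tin_cnt, so the packet is steered to
	 * tin_order[mark - 1]; a mark field of 0 means "no override".
	 */
	printf("extracted tin field = %u\n", mark);
	return 0;
}
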
index 6140471efd4b8cf851d238a8c80c22858346740e..9874e60c9b0d00924042c1b377bc0c777edfc4cb 100644 (file)
@@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       kaddrs = vmemdup_user(addrs, addrs_size);
+       kaddrs = memdup_user(addrs, addrs_size);
        if (unlikely(IS_ERR(kaddrs)))
                return PTR_ERR(kaddrs);
 
@@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
                if (walk_size + sizeof(sa_family_t) > addrs_size) {
-                       kvfree(kaddrs);
+                       kfree(kaddrs);
                        return -EINVAL;
                }
 
@@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
                 * causes the address buffer to overflow return EINVAL.
                 */
                if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
-                       kvfree(kaddrs);
+                       kfree(kaddrs);
                        return -EINVAL;
                }
                addrcnt++;
@@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        }
 
 out:
-       kvfree(kaddrs);
+       kfree(kaddrs);
 
        return err;
 }
@@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       kaddrs = vmemdup_user(addrs, addrs_size);
+       kaddrs = memdup_user(addrs, addrs_size);
        if (unlikely(IS_ERR(kaddrs)))
                return PTR_ERR(kaddrs);
 
@@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
 
 out_free:
-       kvfree(kaddrs);
+       kfree(kaddrs);
 
        return err;
 }
@@ -2920,6 +2920,9 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.sack_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
            params.sack_assoc_id == SCTP_ALL_ASSOC) {
                if (params.sack_delay) {
@@ -3024,6 +3027,9 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
            info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
                sp->default_stream = info.sinfo_stream;
@@ -3081,6 +3087,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               info.snd_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
            info.snd_assoc_id == SCTP_ALL_ASSOC) {
                sp->default_stream = info.snd_sid;
@@ -3531,6 +3540,9 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->default_rcv_context = params.assoc_value;
@@ -3670,6 +3682,9 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->max_burst = params.assoc_value;
@@ -3798,6 +3813,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
            authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_set_key(ep, asoc, authkey);
@@ -3853,6 +3871,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
        if (asoc)
                return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
@@ -3904,6 +3925,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
        if (asoc)
                return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
@@ -3954,6 +3978,9 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
        if (asoc)
                return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
@@ -4169,6 +4196,9 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               info.pr_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
            info.pr_assoc_id == SCTP_ALL_ASSOC) {
                SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
@@ -4251,6 +4281,9 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                ep->strreset_enable = params.assoc_value;
@@ -4376,6 +4409,9 @@ static int sctp_setsockopt_scheduler(struct sock *sk,
        if (asoc)
                return sctp_sched_set_sched(asoc, params.assoc_value);
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->default_ss = params.assoc_value;
@@ -4541,6 +4577,9 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
        if (asoc)
                return sctp_assoc_ulpevent_type_set(&param, asoc);
 
+       if (sctp_style(sk, TCP))
+               param.se_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
            param.se_assoc_id == SCTP_ALL_ASSOC)
                sctp_ulpevent_type_set(&sp->subscribe,
@@ -9169,7 +9208,7 @@ static inline void sctp_copy_descendant(struct sock *sk_to,
 {
        int ancestor_size = sizeof(struct inet_sock) +
                            sizeof(struct sctp_sock) -
-                           offsetof(struct sctp_sock, auto_asconf_list);
+                           offsetof(struct sctp_sock, pd_lobby);
 
        if (sk_from->sk_family == PF_INET6)
                ancestor_size += sizeof(struct ipv6_pinfo);
@@ -9253,7 +9292,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
         * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
         */
-       skb_queue_head_init(&newsp->pd_lobby);
        atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
 
        if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
index 3c176a12fe48048613d89add95189bb47cd5e14e..8255f5bda0aa07dbeb78460d5bbf52a9f4510fd3 100644 (file)
@@ -384,6 +384,18 @@ static struct file_system_type sock_fs_type = {
  *     but we take care of internal coherence yet.
  */
 
+/**
+ *     sock_alloc_file - Bind a &socket to a &file
+ *     @sock: socket
+ *     @flags: file status flags
+ *     @dname: protocol name
+ *
+ *     Returns the &file bound to @sock, implicitly storing it
+ *     in sock->file. If @dname is %NULL, an empty string is used instead.
+ *     On failure the return is an ERR pointer (see linux/err.h).
+ *     This function uses GFP_KERNEL internally.
+ */
+
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
 {
        struct file *file;
@@ -424,6 +436,14 @@ static int sock_map_fd(struct socket *sock, int flags)
        return PTR_ERR(newfile);
 }
 
+/**
+ *     sock_from_file - Return the &socket bound to @file.
+ *     @file: file
+ *     @err: pointer to an error code return
+ *
+ *     On failure returns %NULL and assigns -ENOTSOCK to @err.
+ */
+
 struct socket *sock_from_file(struct file *file, int *err)
 {
        if (file->f_op == &socket_file_ops)
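For illustration only (not part of this patch), a minimal sketch of how the two helpers documented above pair up when kernel code hands a socket to userspace as a file descriptor; the wrapper names and the O_CLOEXEC choice are assumptions:

    #include <linux/err.h>
    #include <linux/file.h>
    #include <linux/fs.h>
    #include <linux/net.h>

    /* Sketch: expose an existing struct socket as a file descriptor. */
    static int example_install_sock_fd(struct socket *sock)
    {
            struct file *file;
            int fd;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0)
                    return fd;

            file = sock_alloc_file(sock, O_CLOEXEC, NULL);
            if (IS_ERR(file)) {
                    put_unused_fd(fd);
                    return PTR_ERR(file);
            }
            fd_install(fd, file);
            return fd;
    }

    /* Sketch: recover the socket from a file, e.g. in an ioctl handler. */
    static struct socket *example_lookup_sock(struct file *file)
    {
            int err;

            /* NULL (with err set to -ENOTSOCK) if @file is not a socket. */
            return sock_from_file(file, &err);
    }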
@@ -532,11 +552,11 @@ static const struct inode_operations sockfs_inode_ops = {
 };
 
 /**
- *     sock_alloc      -       allocate a socket
+ *     sock_alloc - allocate a socket
  *
  *     Allocate a new inode and socket object. The two are bound together
  *     and initialised. The socket is then returned. If we are out of inodes
- *     NULL is returned.
+ *     NULL is returned. This function uses GFP_KERNEL internally.
  */
 
 struct socket *sock_alloc(void)
@@ -561,7 +581,7 @@ struct socket *sock_alloc(void)
 EXPORT_SYMBOL(sock_alloc);
 
 /**
- *     sock_release    -       close a socket
+ *     sock_release - close a socket
  *     @sock: socket to close
  *
  *     The socket is released from the protocol stack if it has a release
@@ -617,6 +637,15 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
 }
 EXPORT_SYMBOL(__sock_tx_timestamp);
 
+/**
+ *     sock_sendmsg - send a message through @sock
+ *     @sock: socket
+ *     @msg: message to send
+ *
+ *     Sends @msg through @sock, passing through LSM.
+ *     Returns the number of bytes sent, or an error code.
+ */
+
 static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
 {
        int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
@@ -633,6 +662,18 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
 }
 EXPORT_SYMBOL(sock_sendmsg);
 
+/**
+ *     kernel_sendmsg - send a message through @sock (kernel-space)
+ *     @sock: socket
+ *     @msg: message header
+ *     @vec: kernel vec
+ *     @num: vec array length
+ *     @size: total message data size
+ *
+ *     Builds the message data with @vec and sends it through @sock.
+ *     Returns the number of bytes sent, or an error code.
+ */
+
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size)
 {
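As a usage sketch only (not taken from this patch), sending a kernel buffer over an already-connected socket with kernel_sendmsg(); the helper name and the MSG_DONTWAIT choice are illustrative assumptions:

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    static int example_send(struct socket *sock, void *buf, size_t len)
    {
            struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
            struct kvec iov = { .iov_base = buf, .iov_len = len };

            /* Returns the number of bytes sent or a negative error code. */
            return kernel_sendmsg(sock, &msg, &iov, 1, len);
    }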
@@ -641,6 +682,19 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 }
 EXPORT_SYMBOL(kernel_sendmsg);
 
+/**
+ *     kernel_sendmsg_locked - send a message through @sk (kernel-space)
+ *     @sk: sock
+ *     @msg: message header
+ *     @vec: output s/g array
+ *     @num: output s/g array length
+ *     @size: total message data size
+ *
+ *     Builds the message data with @vec and sends it through @sk.
+ *     Returns the number of bytes sent, or an error code.
+ *     The caller must hold the @sk lock.
+ */
+
 int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
                          struct kvec *vec, size_t num, size_t size)
 {
@@ -811,6 +865,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 
+/**
+ *     sock_recvmsg - receive a message from @sock
+ *     @sock: socket
+ *     @msg: message to receive
+ *     @flags: message flags
+ *
+ *     Receives @msg from @sock, passing through LSM. Returns the total number
+ *     of bytes received, or an error.
+ */
+
 static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
                                     int flags)
 {
@@ -826,20 +890,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
 EXPORT_SYMBOL(sock_recvmsg);
 
 /**
- * kernel_recvmsg - Receive a message from a socket (kernel space)
- * @sock:       The socket to receive the message from
- * @msg:        Received message
- * @vec:        Input s/g array for message data
- * @num:        Size of input s/g array
- * @size:       Number of bytes to read
- * @flags:      Message flags (MSG_DONTWAIT, etc...)
+ *     kernel_recvmsg - Receive a message from a socket (kernel space)
+ *     @sock: The socket to receive the message from
+ *     @msg: Received message
+ *     @vec: Input s/g array for message data
+ *     @num: Size of input s/g array
+ *     @size: Number of bytes to read
+ *     @flags: Message flags (MSG_DONTWAIT, etc...)
  *
- * On return the msg structure contains the scatter/gather array passed in the
- * vec argument. The array is modified so that it consists of the unfilled
- * portion of the original array.
+ *     On return the msg structure contains the scatter/gather array passed in the
+ *     vec argument. The array is modified so that it consists of the unfilled
+ *     portion of the original array.
  *
- * The returned value is the total number of bytes received, or an error.
+ *     The returned value is the total number of bytes received, or an error.
  */
+
 int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size, int flags)
 {
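Similarly, a hedged sketch of the receive side with kernel_recvmsg(); MSG_WAITALL is an assumption about the caller's needs, not something this patch prescribes (same headers as the send sketch above):

    static int example_recv(struct socket *sock, void *buf, size_t len)
    {
            struct msghdr msg = { };
            struct kvec iov = { .iov_base = buf, .iov_len = len };

            /* Blocks until @len bytes arrive, EOF is seen, or an error occurs. */
            return kernel_recvmsg(sock, &msg, &iov, 1, len, MSG_WAITALL);
    }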
@@ -1005,6 +1070,13 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
  *     what to do with it - that's up to the protocol still.
  */
 
+/**
+ *     get_net_ns - increment the refcount of the network namespace
+ *     @ns: common namespace (net)
+ *
+ *     Returns the net's common namespace.
+ */
+
 struct ns_common *get_net_ns(struct ns_common *ns)
 {
        return &get_net(container_of(ns, struct net, ns))->ns;
@@ -1099,6 +1171,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        return err;
 }
 
+/**
+ *     sock_create_lite - creates a socket
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     Creates a new socket and assigns it to @res, passing through LSM.
+ *     The new socket's initialization is not complete; see kernel_accept().
+ *     Returns 0 or an error. On failure @res is set to %NULL.
+ *     This function internally uses GFP_KERNEL.
+ */
+
 int sock_create_lite(int family, int type, int protocol, struct socket **res)
 {
        int err;
@@ -1224,6 +1309,21 @@ call_kill:
 }
 EXPORT_SYMBOL(sock_wake_async);
 
+/**
+ *     __sock_create - creates a socket
+ *     @net: net namespace
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *     @kern: boolean for kernel space sockets
+ *
+ *     Creates a new socket and assigns it to @res, passing through LSM.
+ *     Returns 0 or an error. On failure @res is set to %NULL. @kern must
+ *     be set to true if the socket resides in kernel space.
+ *     This function internally uses GFP_KERNEL.
+ */
+
 int __sock_create(struct net *net, int family, int type, int protocol,
                         struct socket **res, int kern)
 {
@@ -1333,12 +1433,35 @@ out_release:
 }
 EXPORT_SYMBOL(__sock_create);
 
+/**
+ *     sock_create - creates a socket
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     A wrapper around __sock_create().
+ *     Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
 int sock_create(int family, int type, int protocol, struct socket **res)
 {
        return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
 }
 EXPORT_SYMBOL(sock_create);
 
+/**
+ *     sock_create_kern - creates a socket (kernel space)
+ *     @net: net namespace
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     A wrapper around __sock_create().
+ *     Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
 int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
 {
        return __sock_create(net, family, type, protocol, res, 1);
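A minimal sketch of creating an in-kernel socket with sock_create_kern(); the use of init_net and TCP is an illustrative assumption, not part of this patch:

    #include <linux/in.h>
    #include <linux/net.h>
    #include <net/net_namespace.h>
    #include <net/sock.h>

    static int example_create(struct socket **res)
    {
            /* IPv4 stream socket owned by the kernel (kern = 1 internally). */
            return sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
                                    IPPROTO_TCP, res);
    }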
@@ -3322,18 +3445,46 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
 }
 #endif
 
+/**
+ *     kernel_bind - bind an address to a socket (kernel space)
+ *     @sock: socket
+ *     @addr: address
+ *     @addrlen: length of address
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
 {
        return sock->ops->bind(sock, addr, addrlen);
 }
 EXPORT_SYMBOL(kernel_bind);
 
+/**
+ *     kernel_listen - move socket to listening state (kernel space)
+ *     @sock: socket
+ *     @backlog: pending connections queue size
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_listen(struct socket *sock, int backlog)
 {
        return sock->ops->listen(sock, backlog);
 }
 EXPORT_SYMBOL(kernel_listen);
 
+/**
+ *     kernel_accept - accept a connection (kernel space)
+ *     @sock: listening socket
+ *     @newsock: new connected socket
+ *     @flags: flags
+ *
+ *     @flags must be SOCK_CLOEXEC, SOCK_NONBLOCK or 0.
+ *     If it fails, @newsock is guaranteed to be %NULL.
+ *     Returns 0 or an error.
+ */
+
 int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
 {
        struct sock *sk = sock->sk;
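Putting the three helpers documented above together, a hedged sketch of an in-kernel TCP listener; the function name, port number and backlog are illustrative assumptions (same headers as the sock_create_kern() sketch above):

    static int example_listener(struct socket **listener, struct socket **peer)
    {
            struct sockaddr_in addr = {
                    .sin_family      = AF_INET,
                    .sin_addr.s_addr = htonl(INADDR_ANY),
                    .sin_port        = htons(12345),   /* illustrative port */
            };
            int err;

            err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
                                   IPPROTO_TCP, listener);
            if (err)
                    return err;
            err = kernel_bind(*listener, (struct sockaddr *)&addr, sizeof(addr));
            if (err)
                    goto out;
            err = kernel_listen(*listener, 8);
            if (err)
                    goto out;
            /* Blocks until a connection arrives; *peer stays NULL on failure. */
            err = kernel_accept(*listener, peer, 0);
            if (err)
                    goto out;
            return 0;
    out:
            sock_release(*listener);
            *listener = NULL;
            return err;
    }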
@@ -3359,6 +3510,19 @@ done:
 }
 EXPORT_SYMBOL(kernel_accept);
 
+/**
+ *     kernel_connect - connect a socket (kernel space)
+ *     @sock: socket
+ *     @addr: address
+ *     @addrlen: address length
+ *     @flags: flags (O_NONBLOCK, ...)
+ *
+ *     For datagram sockets, @addr is the address to which datagrams are sent
+ *     by default, and the only address from which datagrams are received.
+ *     For stream sockets, attempts to connect to @addr.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
                   int flags)
 {
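And the client side, sketched under the assumption that the caller supplies an IPv4 peer address already in network byte order (illustration only):

    static int example_connect(struct socket *sock, __be32 daddr, __be16 dport)
    {
            struct sockaddr_in addr = {
                    .sin_family      = AF_INET,
                    .sin_addr.s_addr = daddr,
                    .sin_port        = dport,
            };

            /* flags == 0: block until the connection is established or fails. */
            return kernel_connect(sock, (struct sockaddr *)&addr, sizeof(addr), 0);
    }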
@@ -3366,18 +3530,48 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
 }
 EXPORT_SYMBOL(kernel_connect);
 
+/**
+ *     kernel_getsockname - get the address to which the socket is bound (kernel space)
+ *     @sock: socket
+ *     @addr: address holder
+ *
+ *     Fills the @addr pointer with the address to which the socket is bound.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
 {
        return sock->ops->getname(sock, addr, 0);
 }
 EXPORT_SYMBOL(kernel_getsockname);
 
+/**
+ *     kernel_getpeername - get the address to which the socket is connected (kernel space)
+ *     @sock: socket
+ *     @addr: address holder
+ *
+ *     Fills the @addr pointer with the address to which the socket is connected.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
 {
        return sock->ops->getname(sock, addr, 1);
 }
 EXPORT_SYMBOL(kernel_getpeername);
 
+/**
+ *     kernel_getsockopt - get a socket option (kernel space)
+ *     @sock: socket
+ *     @level: API level (SOL_SOCKET, ...)
+ *     @optname: option tag
+ *     @optval: option value
+ *     @optlen: option length
+ *
+ *     The actual length of the option value is written back to @optlen.
+ *     Returns 0 or an error.
+ */
+
 int kernel_getsockopt(struct socket *sock, int level, int optname,
                        char *optval, int *optlen)
 {
@@ -3400,6 +3594,17 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(kernel_getsockopt);
 
+/**
+ *     kernel_setsockopt - set a socket option (kernel space)
+ *     @sock: socket
+ *     @level: API level (SOL_SOCKET, ...)
+ *     @optname: option tag
+ *     @optval: option value
+ *     @optlen: option length
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_setsockopt(struct socket *sock, int level, int optname,
                        char *optval, unsigned int optlen)
 {
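A usage sketch of the two option helpers; TCP_NODELAY is only an example option, not something this patch touches:

    #include <linux/socket.h>
    #include <linux/tcp.h>

    static int example_nodelay(struct socket *sock)
    {
            int one = 1, val = 0, len = sizeof(val);
            int err;

            err = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
                                    (char *)&one, sizeof(one));
            if (err)
                    return err;
            /* Read it back; the actual length is written to &len. */
            return kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
                                     (char *)&val, &len);
    }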
@@ -3420,6 +3625,17 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(kernel_setsockopt);
 
+/**
+ *     kernel_sendpage - send a &page through a socket (kernel space)
+ *     @sock: socket
+ *     @page: page
+ *     @offset: page offset
+ *     @size: total size in bytes
+ *     @flags: flags (MSG_DONTWAIT, ...)
+ *
+ *     Returns the total amount sent in bytes or an error.
+ */
+
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
                    size_t size, int flags)
 {
@@ -3430,6 +3646,18 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 }
 EXPORT_SYMBOL(kernel_sendpage);
 
+/**
+ *     kernel_sendpage_locked - send a &page through the locked sock (kernel space)
+ *     @sk: sock
+ *     @page: page
+ *     @offset: page offset
+ *     @size: total size in bytes
+ *     @flags: flags (MSG_DONTWAIT, ...)
+ *
+ *     Returns the total amount sent in bytes or an error.
+ *     The caller must hold the @sk lock.
+ */
+
 int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
                           size_t size, int flags)
 {
@@ -3443,17 +3671,30 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(kernel_sendpage_locked);
 
+/**
+ *     kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
+ *     @sock: socket
+ *     @how: which half of the connection to shut down (SHUT_RD, SHUT_WR or SHUT_RDWR)
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
 {
        return sock->ops->shutdown(sock, how);
 }
 EXPORT_SYMBOL(kernel_sock_shutdown);
 
-/* This routine returns the IP overhead imposed by a socket i.e.
- * the length of the underlying IP header, depending on whether
- * this is an IPv4 or IPv6 socket and the length from IP options turned
- * on at the socket. Assumes that the caller has a lock on the socket.
+/**
+ *     kernel_sock_ip_overhead - returns the IP overhead imposed by a socket
+ *     @sk: socket
+ *
+ *     This routine returns the IP overhead imposed by a socket i.e.
+ *     the length of the underlying IP header, depending on whether
+ *     this is an IPv4 or IPv6 socket and the length from IP options turned
+ *     on at the socket. Assumes that the caller has a lock on the socket.
  */
+
 u32 kernel_sock_ip_overhead(struct sock *sk)
 {
        struct inet_sock *inet;
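Finally, a teardown sketch tying kernel_sock_shutdown() to sock_release(); SHUT_RDWR is an illustrative choice and the wrapper is not part of this patch:

    static void example_teardown(struct socket *sock)
    {
            /* Stop both directions, then drop the socket and its inode. */
            kernel_sock_shutdown(sock, SHUT_RDWR);
            sock_release(sock);
    }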
index da1a676860cad3c8a2a95acf11f0e908fe2bc255..860dcfb95ee472fed5d74e6015af2acce178c0a7 100644 (file)
@@ -550,6 +550,8 @@ EXPORT_SYMBOL_GPL(strp_check_rcv);
 static int __init strp_mod_init(void)
 {
        strp_wq = create_singlethread_workqueue("kstrp");
+       if (unlikely(!strp_wq))
+               return -ENOMEM;
 
        return 0;
 }
index 228970e6e52ba8b407be724d055976dd67530f81..187d10443a1584e196245afc9837add06daa1c86 100644 (file)
@@ -2311,6 +2311,15 @@ out_exit:
        rpc_exit(task, status);
 }
 
+static bool
+rpc_check_connected(const struct rpc_rqst *req)
+{
+       /* No allocated request or transport? return true */
+       if (!req || !req->rq_xprt)
+               return true;
+       return xprt_connected(req->rq_xprt);
+}
+
 static void
 rpc_check_timeout(struct rpc_task *task)
 {
@@ -2322,10 +2331,11 @@ rpc_check_timeout(struct rpc_task *task)
        dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
        task->tk_timeouts++;
 
-       if (RPC_IS_SOFTCONN(task)) {
+       if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
                rpc_exit(task, -ETIMEDOUT);
                return;
        }
+
        if (RPC_IS_SOFT(task)) {
                if (clnt->cl_chatty) {
                        printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
index 9359539907bafb7ca1c13ad51485756ac89ce5d0..732d4b57411a2562ad8dc4ee2633c8441f204ba0 100644 (file)
@@ -495,8 +495,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
                int flags, struct rpc_rqst *req)
 {
        struct xdr_buf *buf = &req->rq_private_buf;
-       size_t want, read;
-       ssize_t ret;
+       size_t want, uninitialized_var(read);
+       ssize_t uninitialized_var(ret);
 
        xs_read_header(transport, buf);
 
index 06fee142f09fbea05a8b27bb240a4f3d3480b5b2..63f39201e41e699104d838f206d43f7f24806d3a 100644 (file)
@@ -919,6 +919,9 @@ int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
 {
        struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
 
+       if (!group)
+               return -EMSGSIZE;
+
        if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
                        grp->type) ||
            nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
index f076edb74338247f0bad99cfaa5d23e5b14730ab..7ce1e86b024f09cb7345840d4a4d8e3949c5107d 100644 (file)
@@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
 
 void tipc_net_stop(struct net *net)
 {
-       u32 self = tipc_own_addr(net);
-
-       if (!self)
+       if (!tipc_own_id(net))
                return;
 
-       tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
        rtnl_lock();
        tipc_bearer_stop(net);
        tipc_node_stop(net);
index 2dc4919ab23cace02749ddb9b4838c2b64c09152..dd3b6dc17662fc42eb0b567501c6b9e8bee67031 100644 (file)
@@ -817,10 +817,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
 {
        struct tipc_link_entry *le = &n->links[bearer_id];
+       struct tipc_media_addr *maddr = NULL;
        struct tipc_link *l = le->link;
-       struct tipc_media_addr *maddr;
-       struct sk_buff_head xmitq;
        int old_bearer_id = bearer_id;
+       struct sk_buff_head xmitq;
 
        if (!l)
                return;
@@ -844,7 +844,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
        tipc_node_write_unlock(n);
        if (delete)
                tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
-       tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+       if (!skb_queue_empty(&xmitq))
+               tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
        tipc_sk_rcv(n->net, &le->inputq);
 }
 
index 3274ef625dba1b3417405d8537b4b30e919d44d1..b542f14ed444bfcedac61ef2d1eda45d1af1add2 100644 (file)
@@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
        return 0;
 }
 
+static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
+{
+       if (addr->family != AF_TIPC)
+               return false;
+       if (addr->addrtype == TIPC_SERVICE_RANGE)
+               return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
+       return (addr->addrtype == TIPC_SERVICE_ADDR ||
+               addr->addrtype == TIPC_SOCKET_ADDR);
+}
+
 /**
  * tipc_connect - establish a connection to another TIPC port
  * @sock: socket structure
@@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
                if (!tipc_sk_type_connectionless(sk))
                        res = -EINVAL;
                goto exit;
-       } else if (dst->family != AF_TIPC) {
-               res = -EINVAL;
        }
-       if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
+       if (!tipc_sockaddr_is_sane(dst)) {
                res = -EINVAL;
-       if (res)
                goto exit;
-
+       }
        /* DGRAM/RDM connect(), just save the destaddr */
        if (tipc_sk_type_connectionless(sk)) {
                memcpy(&tsk->peer, dest, destlen);
                goto exit;
+       } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
+               res = -EINVAL;
+               goto exit;
        }
 
        previous = sk->sk_state;
@@ -3255,6 +3265,8 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
        peer_port = tsk_peer_port(tsk);
 
        nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
+       if (!nest)
+               return -EMSGSIZE;
 
        if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
                goto msg_full;
index 4a708a4e8583b5db2022b80e80529e25e787e7bb..b45932d780040a35c099a8ead80ccea40e0910b9 100644 (file)
@@ -363,6 +363,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
        struct tipc_subscription *sub;
 
        if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
+               s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
                tipc_conn_delete_sub(con, s);
                return 0;
        }
index 77520eacee8f18da45781ea70cc82ee16e3de94f..989e52386c358a34a566660933002827ba165068 100644 (file)
@@ -193,9 +193,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 
 static void xdp_umem_release(struct xdp_umem *umem)
 {
-       struct task_struct *task;
-       struct mm_struct *mm;
-
        xdp_umem_clear_dev(umem);
 
        ida_simple_remove(&umem_ida, umem->id);
@@ -214,21 +211,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
 
        xdp_umem_unpin_pages(umem);
 
-       task = get_pid_task(umem->pid, PIDTYPE_PID);
-       put_pid(umem->pid);
-       if (!task)
-               goto out;
-       mm = get_task_mm(task);
-       put_task_struct(task);
-       if (!mm)
-               goto out;
-
-       mmput(mm);
        kfree(umem->pages);
        umem->pages = NULL;
 
        xdp_umem_unaccount_pages(umem);
-out:
        kfree(umem);
 }
 
@@ -357,7 +343,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        if (size_chk < 0)
                return -EINVAL;
 
-       umem->pid = get_task_pid(current, PIDTYPE_PID);
        umem->address = (unsigned long)addr;
        umem->chunk_mask = ~((u64)chunk_size - 1);
        umem->size = size;
@@ -373,7 +358,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
        err = xdp_umem_account_pages(umem);
        if (err)
-               goto out;
+               return err;
 
        err = xdp_umem_pin_pages(umem);
        if (err)
@@ -392,8 +377,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 out_account:
        xdp_umem_unaccount_pages(umem);
-out:
-       put_pid(umem->pid);
        return err;
 }
 
index 2554a15ecf2b8796c41e593d97c8b55ee97a4620..76ca30cc4791912fde4d7f36e4a90549e653cfbb 100644 (file)
@@ -199,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
 endif # BUILD_C_RECORDMCOUNT
-cmd_record_mcount =                                            \
-       if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =   \
-            "$(CC_FLAGS_FTRACE)" ]; then                       \
-               $(sub_cmd_record_mcount)                        \
-       fi
+cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),        \
+       $(sub_cmd_record_mcount))
 endif # CC_USING_RECORD_MCOUNT
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
index 7395697e7f19a5f524d883b7079197434c2983ae..c9f071b0a0ab70b647bec3633571059934ece1bc 100644 (file)
@@ -32,6 +32,7 @@ if (id == NULL || ...) { ... return ...; }
 (    id
 |    (T2)dev_get_drvdata(&id->dev)
 |    (T3)platform_get_drvdata(id)
+|    &id->dev
 );
 | return@p2 ...;
 )
index 481cf301ccfc3abf2b68c8dcc8b59612ed2e9841..08470362199c7389009982ec41fed3b9860b89cc 100644 (file)
@@ -1,4 +1,4 @@
-/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element
+/// Correct the size argument to alloc functions
 ///
 //# This makes an effort to find cases where the argument to sizeof is wrong
 //# in memory allocation functions by checking the type of the allocated memory
index 611945611bf8352d4831a51c411c2d3d5d7afc59..1dcfb288ee63630e7e73be6fe28f1fd1a3bc5857 100644 (file)
@@ -113,7 +113,8 @@ do_resize:
                        case KEY_DOWN:
                                break;
                        case KEY_BACKSPACE:
-                       case 127:
+                       case 8:   /* ^H */
+                       case 127: /* ^? */
                                if (pos) {
                                        wattrset(dialog, dlg.inputbox.atr);
                                        if (input_x == 0) {
index a4670f4e825a8c779cf4894587b6e7e31f556b02..ac92c0ded6c5c627e974679ef967d4bc37b25a53 100644 (file)
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
                state->match_direction = FIND_NEXT_MATCH_UP;
                *ans = get_mext_match(state->pattern,
                                state->match_direction);
-       } else if (key == KEY_BACKSPACE || key == 127) {
+       } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
                state->pattern[strlen(state->pattern)-1] = '\0';
                adj_match_dir(&state->match_direction);
        } else
index 7be620a1fcdb8191639aaeaca7b5c6ae421d9769..77f525a8617c27788cc30f9a65c41041050806ed 100644 (file)
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
                case KEY_F(F_EXIT):
                case KEY_F(F_BACK):
                        break;
-               case 127:
+               case 8:   /* ^H */
+               case 127: /* ^? */
                case KEY_BACKSPACE:
                        if (cursor_position > 0) {
                                memmove(&result[cursor_position-1],
index 0b0d1080b1c5ef4903a3b87d8fbfbc11b165e739..f277e116e0ebf64e350c636443b2f7c663b599ea 100644 (file)
@@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                               info->sechdrs[sym->st_shndx].sh_offset -
                               (info->hdr->e_type != ET_REL ?
                                info->sechdrs[sym->st_shndx].sh_addr : 0);
-                       crc = *crcp;
+                       crc = TO_NATIVE(*crcp);
                }
                sym_update_crc(symname + strlen("__crc_"), mod, crc,
                                export);
index 6b576e58872550b991e9ae941999f9a3ffb968b7..daecdfb15a9cf91ef789ecafb76240d706f4c543 100644 (file)
@@ -828,9 +828,11 @@ void policydb_destroy(struct policydb *p)
        hashtab_map(p->range_tr, range_tr_destroy, NULL);
        hashtab_destroy(p->range_tr);
 
-       for (i = 0; i < p->p_types.nprim; i++)
-               ebitmap_destroy(&p->type_attr_map_array[i]);
-       kvfree(p->type_attr_map_array);
+       if (p->type_attr_map_array) {
+               for (i = 0; i < p->p_types.nprim; i++)
+                       ebitmap_destroy(&p->type_attr_map_array[i]);
+               kvfree(p->type_attr_map_array);
+       }
 
        ebitmap_destroy(&p->filename_trans_ttypes);
        ebitmap_destroy(&p->policycaps);
@@ -2496,10 +2498,13 @@ int policydb_read(struct policydb *p, void *fp)
        if (!p->type_attr_map_array)
                goto bad;
 
+       /* just in case ebitmap_init() becomes more than just a memset(0): */
+       for (i = 0; i < p->p_types.nprim; i++)
+               ebitmap_init(&p->type_attr_map_array[i]);
+
        for (i = 0; i < p->p_types.nprim; i++) {
                struct ebitmap *e = &p->type_attr_map_array[i];
 
-               ebitmap_init(e);
                if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
                        rc = ebitmap_read(e, fp);
                        if (rc)
index d5b0d7ba83c4204db42df492a5e35f54a67c470c..f6ae68017608d83cedc394cd5c11891439e8cbec 100644 (file)
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
                         params_channels(params) / 8;
 
+       err = snd_pcm_oss_period_size(substream, params, sparams);
+       if (err < 0)
+               goto failure;
+
+       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+       if (err < 0)
+               goto failure;
+
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+                                    runtime->oss.periods, NULL);
+       if (err < 0)
+               goto failure;
+
+       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+       err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+       if (err < 0) {
+               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+               goto failure;
+       }
+
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
        snd_pcm_oss_plugin_clear(substream);
        if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        }
 #endif
 
-       err = snd_pcm_oss_period_size(substream, params, sparams);
-       if (err < 0)
-               goto failure;
-
-       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
-       if (err < 0)
-               goto failure;
-
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
-                                    runtime->oss.periods, NULL);
-       if (err < 0)
-               goto failure;
-
-       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
-       if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
-               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
-               goto failure;
-       }
-
        if (runtime->oss.trigger) {
                sw_params->start_threshold = 1;
        } else {
index f731f904e8ccb4e9671523e3b68e7825c779d8d8..1d8452912b14af7b211acc8796d1526d936007a2 100644 (file)
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
-       if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+       switch (runtime->status->state) {
+       case SNDRV_PCM_STATE_SUSPENDED:
                return -EBUSY;
+       /* unresumable PCM state; return -EBUSY for skipping suspend */
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_DISCONNECTED:
+               return -EBUSY;
+       }
        runtime->trigger_master = substream;
        return 0;
 }
index ee601d7f092694aecd7e853845b4e3f73cd0f261..c0690d1ecd55c1ce33c9bc155abd82d2d0ec9edd 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <sound/rawmidi.h>
 #include <sound/info.h>
 #include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
                return -EINVAL;
+       info->stream = array_index_nospec(info->stream, 2);
        pstr = &rmidi->streams[info->stream];
        if (pstr->substream_count == 0)
                return -ENOENT;
index 278ebb9931225998dd07f0606eeabe289d71aff5..c939459172353dee5ee651ee4694f43f2aa9be7d 100644 (file)
@@ -617,13 +617,14 @@ int
 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
 {
        struct seq_oss_synth *rec;
+       struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
 
-       if (dev < 0 || dev >= dp->max_synthdev)
+       if (!info)
                return -ENXIO;
 
-       if (dp->synths[dev].is_midi) {
+       if (info->is_midi) {
                struct midi_info minf;
-               snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+               snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
                inf->synth_type = SYNTH_TYPE_MIDI;
                inf->synth_subtype = 0;
                inf->nr_voices = 16;
index 5b02bd49fde407a5f020eb44cf09a88d5413f37e..4e4ecc21760bccd5a7da41d275c53682461ae60f 100644 (file)
@@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
 
 /* Prototypes for opl3_drums.c */
 void snd_opl3_load_drums(struct snd_opl3 *opl3);
-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
+void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
 
 /* Prototypes for opl3_oss.c */
 #if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
index 220e61926ea4193c02f66e3d6b8265d15be8d28b..513291ba0ab072d5e478ef936eda27d575e11c91 100644 (file)
@@ -36,7 +36,7 @@ static void name_card(struct snd_motu *motu)
        fw_csr_iterator_init(&it, motu->unit->directory);
        while (fw_csr_iterator_next(&it, &key, &val)) {
                switch (key) {
-               case CSR_VERSION:
+               case CSR_MODEL:
                        version = val;
                        break;
                }
@@ -46,7 +46,7 @@ static void name_card(struct snd_motu *motu)
        strcpy(motu->card->shortname, motu->spec->name);
        strcpy(motu->card->mixername, motu->spec->name);
        snprintf(motu->card->longname, sizeof(motu->card->longname),
-                "MOTU %s (version:%d), GUID %08x%08x at %s, S%d",
+                "MOTU %s (version:%06x), GUID %08x%08x at %s, S%d",
                 motu->spec->name, version,
                 fw_dev->config_rom[3], fw_dev->config_rom[4],
                 dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
@@ -237,20 +237,20 @@ static const struct snd_motu_spec motu_audio_express = {
 #define SND_MOTU_DEV_ENTRY(model, data)                        \
 {                                                      \
        .match_flags    = IEEE1394_MATCH_VENDOR_ID |    \
-                         IEEE1394_MATCH_MODEL_ID |     \
-                         IEEE1394_MATCH_SPECIFIER_ID,  \
+                         IEEE1394_MATCH_SPECIFIER_ID | \
+                         IEEE1394_MATCH_VERSION,       \
        .vendor_id      = OUI_MOTU,                     \
-       .model_id       = model,                        \
        .specifier_id   = OUI_MOTU,                     \
+       .version        = model,                        \
        .driver_data    = (kernel_ulong_t)data,         \
 }
 
 static const struct ieee1394_device_id motu_id_table[] = {
-       SND_MOTU_DEV_ENTRY(0x101800, &motu_828mk2),
-       SND_MOTU_DEV_ENTRY(0x107800, &snd_motu_spec_traveler),
-       SND_MOTU_DEV_ENTRY(0x106800, &motu_828mk3),     /* FireWire only. */
-       SND_MOTU_DEV_ENTRY(0x100800, &motu_828mk3),     /* Hybrid. */
-       SND_MOTU_DEV_ENTRY(0x104800, &motu_audio_express),
+       SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
+       SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
+       SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3),     /* FireWire only. */
+       SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3),     /* Hybrid. */
+       SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
        { }
 };
 MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
index aa2a83eb81a988a5e5947d9edce8afae4c91cf6f..dc27a480c2d9bf8156a0376cd0f1788eb34df2b3 100644 (file)
@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
 
        /* block the 0x388 port to avoid PnP conflicts */
        acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
+       if (!acard->fm_res) {
+               err = -EBUSY;
+               goto _err;
+       }
 
        if (port[dev] != SNDRV_AUTO_PORT) {
                if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
index ea876b0b02b9f0d8400a7ee08291ff283999dbc6..dc0084dc8550daae4412f75e7bfc487c69c98beb 100644 (file)
@@ -1952,6 +1952,11 @@ static int snd_echo_create(struct snd_card *card,
        }
        chip->dsp_registers = (volatile u32 __iomem *)
                ioremap_nocache(chip->dsp_registers_phys, sz);
+       if (!chip->dsp_registers) {
+               dev_err(chip->card->dev, "ioremap failed\n");
+               snd_echo_free(chip);
+               return -ENOMEM;
+       }
 
        if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
                        KBUILD_MODNAME, chip)) {
index 5f2005098a60bed87fd16248c295aaff66b1c67d..ec0b8595eb4da448a51d3376c9913e96f09e3075 100644 (file)
@@ -2939,6 +2939,20 @@ static int hda_codec_runtime_resume(struct device *dev)
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
+static int hda_codec_force_resume(struct device *dev)
+{
+       int ret;
+
+       /* The get/put pair below enforces the runtime resume even if the
+        * device hasn't been used at suspend time.  This trick is needed to
+        * update the jack state that may have changed during the sleep.
+        */
+       pm_runtime_get_noresume(dev);
+       ret = pm_runtime_force_resume(dev);
+       pm_runtime_put(dev);
+       return ret;
+}
+
 static int hda_codec_pm_suspend(struct device *dev)
 {
        dev->power.power_state = PMSG_SUSPEND;
@@ -2948,7 +2962,7 @@ static int hda_codec_pm_suspend(struct device *dev)
 static int hda_codec_pm_resume(struct device *dev)
 {
        dev->power.power_state = PMSG_RESUME;
-       return pm_runtime_force_resume(dev);
+       return hda_codec_force_resume(dev);
 }
 
 static int hda_codec_pm_freeze(struct device *dev)
@@ -2960,13 +2974,13 @@ static int hda_codec_pm_freeze(struct device *dev)
 static int hda_codec_pm_thaw(struct device *dev)
 {
        dev->power.power_state = PMSG_THAW;
-       return pm_runtime_force_resume(dev);
+       return hda_codec_force_resume(dev);
 }
 
 static int hda_codec_pm_restore(struct device *dev)
 {
        dev->power.power_state = PMSG_RESTORE;
-       return pm_runtime_force_resume(dev);
+       return hda_codec_force_resume(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
 
index e5c49003e75fdd81a62fbea142089500d0eb6eae..ece256a3b48f3b9108615931d8727c86d0d0ded5 100644 (file)
@@ -947,7 +947,7 @@ static void __azx_runtime_suspend(struct azx *chip)
        display_power(chip, false);
 }
 
-static void __azx_runtime_resume(struct azx *chip)
+static void __azx_runtime_resume(struct azx *chip, bool from_rt)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
        struct hdac_bus *bus = azx_bus(chip);
@@ -964,7 +964,7 @@ static void __azx_runtime_resume(struct azx *chip)
        azx_init_pci(chip);
        hda_intel_init_chip(chip, true);
 
-       if (status) {
+       if (status && from_rt) {
                list_for_each_codec(codec, &chip->bus)
                        if (status & (1 << codec->addr))
                                schedule_delayed_work(&codec->jackpoll_work,
@@ -1016,7 +1016,7 @@ static int azx_resume(struct device *dev)
                        chip->msi = 0;
        if (azx_acquire_irq(chip, 1) < 0)
                return -EIO;
-       __azx_runtime_resume(chip);
+       __azx_runtime_resume(chip, false);
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
        trace_azx_resume(chip);
@@ -1081,7 +1081,7 @@ static int azx_runtime_resume(struct device *dev)
        chip = card->private_data;
        if (!azx_has_pm_runtime(chip))
                return 0;
-       __azx_runtime_resume(chip);
+       __azx_runtime_resume(chip, true);
 
        /* disable controller Wake Up event*/
        azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
@@ -2144,10 +2144,12 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
        SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
-       SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
        SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
+       SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+       SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
        {}
 };
 #endif /* CONFIG_PM */
index 29882bda763289374069ec4777e62b781b416673..e1ebc6d5f38226b10f689b2bc04fd0504331ced2 100644 (file)
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
        unsigned int scp_resp_header;
        unsigned int scp_resp_data[4];
        unsigned int scp_resp_count;
-       bool alt_firmware_present;
        bool startup_check_entered;
        bool dsp_reload;
 
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        bool dsp_loaded = false;
        struct ca0132_spec *spec = codec->spec;
        const struct dsp_image_seg *dsp_os_image;
-       const struct firmware *fw_entry;
+       const struct firmware *fw_entry = NULL;
        /*
         * Alternate firmwares for different variants. The Recon3Di apparently
         * can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        case QUIRK_R3D:
        case QUIRK_AE5:
                if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Desktop firmware not found.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Desktop firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        case QUIRK_R3DI:
                if (request_firmware(&fw_entry, R3DI_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Recon3Di alt firmware not detected.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Recon3Di firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        default:
-               spec->alt_firmware_present = false;
                break;
        }
        /*
         * Use default ctefx.bin if no alt firmware is detected, or if none
         * exists for your particular codec.
         */
-       if (!spec->alt_firmware_present) {
+       if (!fw_entry) {
                codec_dbg(codec, "Default firmware selected.");
                if (request_firmware(&fw_entry, EFX_FILE,
                                        codec->card->dev) != 0)
index 384719d5c44ec89158e781b63a912734e3d24216..a3fb3d4c573090a156bf32534b1eed033f3ade16 100644 (file)
@@ -5687,6 +5687,9 @@ enum {
        ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
+       ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC299_FIXUP_PREDATOR_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6685,6 +6688,32 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
        },
+       [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
+       },
+       [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
+       [ALC299_FIXUP_PREDATOR_SPK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6701,9 +6730,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
-       SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+       SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7100,6 +7133,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
        {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
        {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
+       {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -7320,6 +7354,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x1a, 0x90a70130},
+               {0x1b, 0x90170110},
+               {0x21, 0x03211020}),
        SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
index dae1584cf017f6aa311a5b78c3311b0bf55c2b18..4703d218663a2ad81e7c8d4fd0749bed8199ef4f 100644 (file)
@@ -17,5 +17,7 @@
 
 #define __ARCH_WANT_RENAMEAT
 #define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
 
 #include <asm-generic/unistd.h>
index 8ef80d65a474f001466c4a8ee940b2bb93859881..d2be5a06c339155f355058946f655f38f97b5db9 100644 (file)
@@ -401,41 +401,31 @@ static int do_show(int argc, char **argv)
 
 static int do_dump(int argc, char **argv)
 {
-       unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size;
-       void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
-       unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
+       struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_linfo *prog_linfo = NULL;
-       unsigned long *func_ksyms = NULL;
-       struct bpf_prog_info info = {};
-       unsigned int *func_lens = NULL;
+       enum {DUMP_JITED, DUMP_XLATED} mode;
        const char *disasm_opt = NULL;
-       unsigned int nr_func_ksyms;
-       unsigned int nr_func_lens;
+       struct bpf_prog_info *info;
        struct dump_data dd = {};
-       __u32 len = sizeof(info);
+       void *func_info = NULL;
        struct btf *btf = NULL;
-       unsigned int buf_size;
        char *filepath = NULL;
        bool opcodes = false;
        bool visual = false;
        char func_sig[1024];
        unsigned char *buf;
        bool linum = false;
-       __u32 *member_len;
-       __u64 *member_ptr;
+       __u32 member_len;
+       __u64 arrays;
        ssize_t n;
-       int err;
        int fd;
 
        if (is_prefix(*argv, "jited")) {
                if (disasm_init())
                        return -1;
-
-               member_len = &info.jited_prog_len;
-               member_ptr = &info.jited_prog_insns;
+               mode = DUMP_JITED;
        } else if (is_prefix(*argv, "xlated")) {
-               member_len = &info.xlated_prog_len;
-               member_ptr = &info.xlated_prog_insns;
+               mode = DUMP_XLATED;
        } else {
                p_err("expected 'xlated' or 'jited', got: %s", *argv);
                return -1;
@@ -474,175 +464,50 @@ static int do_dump(int argc, char **argv)
                return -1;
        }
 
-       err = bpf_obj_get_info_by_fd(fd, &info, &len);
-       if (err) {
-               p_err("can't get prog info: %s", strerror(errno));
-               return -1;
-       }
-
-       if (!*member_len) {
-               p_info("no instructions returned");
-               close(fd);
-               return 0;
-       }
+       if (mode == DUMP_JITED)
+               arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
+       else
+               arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
 
-       buf_size = *member_len;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+       arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
 
-       buf = malloc(buf_size);
-       if (!buf) {
-               p_err("mem alloc failed");
-               close(fd);
+       info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+       close(fd);
+       if (IS_ERR_OR_NULL(info_linear)) {
+               p_err("can't get prog info: %s", strerror(errno));
                return -1;
        }
 
-       nr_func_ksyms = info.nr_jited_ksyms;
-       if (nr_func_ksyms) {
-               func_ksyms = malloc(nr_func_ksyms * sizeof(__u64));
-               if (!func_ksyms) {
-                       p_err("mem alloc failed");
-                       close(fd);
-                       goto err_free;
-               }
-       }
-
-       nr_func_lens = info.nr_jited_func_lens;
-       if (nr_func_lens) {
-               func_lens = malloc(nr_func_lens * sizeof(__u32));
-               if (!func_lens) {
-                       p_err("mem alloc failed");
-                       close(fd);
+       info = &info_linear->info;
+       if (mode == DUMP_JITED) {
+               if (info->jited_prog_len == 0) {
+                       p_info("no instructions returned");
                        goto err_free;
                }
-       }
-
-       nr_finfo = info.nr_func_info;
-       finfo_rec_size = info.func_info_rec_size;
-       if (nr_finfo && finfo_rec_size) {
-               func_info = malloc(nr_finfo * finfo_rec_size);
-               if (!func_info) {
-                       p_err("mem alloc failed");
-                       close(fd);
+               buf = (unsigned char *)(info->jited_prog_insns);
+               member_len = info->jited_prog_len;
+       } else {        /* DUMP_XLATED */
+               if (info->xlated_prog_len == 0) {
+                       p_err("error retrieving insn dump: kernel.kptr_restrict set?");
                        goto err_free;
                }
+               buf = (unsigned char *)info->xlated_prog_insns;
+               member_len = info->xlated_prog_len;
        }
 
-       linfo_rec_size = info.line_info_rec_size;
-       if (info.nr_line_info && linfo_rec_size && info.btf_id) {
-               nr_linfo = info.nr_line_info;
-               linfo = malloc(nr_linfo * linfo_rec_size);
-               if (!linfo) {
-                       p_err("mem alloc failed");
-                       close(fd);
-                       goto err_free;
-               }
-       }
-
-       jited_linfo_rec_size = info.jited_line_info_rec_size;
-       if (info.nr_jited_line_info &&
-           jited_linfo_rec_size &&
-           info.nr_jited_ksyms &&
-           info.nr_jited_func_lens &&
-           info.btf_id) {
-               nr_jited_linfo = info.nr_jited_line_info;
-               jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
-               if (!jited_linfo) {
-                       p_err("mem alloc failed");
-                       close(fd);
-                       goto err_free;
-               }
-       }
-
-       memset(&info, 0, sizeof(info));
-
-       *member_ptr = ptr_to_u64(buf);
-       *member_len = buf_size;
-       info.jited_ksyms = ptr_to_u64(func_ksyms);
-       info.nr_jited_ksyms = nr_func_ksyms;
-       info.jited_func_lens = ptr_to_u64(func_lens);
-       info.nr_jited_func_lens = nr_func_lens;
-       info.nr_func_info = nr_finfo;
-       info.func_info_rec_size = finfo_rec_size;
-       info.func_info = ptr_to_u64(func_info);
-       info.nr_line_info = nr_linfo;
-       info.line_info_rec_size = linfo_rec_size;
-       info.line_info = ptr_to_u64(linfo);
-       info.nr_jited_line_info = nr_jited_linfo;
-       info.jited_line_info_rec_size = jited_linfo_rec_size;
-       info.jited_line_info = ptr_to_u64(jited_linfo);
-
-       err = bpf_obj_get_info_by_fd(fd, &info, &len);
-       close(fd);
-       if (err) {
-               p_err("can't get prog info: %s", strerror(errno));
-               goto err_free;
-       }
-
-       if (*member_len > buf_size) {
-               p_err("too many instructions returned");
-               goto err_free;
-       }
-
-       if (info.nr_jited_ksyms > nr_func_ksyms) {
-               p_err("too many addresses returned");
-               goto err_free;
-       }
-
-       if (info.nr_jited_func_lens > nr_func_lens) {
-               p_err("too many values returned");
-               goto err_free;
-       }
-
-       if (info.nr_func_info != nr_finfo) {
-               p_err("incorrect nr_func_info %d vs. expected %d",
-                     info.nr_func_info, nr_finfo);
-               goto err_free;
-       }
-
-       if (info.func_info_rec_size != finfo_rec_size) {
-               p_err("incorrect func_info_rec_size %d vs. expected %d",
-                     info.func_info_rec_size, finfo_rec_size);
-               goto err_free;
-       }
-
-       if (linfo && info.nr_line_info != nr_linfo) {
-               p_err("incorrect nr_line_info %u vs. expected %u",
-                     info.nr_line_info, nr_linfo);
-               goto err_free;
-       }
-
-       if (info.line_info_rec_size != linfo_rec_size) {
-               p_err("incorrect line_info_rec_size %u vs. expected %u",
-                     info.line_info_rec_size, linfo_rec_size);
-               goto err_free;
-       }
-
-       if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
-               p_err("incorrect nr_jited_line_info %u vs. expected %u",
-                     info.nr_jited_line_info, nr_jited_linfo);
-               goto err_free;
-       }
-
-       if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
-               p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
-                     info.jited_line_info_rec_size, jited_linfo_rec_size);
-               goto err_free;
-       }
-
-       if ((member_len == &info.jited_prog_len &&
-            info.jited_prog_insns == 0) ||
-           (member_len == &info.xlated_prog_len &&
-            info.xlated_prog_insns == 0)) {
-               p_err("error retrieving insn dump: kernel.kptr_restrict set?");
-               goto err_free;
-       }
-
-       if (info.btf_id && btf__get_from_id(info.btf_id, &btf)) {
+       if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
                p_err("failed to get btf");
                goto err_free;
        }
 
-       if (nr_linfo) {
-               prog_linfo = bpf_prog_linfo__new(&info);
+       func_info = (void *)info->func_info;
+
+       if (info->nr_line_info) {
+               prog_linfo = bpf_prog_linfo__new(info);
                if (!prog_linfo)
                        p_info("error in processing bpf_line_info.  continue without it.");
        }
@@ -655,9 +520,9 @@ static int do_dump(int argc, char **argv)
                        goto err_free;
                }
 
-               n = write(fd, buf, *member_len);
+               n = write(fd, buf, member_len);
                close(fd);
-               if (n != *member_len) {
+               if (n != member_len) {
                        p_err("error writing output file: %s",
                              n < 0 ? strerror(errno) : "short write");
                        goto err_free;
@@ -665,19 +530,19 @@ static int do_dump(int argc, char **argv)
 
                if (json_output)
                        jsonw_null(json_wtr);
-       } else if (member_len == &info.jited_prog_len) {
+       } else if (mode == DUMP_JITED) {
                const char *name = NULL;
 
-               if (info.ifindex) {
-                       name = ifindex_to_bfd_params(info.ifindex,
-                                                    info.netns_dev,
-                                                    info.netns_ino,
+               if (info->ifindex) {
+                       name = ifindex_to_bfd_params(info->ifindex,
+                                                    info->netns_dev,
+                                                    info->netns_ino,
                                                     &disasm_opt);
                        if (!name)
                                goto err_free;
                }
 
-               if (info.nr_jited_func_lens && info.jited_func_lens) {
+               if (info->nr_jited_func_lens && info->jited_func_lens) {
                        struct kernel_sym *sym = NULL;
                        struct bpf_func_info *record;
                        char sym_name[SYM_MAX_NAME];
@@ -685,17 +550,16 @@ static int do_dump(int argc, char **argv)
                        __u64 *ksyms = NULL;
                        __u32 *lens;
                        __u32 i;
-
-                       if (info.nr_jited_ksyms) {
+                       if (info->nr_jited_ksyms) {
                                kernel_syms_load(&dd);
-                               ksyms = (__u64 *) info.jited_ksyms;
+                               ksyms = (__u64 *) info->jited_ksyms;
                        }
 
                        if (json_output)
                                jsonw_start_array(json_wtr);
 
-                       lens = (__u32 *) info.jited_func_lens;
-                       for (i = 0; i < info.nr_jited_func_lens; i++) {
+                       lens = (__u32 *) info->jited_func_lens;
+                       for (i = 0; i < info->nr_jited_func_lens; i++) {
                                if (ksyms) {
                                        sym = kernel_syms_search(&dd, ksyms[i]);
                                        if (sym)
@@ -707,7 +571,7 @@ static int do_dump(int argc, char **argv)
                                }
 
                                if (func_info) {
-                                       record = func_info + i * finfo_rec_size;
+                                       record = func_info + i * info->func_info_rec_size;
                                        btf_dumper_type_only(btf, record->type_id,
                                                             func_sig,
                                                             sizeof(func_sig));
@@ -744,49 +608,37 @@ static int do_dump(int argc, char **argv)
                        if (json_output)
                                jsonw_end_array(json_wtr);
                } else {
-                       disasm_print_insn(buf, *member_len, opcodes, name,
+                       disasm_print_insn(buf, member_len, opcodes, name,
                                          disasm_opt, btf, NULL, 0, 0, false);
                }
        } else if (visual) {
                if (json_output)
                        jsonw_null(json_wtr);
                else
-                       dump_xlated_cfg(buf, *member_len);
+                       dump_xlated_cfg(buf, member_len);
        } else {
                kernel_syms_load(&dd);
-               dd.nr_jited_ksyms = info.nr_jited_ksyms;
-               dd.jited_ksyms = (__u64 *) info.jited_ksyms;
+               dd.nr_jited_ksyms = info->nr_jited_ksyms;
+               dd.jited_ksyms = (__u64 *) info->jited_ksyms;
                dd.btf = btf;
                dd.func_info = func_info;
-               dd.finfo_rec_size = finfo_rec_size;
+               dd.finfo_rec_size = info->func_info_rec_size;
                dd.prog_linfo = prog_linfo;
 
                if (json_output)
-                       dump_xlated_json(&dd, buf, *member_len, opcodes,
+                       dump_xlated_json(&dd, buf, member_len, opcodes,
                                         linum);
                else
-                       dump_xlated_plain(&dd, buf, *member_len, opcodes,
+                       dump_xlated_plain(&dd, buf, member_len, opcodes,
                                          linum);
                kernel_syms_destroy(&dd);
        }
 
-       free(buf);
-       free(func_ksyms);
-       free(func_lens);
-       free(func_info);
-       free(linfo);
-       free(jited_linfo);
-       bpf_prog_linfo__free(prog_linfo);
+       free(info_linear);
        return 0;
 
 err_free:
-       free(buf);
-       free(func_ksyms);
-       free(func_lens);
-       free(func_info);
-       free(linfo);
-       free(jited_linfo);
-       bpf_prog_linfo__free(prog_linfo);
+       free(info_linear);
        return -1;
 }
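With this change bpftool no longer hand-rolls the two bpf_obj_get_info_by_fd() calls plus the per-array malloc()s; it asks libbpf for one linear copy of the arrays it needs and frees a single buffer. A minimal sketch of the same pattern in a standalone tool, assuming the libbpf headers are installed under bpf/ and that prog_fd is an already-open program fd; the function name is made up for illustration and error handling is trimmed:

    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/libbpf.h>

    static int show_jited_summary(int prog_fd)
    {
            __u64 arrays = (1UL << BPF_PROG_INFO_JITED_INSNS) |
                           (1UL << BPF_PROG_INFO_JITED_KSYMS);
            struct bpf_prog_info_linear *info_linear;
            struct bpf_prog_info *info;

            /* one call: libbpf sizes, allocates and fills every requested array */
            info_linear = bpf_program__get_prog_info_linear(prog_fd, arrays);
            if (!info_linear || libbpf_get_error(info_linear))
                    return -1;

            info = &info_linear->info;
            printf("jited bytes: %u, jited ksyms: %u\n",
                   info->jited_prog_len, info->nr_jited_ksyms);

            free(info_linear);      /* all arrays live inside this one allocation */
            return 0;
    }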
 
index 61e46d54a67c0dff521e665782b316f92ef9c320..8d3864b061f3879c2bb93a8aeffbe8b105863d4b 100644 (file)
@@ -66,7 +66,8 @@ FEATURE_TESTS_BASIC :=                  \
         sched_getcpu                   \
         sdt                            \
         setns                          \
-        libaio
+        libaio                         \
+        disassembler-four-args
 
 # FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
 # of all feature tests
@@ -118,7 +119,8 @@ FEATURE_DISPLAY ?=              \
          lzma                   \
          get_cpuid              \
          bpf                   \
-         libaio
+         libaio                        \
+         disassembler-four-args
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
index e903b86b742f29d31c383165dab4bec120ee04ae..7853e6d91090cd7170db0ddc4ba26f2c49108894 100644 (file)
 # include "test-reallocarray.c"
 #undef main
 
+#define main main_test_disassembler_four_args
+# include "test-disassembler-four-args.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -219,6 +223,7 @@ int main(int argc, char *argv[])
        main_test_setns();
        main_test_libaio();
        main_test_reallocarray();
+       main_test_disassembler_four_args();
 
        return 0;
 }
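This wires a new probe, disassembler-four-args, into the common feature-test harness; it detects the four-argument disassembler() entry point introduced in binutils 2.29. The probe source is not part of this hunk, but it is essentially a compile/link check along these lines (a sketch; the in-tree tools/build/feature/test-disassembler-four-args.c is authoritative and may differ):

    #include <dis-asm.h>

    int main(void)
    {
            bfd *abfd = bfd_openr(NULL, NULL);

            /* binutils >= 2.29 signature: (arch, big_endian, mach, abfd) */
            disassembler(bfd_get_arch(abfd),
                         bfd_big_endian(abfd),
                         bfd_get_mach(abfd),
                         abfd);
            return 0;
    }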
index d90127298f12d1536b7594f7cdebd324f3fe4db3..12cdf611d217e1ace70b3d0a19e1e8242c99bc88 100644 (file)
@@ -38,8 +38,10 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
 __SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
 #define __NR_io_cancel 3
 __SYSCALL(__NR_io_cancel, sys_io_cancel)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_io_getevents 4
-__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
+#endif
 
 /* fs/xattr.c */
 #define __NR_setxattr 5
@@ -179,7 +181,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
 #define __NR_fchown 55
 __SYSCALL(__NR_fchown, sys_fchown)
 #define __NR_openat 56
-__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+__SYSCALL(__NR_openat, sys_openat)
 #define __NR_close 57
 __SYSCALL(__NR_close, sys_close)
 #define __NR_vhangup 58
@@ -222,10 +224,12 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
 __SYSCALL(__NR3264_sendfile, sys_sendfile64)
 
 /* fs/select.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_pselect6 72
-__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
 #define __NR_ppoll 73
-__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
+#endif
 
 /* fs/signalfd.c */
 #define __NR_signalfd4 74
@@ -269,16 +273,20 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
 /* fs/timerfd.c */
 #define __NR_timerfd_create 85
 __SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_timerfd_settime 86
-__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
-         compat_sys_timerfd_settime)
+__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
+         sys_timerfd_settime)
 #define __NR_timerfd_gettime 87
-__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
-         compat_sys_timerfd_gettime)
+__SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
+         sys_timerfd_gettime)
+#endif
 
 /* fs/utimes.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_utimensat 88
-__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
+#endif
 
 /* kernel/acct.c */
 #define __NR_acct 89
@@ -309,8 +317,10 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
 __SYSCALL(__NR_unshare, sys_unshare)
 
 /* kernel/futex.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_futex 98
-__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
+#endif
 #define __NR_set_robust_list 99
 __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
          compat_sys_set_robust_list)
@@ -319,8 +329,10 @@ __SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
          compat_sys_get_robust_list)
 
 /* kernel/hrtimer.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_nanosleep 101
-__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
+#endif
 
 /* kernel/itimer.c */
 #define __NR_getitimer 102
@@ -341,23 +353,29 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
 /* kernel/posix-timers.c */
 #define __NR_timer_create 107
 __SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_timer_gettime 108
-__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
+#endif
 #define __NR_timer_getoverrun 109
 __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_timer_settime 110
-__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
+#endif
 #define __NR_timer_delete 111
 __SYSCALL(__NR_timer_delete, sys_timer_delete)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_clock_settime 112
-__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
 #define __NR_clock_gettime 113
-__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+__SC_3264(__NR_clock_gettime, sys_clock_gettime32, sys_clock_gettime)
 #define __NR_clock_getres 114
-__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+__SC_3264(__NR_clock_getres, sys_clock_getres_time32, sys_clock_getres)
 #define __NR_clock_nanosleep 115
-__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
-         compat_sys_clock_nanosleep)
+__SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
+         sys_clock_nanosleep)
+#endif
 
 /* kernel/printk.c */
 #define __NR_syslog 116
@@ -388,9 +406,11 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
 __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
 #define __NR_sched_get_priority_min 126
 __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_sched_rr_get_interval 127
-__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
-         compat_sys_sched_rr_get_interval)
+__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
+         sys_sched_rr_get_interval)
+#endif
 
 /* kernel/signal.c */
 #define __NR_restart_syscall 128
@@ -411,9 +431,11 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
 __SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
 #define __NR_rt_sigpending 136
 __SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_rt_sigtimedwait 137
-__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
-         compat_sys_rt_sigtimedwait)
+__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
+         sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
+#endif
 #define __NR_rt_sigqueueinfo 138
 __SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
          compat_sys_rt_sigqueueinfo)
@@ -467,10 +489,15 @@ __SYSCALL(__NR_uname, sys_newuname)
 __SYSCALL(__NR_sethostname, sys_sethostname)
 #define __NR_setdomainname 162
 __SYSCALL(__NR_setdomainname, sys_setdomainname)
+
+#ifdef __ARCH_WANT_SET_GET_RLIMIT
+/* getrlimit and setrlimit are superseded with prlimit64 */
 #define __NR_getrlimit 163
 __SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
 #define __NR_setrlimit 164
 __SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#endif
+
 #define __NR_getrusage 165
 __SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
 #define __NR_umask 166
@@ -481,12 +508,14 @@ __SYSCALL(__NR_prctl, sys_prctl)
 __SYSCALL(__NR_getcpu, sys_getcpu)
 
 /* kernel/time.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_gettimeofday 169
 __SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
 #define __NR_settimeofday 170
 __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
 #define __NR_adjtimex 171
-__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
+#endif
 
 /* kernel/timer.c */
 #define __NR_getpid 172
@@ -511,11 +540,13 @@ __SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
 __SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
 #define __NR_mq_unlink 181
 __SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_mq_timedsend 182
-__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
 #define __NR_mq_timedreceive 183
-__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
-         compat_sys_mq_timedreceive)
+__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
+         sys_mq_timedreceive)
+#endif
 #define __NR_mq_notify 184
 __SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
 #define __NR_mq_getsetattr 185
@@ -536,8 +567,10 @@ __SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
 __SYSCALL(__NR_semget, sys_semget)
 #define __NR_semctl 191
 __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+#endif
 #define __NR_semop 193
 __SYSCALL(__NR_semop, sys_semop)
 
@@ -658,8 +691,10 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
 __SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 #define __NR_accept4 242
 __SYSCALL(__NR_accept4, sys_accept4)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_recvmmsg 243
-__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
+#endif
 
 /*
  * Architectures may provide up to 16 syscalls of their own
@@ -667,8 +702,10 @@ __SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
  */
 #define __NR_arch_specific_syscall 244
 
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_wait4 260
 __SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#endif
 #define __NR_prlimit64 261
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
 #define __NR_fanotify_init 262
@@ -678,10 +715,11 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 #define __NR_name_to_handle_at         264
 __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
 #define __NR_open_by_handle_at         265
-__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
-         compat_sys_open_by_handle_at)
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_clock_adjtime 266
-__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
+#endif
 #define __NR_syncfs 267
 __SYSCALL(__NR_syncfs, sys_syncfs)
 #define __NR_setns 268
@@ -734,15 +772,60 @@ __SYSCALL(__NR_pkey_alloc,    sys_pkey_alloc)
 __SYSCALL(__NR_pkey_free,     sys_pkey_free)
 #define __NR_statx 291
 __SYSCALL(__NR_statx,     sys_statx)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_io_pgetevents 292
-__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
+#endif
 #define __NR_rseq 293
 __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_kexec_file_load 294
 __SYSCALL(__NR_kexec_file_load,     sys_kexec_file_load)
+/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+#if __BITS_PER_LONG == 32
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SC_COMP(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#endif
 
 #undef __NR_syscalls
-#define __NR_syscalls 295
+#define __NR_syscalls 424
 
 /*
  * 32 bit systems traditionally used different
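The churn above swaps the old compat wrappers for __SC_3264()/__SC_COMP_3264(), so 32-bit architectures get the *_time32 entry points for the legacy numbers while the new 403..423 range always carries 64-bit time_t. Roughly how the selection works; the macro body below is paraphrased from earlier in this header and is not part of this hunk:

    /* Paraphrased selection logic -- see the top of asm-generic/unistd.h: */
    #if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
    #define __SC_3264(_nr, _32, _64)        __SYSCALL(_nr, _32)
    #else
    #define __SC_3264(_nr, _32, _64)        __SYSCALL(_nr, _64)
    #endif

    /*
     * So __SC_3264(__NR_futex, sys_futex_time32, sys_futex) wires syscall 98
     * to sys_futex on a 64-bit build and to sys_futex_time32 on a 32-bit
     * build, while the 32-bit-only __NR_futex_time64 (422) always reaches
     * the 64-bit-time implementation.
     */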
index 3c38ac9a92a7c4b18cbb4ac49ac60bf887b03d20..929c8e537a14a517c0a3c7ca5b6b15353d622c30 100644 (file)
@@ -502,16 +502,6 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- *     Description
- *             Push an element *value* in *map*. *flags* is one of:
- *
- *             **BPF_EXIST**
- *             If the queue/stack is full, the oldest element is removed to
- *             make room for this.
- *     Return
- *             0 on success, or a negative error in case of failure.
- *
  * int bpf_probe_read(void *dst, u32 size, const void *src)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_addr** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_ops** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
@@ -2098,52 +2088,52 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded key press with *scancode*,
- *             *toggle* value in the given *protocol*. The scancode will be
- *             translated to a keycode using the rc keymap, and reported as
- *             an input key down event. After a period a key up event is
- *             generated. This period can be extended by calling either
- *             **bpf_rc_keydown**\ () again with the same values, or calling
- *             **bpf_rc_repeat**\ ().
+ *             report a successfully decoded repeat key message. This delays
+ *             the generation of a key up event for previously generated
+ *             key down event.
  *
- *             Some protocols include a toggle bit, in case the button was
- *             released and pressed again between consecutive scancodes.
+ *             Some IR protocols like NEC have a special IR message for
+ *             repeating last button, for when a button is held down.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
- *             The *protocol* is the decoded protocol number (see
- *             **enum rc_proto** for some predefined values).
- *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded repeat key message. This delays
- *             the generation of a key up event for previously generated
- *             key down event.
+ *             report a successfully decoded key press with *scancode*,
+ *             *toggle* value in the given *protocol*. The scancode will be
+ *             translated to a keycode using the rc keymap, and reported as
+ *             an input key down event. After a period a key up event is
+ *             generated. This period can be extended by calling either
+ *             **bpf_rc_keydown**\ () again with the same values, or calling
+ *             **bpf_rc_repeat**\ ().
  *
- *             Some IR protocols like NEC have a special IR message for
- *             repeating last button, for when a button is held down.
+ *             Some protocols include a toggle bit, in case the button was
+ *             released and pressed again between consecutive scancodes.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
+ *             The *protocol* is the decoded protocol number (see
+ *             **enum rc_proto** for some predefined values).
+ *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
  *     Description
  *             Return the cgroup v2 id of the socket associated with the *skb*.
  *             This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- *     Description
- *             Return id of cgroup v2 that is ancestor of cgroup associated
- *             with the *skb* at the *ancestor_level*.  The root cgroup is at
- *             *ancestor_level* zero and each step down the hierarchy
- *             increments the level. If *ancestor_level* == level of cgroup
- *             associated with *skb*, then return value will be same as that
- *             of **bpf_skb_cgroup_id**\ ().
- *
- *             The helper is useful to implement policies based on cgroups
- *             that are upper in hierarchy than immediate cgroup associated
- *             with *skb*.
- *
- *             The format of returned id and helper limitations are same as in
- *             **bpf_skb_cgroup_id**\ ().
- *     Return
- *             The id is returned or 0 in case the id could not be retrieved.
- *
  * u64 bpf_get_current_cgroup_id(void)
  *     Return
  *             A 64-bit integer containing the current cgroup id based
  *             on the cgroup within which the current task is running.
  *
- * voidget_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
  *     Description
  *             Get the pointer to the local storage area.
  *             The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ *     Description
+ *             Return id of cgroup v2 that is ancestor of cgroup associated
+ *             with the *skb* at the *ancestor_level*.  The root cgroup is at
+ *             *ancestor_level* zero and each step down the hierarchy
+ *             increments the level. If *ancestor_level* == level of cgroup
+ *             associated with *skb*, then return value will be same as that
+ *             of **bpf_skb_cgroup_id**\ ().
+ *
+ *             The helper is useful to implement policies based on cgroups
+ *             that are upper in hierarchy than immediate cgroup associated
+ *             with *skb*.
+ *
+ *             The format of returned id and helper limitations are same as in
+ *             **bpf_skb_cgroup_id**\ ().
+ *     Return
+ *             The id is returned or 0 in case the id could not be retrieved.
+ *
  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ *     Description
+ *             Push an element *value* in *map*. *flags* is one of:
+ *
+ *             **BPF_EXIST**
+ *                     If the queue/stack is full, the oldest element is
+ *                     removed to make room for this.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
  *     Return
  *             0
  *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Acquire a spinlock represented by the pointer *lock*, which is
+ *             stored as part of a value of a map. Taking the lock allows to
+ *             safely update the rest of the fields in that value. The
+ *             spinlock can (and must) later be released with a call to
+ *             **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ *             Spinlocks in BPF programs come with a number of restrictions
+ *             and constraints:
+ *
+ *             * **bpf_spin_lock** objects are only allowed inside maps of
+ *               types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ *               list could be extended in the future).
+ *             * BTF description of the map is mandatory.
+ *             * The BPF program can take ONE lock at a time, since taking two
+ *               or more could cause dead locks.
+ *             * Only one **struct bpf_spin_lock** is allowed per map element.
+ *             * When the lock is taken, calls (either BPF to BPF or helpers)
+ *               are not allowed.
+ *             * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ *               allowed inside a spinlock-ed region.
+ *             * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ *               the lock, on all execution paths, before it returns.
+ *             * The BPF program can access **struct bpf_spin_lock** only via
+ *               the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ *               helpers. Loading or storing data into the **struct
+ *               bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ *             * To use the **bpf_spin_lock**\ () helper, the BTF description
+ *               of the map value must be a struct and have **struct
+ *               bpf_spin_lock** *anyname*\ **;** field at the top level.
+ *               Nested lock inside another struct is not allowed.
+ *             * The **struct bpf_spin_lock** *lock* field in a map value must
+ *               be aligned on a multiple of 4 bytes in that value.
+ *             * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ *               the **bpf_spin_lock** field to user space.
+ *             * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ *               a BPF program, do not update the **bpf_spin_lock** field.
+ *             * **bpf_spin_lock** cannot be on the stack or inside a
+ *               networking packet (it can only be inside of a map values).
+ *             * **bpf_spin_lock** is available to root only.
+ *             * Tracing programs and socket filter programs cannot use
+ *               **bpf_spin_lock**\ () due to insufficient preemption checks
+ *               (but this may change in the future).
+ *             * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ *     Return
+ *             0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Release the *lock* previously locked by a call to
+ *             **bpf_spin_lock**\ (\ *lock*\ ).
+ *     Return
+ *             0
+ *
  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_sock** pointer such
- *             that all the fields in bpf_sock can be accessed.
+ *             that all the fields in this **bpf_sock** can be accessed.
  *     Return
- *             A **struct bpf_sock** pointer on success, or NULL in
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_tcp_sock** pointer from a
  *             **struct bpf_sock** pointer.
- *
  *     Return
- *             A **struct bpf_tcp_sock** pointer on success, or NULL in
+ *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
- *     Description
- *             Sets ECN of IP header to ce (congestion encountered) if
- *             current value is ect (ECN capable). Works with IPv6 and IPv4.
- *     Return
- *             1 if set, 0 if not set.
+ *     Description
+ *             Set ECN (Explicit Congestion Notification) field of IP header
+ *             to **CE** (Congestion Encountered) if current value is **ECT**
+ *             (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ *             and IPv4.
+ *     Return
+ *             1 if the **CE** flag is set (either by the current helper call
+ *             or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ *     Description
+ *             Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ *             **bpf_sk_release**\ () is unnecessary and not allowed.
+ *     Return
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
+ *             case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -2465,7 +2530,8 @@ union bpf_attr {
        FN(spin_unlock),                \
        FN(sk_fullsock),                \
        FN(tcp_sock),                   \
-       FN(skb_ecn_set_ce),
+       FN(skb_ecn_set_ce),             \
+       FN(get_listener_sock),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
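The block above documents the new bpf_spin_lock()/bpf_spin_unlock() helpers and their restrictions. A minimal sketch of the intended usage from a BPF program, assuming a BTF-described BPF_MAP_TYPE_HASH whose value embeds the lock; the struct and function names are made up, the map definition and loading plumbing are omitted, and the helper stubs mimic the selftests' bpf_helpers.h style rather than coming from this patch:

    #include <linux/bpf.h>

    /* Value layout: struct bpf_spin_lock must be a top-level, 4-byte aligned
     * field, and only one lock per map element is allowed.
     */
    struct counter_val {
            struct bpf_spin_lock lock;
            __u64 packets;
    };

    /* Helper stubs in the bpf_helpers.h style; real programs include that
     * header instead of open-coding these.
     */
    static int (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
            (void *) BPF_FUNC_spin_lock;
    static int (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
            (void *) BPF_FUNC_spin_unlock;

    /* `val` is an element returned by bpf_map_lookup_elem() on that map. */
    static inline void bump_packets(struct counter_val *val)
    {
            bpf_spin_lock(&val->lock);
            val->packets++;                 /* other value fields are safe to update here */
            bpf_spin_unlock(&val->lock);    /* must be released on every path */
    }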
index a55cb8b10165abcf8a07d8228b590bbc1d8a0c08..e7ad9d350a283d81e89696e1bc42438030accc90 100644 (file)
@@ -292,10 +292,11 @@ struct sockaddr_in {
 #define        IN_LOOPBACK(a)          ((((long int) (a)) & 0xff000000) == 0x7f000000)
 
 /* Defines for Multicast INADDR */
-#define INADDR_UNSPEC_GROUP    0xe0000000U     /* 224.0.0.0   */
-#define INADDR_ALLHOSTS_GROUP  0xe0000001U     /* 224.0.0.1   */
-#define INADDR_ALLRTRS_GROUP    0xe0000002U    /* 224.0.0.2 */
-#define INADDR_MAX_LOCAL_GROUP  0xe00000ffU    /* 224.0.0.255 */
+#define INADDR_UNSPEC_GROUP            0xe0000000U     /* 224.0.0.0   */
+#define INADDR_ALLHOSTS_GROUP          0xe0000001U     /* 224.0.0.1   */
+#define INADDR_ALLRTRS_GROUP           0xe0000002U     /* 224.0.0.2 */
+#define INADDR_ALLSNOOPERS_GROUP       0xe000006aU     /* 224.0.0.106 */
+#define INADDR_MAX_LOCAL_GROUP         0xe00000ffU     /* 224.0.0.255 */
 #endif
 
 /* <asm/byteorder.h> contains the htonl type stuff.. */
index 61aaacf0cfa153bd8e798b2a028fa2832cc1ab93..5bf8e52c41fcaf2bb38127d4bb076a9164539ddb 100644 (file)
@@ -3,7 +3,7 @@
 
 BPF_VERSION = 0
 BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 1
+BPF_EXTRAVERSION = 2
 
 MAKEFLAGS += --no-print-directory
 
@@ -79,8 +79,6 @@ export prefix libdir src obj
 libdir_SQ = $(subst ','\'',$(libdir))
 libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
 
-LIB_FILE = libbpf.a libbpf.so
-
 VERSION                = $(BPF_VERSION)
 PATCHLEVEL     = $(BPF_PATCHLEVEL)
 EXTRAVERSION   = $(BPF_EXTRAVERSION)
@@ -88,7 +86,10 @@ EXTRAVERSION = $(BPF_EXTRAVERSION)
 OBJ            = $@
 N              =
 
-LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+
+LIB_TARGET     = libbpf.a libbpf.so.$(LIBBPF_VERSION)
+LIB_FILE       = libbpf.a libbpf.so*
 
 # Set compile option CFLAGS
 ifdef EXTRA_CFLAGS
@@ -128,16 +129,18 @@ all:
 export srctree OUTPUT CC LD CFLAGS V
 include $(srctree)/tools/build/Makefile.include
 
-BPF_IN    := $(OUTPUT)libbpf-in.o
-LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
-VERSION_SCRIPT := libbpf.map
+BPF_IN         := $(OUTPUT)libbpf-in.o
+VERSION_SCRIPT := libbpf.map
+
+LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
+LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
 
 GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
                           awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
 VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
                              grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
 
-CMD_TARGETS = $(LIB_FILE)
+CMD_TARGETS = $(LIB_TARGET)
 
 CXX_TEST_TARGET = $(OUTPUT)test_libbpf
 
@@ -170,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
        $(Q)$(MAKE) $(build)=libbpf
 
-$(OUTPUT)libbpf.so: $(BPF_IN)
-       $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \
-               $^ -o $@
+$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
+
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+       $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
+                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+       @ln -sf $(@F) $(OUTPUT)libbpf.so
+       @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
 
 $(OUTPUT)libbpf.a: $(BPF_IN)
        $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -192,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so
                exit 1;                                                  \
        fi
 
+define do_install_mkdir
+       if [ ! -d '$(DESTDIR_SQ)$1' ]; then             \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+       fi
+endef
+
 define do_install
        if [ ! -d '$(DESTDIR_SQ)$2' ]; then             \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
@@ -200,8 +213,9 @@ define do_install
 endef
 
 install_lib: all_cmd
-       $(call QUIET_INSTALL, $(LIB_FILE)) \
-               $(call do_install,$(LIB_FILE),$(libdir_SQ))
+       $(call QUIET_INSTALL, $(LIB_TARGET)) \
+               $(call do_install_mkdir,$(libdir_SQ)); \
+               cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
 
 install_headers:
        $(call QUIET_INSTALL, headers) \
@@ -219,7 +233,7 @@ config-clean:
 
 clean:
        $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
-               *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS
+               *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
 
index 5788479384cad11141cb77b1295eea26ca91bc09..cef7b77eab69507bcafa8cfe99b68cabb4f4ac78 100644 (file)
@@ -111,6 +111,7 @@ starting from ``0.0.1``.
 
 Every time ABI is being changed, e.g. because a new symbol is added or
 semantic of existing symbol is changed, ABI version should be bumped.
+The ABI version is bumped at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
index 1b8d8cdd35750f7b9fc4f2fffb044f300e6bade9..87e3020ac1bc8b3772d98ce58751fc2d6f979184 100644 (file)
@@ -1602,16 +1602,12 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
 /* Calculate type signature hash of ENUM. */
 static __u32 btf_hash_enum(struct btf_type *t)
 {
-       struct btf_enum *member = (struct btf_enum *)(t + 1);
-       __u32 vlen = BTF_INFO_VLEN(t->info);
-       __u32 h = btf_hash_common(t);
-       int i;
+       __u32 h;
 
-       for (i = 0; i < vlen; i++) {
-               h = hash_combine(h, member->name_off);
-               h = hash_combine(h, member->val);
-               member++;
-       }
+       /* don't hash vlen and enum members to support enum fwd resolving */
+       h = hash_combine(0, t->name_off);
+       h = hash_combine(h, t->info & ~0xffff);
+       h = hash_combine(h, t->size);
        return h;
 }
 
@@ -1637,6 +1633,22 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
        return true;
 }
 
+static inline bool btf_is_enum_fwd(struct btf_type *t)
+{
+       return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
+              BTF_INFO_VLEN(t->info) == 0;
+}
+
+static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
+{
+       if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
+               return btf_equal_enum(t1, t2);
+       /* ignore vlen when comparing */
+       return t1->name_off == t2->name_off &&
+              (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
+              t1->size == t2->size;
+}
+
 /*
  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
  * as referenced type IDs equivalence is established separately during type
@@ -1860,6 +1872,17 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
                                new_id = cand_node->type_id;
                                break;
                        }
+                       if (d->opts.dont_resolve_fwds)
+                               continue;
+                       if (btf_compat_enum(t, cand)) {
+                               if (btf_is_enum_fwd(t)) {
+                                       /* resolve fwd to full enum */
+                                       new_id = cand_node->type_id;
+                                       break;
+                               }
+                               /* resolve canonical enum fwd to full enum */
+                               d->map[cand_node->type_id] = type_id;
+                       }
                }
                break;
 
@@ -2084,15 +2107,15 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
                return fwd_kind == real_kind;
        }
 
-       if (cand_type->info != canon_type->info)
-               return 0;
-
        switch (cand_kind) {
        case BTF_KIND_INT:
                return btf_equal_int(cand_type, canon_type);
 
        case BTF_KIND_ENUM:
-               return btf_equal_enum(cand_type, canon_type);
+               if (d->opts.dont_resolve_fwds)
+                       return btf_equal_enum(cand_type, canon_type);
+               else
+                       return btf_compat_enum(cand_type, canon_type);
 
        case BTF_KIND_FWD:
                return btf_equal_common(cand_type, canon_type);
@@ -2103,6 +2126,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
        case BTF_KIND_PTR:
        case BTF_KIND_TYPEDEF:
        case BTF_KIND_FUNC:
+               if (cand_type->info != canon_type->info)
+                       return 0;
                return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
 
        case BTF_KIND_ARRAY: {
index d5b830d606010ad33fd780a514e3684f07d92506..11c25d9ea43124fc6e67fab5a7dc8b93f5dc9e4a 100644 (file)
@@ -112,6 +112,11 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
 #endif
 
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+       return (__u64) (unsigned long) ptr;
+}
+
 struct bpf_capabilities {
        /* v4.14: kernel support for program & map names. */
        __u32 name:1;
@@ -622,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
        bool strict = !(flags & MAPS_RELAX_COMPAT);
        int i, map_idx, map_def_sz, nr_maps = 0;
        Elf_Scn *scn;
-       Elf_Data *data;
+       Elf_Data *data = NULL;
        Elf_Data *symbols = obj->efile.symbols;
 
        if (obj->efile.maps_shndx < 0)
@@ -835,12 +840,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                        obj->efile.maps_shndx = idx;
                else if (strcmp(name, BTF_ELF_SEC) == 0) {
                        obj->btf = btf__new(data->d_buf, data->d_size);
-                       if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
+                       if (IS_ERR(obj->btf)) {
                                pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
                                           BTF_ELF_SEC, PTR_ERR(obj->btf));
-                               if (!IS_ERR(obj->btf))
-                                       btf__free(obj->btf);
                                obj->btf = NULL;
+                               continue;
+                       }
+                       err = btf__load(obj->btf);
+                       if (err) {
+                               pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n",
+                                          BTF_ELF_SEC, err);
+                               btf__free(obj->btf);
+                               obj->btf = NULL;
+                               err = 0;
                        }
                } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
                        btf_ext_data = data;
@@ -2999,3 +3011,249 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
        ring_buffer_write_tail(header, data_tail);
        return ret;
 }
+
+struct bpf_prog_info_array_desc {
+       int     array_offset;   /* e.g. offset of jited_prog_insns */
+       int     count_offset;   /* e.g. offset of jited_prog_len */
+       int     size_offset;    /* > 0: offset of rec size,
+                                * < 0: fix size of -size_offset
+                                */
+};
+
+static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
+       [BPF_PROG_INFO_JITED_INSNS] = {
+               offsetof(struct bpf_prog_info, jited_prog_insns),
+               offsetof(struct bpf_prog_info, jited_prog_len),
+               -1,
+       },
+       [BPF_PROG_INFO_XLATED_INSNS] = {
+               offsetof(struct bpf_prog_info, xlated_prog_insns),
+               offsetof(struct bpf_prog_info, xlated_prog_len),
+               -1,
+       },
+       [BPF_PROG_INFO_MAP_IDS] = {
+               offsetof(struct bpf_prog_info, map_ids),
+               offsetof(struct bpf_prog_info, nr_map_ids),
+               -(int)sizeof(__u32),
+       },
+       [BPF_PROG_INFO_JITED_KSYMS] = {
+               offsetof(struct bpf_prog_info, jited_ksyms),
+               offsetof(struct bpf_prog_info, nr_jited_ksyms),
+               -(int)sizeof(__u64),
+       },
+       [BPF_PROG_INFO_JITED_FUNC_LENS] = {
+               offsetof(struct bpf_prog_info, jited_func_lens),
+               offsetof(struct bpf_prog_info, nr_jited_func_lens),
+               -(int)sizeof(__u32),
+       },
+       [BPF_PROG_INFO_FUNC_INFO] = {
+               offsetof(struct bpf_prog_info, func_info),
+               offsetof(struct bpf_prog_info, nr_func_info),
+               offsetof(struct bpf_prog_info, func_info_rec_size),
+       },
+       [BPF_PROG_INFO_LINE_INFO] = {
+               offsetof(struct bpf_prog_info, line_info),
+               offsetof(struct bpf_prog_info, nr_line_info),
+               offsetof(struct bpf_prog_info, line_info_rec_size),
+       },
+       [BPF_PROG_INFO_JITED_LINE_INFO] = {
+               offsetof(struct bpf_prog_info, jited_line_info),
+               offsetof(struct bpf_prog_info, nr_jited_line_info),
+               offsetof(struct bpf_prog_info, jited_line_info_rec_size),
+       },
+       [BPF_PROG_INFO_PROG_TAGS] = {
+               offsetof(struct bpf_prog_info, prog_tags),
+               offsetof(struct bpf_prog_info, nr_prog_tags),
+               -(int)sizeof(__u8) * BPF_TAG_SIZE,
+       },
+
+};
+
+static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
+{
+       __u32 *array = (__u32 *)info;
+
+       if (offset >= 0)
+               return array[offset / sizeof(__u32)];
+       return -(int)offset;
+}
+
+static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
+{
+       __u64 *array = (__u64 *)info;
+
+       if (offset >= 0)
+               return array[offset / sizeof(__u64)];
+       return -(int)offset;
+}
+
+static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
+                                        __u32 val)
+{
+       __u32 *array = (__u32 *)info;
+
+       if (offset >= 0)
+               array[offset / sizeof(__u32)] = val;
+}
+
+static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
+                                        __u64 val)
+{
+       __u64 *array = (__u64 *)info;
+
+       if (offset >= 0)
+               array[offset / sizeof(__u64)] = val;
+}
+
+struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info info = {};
+       __u32 info_len = sizeof(info);
+       __u32 data_len = 0;
+       int i, err;
+       void *ptr;
+
+       if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
+               return ERR_PTR(-EINVAL);
+
+       /* step 1: get array dimensions */
+       err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+       if (err) {
+               pr_debug("can't get prog info: %s", strerror(errno));
+               return ERR_PTR(-EFAULT);
+       }
+
+       /* step 2: calculate total size of all arrays */
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               bool include_array = (arrays & (1UL << i)) > 0;
+               struct bpf_prog_info_array_desc *desc;
+               __u32 count, size;
+
+               desc = bpf_prog_info_array_desc + i;
+
+               /* kernel is too old to support this field */
+               if (info_len < desc->array_offset + sizeof(__u32) ||
+                   info_len < desc->count_offset + sizeof(__u32) ||
+                   (desc->size_offset > 0 && info_len < desc->size_offset))
+                       include_array = false;
+
+               if (!include_array) {
+                       arrays &= ~(1UL << i);  /* clear the bit */
+                       continue;
+               }
+
+               count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+               size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+
+               data_len += count * size;
+       }
+
+       /* step 3: allocate continuous memory */
+       data_len = roundup(data_len, sizeof(__u64));
+       info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
+       if (!info_linear)
+               return ERR_PTR(-ENOMEM);
+
+       /* step 4: fill data to info_linear->info */
+       info_linear->arrays = arrays;
+       memset(&info_linear->info, 0, sizeof(info));
+       ptr = info_linear->data;
+
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u32 count, size;
+
+               if ((arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc  = bpf_prog_info_array_desc + i;
+               count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+               size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+               bpf_prog_info_set_offset_u32(&info_linear->info,
+                                            desc->count_offset, count);
+               bpf_prog_info_set_offset_u32(&info_linear->info,
+                                            desc->size_offset, size);
+               bpf_prog_info_set_offset_u64(&info_linear->info,
+                                            desc->array_offset,
+                                            ptr_to_u64(ptr));
+               ptr += count * size;
+       }
+
+       /* step 5: call syscall again to get required arrays */
+       err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
+       if (err) {
+               pr_debug("can't get prog info: %s", strerror(errno));
+               free(info_linear);
+               return ERR_PTR(-EFAULT);
+       }
+
+       /* step 6: verify the data */
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u32 v1, v2;
+
+               if ((arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc = bpf_prog_info_array_desc + i;
+               v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+               v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+                                                  desc->count_offset);
+               if (v1 != v2)
+                       pr_warning("%s: mismatch in element count\n", __func__);
+
+               v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+               v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+                                                  desc->size_offset);
+               if (v1 != v2)
+                       pr_warning("%s: mismatch in rec size\n", __func__);
+       }
+
+       /* step 7: update info_len and data_len */
+       info_linear->info_len = sizeof(struct bpf_prog_info);
+       info_linear->data_len = data_len;
+
+       return info_linear;
+}
+
+void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
+{
+       int i;
+
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u64 addr, offs;
+
+               if ((info_linear->arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc = bpf_prog_info_array_desc + i;
+               addr = bpf_prog_info_read_offset_u64(&info_linear->info,
+                                                    desc->array_offset);
+               offs = addr - ptr_to_u64(info_linear->data);
+               bpf_prog_info_set_offset_u64(&info_linear->info,
+                                            desc->array_offset, offs);
+       }
+}
+
+void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
+{
+       int i;
+
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u64 addr, offs;
+
+               if ((info_linear->arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc = bpf_prog_info_array_desc + i;
+               offs = bpf_prog_info_read_offset_u64(&info_linear->info,
+                                                    desc->array_offset);
+               addr = offs + ptr_to_u64(info_linear->data);
+               bpf_prog_info_set_offset_u64(&info_linear->info,
+                                            desc->array_offset, addr);
+       }
+}
index b4652aa1a58adf6d8a42c70467d31e5d434f74f4..c70785cc8ef560165e323abcf9e83f8cc05eec31 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef __LIBBPF_LIBBPF_H
 #define __LIBBPF_LIBBPF_H
 
+#include <stdarg.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <stdbool.h>
@@ -377,6 +378,69 @@ LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
 LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
                                 enum bpf_prog_type prog_type, __u32 ifindex);
 
+/*
+ * Get bpf_prog_info in contiguous memory
+ *
+ * struct bpf_prog_info has multiple arrays. The user has the option to
+ * choose which arrays to fetch from the kernel. The following APIs provide
+ * a uniform way to fetch this data. All arrays in bpf_prog_info are stored
+ * in a single contiguous memory region, which makes it easy to store the
+ * info in a file.
+ *
+ * Before writing bpf_prog_info_linear to files, it is necessary to
+ * translate pointers in bpf_prog_info to offsets. Helper functions
+ * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
+ * are introduced to switch between pointers and offsets.
+ *
+ * Examples:
+ *   # To fetch map_ids and prog_tags:
+ *   __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
+ *           (1UL << BPF_PROG_INFO_PROG_TAGS);
+ *   struct bpf_prog_info_linear *info_linear =
+ *           bpf_program__get_prog_info_linear(fd, arrays);
+ *
+ *   # To save data in file
+ *   bpf_program__bpil_addr_to_offs(info_linear);
+ *   write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
+ *
+ *   # To read data from file
+ *   read(f, info_linear, <proper_size>);
+ *   bpf_program__bpil_offs_to_addr(info_linear);
+ */
+enum bpf_prog_info_array {
+       BPF_PROG_INFO_FIRST_ARRAY = 0,
+       BPF_PROG_INFO_JITED_INSNS = 0,
+       BPF_PROG_INFO_XLATED_INSNS,
+       BPF_PROG_INFO_MAP_IDS,
+       BPF_PROG_INFO_JITED_KSYMS,
+       BPF_PROG_INFO_JITED_FUNC_LENS,
+       BPF_PROG_INFO_FUNC_INFO,
+       BPF_PROG_INFO_LINE_INFO,
+       BPF_PROG_INFO_JITED_LINE_INFO,
+       BPF_PROG_INFO_PROG_TAGS,
+       BPF_PROG_INFO_LAST_ARRAY,
+};
+
+struct bpf_prog_info_linear {
+       /* size of struct bpf_prog_info, when the tool is compiled */
+       __u32                   info_len;
+	/* total bytes allocated for data, rounded up to 8 bytes */
+       __u32                   data_len;
+       /* which arrays are included in data */
+       __u64                   arrays;
+       struct bpf_prog_info    info;
+       __u8                    data[];
+};
+
+LIBBPF_API struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays);
+
+LIBBPF_API void
+bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
+
+LIBBPF_API void
+bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
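A minimal sketch of a consumer of the API added above, following the usage outlined in the header comment: it fetches the map id and prog tag arrays for an existing BPF program fd, translates the embedded pointers to offsets and writes the blob to a file. The dump_prog_info() helper, the prog_fd argument and the output path are assumptions for illustration, error handling is simplified, and IS_ERR() is the tools/include <linux/err.h> variant.

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/err.h>		/* IS_ERR() from tools/include */
#include "libbpf.h"		/* tools/lib/bpf/libbpf.h */

static int dump_prog_info(int prog_fd, const char *path)
{
	__u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
		       (1UL << BPF_PROG_INFO_PROG_TAGS);
	struct bpf_prog_info_linear *il;
	int fd, ret = 0;

	il = bpf_program__get_prog_info_linear(prog_fd, arrays);
	if (IS_ERR(il) || !il)
		return -1;

	/* translate pointers to offsets before the blob leaves this process */
	bpf_program__bpil_addr_to_offs(il);

	fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
	if (fd < 0 || write(fd, il, sizeof(*il) + il->data_len) < 0)
		ret = -1;
	if (fd >= 0)
		close(fd);
	free(il);
	return ret;
}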
index 778a26702a707882e126d7d347a13250d79d70a7..f3ce50500cf2985edae7202b5dbf09b63c402d3f 100644 (file)
@@ -153,4 +153,7 @@ LIBBPF_0.0.2 {
                xsk_socket__delete;
                xsk_umem__fd;
                xsk_socket__fd;
+               bpf_program__get_prog_info_linear;
+               bpf_program__bpil_addr_to_offs;
+               bpf_program__bpil_offs_to_addr;
 } LIBBPF_0.0.1;
index f98ac82c9aea51fa5e0d9314c8673f0c0771e0a9..8d0078b65486f45730f3d967c84a6709afb23cdb 100644 (file)
@@ -126,8 +126,8 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
        cfg->frame_headroom = usr_cfg->frame_headroom;
 }
 
-static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
-                                     const struct xsk_socket_config *usr_cfg)
+static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
+                                    const struct xsk_socket_config *usr_cfg)
 {
        if (!usr_cfg) {
                cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
@@ -135,14 +135,19 @@ static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
                cfg->libbpf_flags = 0;
                cfg->xdp_flags = 0;
                cfg->bind_flags = 0;
-               return;
+               return 0;
        }
 
+       if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
+               return -EINVAL;
+
        cfg->rx_size = usr_cfg->rx_size;
        cfg->tx_size = usr_cfg->tx_size;
        cfg->libbpf_flags = usr_cfg->libbpf_flags;
        cfg->xdp_flags = usr_cfg->xdp_flags;
        cfg->bind_flags = usr_cfg->bind_flags;
+
+       return 0;
 }
 
 int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
@@ -557,7 +562,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
        }
        strncpy(xsk->ifname, ifname, IFNAMSIZ);
 
-       xsk_set_xdp_socket_config(&xsk->config, usr_config);
+       err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+       if (err)
+               goto out_socket;
 
        if (rx) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
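With this change, a caller that supplies its own xsk_socket_config gets -EINVAL back for any libbpf_flags bit other than XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD. A hedged sketch of a configuration that still passes the new validation (field names are taken from the hunk above; the default-descriptor constants are assumed from xsk.h, and the struct is passed as the last argument of xsk_socket__create()):

	struct xsk_socket_config cfg = {
		.rx_size      = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size      = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		/* the only flag accepted here; any other bit now yields -EINVAL */
		.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
		.xdp_flags    = 0,
		.bind_flags   = 0,
	};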
index 0414a0d522621d4ca973240979e89c07d6cd4f8a..5dde107083c60bc8dd12538a2838de73c8b1470c 100644 (file)
@@ -2184,9 +2184,10 @@ static void cleanup(struct objtool_file *file)
        elf_close(file->elf);
 }
 
+static struct objtool_file file;
+
 int check(const char *_objname, bool orc)
 {
-       struct objtool_file file;
        int ret, warnings = 0;
 
        objname = _objname;
index f6fc6507ba55ecade8065382a10723f67f914529..3766886c4bca3d967ed2e714982be5016c4816f4 100644 (file)
@@ -47,3 +47,27 @@ Those objects are then used in final linking:
 
 NOTE this description is omitting other libraries involved, only
      focusing on build framework outcomes
+
+3) Build with ASan or UBSan
+===========================
+  $ cd tools/perf
+  $ make DESTDIR=/usr
+  $ make DESTDIR=/usr install
+
+AddressSanitizer (or ASan) is a GCC feature that detects memory corruption bugs
+such as buffer overflows and memory leaks.
+
+  $ cd tools/perf
+  $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
+  $ ASAN_OPTIONS=log_path=asan.log ./perf record -a
+
+ASan outputs all detected issues into a log file named 'asan.log.<pid>'.
+
+UndefinedBehaviorSanitizer (or UBSan) is a fast undefined behavior detector
+supported by GCC. UBSan detects undefined behavior in programs at runtime.
+
+  $ cd tools/perf
+  $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=undefined'
+  $ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
+
+If UBSan detects any problem at runtime, it outputs a “runtime error:” message.
index 86f3dcc15f8375726f0ed90d333e79e3cfadb669..462b3cde067546dae83fdf7930621e6e67450974 100644 (file)
@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
 
        [report]
                # Defaults
-               sort-order = comm,dso,symbol
+               sort_order = comm,dso,symbol
                percent-limit = 0
                queue-size = 0
                children = true
@@ -584,6 +584,20 @@ llvm.*::
        llvm.opts::
                Options passed to llc.
 
+samples.*::
+
+       samples.context::
+		Define how many nanoseconds worth of time to show
+		around samples in the perf report sample context browser.
+
+scripts.*::
+
+	Any option under this section defines a script that is added to the
+	scripts menu in the interactive perf browser and whose output is
+	displayed. The name of the option is the menu entry name, the value
+	is the script command line. The script is passed the same options as
+	a full perf script invocation, in particular -i with the perf data
+	file, --cpu and --tid; see the example fragment below.
+
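An illustrative ~/.perfconfig fragment for the two new sections; the entry name and command line under [scripts] are hypothetical, any name/command pair works as described above:

	[samples]
		# nanoseconds of context around each sample in the sample browser
		context = 100000

	[scripts]
		# shows up as "syscall-summary" in the interactive scripts menu
		syscall-summary = perf script -F comm,pid,time,event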
 SEE ALSO
 --------
 linkperf:perf[1]
index 8f0c2be34848f1a382722be4fa2ff25df7bdbace..8fe4dffcadd0e12df00bce7078edb0bef449e904 100644 (file)
@@ -495,6 +495,10 @@ overhead. You can still switch them on with:
 
   --switch-output --no-no-buildid  --no-no-buildid-cache
 
+--switch-max-files=N::
+
+When rotating perf.data with --switch-output, keep only the N most recent
+files, as in the example below.
+
 --dry-run::
 Parse options then exit. --dry-run can be used to detect errors in cmdline
 options.
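An illustrative combination with a time based --switch-output (the values are examples only):

  perf record -a --switch-output=30s --switch-max-files=4

rotates the output every 30 seconds and keeps only the four most recent perf.data.<timestamp> files.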
index 1a27bfe05039f8284abea73947759fcccce9ecab..f441baa794ce826eff34b24d07796847c4a65127 100644 (file)
@@ -105,6 +105,8 @@ OPTIONS
        guest machine
        - sample: Number of sample
        - period: Raw number of event count of sample
+	- time: Separate the samples by time stamp with the resolution specified by
+	--time-quantum (default 100ms). Specify it together with the overhead key
+	and before it, as in the example below.
 
        By default, comm, dso and symbol keys are used.
        (i.e. --sort comm,dso,symbol)
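An illustrative command line for the time key (the quantum value is an example only):

  perf report --sort time,overhead,sym --time-quantum 10ms

buckets the overhead into 10 millisecond slices.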
@@ -459,6 +461,10 @@ include::itrace.txt[]
 --socket-filter::
        Only report the samples on the processor socket that match with this filter
 
+--samples=N::
+	Save N individual samples for each histogram entry to show context in the
+	perf report TUI browser (see the example below).
+
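An illustrative invocation:

  perf report --samples 10

saves ten samples per histogram entry, which can then be opened from the TUI context menu.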
 --raw-trace::
        When displaying traceevent output, do not use print fmt or plugins.
 
@@ -477,6 +483,9 @@ include::itrace.txt[]
        Please note that not all mmaps are stored, options affecting which ones
        are include 'perf record --data', for instance.
 
+--ns::
+       Show time stamps in nanoseconds.
+
 --stats::
        Display overall events statistics without any further processing.
        (like the one at the end of the perf report -D command)
@@ -494,6 +503,10 @@ include::itrace.txt[]
        The period/hits keywords set the base the percentage is computed
        on - the samples period or the number of samples (hits).
 
+--time-quantum::
+       Configure time quantum for time sort key. Default 100ms.
+       Accepts s, us, ms, ns units.
+
 include::callchain-overhead-calculation.txt[]
 
 SEE ALSO
index 2e19fd7ffe35135a94fb55f66ea367171cc3fb2d..9b0d04dd2a615827593c8b60d449a97c488cfe78 100644 (file)
@@ -380,6 +380,9 @@ include::itrace.txt[]
        Set the maximum number of program blocks to print with brstackasm for
        each sample.
 
+--reltime::
+       Print time stamps relative to trace start.
+
 --per-event-dump::
        Create per event files with a "perf.data.EVENT.dump" name instead of
         printing to stdout, useful, for instance, for generating flamegraphs.
index 4bc2085e5197a2ea59ecb18820a379ddc1034c85..39c05f89104e78dc6b577ee2d2a4a3b20729539e 100644 (file)
@@ -72,9 +72,8 @@ report::
 --all-cpus::
         system-wide collection from all CPUs (default if no target is specified)
 
--c::
---scale::
-       scale/normalize counter values
+--no-scale::
+       Don't scale/normalize counter values
 
 -d::
 --detailed::
index 849599f39c5e9c128fc21b888a07fa77108a46a1..869965d629ce0a1e7940a03288b905e70446a53c 100644 (file)
@@ -15,6 +15,7 @@ To see callchains in a more compact form: perf report -g folded
 Show individual samples with: perf script
 Limit to show entries above 5% only: perf report --percent-limit 5
 Profiling branch (mis)predictions with: perf record -b / perf report
+To show assembler sample contexts use perf record -b / perf script -F +brstackinsn --xed
 Treat branches as callchains: perf report --branch-history
 To count events in every 1000 msec: perf stat -I 1000
 Print event counts in CSV format with: perf stat -x,
@@ -34,3 +35,9 @@ Show current config key-value pairs: perf config --list
 Show user configuration overrides: perf config --user --list
 To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
 To report cacheline events from previous recording: perf c2c report
+To browse sample contexts use perf report --sample 10 and select in context menu
+To separate samples by time use perf report --sort time,overhead,sym
+To set sample time separation other than 100ms with --sort time use --time-quantum
+Add -I to perf record to sample register values visible in perf report sample context.
+To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context
+To show context switches in perf report sample context add --switch-events to perf record.
index 0f11d5891301ad17cf7b2e3d572464e670abb03f..fe3f97e342fae6789d09a2fb8cd9a78fe7e72b2b 100644 (file)
@@ -227,6 +227,8 @@ FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
 
 FEATURE_CHECK_LDFLAGS-libaio = -lrt
 
+FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes
+
 CFLAGS += -fno-omit-frame-pointer
 CFLAGS += -ggdb3
 CFLAGS += -funwind-tables
@@ -713,7 +715,7 @@ else
 endif
 
 ifeq ($(feature-libbfd), 1)
-  EXTLIBS += -lbfd
+  EXTLIBS += -lbfd -lopcodes
 else
   # we are on a system that requires -liberty and (maybe) -lz
   # to link against -lbfd; test each case individually here
@@ -724,12 +726,15 @@ else
   $(call feature_check,libbfd-liberty-z)
 
   ifeq ($(feature-libbfd-liberty), 1)
-    EXTLIBS += -lbfd -liberty
+    EXTLIBS += -lbfd -lopcodes -liberty
+    FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
   else
     ifeq ($(feature-libbfd-liberty-z), 1)
-      EXTLIBS += -lbfd -liberty -lz
+      EXTLIBS += -lbfd -lopcodes -liberty -lz
+      FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
     endif
   endif
+  $(call feature_check,disassembler-four-args)
 endif
 
 ifdef NO_DEMANGLE
@@ -808,6 +813,10 @@ ifdef HAVE_KVM_STAT_SUPPORT
     CFLAGS += -DHAVE_KVM_STAT_SUPPORT
 endif
 
+ifeq ($(feature-disassembler-four-args), 1)
+    CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 ifeq (${IS_64_BIT}, 1)
   ifndef NO_PERF_READ_VDSO32
     $(call feature_check,compile-32)
index f0b1709a5ffb2b0901d7f2492252876d17bc25a0..2ae92fddb6d5f336de25e36e61b8775e0b797c50 100644 (file)
 332    common  statx                   __x64_sys_statx
 333    common  io_pgetevents           __x64_sys_io_pgetevents
 334    common  rseq                    __x64_sys_rseq
+# don't use numbers 387 through 423, add new calls after the last
+# 'common' entry
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 520    x32     execve                  __x32_compat_sys_execve/ptregs
 521    x32     ptrace                  __x32_compat_sys_ptrace
 522    x32     rt_sigpending           __x32_compat_sys_rt_sigpending
-523    x32     rt_sigtimedwait         __x32_compat_sys_rt_sigtimedwait
+523    x32     rt_sigtimedwait         __x32_compat_sys_rt_sigtimedwait_time64
 524    x32     rt_sigqueueinfo         __x32_compat_sys_rt_sigqueueinfo
 525    x32     sigaltstack             __x32_compat_sys_sigaltstack
 526    x32     timer_create            __x32_compat_sys_timer_create
 534    x32     preadv                  __x32_compat_sys_preadv64
 535    x32     pwritev                 __x32_compat_sys_pwritev64
 536    x32     rt_tgsigqueueinfo       __x32_compat_sys_rt_tgsigqueueinfo
-537    x32     recvmmsg                __x32_compat_sys_recvmmsg
+537    x32     recvmmsg                __x32_compat_sys_recvmmsg_time64
 538    x32     sendmmsg                __x32_compat_sys_sendmmsg
 539    x32     process_vm_readv        __x32_compat_sys_process_vm_readv
 540    x32     process_vm_writev       __x32_compat_sys_process_vm_writev
index 7aab0be5fc5ffb8fc09081b2617cbeccae9dfc42..47f9c56e744f8c3751e6cf22ec9f738182ea8370 100644 (file)
@@ -14,5 +14,6 @@ perf-$(CONFIG_LOCAL_LIBUNWIND)    += unwind-libunwind.o
 perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
 
 perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += archinsn.o
 perf-$(CONFIG_AUXTRACE) += intel-pt.o
 perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
new file mode 100644 (file)
index 0000000..4237bb2
--- /dev/null
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "perf.h"
+#include "archinsn.h"
+#include "util/intel-pt-decoder/insn.h"
+#include "machine.h"
+#include "thread.h"
+#include "symbol.h"
+
+void arch_fetch_insn(struct perf_sample *sample,
+                    struct thread *thread,
+                    struct machine *machine)
+{
+       struct insn insn;
+       int len;
+       bool is64bit = false;
+
+       if (!sample->ip)
+               return;
+       len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
+       if (len <= 0)
+               return;
+       insn_init(&insn, sample->insn, len, is64bit);
+       insn_get_length(&insn);
+       if (insn_complete(&insn) && insn.length <= len)
+               sample->insn_len = insn.length;
+}
index 0c0a6e824934149997c64190fff22a0f8338f6d1..2af067859966599cf2df1b96ab30100311c61a05 100644 (file)
@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
        pthread_attr_t thread_attr, *attrp = NULL;
        cpu_set_t cpuset;
        unsigned int i, j;
-       int ret;
+       int ret = 0;
 
        if (!noaffinity)
                pthread_attr_init(&thread_attr);
index 5a11534e96a0c52cf15fcda34b55e8126010b479..fe85448abd454b373828ff2d0a6824356e9fff86 100644 (file)
@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
        pthread_attr_t thread_attr, *attrp = NULL;
        cpu_set_t cpuset;
        unsigned int i, j;
-       int ret, events = EPOLLIN;
+       int ret = 0, events = EPOLLIN;
 
        if (oneshot)
                events |= EPOLLONESHOT;
index c9f98d00c0e998292d334c92bcfa4186a74dd6bc..a8394b4f116746250d5ee5bbb7339e12a54ca5e9 100644 (file)
@@ -119,7 +119,7 @@ int cmd_list(int argc, const char **argv)
                                                details_flag);
                        print_tracepoint_events(NULL, s, raw_dump);
                        print_sdt_events(NULL, s, raw_dump);
-                       metricgroup__print(true, true, NULL, raw_dump, details_flag);
+                       metricgroup__print(true, true, s, raw_dump, details_flag);
                        free(s);
                }
        }
index f3f7f310033663f5b61752cfb6c835b280f8436e..4e2d953d4bc58d158c4b7d135bdcee8e079b84be 100644 (file)
@@ -62,6 +62,9 @@ struct switch_output {
        unsigned long    time;
        const char      *str;
        bool             set;
+       char             **filenames;
+       int              num_files;
+       int              cur_file;
 };
 
 struct record {
@@ -392,7 +395,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
        size_t padding;
        u8 pad[8] = {0};
 
-       if (!perf_data__is_pipe(data)) {
+       if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
                off_t file_offset;
                int fd = perf_data__fd(data);
                int err;
@@ -837,6 +840,8 @@ static void record__init_features(struct record *rec)
        if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
                perf_header__clear_feat(&session->header, HEADER_CLOCKID);
 
+       perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+
        perf_header__clear_feat(&session->header, HEADER_STAT);
 }
 
@@ -890,6 +895,7 @@ record__switch_output(struct record *rec, bool at_exit)
 {
        struct perf_data *data = &rec->data;
        int fd, err;
+       char *new_filename;
 
        /* Same Size:      "2015122520103046"*/
        char timestamp[] = "InvalidTimestamp";
@@ -910,7 +916,7 @@ record__switch_output(struct record *rec, bool at_exit)
 
        fd = perf_data__switch(data, timestamp,
                                    rec->session->header.data_offset,
-                                   at_exit);
+                                   at_exit, &new_filename);
        if (fd >= 0 && !at_exit) {
                rec->bytes_written = 0;
                rec->session->header.data_size = 0;
@@ -920,6 +926,21 @@ record__switch_output(struct record *rec, bool at_exit)
                fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
                        data->path, timestamp);
 
+       if (rec->switch_output.num_files) {
+               int n = rec->switch_output.cur_file + 1;
+
+               if (n >= rec->switch_output.num_files)
+                       n = 0;
+               rec->switch_output.cur_file = n;
+               if (rec->switch_output.filenames[n]) {
+                       remove(rec->switch_output.filenames[n]);
+                       free(rec->switch_output.filenames[n]);
+               }
+               rec->switch_output.filenames[n] = new_filename;
+       } else {
+               free(new_filename);
+       }
+
        /* Output tracking events */
        if (!at_exit) {
                record__synthesize(rec, false);
@@ -1093,7 +1114,7 @@ static int record__synthesize(struct record *rec, bool tail)
                return err;
        }
 
-       err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
+       err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
                                                machine, opts);
        if (err < 0)
                pr_warning("Couldn't synthesize bpf events.\n");
@@ -1116,6 +1137,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        struct perf_data *data = &rec->data;
        struct perf_session *session;
        bool disabled = false, draining = false;
+       struct perf_evlist *sb_evlist = NULL;
        int fd;
 
        atexit(record__sig_exit);
@@ -1216,6 +1238,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                goto out_child;
        }
 
+       if (!opts->no_bpf_event)
+               bpf_event__add_sb_event(&sb_evlist, &session->header.env);
+
+       if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
+               pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+               opts->no_bpf_event = true;
+       }
+
        err = record__synthesize(rec, false);
        if (err < 0)
                goto out_child;
@@ -1466,6 +1496,9 @@ out_child:
 
 out_delete_session:
        perf_session__delete(session);
+
+       if (!opts->no_bpf_event)
+               perf_evlist__stop_sb_thread(sb_evlist);
        return status;
 }
 
@@ -1870,7 +1903,7 @@ static struct option __record_options[] = {
        OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
                    "synthesize non-sample events at the end of output"),
        OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
-       OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
+	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
        OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
                    "Fail if the specified frequency can't be used"),
        OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1968,9 +2001,11 @@ static struct option __record_options[] = {
        OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
                    "Record timestamp boundary (time of first/last samples)"),
        OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
-                         &record.switch_output.set, "signal,size,time",
-                         "Switch output when receive SIGUSR2 or cross size,time threshold",
+                         &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
+                         "Switch output when receiving SIGUSR2 (signal) or crossing a size or time threshold",
                          "signal"),
+       OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
+                  "Limit number of switch output generated files"),
        OPT_BOOLEAN(0, "dry-run", &dry_run,
                    "Parse options then exit"),
 #ifdef HAVE_AIO_SUPPORT
@@ -2057,6 +2092,13 @@ int cmd_record(int argc, const char **argv)
                alarm(rec->switch_output.time);
        }
 
+       if (rec->switch_output.num_files) {
+               rec->switch_output.filenames = calloc(sizeof(char *),
+                                                     rec->switch_output.num_files);
+               if (!rec->switch_output.filenames)
+                       return -EINVAL;
+       }
+
        /*
         * Allow aliases to facilitate the lookup of symbols for address
         * filters. Refer to auxtrace_parse_filters().
index ee93c18a6685c1c3ed3bcb8a5d5c71e1720ad1e8..4054eb1f98ac19d956cf680dfb84dcf13d509db5 100644 (file)
 #include <errno.h>
 #include <inttypes.h>
 #include <regex.h>
+#include "sane_ctype.h"
 #include <signal.h>
 #include <linux/bitmap.h>
 #include <linux/stringify.h>
+#include <linux/time64.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
@@ -926,6 +928,43 @@ report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
        return parse_callchain_report_opt(arg);
 }
 
+static int
+parse_time_quantum(const struct option *opt, const char *arg,
+                  int unset __maybe_unused)
+{
+       unsigned long *time_q = opt->value;
+       char *end;
+
+       *time_q = strtoul(arg, &end, 0);
+       if (end == arg)
+               goto parse_err;
+       if (*time_q == 0) {
+               pr_err("time quantum cannot be 0");
+               return -1;
+       }
+       while (isspace(*end))
+               end++;
+       if (*end == 0)
+               return 0;
+       if (!strcmp(end, "s")) {
+               *time_q *= NSEC_PER_SEC;
+               return 0;
+       }
+       if (!strcmp(end, "ms")) {
+               *time_q *= NSEC_PER_MSEC;
+               return 0;
+       }
+       if (!strcmp(end, "us")) {
+               *time_q *= NSEC_PER_USEC;
+               return 0;
+       }
+       if (!strcmp(end, "ns"))
+               return 0;
+parse_err:
+       pr_err("Cannot parse time quantum `%s'\n", arg);
+       return -1;
+}
+
 int
 report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
                                const char *arg, int unset __maybe_unused)
@@ -1044,10 +1083,9 @@ int cmd_report(int argc, const char **argv)
        OPT_BOOLEAN(0, "header-only", &report.header_only,
                    "Show only data header."),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
-                  "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
-                  " Please refer the man page for the complete list."),
+                  sort_help("sort by key(s):")),
        OPT_STRING('F', "fields", &field_order, "key[,keys...]",
-                  "output field(s): overhead, period, sample plus all of sort keys"),
+                  sort_help("output field(s): overhead period sample ")),
        OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
                    "Show sample percentage for different cpu modes"),
        OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1120,6 +1158,8 @@ int cmd_report(int argc, const char **argv)
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
        OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+       OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
+                   "Number of samples to save per histogram entry for individual browsing"),
        OPT_CALLBACK(0, "percent-limit", &report, "percent",
                     "Don't show entries under that percent", parse_percent_limit),
        OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1147,6 +1187,10 @@ int cmd_report(int argc, const char **argv)
        OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
                     "Set percent type local/global-period/hits",
                     annotate_parse_percent_type),
+       OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
+       OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
+                    "Set time quantum for time sort key (default 100ms)",
+                    parse_time_quantum),
        OPT_END()
        };
        struct perf_data data = {
index 53f78cf3113f9ed5fe4a9d9de42fdf2a3a0b9d6e..61cfd8f70989235de6112e114925d08831b505dc 100644 (file)
 #include "util/time-utils.h"
 #include "util/path.h"
 #include "print_binary.h"
+#include "archinsn.h"
 #include <linux/bitmap.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 #include <linux/time64.h>
+#include <sys/utsname.h>
 #include "asm/bug.h"
 #include "util/mem-events.h"
 #include "util/dump-insn.h"
@@ -51,6 +53,8 @@
 
 static char const              *script_name;
 static char const              *generate_script_lang;
+static bool                    reltime;
+static u64                     initial_time;
 static bool                    debug_mode;
 static u64                     last_timestamp;
 static u64                     nr_unordered;
@@ -58,11 +62,11 @@ static bool                 no_callchain;
 static bool                    latency_format;
 static bool                    system_wide;
 static bool                    print_flags;
-static bool                    nanosecs;
 static const char              *cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 static struct perf_stat_config stat_config;
 static int                     max_blocks;
+static bool                    native_arch;
 
 unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
 
@@ -684,15 +688,21 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
        }
 
        if (PRINT_FIELD(TIME)) {
-               nsecs = sample->time;
+               u64 t = sample->time;
+               if (reltime) {
+                       if (!initial_time)
+                               initial_time = sample->time;
+                       t = sample->time - initial_time;
+               }
+               nsecs = t;
                secs = nsecs / NSEC_PER_SEC;
                nsecs -= secs * NSEC_PER_SEC;
 
-               if (nanosecs)
+               if (symbol_conf.nanosecs)
                        printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
                else {
                        char sample_time[32];
-                       timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
+                       timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
                        printed += fprintf(fp, "%12s: ", sample_time);
                }
        }
@@ -1227,6 +1237,12 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
        return len + dlen;
 }
 
+__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
+                           struct thread *thread __maybe_unused,
+                           struct machine *machine __maybe_unused)
+{
+}
+
 static int perf_sample__fprintf_insn(struct perf_sample *sample,
                                     struct perf_event_attr *attr,
                                     struct thread *thread,
@@ -1234,9 +1250,12 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
 {
        int printed = 0;
 
+       if (sample->insn_len == 0 && native_arch)
+               arch_fetch_insn(sample, thread, machine);
+
        if (PRINT_FIELD(INSNLEN))
                printed += fprintf(fp, " ilen: %d", sample->insn_len);
-       if (PRINT_FIELD(INSN)) {
+       if (PRINT_FIELD(INSN) && sample->insn_len) {
                int i;
 
                printed += fprintf(fp, " insn:");
@@ -1922,6 +1941,13 @@ static int cleanup_scripting(void)
        return scripting_ops ? scripting_ops->stop_script() : 0;
 }
 
+static bool filter_cpu(struct perf_sample *sample)
+{
+       if (cpu_list)
+               return !test_bit(sample->cpu, cpu_bitmap);
+       return false;
+}
+
 static int process_sample_event(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
@@ -1956,7 +1982,7 @@ static int process_sample_event(struct perf_tool *tool,
        if (al.filtered)
                goto out_put;
 
-       if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+       if (filter_cpu(sample))
                goto out_put;
 
        if (scripting_ops)
@@ -2041,9 +2067,11 @@ static int process_comm_event(struct perf_tool *tool,
                sample->tid = event->comm.tid;
                sample->pid = event->comm.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
                                   PERF_RECORD_COMM, stdout);
-       perf_event__fprintf(event, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        ret = 0;
 out:
        thread__put(thread);
@@ -2077,9 +2105,11 @@ static int process_namespaces_event(struct perf_tool *tool,
                sample->tid = event->namespaces.tid;
                sample->pid = event->namespaces.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_NAMESPACES, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_NAMESPACES, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        ret = 0;
 out:
        thread__put(thread);
@@ -2111,9 +2141,11 @@ static int process_fork_event(struct perf_tool *tool,
                sample->tid = event->fork.tid;
                sample->pid = event->fork.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_FORK, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_FORK, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
 
        return 0;
@@ -2141,9 +2173,11 @@ static int process_exit_event(struct perf_tool *tool,
                sample->tid = event->fork.tid;
                sample->pid = event->fork.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_EXIT, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_EXIT, stdout);
+               perf_event__fprintf(event, stdout);
+       }
 
        if (perf_event__process_exit(tool, event, sample, machine) < 0)
                err = -1;
@@ -2177,9 +2211,11 @@ static int process_mmap_event(struct perf_tool *tool,
                sample->tid = event->mmap.tid;
                sample->pid = event->mmap.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_MMAP, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_MMAP, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2209,9 +2245,11 @@ static int process_mmap2_event(struct perf_tool *tool,
                sample->tid = event->mmap2.tid;
                sample->pid = event->mmap2.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_MMAP2, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_MMAP2, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2236,9 +2274,11 @@ static int process_switch_event(struct perf_tool *tool,
                return -1;
        }
 
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_SWITCH, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_SWITCH, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2259,9 +2299,11 @@ process_lost_event(struct perf_tool *tool,
        if (thread == NULL)
                return -1;
 
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_LOST, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_LOST, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2948,7 +2990,8 @@ static int check_ev_match(char *dir_name, char *scriptname,
  * will list all statically runnable scripts, select one, execute it and
  * show the output in a perf browser.
  */
-int find_scripts(char **scripts_array, char **scripts_path_array)
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+                int pathlen)
 {
        struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
@@ -2993,7 +3036,10 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
                        /* Skip those real time scripts: xxxtop.p[yl] */
                        if (strstr(script_dirent->d_name, "top."))
                                continue;
-                       sprintf(scripts_path_array[i], "%s/%s", lang_path,
+                       if (i >= num)
+                               break;
+                       snprintf(scripts_path_array[i], pathlen, "%s/%s",
+                               lang_path,
                                script_dirent->d_name);
                        temp = strchr(script_dirent->d_name, '.');
                        snprintf(scripts_array[i],
@@ -3232,7 +3278,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
 {
        parse_output_fields(NULL, "+insn,-event,-period", 0);
        itrace_parse_synth_opts(opt, "i0ns", 0);
-       nanosecs = true;
+       symbol_conf.nanosecs = true;
        return 0;
 }
 
@@ -3250,7 +3296,7 @@ static int parse_call_trace(const struct option *opt __maybe_unused,
 {
        parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
        itrace_parse_synth_opts(opt, "cewp", 0);
-       nanosecs = true;
+       symbol_conf.nanosecs = true;
        return 0;
 }
 
@@ -3260,7 +3306,7 @@ static int parse_callret_trace(const struct option *opt __maybe_unused,
 {
        parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
        itrace_parse_synth_opts(opt, "crewp", 0);
-       nanosecs = true;
+       symbol_conf.nanosecs = true;
        return 0;
 }
 
@@ -3277,6 +3323,7 @@ int cmd_script(int argc, const char **argv)
                .set = false,
                .default_no_sample = true,
        };
+       struct utsname uts;
        char *script_path = NULL;
        const char **__argv;
        int i, j, err = 0;
@@ -3374,6 +3421,7 @@ int cmd_script(int argc, const char **argv)
                     "Set the maximum stack depth when parsing the callchain, "
                     "anything beyond the specified depth will be ignored. "
                     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+       OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
        OPT_BOOLEAN('I', "show-info", &show_full_info,
                    "display extended information from perf.data file"),
        OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -3395,7 +3443,7 @@ int cmd_script(int argc, const char **argv)
        OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
        OPT_INTEGER(0, "max-blocks", &max_blocks,
                    "Maximum number of code blocks to dump with brstackinsn"),
-       OPT_BOOLEAN(0, "ns", &nanosecs,
+       OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
                    "Use 9 decimal places when displaying time"),
        OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
                            "Instruction Tracing options\n" ITRACE_HELP,
@@ -3448,6 +3496,11 @@ int cmd_script(int argc, const char **argv)
                }
        }
 
+       if (script.time_str && reltime) {
+               fprintf(stderr, "Don't combine --reltime with --time\n");
+               return -1;
+       }
+
        if (itrace_synth_opts.callchain &&
            itrace_synth_opts.callchain_sz > scripting_max_stack)
                scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3615,6 +3668,12 @@ int cmd_script(int argc, const char **argv)
        if (symbol__init(&session->header.env) < 0)
                goto out_delete;
 
+       uname(&uts);
+       if (!strcmp(uts.machine, session->header.env.arch) ||
+           (!strcmp(uts.machine, "x86_64") &&
+            !strcmp(session->header.env.arch, "i386")))
+               native_arch = true;
+
        script.session = session;
        script__setup_sample_type(&script);
 
index 7b8f09b0b8bf7139463fb8d8736346c73f3cfe65..49ee3c2033ecbd8df8408445f141c8312f7efc44 100644 (file)
@@ -718,7 +718,8 @@ static struct option stat_options[] = {
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('g', "group", &group,
                    "put the counters into a counter group"),
-       OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
+       OPT_BOOLEAN(0, "scale", &stat_config.scale,
+                   "Use --no-scale to disable counter scaling for multiplexing"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &stat_config.run_count,
index 231a90daa958131e132fb79ef7f7d4b0f3d10ff0..1999d6533d12a35e672e4caf8c98e24010191bcf 100644 (file)
@@ -1189,30 +1189,26 @@ static int __cmd_top(struct perf_top *top)
        pthread_t thread, thread_process;
        int ret;
 
-       top->session = perf_session__new(NULL, false, NULL);
-       if (top->session == NULL)
-               return -1;
-
        if (!top->annotation_opts.objdump_path) {
                ret = perf_env__lookup_objdump(&top->session->header.env,
                                               &top->annotation_opts.objdump_path);
                if (ret)
-                       goto out_delete;
+                       return ret;
        }
 
        ret = callchain_param__setup_sample_type(&callchain_param);
        if (ret)
-               goto out_delete;
+               return ret;
 
        if (perf_session__register_idle_thread(top->session) < 0)
-               goto out_delete;
+               return ret;
 
        if (top->nr_threads_synthesize > 1)
                perf_set_multithreaded();
 
        init_process_thread(top);
 
-       ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process,
+       ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
                                                &top->session->machines.host,
                                                &top->record_opts);
        if (ret < 0)
@@ -1227,13 +1223,18 @@ static int __cmd_top(struct perf_top *top)
 
        if (perf_hpp_list.socket) {
                ret = perf_env__read_cpu_topology_map(&perf_env);
-               if (ret < 0)
-                       goto out_err_cpu_topo;
+               if (ret < 0) {
+                       char errbuf[BUFSIZ];
+                       const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
+
+                       ui__error("Could not read the CPU topology map: %s\n", err);
+                       return ret;
+               }
        }
 
        ret = perf_top__start_counters(top);
        if (ret)
-               goto out_delete;
+               return ret;
 
        top->session->evlist = top->evlist;
        perf_session__set_id_hdr_size(top->session);
@@ -1252,7 +1253,7 @@ static int __cmd_top(struct perf_top *top)
        ret = -1;
        if (pthread_create(&thread_process, NULL, process_thread, top)) {
                ui__error("Could not create process thread.\n");
-               goto out_delete;
+               return ret;
        }
 
        if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
@@ -1296,19 +1297,7 @@ out_join:
 out_join_thread:
        pthread_cond_signal(&top->qe.cond);
        pthread_join(thread_process, NULL);
-out_delete:
-       perf_session__delete(top->session);
-       top->session = NULL;
-
        return ret;
-
-out_err_cpu_topo: {
-       char errbuf[BUFSIZ];
-       const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
-
-       ui__error("Could not read the CPU topology map: %s\n", err);
-       goto out_delete;
-}
 }
 
 static int
@@ -1480,6 +1469,7 @@ int cmd_top(int argc, const char **argv)
                    "Display raw encoding of assembly instructions (default)"),
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
+       OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
        OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
                    "objdump binary to use for disassembly and annotations"),
        OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
@@ -1511,6 +1501,7 @@ int cmd_top(int argc, const char **argv)
                        "number of thread to run event synthesize"),
        OPT_END()
        };
+       struct perf_evlist *sb_evlist = NULL;
        const char * const top_usage[] = {
                "perf top [<options>]",
                NULL
@@ -1628,8 +1619,9 @@ int cmd_top(int argc, const char **argv)
        annotation_config__init();
 
        symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
-       if (symbol__init(NULL) < 0)
-               return -1;
+       status = symbol__init(NULL);
+       if (status < 0)
+               goto out_delete_evlist;
 
        sort__setup_elide(stdout);
 
@@ -1639,10 +1631,28 @@ int cmd_top(int argc, const char **argv)
                signal(SIGWINCH, winch_sig);
        }
 
+       top.session = perf_session__new(NULL, false, NULL);
+       if (top.session == NULL) {
+               status = -1;
+               goto out_delete_evlist;
+       }
+
+       if (!top.record_opts.no_bpf_event)
+               bpf_event__add_sb_event(&sb_evlist, &perf_env);
+
+       if (perf_evlist__start_sb_thread(sb_evlist, target)) {
+               pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+               opts->no_bpf_event = true;
+       }
+
        status = __cmd_top(&top);
 
+       if (!opts->no_bpf_event)
+               perf_evlist__stop_sb_thread(sb_evlist);
+
 out_delete_evlist:
        perf_evlist__delete(top.evlist);
+       perf_session__delete(top.session);
 
        return status;
 }
index 05745f3ce912dadf75495297d2b9978327d97774..999fe9170122e3962b5154842344d9e0639febee 100644 (file)
@@ -40,5 +40,6 @@ int cmd_mem(int argc, const char **argv);
 int cmd_data(int argc, const char **argv);
 int cmd_ftrace(int argc, const char **argv);
 
-int find_scripts(char **scripts_array, char **scripts_path_array);
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+                int pathlen);
 #endif
index a11cb006f9682ed15300ee6fa1abce0c0125f78e..72df4b6fa36fd12ad45cf00cdb0750d27c5d5061 100644 (file)
@@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
                use_pager = 1;
        commit_pager_choice();
 
+       perf_env__init(&perf_env);
        perf_env__set_cmdline(&perf_env, argc, argv);
        status = p->fn(argc, argv);
        perf_config__exit();
index b120e547ddc7b7fd9b4a03c8723a797f010b9511..c59743def8d36f539d7c196befa216a4b6c55a66 100644 (file)
@@ -66,7 +66,7 @@ struct record_opts {
        bool         ignore_missing_thread;
        bool         strict_freq;
        bool         sample_id;
-       bool         bpf_event;
+       bool         no_bpf_event;
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int auxtrace_mmap_pages;
index 704302c3e67dd16752cdcb10aac076d7dd4fa260..9dc2f6b70354a2327db3b4a94127b83b63b55f49 100644 (file)
     "BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x517082",
-    "EventName": "PM_CO_DISP_FAIL",
-    "BriefDescription": "CO dispatch failed due to all CO machines being busy",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x527084",
-    "EventName": "PM_CO_TM_SC_FOOTPRINT",
-    "BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x3608a",
     "EventName": "PM_CO_USAGE",
     "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a instruction side request",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x617082",
-    "EventName": "PM_ISIDE_DISP",
-    "BriefDescription": "All i-side dispatch attempts",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x627084",
-    "EventName": "PM_ISIDE_DISP_FAIL",
-    "BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x627086",
-    "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
-    "BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x4608e",
     "EventName": "PM_ISIDE_L2MEMACC",
     "BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x44608e",
-    "EventName": "PM_ISIDE_MRU_TOUCH",
-    "BriefDescription": "Iside L2 MRU touch",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x30ac",
     "EventName": "PM_ISU_REF_FX0",
     "BriefDescription": "Instruction Demand sectors wriittent into IL1",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x417080",
-    "EventName": "PM_L2_CASTOUT_MOD",
-    "BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x417082",
-    "EventName": "PM_L2_CASTOUT_SHR",
-    "BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x27084",
     "EventName": "PM_L2_CHIP_PUMP",
     "BriefDescription": "RC requests that were local on chip pump attempts",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x427086",
-    "EventName": "PM_L2_DC_INV",
-    "BriefDescription": "Dcache invalidates from L2",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x44608c",
-    "EventName": "PM_L2_DISP_ALL_L2MISS",
-    "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x27086",
     "EventName": "PM_L2_GROUP_PUMP",
     "BriefDescription": "RC requests that were on Node Pump attempts",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x626084",
-    "EventName": "PM_L2_GRP_GUESS_CORRECT",
-    "BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x626086",
-    "EventName": "PM_L2_GRP_GUESS_WRONG",
-    "BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x427084",
-    "EventName": "PM_L2_IC_INV",
-    "BriefDescription": "Icache Invalidates from L2",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x436088",
-    "EventName": "PM_L2_INST",
-    "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x43608a",
-    "EventName": "PM_L2_INST_MISS",
-    "BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x416080",
-    "EventName": "PM_L2_LD",
-    "BriefDescription": "All successful D-side Load dispatches for this thread",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x437088",
-    "EventName": "PM_L2_LD_DISP",
-    "BriefDescription": "All successful load dispatches",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x43708a",
-    "EventName": "PM_L2_LD_HIT",
-    "BriefDescription": "All successful load dispatches that were L2 hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x426084",
-    "EventName": "PM_L2_LD_MISS",
-    "BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x616080",
-    "EventName": "PM_L2_LOC_GUESS_CORRECT",
-    "BriefDescription": "L2 guess loc and guess was correct (ie data local)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x616082",
-    "EventName": "PM_L2_LOC_GUESS_WRONG",
-    "BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x516080",
-    "EventName": "PM_L2_RCLD_DISP",
-    "BriefDescription": "L2 RC load dispatch attempt",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x516082",
-    "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
-    "BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x526084",
-    "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
-    "BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x536088",
-    "EventName": "PM_L2_RCST_DISP",
-    "BriefDescription": "L2 RC store dispatch attempt",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x53608a",
-    "EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
-    "BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54608c",
-    "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
-    "BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x537088",
-    "EventName": "PM_L2_RC_ST_DONE",
-    "BriefDescription": "RC did st to line that was Tx or Sx",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x63708a",
-    "EventName": "PM_L2_RTY_LD",
-    "BriefDescription": "RC retries on PB for any load from core",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x3708a",
     "EventName": "PM_L2_RTY_ST",
     "BriefDescription": "RC retries on PB for any store from core",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x54708c",
-    "EventName": "PM_L2_SN_M_RD_DONE",
-    "BriefDescription": "SNP dispatched for a read and was M",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54708e",
-    "EventName": "PM_L2_SN_M_WR_DONE",
-    "BriefDescription": "SNP dispatched for a write and was M",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x53708a",
-    "EventName": "PM_L2_SN_SX_I_DONE",
-    "BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x17080",
     "EventName": "PM_L2_ST",
     "BriefDescription": "All successful D-side store dispatches for this thread",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x44708c",
-    "EventName": "PM_L2_ST_DISP",
-    "BriefDescription": "All successful store dispatches",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x44708e",
-    "EventName": "PM_L2_ST_HIT",
-    "BriefDescription": "All successful store dispatches that were L2Hits",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x17082",
     "EventName": "PM_L2_ST_MISS",
     "BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x636088",
-    "EventName": "PM_L2_SYS_GUESS_CORRECT",
-    "BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x63608a",
-    "EventName": "PM_L2_SYS_GUESS_WRONG",
-    "BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x617080",
-    "EventName": "PM_L2_SYS_PUMP",
-    "BriefDescription": "RC requests that were system pump attempts",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x1e05e",
     "EventName": "PM_L2_TM_REQ_ABORT",
     "BriefDescription": "TM marked store abort",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x23808a",
-    "EventName": "PM_L3_CINJ",
-    "BriefDescription": "l3 ci of cache inject",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x128084",
-    "EventName": "PM_L3_CI_HIT",
-    "BriefDescription": "L3 Castins Hit (total count",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x128086",
-    "EventName": "PM_L3_CI_MISS",
-    "BriefDescription": "L3 castins miss (total count",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x819082",
     "EventName": "PM_L3_CI_USAGE",
     "BriefDescription": "rotating sample of 16 CI or CO actives",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x438088",
-    "EventName": "PM_L3_CO",
-    "BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x83908b",
     "EventName": "PM_L3_CO0_ALLOC",
     "BriefDescription": "L3 CO to L3.1 OR of port 0 and 1 ( lossy)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x238088",
-    "EventName": "PM_L3_CO_LCO",
-    "BriefDescription": "Total L3 castouts occurred on LCO",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x28084",
     "EventName": "PM_L3_CO_MEM",
     "BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xb19082",
-    "EventName": "PM_L3_GRP_GUESS_CORRECT",
-    "BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb3908a",
-    "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
-    "BriefDescription": "Initial scope=group but data from local node. Predition too high",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb39088",
-    "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
-    "BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x218080",
-    "EventName": "PM_L3_HIT",
-    "BriefDescription": "L3 Hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x138088",
-    "EventName": "PM_L3_L2_CO_HIT",
-    "BriefDescription": "L2 castout hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x13808a",
-    "EventName": "PM_L3_L2_CO_MISS",
-    "BriefDescription": "L2 castout miss",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x14808c",
-    "EventName": "PM_L3_LAT_CI_HIT",
-    "BriefDescription": "L3 Lateral Castins Hit",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x14808e",
-    "EventName": "PM_L3_LAT_CI_MISS",
-    "BriefDescription": "L3 Lateral Castins Miss",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x228084",
-    "EventName": "PM_L3_LD_HIT",
-    "BriefDescription": "L3 demand LD Hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x228086",
-    "EventName": "PM_L3_LD_MISS",
-    "BriefDescription": "L3 demand LD Miss",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x1e052",
     "EventName": "PM_L3_LD_PREF",
     "BriefDescription": "L3 Load Prefetches",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xb19080",
-    "EventName": "PM_L3_LOC_GUESS_CORRECT",
-    "BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb29086",
-    "EventName": "PM_L3_LOC_GUESS_WRONG",
-    "BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x218082",
-    "EventName": "PM_L3_MISS",
-    "BriefDescription": "L3 Misses",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54808c",
-    "EventName": "PM_L3_P0_CO_L31",
-    "BriefDescription": "l3 CO to L3.1 (lco) port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x538088",
-    "EventName": "PM_L3_P0_CO_MEM",
-    "BriefDescription": "l3 CO to memory port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x929084",
-    "EventName": "PM_L3_P0_CO_RTY",
-    "BriefDescription": "L3 CO received retry port 0",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0xa29084",
     "EventName": "PM_L3_P0_GRP_PUMP",
     "BriefDescription": "L3 LCO received retry port 0",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xa19080",
-    "EventName": "PM_L3_P0_NODE_PUMP",
-    "BriefDescription": "L3 pf sent with nodal scope port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x919080",
-    "EventName": "PM_L3_P0_PF_RTY",
-    "BriefDescription": "L3 PF received retry port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x939088",
-    "EventName": "PM_L3_P0_SN_HIT",
-    "BriefDescription": "L3 snoop hit port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x118080",
-    "EventName": "PM_L3_P0_SN_INV",
-    "BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x94908c",
-    "EventName": "PM_L3_P0_SN_MISS",
-    "BriefDescription": "L3 snoop miss port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa39088",
-    "EventName": "PM_L3_P0_SYS_PUMP",
-    "BriefDescription": "L3 pf sent with sys scope port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54808e",
-    "EventName": "PM_L3_P1_CO_L31",
-    "BriefDescription": "l3 CO to L3.1 (lco) port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x53808a",
-    "EventName": "PM_L3_P1_CO_MEM",
-    "BriefDescription": "l3 CO to memory port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x929086",
-    "EventName": "PM_L3_P1_CO_RTY",
-    "BriefDescription": "L3 CO received retry port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa29086",
-    "EventName": "PM_L3_P1_GRP_PUMP",
-    "BriefDescription": "L3 pf sent with grp scope port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x528086",
-    "EventName": "PM_L3_P1_LCO_DATA",
-    "BriefDescription": "lco sent with data port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x518082",
-    "EventName": "PM_L3_P1_LCO_NO_DATA",
-    "BriefDescription": "dataless l3 lco sent port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa4908e",
-    "EventName": "PM_L3_P1_LCO_RTY",
-    "BriefDescription": "L3 LCO received retry port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa19082",
-    "EventName": "PM_L3_P1_NODE_PUMP",
-    "BriefDescription": "L3 pf sent with nodal scope port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x919082",
-    "EventName": "PM_L3_P1_PF_RTY",
-    "BriefDescription": "L3 PF received retry port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x93908a",
-    "EventName": "PM_L3_P1_SN_HIT",
-    "BriefDescription": "L3 snoop hit port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x118082",
-    "EventName": "PM_L3_P1_SN_INV",
-    "BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x94908e",
-    "EventName": "PM_L3_P1_SN_MISS",
-    "BriefDescription": "L3 snoop miss port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa3908a",
-    "EventName": "PM_L3_P1_SYS_PUMP",
-    "BriefDescription": "L3 pf sent with sys scope port 1",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x84908d",
     "EventName": "PM_L3_PF0_ALLOC",
     "BriefDescription": "lifetime, sample of PF machine 0 valid",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x428084",
-    "EventName": "PM_L3_PF_HIT_L3",
-    "BriefDescription": "l3 pf hit in l3",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x18080",
     "EventName": "PM_L3_PF_MISS_L3",
     "BriefDescription": "Data stream touchto L3",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xb29084",
-    "EventName": "PM_L3_SYS_GUESS_CORRECT",
-    "BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb4908c",
-    "EventName": "PM_L3_SYS_GUESS_WRONG",
-    "BriefDescription": "Initial scope=system but data from local or near. Predction too high",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x24808e",
-    "EventName": "PM_L3_TRANS_PF",
-    "BriefDescription": "L3 Transient prefetch",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x18081",
     "EventName": "PM_L3_WI0_ALLOC",
     "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
     "PublicDescription": "0.0"
   },
-  {,
-    "EventCode": "0x418080",
-    "EventName": "PM_L3_WI0_BUSY",
-    "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x418082",
-    "EventName": "PM_L3_WI_USAGE",
-    "BriefDescription": "rotating sample of 8 WI actives",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0xc080",
     "EventName": "PM_LD_REF_L1_LSU0",
     "BriefDescription": "Dispatch time non favored tbegin",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x328084",
-    "EventName": "PM_NON_TM_RST_SC",
-    "BriefDescription": "non tm snp rst tm sc",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x2001a",
     "EventName": "PM_NTCG_ALL_FIN",
     "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 RC machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x34808e",
-    "EventName": "PM_RD_CLEARING_SC",
-    "BriefDescription": "rd clearing sc",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x34808c",
-    "EventName": "PM_RD_FORMING_SC",
-    "BriefDescription": "rd forming sc",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x428086",
-    "EventName": "PM_RD_HIT_PF",
-    "BriefDescription": "rd machine hit l3 pf machine",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x20004",
     "EventName": "PM_REAL_SRQ_FULL",
     "BriefDescription": "TLBIE snoop",
     "PublicDescription": "TLBIE snoopSnoop TLBIE"
   },
-  {,
-    "EventCode": "0x338088",
-    "EventName": "PM_SNP_TM_HIT_M",
-    "BriefDescription": "snp tm st hit m mu",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x33808a",
-    "EventName": "PM_SNP_TM_HIT_T",
-    "BriefDescription": "snp tm_st_hit t tn te",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x4608c",
     "EventName": "PM_SN_USAGE",
     "BriefDescription": "STCX executed reported at sent to nest",
     "PublicDescription": "STCX executed reported at sent to nest42"
   },
-  {,
-    "EventCode": "0x717080",
-    "EventName": "PM_ST_CAUSED_FAIL",
-    "BriefDescription": "Non TM St caused any thread to fail",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x3090",
     "EventName": "PM_SWAP_CANCEL",
     "BriefDescription": "Tm any tbegin",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x318082",
-    "EventName": "PM_TM_CAM_OVERFLOW",
-    "BriefDescription": "l3 tm cam overflow during L2 co of SC",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x74708c",
-    "EventName": "PM_TM_CAP_OVERFLOW",
-    "BriefDescription": "TM Footprint Capactiy Overflow",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x20ba",
     "EventName": "PM_TM_END_ALL",
     "BriefDescription": "Transactional conflict from LSU, whatever gets reported to texas",
     "PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
   },
-  {,
-    "EventCode": "0x727086",
-    "EventName": "PM_TM_FAV_CAUSED_FAIL",
-    "BriefDescription": "TM Load (fav) caused another thread to fail",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x717082",
-    "EventName": "PM_TM_LD_CAUSED_FAIL",
-    "BriefDescription": "Non TM Ld caused any thread to fail",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x727084",
-    "EventName": "PM_TM_LD_CONF",
-    "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x328086",
-    "EventName": "PM_TM_RST_SC",
-    "BriefDescription": "tm snp rst tm sc",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x318080",
-    "EventName": "PM_TM_SC_CO",
-    "BriefDescription": "l3 castout tm Sc line",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x73708a",
-    "EventName": "PM_TM_ST_CAUSED_FAIL",
-    "BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x737088",
-    "EventName": "PM_TM_ST_CONF",
-    "BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x20bc",
     "EventName": "PM_TM_TBEGIN",
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
new file mode 100644 (file)
index 0000000..93ddfd8
--- /dev/null
@@ -0,0 +1,12 @@
+[
+  {
+    "EventName": "bp_l1_btb_correct",
+    "EventCode": "0x8a",
+    "BriefDescription": "L1 BTB Correction."
+  },
+  {
+    "EventName": "bp_l2_btb_correct",
+    "EventCode": "0x8b",
+    "BriefDescription": "L2 BTB Correction."
+  }
+]
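
The two entries above illustrate the schema shared by all of the new amdfam17h files: each event is an object with EventName, EventCode, BriefDescription and, where sub-events exist, a UMask, which jevents later compiles into perf's built-in alias tables. Purely as an illustration (not part of the patch), a small Python sketch can load one of these files and list those fields; the path assumes a kernel tree with this series applied.

import json

# Illustrative only: inspect one of the new event files from this series.
# The path mirrors the location added by the diff; adjust for your tree.
path = "tools/perf/pmu-events/arch/x86/amdfam17h/branch.json"

with open(path) as f:
    events = json.load(f)

for ev in events:
    # UMask is optional; the two BTB-correction events above do not carry one.
    print("%-24s code=%-6s umask=%-4s %s" % (
        ev["EventName"], ev["EventCode"], ev.get("UMask", "-"),
        ev["BriefDescription"]))
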
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
new file mode 100644 (file)
index 0000000..fad4af9
--- /dev/null
@@ -0,0 +1,287 @@
+[
+  {
+    "EventName": "ic_fw32",
+    "EventCode": "0x80",
+    "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+  },
+  {
+    "EventName": "ic_fw32_miss",
+    "EventCode": "0x81",
+    "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+  },
+  {
+    "EventName": "ic_cache_fill_l2",
+    "EventCode": "0x82",
+    "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+  },
+  {
+    "EventName": "ic_cache_fill_sys",
+    "EventCode": "0x83",
+    "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+  },
+  {
+    "EventName": "bp_l1_tlb_miss_l2_hit",
+    "EventCode": "0x84",
+    "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+  },
+  {
+    "EventName": "bp_l1_tlb_miss_l2_miss",
+    "EventCode": "0x85",
+    "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
+  },
+  {
+    "EventName": "bp_snp_re_sync",
+    "EventCode": "0x86",
+    "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+  },
+  {
+    "EventName": "ic_fetch_stall.ic_stall_any",
+    "EventCode": "0x87",
+    "BriefDescription": "IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+    "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+    "EventCode": "0x87",
+    "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+    "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+    "EventCode": "0x87",
+    "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+    "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ic_cache_inval.l2_invalidating_probe",
+    "EventCode": "0x8c",
+    "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS).",
+    "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to L2 invalidating probe (external or LS).",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ic_cache_inval.fill_invalidated",
+    "EventCode": "0x8c",
+    "BriefDescription": "IC line invalidated due to overwriting fill response.",
+    "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to overwriting fill response.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "bp_tlb_rel",
+    "EventCode": "0x99",
+    "BriefDescription": "The number of ITLB reload requests."
+  },
+  {
+    "EventName": "l2_request_g1.rd_blk_l",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "l2_request_g1.rd_blk_x",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_request_g1.ls_rd_blk_c_s",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_request_g1.cacheable_ic_read",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "l2_request_g1.change_to_x",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "l2_request_g1.prefetch_l2",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_request_g1.l2_hw_pf",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "l2_request_g1.other_requests",
+    "EventCode": "0x60",
+    "BriefDescription": "Events covered by l2_request_g2.",
+    "PublicDescription": "Requests to L2 Group1. Events covered by l2_request_g2.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_request_g2.group1",
+    "EventCode": "0x61",
+    "BriefDescription": "All Group 1 commands not in unit0.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. All Group 1 commands not in unit0.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "l2_request_g2.ls_rd_sized",
+    "EventCode": "0x61",
+    "BriefDescription": "RdSized, RdSized32, RdSized64.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSized, RdSized32, RdSized64.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_request_g2.ls_rd_sized_nc",
+    "EventCode": "0x61",
+    "BriefDescription": "RdSizedNC, RdSized32NC, RdSized64NC.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSizedNC, RdSized32NC, RdSized64NC.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_request_g2.ic_rd_sized",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "l2_request_g2.ic_rd_sized_nc",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "l2_request_g2.smc_inval",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_request_g2.bus_locks_originator",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "l2_request_g2.bus_locks_responses",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+    "EventCode": "0x62",
+    "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+    "PublicDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_wcb_req.wcb_write",
+    "EventCode": "0x63",
+    "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+    "BriefDescription": "LS to L2 WCB write requests.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_wcb_req.wcb_close",
+    "EventCode": "0x63",
+    "BriefDescription": "LS to L2 WCB close requests.",
+    "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_wcb_req.zero_byte_store",
+    "EventCode": "0x63",
+    "BriefDescription": "LS to L2 WCB zero byte store requests.",
+    "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_wcb_req.cl_zero",
+    "EventCode": "0x63",
+    "PublicDescription": "LS to L2 WCB cache line zeroing requests.",
+    "BriefDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+    "EventCode": "0x64",
+    "BriefDescription": "LS ReadBlock C/S Hit.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS ReadBlock C/S Hit.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+    "EventCode": "0x64",
+    "BriefDescription": "LS Read Block L Hit X.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block L Hit X.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+    "EventCode": "0x64",
+    "BriefDescription": "LsRdBlkL Hit Shared.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkL Hit Shared.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+    "EventCode": "0x64",
+    "BriefDescription": "LsRdBlkX/ChgToX Hit X.  Count RdBlkX finding Shared as a Miss.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkX/ChgToX Hit X.  Count RdBlkX finding Shared as a Miss.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+    "EventCode": "0x64",
+    "BriefDescription": "LS Read Block C S L X Change to X Miss.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block C S L X Change to X Miss.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+    "EventCode": "0x64",
+    "BriefDescription": "IC Fill Hit Exclusive Stale.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Exclusive Stale.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+    "EventCode": "0x64",
+    "BriefDescription": "IC Fill Hit Shared.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Shared.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ic_fill_miss",
+    "EventCode": "0x64",
+    "BriefDescription": "IC Fill Miss.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Miss.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_fill_pending.l2_fill_busy",
+    "EventCode": "0x6d",
+    "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+    "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+    "UMask": "0x1"
+  }
+]
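
The description of l2_latency.l2_cycles_waiting_on_fills above encodes a small calculation: the counter increments once per four cycles of outstanding-fill time, and the fill counts from both hardware threads have to be combined to get an average latency. A worked sketch of that arithmetic follows; all values are placeholders, and the choice of which event supplies the per-thread fill count is an assumption of the example, not something the patch specifies.

# Worked example of the average-latency calculation hinted at in
# l2_latency.l2_cycles_waiting_on_fills. Numbers are placeholders for values
# collected with perf stat; using an L2 fill/miss event as the denominator is
# an assumption of this sketch.
l2_cycles_waiting_on_fills = 2_500_000   # counter value (cycles already divided by 4)
fills_thread0 = 30_000                   # L2 fills observed on thread 0
fills_thread1 = 20_000                   # L2 fills observed on the sibling thread

total_wait_cycles = l2_cycles_waiting_on_fills * 4   # undo the divide-by-four
total_fills = fills_thread0 + fills_thread1          # both threads, as documented

print("average L2 fill latency ~= %.1f cycles" % (total_wait_cycles / total_fills))
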
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
new file mode 100644 (file)
index 0000000..7b285b0
--- /dev/null
@@ -0,0 +1,134 @@
+[
+  {
+    "EventName": "ex_ret_instr",
+    "EventCode": "0xc0",
+    "BriefDescription": "Retired Instructions."
+  },
+  {
+    "EventName": "ex_ret_cops",
+    "EventCode": "0xc1",
+    "BriefDescription": "Retired Uops.",
+    "PublicDescription": "The number of uOps retired. This includes all processor activity (instructions, exceptions, interrupts, microcode assists, etc.). The number of events logged per cycle can vary from 0 to 4."
+  },
+  {
+    "EventName": "ex_ret_brn",
+    "EventCode": "0xc2",
+    "BriefDescription": "[Retired Branch Instructions.",
+    "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+  },
+  {
+    "EventName": "ex_ret_brn_misp",
+    "EventCode": "0xc3",
+    "BriefDescription": "Retired Branch Instructions Mispredicted.",
+    "PublicDescription": "The number of branch instructions retired, of any type, that were not correctly predicted. This includes those for which prediction is not attempted (far control transfers, exceptions and interrupts)."
+  },
+  {
+    "EventName": "ex_ret_brn_tkn",
+    "EventCode": "0xc4",
+    "BriefDescription": "Retired Taken Branch Instructions.",
+    "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+  },
+  {
+    "EventName": "ex_ret_brn_tkn_misp",
+    "EventCode": "0xc5",
+    "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+    "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+  },
+  {
+    "EventName": "ex_ret_brn_far",
+    "EventCode": "0xc6",
+    "BriefDescription": "Retired Far Control Transfers.",
+    "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+  },
+  {
+    "EventName": "ex_ret_brn_resync",
+    "EventCode": "0xc7",
+    "BriefDescription": "Retired Branch Resyncs.",
+    "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+  },
+  {
+    "EventName": "ex_ret_near_ret",
+    "EventCode": "0xc8",
+    "BriefDescription": "Retired Near Returns.",
+    "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+  },
+  {
+    "EventName": "ex_ret_near_ret_mispred",
+    "EventCode": "0xc9",
+    "BriefDescription": "Retired Near Returns Mispredicted.",
+    "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+  },
+  {
+    "EventName": "ex_ret_brn_ind_misp",
+    "EventCode": "0xca",
+    "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+    "PublicDescription": "Retired Indirect Branch Instructions Mispredicted."
+  },
+  {
+    "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+    "EventCode": "0xcb",
+    "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+    "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+    "EventCode": "0xcb",
+    "BriefDescription": "MMX instructions.",
+    "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+    "EventCode": "0xcb",
+    "BriefDescription": "x87 instructions.",
+    "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ex_ret_cond",
+    "EventCode": "0xd1",
+    "BriefDescription": "Retired Conditional Branch Instructions."
+  },
+  {
+    "EventName": "ex_ret_cond_misp",
+    "EventCode": "0xd2",
+    "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
+  },
+  {
+    "EventName": "ex_div_busy",
+    "EventCode": "0xd3",
+    "BriefDescription": "Div Cycles Busy count."
+  },
+  {
+    "EventName": "ex_div_count",
+    "EventCode": "0xd4",
+    "BriefDescription": "Div Op Count."
+  },
+  {
+    "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+    "EventCode": "0x1cf",
+    "BriefDescription": "Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+    "PublicDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+    "EventCode": "0x1cf",
+    "BriefDescription": "Number of Ops tagged by IBS that retired.",
+    "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+    "EventCode": "0x1cf",
+    "BriefDescription": "Number of Ops tagged by IBS.",
+    "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ex_ret_fus_brnch_inst",
+    "EventCode": "0x1d0",
+    "BriefDescription": "The number of fused retired branch instructions retired per cycle. The number of events logged per cycle can vary from 0 to 3."
+  }
+]
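
Several of the retire-based events above are most useful as ratios. A minimal sketch of two common derived metrics, branch misprediction rate and mispredicts per kilo-instruction, is shown below; the counter values are placeholders standing in for a real measurement taken with the ex_ret_instr, ex_ret_brn and ex_ret_brn_misp aliases this file adds.

# Placeholder counter values; in practice they would come from something like
#   perf stat -e ex_ret_instr,ex_ret_brn,ex_ret_brn_misp -- <workload>
ex_ret_instr    = 1_000_000_000
ex_ret_brn      =   180_000_000
ex_ret_brn_misp =     4_500_000

misp_rate = ex_ret_brn_misp / ex_ret_brn               # per retired branch
mpki      = ex_ret_brn_misp / (ex_ret_instr / 1000.0)  # per 1000 retired instructions

print("mispredict rate: %.2f%%  MPKI: %.2f" % (100.0 * misp_rate, mpki))
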
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
new file mode 100644 (file)
index 0000000..ea47119
--- /dev/null
@@ -0,0 +1,168 @@
+[
+  {
+    "EventName": "fpu_pipe_assignment.dual",
+    "EventCode": "0x00",
+    "BriefDescription": "Total number multi-pipe uOps.",
+    "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to Pipe 3.",
+    "UMask": "0xf0"
+  },
+  {
+    "EventName": "fpu_pipe_assignment.total",
+    "EventCode": "0x00",
+    "BriefDescription": "Total number uOps.",
+    "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to Pipe 3.",
+    "UMask": "0xf"
+  },
+  {
+    "EventName": "fp_sched_empty",
+    "EventCode": "0x01",
+    "BriefDescription": "This is a speculative event. The number of cycles in which the FPU scheduler is empty. Note that some Ops like FP loads bypass the scheduler."
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.all",
+    "EventCode": "0x02",
+    "BriefDescription": "All Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
+    "UMask": "0x7"
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
+    "EventCode": "0x02",
+    "BriefDescription": "Divide and square root Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.mul_ops",
+    "EventCode": "0x02",
+    "BriefDescription": "Multiply Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.add_sub_ops",
+    "EventCode": "0x02",
+    "BriefDescription": "Add/subtract Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.all",
+    "EventCode": "0x03",
+    "BriefDescription": "All FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+    "UMask": "0xff"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_mult_add_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_div_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision divide/square root FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision divide/square root FLOPS.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_mult_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision multiply FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply FLOPS.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_add_sub_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision add/subtract FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision add/subtract FLOPS.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_mult_add_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_div_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single-precision divide/square root FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single-precision multiply FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single-precision add/subtract FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.optimized",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of Scalar Ops optimized.",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of SSE Move Ops eliminated.",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of SSE Move Ops.",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+    "PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.x87_bot_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "x87 bottom-executing uOps retired.",
+    "PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+    "PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.sse_bot_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "SSE bottom-executing uOps retired.",
+    "PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
+    "UMask": "0x1"
+  }
+]
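
fp_ret_sse_avx_ops.all above already counts FLOPs directly (the sub-event descriptions note that a multiply-add contributes 2), so turning it into a throughput figure is a single division. A short sketch with placeholder numbers:

# Placeholder values for a measurement of fp_ret_sse_avx_ops.all; the event
# counts retired SSE/AVX FLOPs, with multiply-add already weighted as 2.
flops_retired   = 12_000_000_000
elapsed_seconds = 2.5

print("sustained ~= %.2f GFLOPS" % (flops_retired / elapsed_seconds / 1e9))
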
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
new file mode 100644 (file)
index 0000000..fa2d60d
--- /dev/null
@@ -0,0 +1,162 @@
+[
+  {
+    "EventName": "ls_locks.bus_lock",
+    "EventCode": "0x25",
+    "BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+    "PublicDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_dispatch.ld_st_dispatch",
+    "EventCode": "0x29",
+    "BriefDescription": "Load-op-Stores.",
+    "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ls_dispatch.store_dispatch",
+    "EventCode": "0x29",
+    "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_dispatch.ld_dispatch",
+    "EventCode": "0x29",
+    "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_stlf",
+    "EventCode": "0x35",
+    "BriefDescription": "Number of STLF hits."
+  },
+  {
+    "EventName": "ls_dc_accesses",
+    "EventCode": "0x40",
+    "BriefDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.all",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss or Reload off all sizes.",
+    "PublicDescription": "L1 DTLB Miss or Reload off all sizes.",
+    "UMask": "0xff"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 1G size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 1G size.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 2M size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 2M size.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 32K size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 32K size.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 4K size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 4K size.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 1G size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 1G size.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 2M size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 2M size.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 32K size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 32K size.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 4K size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 4K size.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_iside",
+    "EventCode": "0x46",
+    "BriefDescription": "Tablewalker allocation.",
+    "PublicDescription": "Tablewalker allocation.",
+    "UMask": "0xc"
+  },
+  {
+    "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_dside",
+    "EventCode": "0x46",
+    "BriefDescription": "Tablewalker allocation.",
+    "PublicDescription": "Tablewalker allocation.",
+    "UMask": "0x3"
+  },
+  {
+    "EventName": "ls_misal_accesses",
+    "EventCode": "0x47",
+    "BriefDescription": "Misaligned loads."
+  },
+  {
+    "EventName": "ls_pref_instr_disp.prefetch_nta",
+    "EventCode": "0x4b",
+    "BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+    "PublicDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ls_pref_instr_disp.store_prefetch_w",
+    "EventCode": "0x4b",
+    "BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+    "PublicDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_pref_instr_disp.load_prefetch_w",
+    "EventCode": "0x4b",
+    "BriefDescription": "Prefetch, Prefetch_T0_T1_T2.",
+    "PublicDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+    "EventCode": "0x52",
+    "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+    "EventCode": "0x52",
+    "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_not_halted_cyc",
+    "EventCode": "0x76",
+    "BriefDescription": "Cycles not in Halt."
+  }
+]
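
The ls_l1_d_tlb_miss unit masks above partition L1 DTLB misses by page size and by whether the L2 DTLB also missed: the low four umask bits are reloads satisfied from the L2 DTLB, the high four are misses in both levels, and .all (0xff) is simply their union. A sketch of how those pieces combine, with placeholder counts:

# Placeholder per-umask counts for ls_l1_d_tlb_miss; real values come from
# perf stat with the individual .tlb_reload_* aliases.
reload_from_l2   = {"4k": 90_000, "32k": 1_000, "2m": 4_000, "1g": 0}  # umask 0x1, 0x2, 0x4, 0x8
missed_both_tlbs = {"4k": 12_000, "32k":   200, "2m":   800, "1g": 0}  # umask 0x10, 0x20, 0x40, 0x80

all_l1_dtlb_misses = sum(reload_from_l2.values()) + sum(missed_both_tlbs.values())
l2_miss_fraction = sum(missed_both_tlbs.values()) / all_l1_dtlb_misses

print("L1 DTLB misses: %d, fraction that also missed the L2 DTLB: %.1f%%"
      % (all_l1_dtlb_misses, 100.0 * l2_miss_fraction))
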
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
new file mode 100644 (file)
index 0000000..b26a00d
--- /dev/null
@@ -0,0 +1,65 @@
+[
+  {
+    "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+    "EventCode": "0x28a",
+    "BriefDescription": "OC to IC mode switch.",
+    "PublicDescription": "OC Mode Switch. OC to IC mode switch.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+    "EventCode": "0x28a",
+    "BriefDescription": "IC to OC mode switch.",
+    "PublicDescription": "OC Mode Switch. IC to OC mode switch.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "RETIRE Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "AGSQ Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALU tokens total unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALSQ 3 Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALSQ 2 Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALSQ 1 Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
+    "UMask": "0x1"
+  }
+]
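
The de_dis_dispatch_token_stalls0 unit masks above break dispatch stalls down by which token pool ran dry. One common way to read them is as a fraction of non-halted cycles (ls_not_halted_cyc, from memory.json above); a sketch with placeholder values:

# Placeholder counter values; each stall event counts cycles in which a valid
# dispatch group could not dispatch for lack of that token type.
ls_not_halted_cyc = 5_000_000_000
token_stalls = {
    "retire_token_stall": 120_000_000,
    "agsq_token_stall":    80_000_000,
    "alu_token_stall":     60_000_000,
}

for name, cycles in token_stalls.items():
    print("%-20s %.1f%% of non-halted cycles" % (name, 100.0 * cycles / ls_not_halted_cyc))
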
index e05c2c8458fcb2c5645cd121f173267228905fcb..d6984a3017e06b609d5b69ce289b3441d0ae0b7c 100644 (file)
@@ -33,3 +33,4 @@ GenuineIntel-6-25,v2,westmereep-sp,core
 GenuineIntel-6-2F,v2,westmereex,core
 GenuineIntel-6-55-[01234],v1,skylakex,core
 GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
+AuthenticAMD-23-[[:xdigit:]]+,v1,amdfam17h,core
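
The new mapfile row is an extended regular expression: any CPU identifier of the form AuthenticAMD-23-<hex model> is routed to the amdfam17h tables added above. A quick check of that pattern in Python is shown below; Python's re module lacks POSIX character classes, so [[:xdigit:]] is spelled out as [0-9A-Fa-f], and the identifier strings are only illustrative.

import re

# [[:xdigit:]] from the mapfile rewritten as an explicit hex-digit class,
# since Python's re module does not support POSIX character classes.
pattern = re.compile(r"AuthenticAMD-23-[0-9A-Fa-f]+$")

for cpuid in ("AuthenticAMD-23-1", "AuthenticAMD-23-71", "GenuineIntel-6-55-4"):
    hit = "amdfam17h" if pattern.match(cpuid) else "(no match)"
    print("%-24s -> %s" % (cpuid, hit))
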
index 390a351d15eada0bb7d19805e54aa8fe60732829..c3eae1d77d366d0819a4b7d39df87099d2342307 100644 (file)
@@ -10,6 +10,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 
+from __future__ import print_function
+
 import os
 import sys
 import struct
@@ -199,6 +201,18 @@ import datetime
 
 from PySide.QtSql import *
 
+if sys.version_info < (3, 0):
+       def toserverstr(str):
+               return str
+       def toclientstr(str):
+               return str
+else:
+       # Assume UTF-8 server_encoding and client_encoding
+       def toserverstr(str):
+               return bytes(str, "UTF_8")
+       def toclientstr(str):
+               return bytes(str, "UTF_8")
+
 # Need to access PostgreSQL C library directly to use COPY FROM STDIN
 from ctypes import *
 libpq = CDLL("libpq.so.5")
@@ -234,12 +248,17 @@ perf_db_export_mode = True
 perf_db_export_calls = False
 perf_db_export_callchains = False
 
+def printerr(*args, **kw_args):
+       print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+        print(datetime.datetime.today(), *args, sep=' ', **kw_args)
 
 def usage():
-       print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
-       print >> sys.stderr, "where:    columns         'all' or 'branches'"
-       print >> sys.stderr, "          calls           'calls' => create calls and call_paths table"
-       print >> sys.stderr, "          callchains      'callchains' => create call_paths table"
+       printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+       printerr("where:        columns         'all' or 'branches'")
+       printerr("              calls           'calls' => create calls and call_paths table")
+       printerr("              callchains      'callchains' => create call_paths table")
        raise Exception("Too few arguments")
 
 if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
                return
        raise Exception("Query failed: " + q.lastError().text())
 
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database...")
 
 db = QSqlDatabase.addDatabase('QPSQL')
 query = QSqlQuery(db)
@@ -506,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
        ' FROM samples')
 
 
-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
 
 def open_output_file(file_name):
        path_name = output_dir_name + "/" + file_name
-       file = open(path_name, "w+")
+       file = open(path_name, "wb+")
        file.write(file_header)
        return file
 
@@ -526,13 +545,13 @@ def copy_output_file_direct(file, table_name):
 
 # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
 def copy_output_file(file, table_name):
-       conn = PQconnectdb("dbname = " + dbname)
+       conn = PQconnectdb(toclientstr("dbname = " + dbname))
        if (PQstatus(conn)):
                raise Exception("COPY FROM STDIN PQconnectdb failed")
        file.write(file_trailer)
        file.seek(0)
        sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
-       res = PQexec(conn, sql)
+       res = PQexec(conn, toclientstr(sql))
        if (PQresultStatus(res) != 4):
                raise Exception("COPY FROM STDIN PQexec failed")
        data = file.read(65536)
@@ -566,7 +585,7 @@ if perf_db_export_calls:
        call_file               = open_output_file("call_table.bin")
 
 def trace_begin():
-       print datetime.datetime.today(), "Writing to intermediate files..."
+       printdate("Writing to intermediate files...")
        # id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
        evsel_table(0, "unknown")
        machine_table(0, 0, "unknown")
@@ -582,7 +601,7 @@ def trace_begin():
 unhandled_count = 0
 
 def trace_end():
-       print datetime.datetime.today(), "Copying to database..."
+       printdate("Copying to database...")
        copy_output_file(evsel_file,            "selected_events")
        copy_output_file(machine_file,          "machines")
        copy_output_file(thread_file,           "threads")
@@ -597,7 +616,7 @@ def trace_end():
        if perf_db_export_calls:
                copy_output_file(call_file,             "calls")
 
-       print datetime.datetime.today(), "Removing intermediate files..."
+       printdate("Removing intermediate files...")
        remove_output_file(evsel_file)
        remove_output_file(machine_file)
        remove_output_file(thread_file)
@@ -612,7 +631,7 @@ def trace_end():
        if perf_db_export_calls:
                remove_output_file(call_file)
        os.rmdir(output_dir_name)
-       print datetime.datetime.today(), "Adding primary keys"
+       printdate("Adding primary keys")
        do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
        do_query(query, 'ALTER TABLE machines        ADD PRIMARY KEY (id)')
        do_query(query, 'ALTER TABLE threads         ADD PRIMARY KEY (id)')
@@ -627,7 +646,7 @@ def trace_end():
        if perf_db_export_calls:
                do_query(query, 'ALTER TABLE calls           ADD PRIMARY KEY (id)')
 
-       print datetime.datetime.today(), "Adding foreign keys"
+       printdate("Adding foreign keys")
        do_query(query, 'ALTER TABLE threads '
                                        'ADD CONSTRAINT machinefk  FOREIGN KEY (machine_id)   REFERENCES machines   (id),'
                                        'ADD CONSTRAINT processfk  FOREIGN KEY (process_id)   REFERENCES threads    (id)')
@@ -663,8 +682,8 @@ def trace_end():
                do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
 
        if (unhandled_count):
-               print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
-       print datetime.datetime.today(), "Done"
+               printdate("Warning: ", unhandled_count, " unhandled events")
+       printdate("Done")
 
 def trace_unhandled(event_name, context, event_fields_dict):
        global unhandled_count
@@ -674,12 +693,14 @@ def sched__sched_switch(*x):
        pass
 
 def evsel_table(evsel_id, evsel_name, *x):
+       evsel_name = toserverstr(evsel_name)
        n = len(evsel_name)
        fmt = "!hiqi" + str(n) + "s"
        value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
        evsel_file.write(value)
 
 def machine_table(machine_id, pid, root_dir, *x):
+       root_dir = toserverstr(root_dir)
        n = len(root_dir)
        fmt = "!hiqiii" + str(n) + "s"
        value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -690,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
        thread_file.write(value)
 
 def comm_table(comm_id, comm_str, *x):
+       comm_str = toserverstr(comm_str)
        n = len(comm_str)
        fmt = "!hiqi" + str(n) + "s"
        value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -701,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
        comm_thread_file.write(value)
 
 def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+       short_name = toserverstr(short_name)
+       long_name = toserverstr(long_name)
+       build_id = toserverstr(build_id)
        n1 = len(short_name)
        n2 = len(long_name)
        n3 = len(build_id)
@@ -709,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
        dso_file.write(value)
 
 def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+       symbol_name = toserverstr(symbol_name)
        n = len(symbol_name)
        fmt = "!hiqiqiqiqiii" + str(n) + "s"
        value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
        symbol_file.write(value)
 
 def branch_type_table(branch_type, name, *x):
+       name = toserverstr(name)
        n = len(name)
        fmt = "!hiii" + str(n) + "s"
        value = struct.pack(fmt, 2, 4, branch_type, n, name)
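The export-to-postgresql.py hunks above are a Python 2/3 port: struct's "s" format code and the PGCOPY binary stream both require bytes on Python 3, which is what toserverstr()/toclientstr() provide, and the intermediate files are now opened in binary mode. A minimal, self-contained sketch of that constraint, mirroring the evsel_table() record layout shown above (the id of 1, the name "cycles" and the output file name are made up):

    import struct

    # The PGCOPY header exactly as the script builds it: 11-byte signature
    # plus two zero int32 fields (flags, header-extension length).
    file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)

    # On Python 3 the "s" code packs bytes, not str, hence toserverstr().
    name = "cycles".encode("UTF_8")
    record = struct.pack("!hiqi" + str(len(name)) + "s", 2, 8, 1, len(name), name)

    with open("evsel_table.bin", "wb+") as f:   # binary mode, as in open_output_file()
        f.write(file_header)
        f.write(record)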
index eb63e6c7107fdb29ea7fee0d6606794186c543f9..3b71902a5a21124c6006100580245ceda1c72a23 100644 (file)
@@ -10,6 +10,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 
+from __future__ import print_function
+
 import os
 import sys
 import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
 perf_db_export_calls = False
 perf_db_export_callchains = False
 
+def printerr(*args, **keyword_args):
+       print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+        print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
 def usage():
-       print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
-       print >> sys.stderr, "where:    columns         'all' or 'branches'"
-       print >> sys.stderr, "          calls           'calls' => create calls and call_paths table"
-       print >> sys.stderr, "          callchains      'callchains' => create call_paths table"
+       printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
+       printerr("where:        columns         'all' or 'branches'");
+       printerr("              calls           'calls' => create calls and call_paths table");
+       printerr("              callchains      'callchains' => create call_paths table");
        raise Exception("Too few arguments")
 
 if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
                return
        raise Exception("Query failed: " + q.lastError().text())
 
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database ...")
 
 db_exists = False
 try:
@@ -378,7 +386,7 @@ if perf_db_export_calls:
        call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
 
 def trace_begin():
-       print datetime.datetime.today(), "Writing records..."
+       printdate("Writing records...")
        do_query(query, 'BEGIN TRANSACTION')
        # id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
        evsel_table(0, "unknown")
@@ -397,14 +405,14 @@ unhandled_count = 0
 def trace_end():
        do_query(query, 'END TRANSACTION')
 
-       print datetime.datetime.today(), "Adding indexes"
+       printdate("Adding indexes")
        if perf_db_export_calls:
                do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
                do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
 
        if (unhandled_count):
-               print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
-       print datetime.datetime.today(), "Done"
+               printdate("Warning: ", unhandled_count, " unhandled events")
+       printdate("Done")
 
 def trace_unhandled(event_name, context, event_fields_dict):
        global unhandled_count
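export-to-sqlite.py gets the same conversion: the Python 2 form "print >> sys.stderr, ..." no longer works once the script has to run under Python 3, so both exporters import print_function and wrap the function form in small helpers. A self-contained copy of that shared pattern, runnable on either interpreter:

    from __future__ import print_function   # no-op on Python 3, enables print() on Python 2
    import sys
    import datetime

    def printerr(*args, **kw_args):
        print(*args, file=sys.stderr, **kw_args)

    def printdate(*args, **kw_args):
        print(datetime.datetime.today(), *args, sep=' ', **kw_args)

    printdate("Writing records...")          # timestamped progress line on stdout
    printerr("Too few arguments")            # diagnostics go to stderr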
index afec9479ca7fd273a9ddcbcb7de3a823605db8fc..e38518cdcbc3779c8e001b7c4de969942dd7a5ab 100755 (executable)
 #                                                                              7fab593ea956 48 89 15 3b 13 22 00                            movq  %rdx, 0x22133b(%rip)
 # 8107675243232  2    ls       22011  22011  hardware interrupt     No         7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
 
+from __future__ import print_function
+
 import sys
 import weakref
 import threading
 import string
-import cPickle
+try:
+       # Python2
+       import cPickle as pickle
+       # size of pickled integer big enough for record size
+       glb_nsz = 8
+except ImportError:
+       import pickle
+       glb_nsz = 16
 import re
 import os
 from PySide.QtCore import *
@@ -102,6 +111,15 @@ from decimal import *
 from ctypes import *
 from multiprocessing import Process, Array, Value, Event
 
+# xrange is range in Python3
+try:
+       xrange
+except NameError:
+       xrange = range
+
+def printerr(*args, **keyword_args):
+       print(*args, file=sys.stderr, **keyword_args)
+
 # Data formatting helpers
 
 def tohex(ip):
@@ -1004,10 +1022,6 @@ class ChildDataItemFinder():
 
 glb_chunk_sz = 10000
 
-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
 # Background process for SQL data fetcher
 
 class SQLFetcherProcess():
@@ -1066,7 +1080,7 @@ class SQLFetcherProcess():
                                return True
                        if space >= glb_nsz:
                                # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
-                               nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+                               nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
                                self.buffer[self.local_head : self.local_head + len(nd)] = nd
                        self.local_head = 0
                if self.local_tail - self.local_head > sz:
@@ -1084,9 +1098,9 @@ class SQLFetcherProcess():
                        self.wait_event.wait()
 
        def AddToBuffer(self, obj):
-               d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+               d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
                n = len(d)
-               nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+               nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
                sz = n + glb_nsz
                self.WaitForSpace(sz)
                pos = self.local_head
@@ -1198,12 +1212,12 @@ class SQLFetcher(QObject):
                pos = self.local_tail
                if len(self.buffer) - pos < glb_nsz:
                        pos = 0
-               n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+               n = pickle.loads(self.buffer[pos : pos + glb_nsz])
                if n == 0:
                        pos = 0
-                       n = cPickle.loads(self.buffer[0 : glb_nsz])
+                       n = pickle.loads(self.buffer[0 : glb_nsz])
                pos += glb_nsz
-               obj = cPickle.loads(self.buffer[pos : pos + n])
+               obj = pickle.loads(self.buffer[pos : pos + n])
                self.local_tail = pos + n
                return obj
 
@@ -2973,7 +2987,7 @@ class DBRef():
 
 def Main():
        if (len(sys.argv) < 2):
-               print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+               printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
                raise Exception("Too few arguments")
 
        dbname = sys.argv[1]
@@ -2986,8 +3000,8 @@ def Main():
 
        is_sqlite3 = False
        try:
-               f = open(dbname)
-               if f.read(15) == "SQLite format 3":
+               f = open(dbname, "rb")
+               if f.read(15) == b'SQLite format 3':
                        is_sqlite3 = True
                f.close()
        except:
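exported-sql-viewer.py follows the same pattern (pickle falls back from cPickle, xrange is aliased to range), and its database-type probe now opens the file in binary mode so the comparison against the SQLite magic is bytes-to-bytes. A minimal sketch of that probe, with a hypothetical database file name:

    def is_sqlite3_file(path):
        # The first 15 bytes of an SQLite database are the literal
        # "SQLite format 3"; on Python 3 a binary read yields bytes,
        # so the right-hand side must be a bytes literal too.
        try:
            with open(path, "rb") as f:
                return f.read(15) == b"SQLite format 3"
        except IOError:
            return False

    print(is_sqlite3_file("pt_example.db"))   # hypothetical file name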
index cb0a3138fa548bb62fa7a0a22bdb9e77f8e84341..93818054ae2086e4818f14231ef9c731d27cb71d 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -C 0 kill >/dev/null 2>&1
+args    = --no-bpf-event -C 0 kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 85a23cf35ba14dda52997598d5c78afb411ae812..b0ca42a5ecc9ce50167e3ef62ef3dbf0a72e46c5 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = kill >/dev/null 2>&1
+args    = --no-bpf-event kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 81f839e2fad019ca43bb5c6be779ab26b79a0afd..1a99b3ce6b899c0730886308ddb08bdbc2a6409a 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -b kill >/dev/null 2>&1
+args    = --no-bpf-event -b kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 357421f4dfcedd07c7e9a98055f2145b8c3c987d..709768b508c624a8965f3a5e62ad7f39e980296a 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j any kill >/dev/null 2>&1
+args    = --no-bpf-event -j any kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index dbc55f2ab845843a42272374bddfbc256cf58b7f..f943221f782543d84b83a8e67a177a1b7927b51a 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j any_call kill >/dev/null 2>&1
+args    = --no-bpf-event -j any_call kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a0824ff8e131d2e38e2ebd2d94bf07f9f7044878..fd4f5b4154a9d389ffb43943ad4447a97fa28b2d 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j any_ret kill >/dev/null 2>&1
+args    = --no-bpf-event -j any_ret kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index f34d6f120181e40aef44ec175e8dd4af4826b4d8..4e52d685ebe1724224b743043b6c2e034b425b3e 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j hv kill >/dev/null 2>&1
+args    = --no-bpf-event -j hv kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index b86a352322487e78e362c9cc74af15946f91c247..e08c6ab3796e01d0bbd7867ef59958b1c647066f 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j ind_call kill >/dev/null 2>&1
+args    = --no-bpf-event -j ind_call kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index d3fbc5e1858a649611ad4d1541bb9f159d7f0e5a..b4b98f84fc2f75cbe7f5b5a9b6d2121a0efd0b00 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j k kill >/dev/null 2>&1
+args    = --no-bpf-event -j k kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a318f0dda173669cbf3d07de3b4b1c3b145b8ba9..fb9610edbb0d7ef00c1e4ca65109df5300cc8e4f 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j u kill >/dev/null 2>&1
+args    = --no-bpf-event -j u kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 34f6cc5772636e11de8a3efbf292c12e23a99f3b..5e9b9019d7865c1d2221f1b9585ae2678022cca3 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -c 123 kill >/dev/null 2>&1
+args    = --no-bpf-event -c 123 kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a9cf2233b0cef33f5decbdef859823c7339a2455..a99bb13149c20d6042f46f0d668571e98cdbc68f 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -d kill >/dev/null 2>&1
+args    = --no-bpf-event -d kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index bf4cb459f0d5c4a195e5de68a4c9efaf232b2b6a..89e29f6b2ae0cf8cf8670198cc78131f15dd43fe 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -F 100 kill >/dev/null 2>&1
+args    = --no-bpf-event -F 100 kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 0b216e69760cd695786fe132c287ddd451e1570f..5d8234d508452ed3c6263b3df5b69835adc47725 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -g kill >/dev/null 2>&1
+args    = --no-bpf-event -g kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index da2fa73bd0a2801876e8acd9f9fe364909a7508d..ae92061d611ded68ebc072db4745a80a18c4db76 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --call-graph dwarf -- kill >/dev/null 2>&1
+args    = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 625d190bb798e2bce72f306e8b34af9bf4cc3a50..5630521c0b0f3f401004aa65b0f20e033fd3dc22 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --call-graph fp kill >/dev/null 2>&1
+args    = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 618ba1c174741c46ea730297383d8a67574be252..14ee60fd3f410f53b131f7bf5f65dd461ffdddc9 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --group -e cycles,instructions kill >/dev/null 2>&1
+args    = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
 ret     = 1
 
 [event-1:base-record]
index f0729c454f160bed941b16133b9ac437c973404d..300b9f7e6d6938f9cc04879df757497d09ff26d2 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+args    = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
 ret     = 1
 
 [event-1:base-record]
index 48e8bd12fe4676f166cf74c512f1ec52d9ae93d6..3ffe246e02283970178bc65e87c3e2656299fa0c 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -e '{cycles,instructions}' kill >/dev/null 2>&1
+args    = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
 ret     = 1
 
 [event-1:base-record]
index aa3956d8fe207f7ee28bfa3ebbc03511730212e4..583dcbb078bad826cd2212193654a293f8979f39 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --no-buffering kill >/dev/null 2>&1
+args    = --no-bpf-event --no-buffering kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 560943decb87cd5ea2fd28a2b5ccc301109f3c7e..15d1dc162e1c3ec754b5dc52efac1a1823ff6164 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -i kill >/dev/null 2>&1
+args    = --no-bpf-event -i kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 8eb73ab639e0a7ba730bdbf2876958ef62fad7de..596fbd6d5a2ccdfd2f9d20bb44fe3acd12b319cf 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -n kill >/dev/null 2>&1
+args    = --no-bpf-event -n kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 69bc748f0f27db32a8a88e08b9a5b62b17632312..119101154c5ee97d0775ad72a822f2d27c794d6d 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -c 100 -P kill >/dev/null 2>&1
+args    = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a188a614a44c5827a9ed373ac03129aa25e42b3c..13a5f7860c786c395c3195c17cf508bc7c065d1c 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -R kill >/dev/null 2>&1
+args    = --no-bpf-event -R kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
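Each of the record attr tests above gains --no-bpf-event, presumably to keep the set of events the test observes stable now that perf record tracks BPF programs by default. These files are plain ini sections consumed by the attr harness (tools/perf/tests/attr.py); a minimal sketch of how the command under test is assembled, assuming one of the updated files has been saved locally under the hypothetical name test-record-basic:

    try:
        from configparser import ConfigParser      # Python 3
    except ImportError:
        from ConfigParser import ConfigParser      # Python 2

    cfg = ConfigParser()
    cfg.read("test-record-basic")
    print("perf %s %s" % (cfg.get("config", "command"), cfg.get("config", "args")))
    # -> perf record --no-bpf-event kill >/dev/null 2>&1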
index 6d598cc071ae23d5f0f9c5210077a7efaff79d7d..1a9c3becf5ffb89d723e7c636e5be3e31c10053c 100644 (file)
@@ -18,7 +18,7 @@ static void testcase(void)
        int i;
 
        for (i = 0; i < NR_ITERS; i++) {
-               char proc_name[10];
+               char proc_name[15];
 
                snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
                prctl(PR_SET_NAME, proc_name);
index ea7acf403727eb99ec2111218f95059da35e27d4..71f60c0f9faa1fc9fd59e2b7d35b53c7b92d2812 100644 (file)
@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
        if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
                ret = -1;
 
+       perf_evsel__delete(evsel);
        return ret;
 }
index 01f0706995a9737c4bf7ff9012228f6bda8d11fa..9acc1e80b93673c018f81296314f6c4e7363ca70 100644 (file)
@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
        const char *p;
        const char **other;
        double val;
-       int ret;
+       int i, ret;
        struct parse_ctx ctx;
        int num_other;
 
@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
        TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
        TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
        TEST_ASSERT_VAL("find other", other[3] == NULL);
+
+       for (i = 0; i < num_other; i++)
+               free((void *)other[i]);
        free((void *)other);
 
        return 0;
index c531e6deb104799d733f47fbee405b5dca4b861c..493ecb61154026b0ee777feef5043681aa784973 100644 (file)
@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
        if (IS_ERR(evsel)) {
                tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
                pr_debug("%s\n", errbuf);
-               goto out_thread_map_delete;
+               goto out_cpu_map_delete;
        }
 
        if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -119,6 +119,8 @@ out_close_fd:
        perf_evsel__close_fd(evsel);
 out_evsel_delete:
        perf_evsel__delete(evsel);
+out_cpu_map_delete:
+       cpu_map__put(cpus);
 out_thread_map_delete:
        thread_map__put(threads);
        return err;
index 4f75561424eda58ad554046ec7979924f9c7bc0d..4ad37d8c7d6a16a4ffb158788c82f45174728115 100644 (file)
@@ -611,14 +611,16 @@ void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
                browser->top = browser->entries;
                break;
        case SEEK_CUR:
-               browser->top = browser->top + browser->top_idx + offset;
+               browser->top = (char **)browser->top + offset;
                break;
        case SEEK_END:
-               browser->top = browser->top + browser->nr_entries - 1 + offset;
+               browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;
                break;
        default:
                return;
        }
+       assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);
+       assert((char **)browser->top >= (char **)browser->entries);
 }
 
 unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
@@ -630,7 +632,9 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
                browser->top = browser->entries;
 
        pos = (char **)browser->top;
-       while (idx < browser->nr_entries) {
+       while (idx < browser->nr_entries &&
+              row < (unsigned)SLtt_Screen_Rows - 1) {
+               assert(pos < (char **)browser->entries + browser->nr_entries);
                if (!browser->filter || !browser->filter(browser, *pos)) {
                        ui_browser__gotorc(browser, row, 0);
                        browser->write(browser, pos, row);
index 8fee56b465027a4776918a538c1d29fa540fa42c..fdf86f7981cab2ca62388733c3d36ac3b64494b8 100644 (file)
@@ -3,6 +3,7 @@ perf-y += hists.o
 perf-y += map.o
 perf-y += scripts.o
 perf-y += header.o
+perf-y += res_sample.o
 
 CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
 CFLAGS_hists.o    += -DENABLE_SLFUTURE_CONST
index 35bdfd8b1e7198ece317015d68685b86e2fb943c..98d934a36d86a8bcde59fef7d0ae8d4754f42dc2 100644 (file)
@@ -750,7 +750,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
                        continue;
                case 'r':
                        {
-                               script_browse(NULL);
+                               script_browse(NULL, NULL);
                                continue;
                        }
                case 'k':
index aef800d97ea1879ed8c23e19de80bc027c6bf3d8..3421ecbdd3f046c42e4b5ef578abf11bfb591b64 100644 (file)
@@ -7,6 +7,7 @@
 #include <string.h>
 #include <linux/rbtree.h>
 #include <sys/ttydefaults.h>
+#include <linux/time64.h>
 
 #include "../../util/callchain.h"
 #include "../../util/evsel.h"
@@ -30,6 +31,7 @@
 #include "srcline.h"
 #include "string2.h"
 #include "units.h"
+#include "time-utils.h"
 
 #include "sane_ctype.h"
 
@@ -1224,6 +1226,8 @@ void hist_browser__init_hpp(void)
                                hist_browser__hpp_color_overhead_guest_us;
        perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
                                hist_browser__hpp_color_overhead_acc;
+
+       res_sample_init();
 }
 
 static int hist_browser__show_entry(struct hist_browser *browser,
@@ -2338,9 +2342,12 @@ close_file_and_continue:
 }
 
 struct popup_action {
+       unsigned long           time;
        struct thread           *thread;
        struct map_symbol       ms;
        int                     socket;
+       struct perf_evsel       *evsel;
+       enum rstype             rstype;
 
        int (*fn)(struct hist_browser *browser, struct popup_action *act);
 };
@@ -2527,45 +2534,136 @@ static int
 do_run_script(struct hist_browser *browser __maybe_unused,
              struct popup_action *act)
 {
-       char script_opt[64];
-       memset(script_opt, 0, sizeof(script_opt));
+       char *script_opt;
+       int len;
+       int n = 0;
 
+       len = 100;
+       if (act->thread)
+               len += strlen(thread__comm_str(act->thread));
+       else if (act->ms.sym)
+               len += strlen(act->ms.sym->name);
+       script_opt = malloc(len);
+       if (!script_opt)
+               return -1;
+
+       script_opt[0] = 0;
        if (act->thread) {
-               scnprintf(script_opt, sizeof(script_opt), " -c %s ",
+               n = scnprintf(script_opt, len, " -c %s ",
                          thread__comm_str(act->thread));
        } else if (act->ms.sym) {
-               scnprintf(script_opt, sizeof(script_opt), " -S %s ",
+               n = scnprintf(script_opt, len, " -S %s ",
                          act->ms.sym->name);
        }
 
-       script_browse(script_opt);
+       if (act->time) {
+               char start[32], end[32];
+               unsigned long starttime = act->time;
+               unsigned long endtime = act->time + symbol_conf.time_quantum;
+
+               if (starttime == endtime) { /* Display 1ms as fallback */
+                       starttime -= 1*NSEC_PER_MSEC;
+                       endtime += 1*NSEC_PER_MSEC;
+               }
+               timestamp__scnprintf_usec(starttime, start, sizeof start);
+               timestamp__scnprintf_usec(endtime, end, sizeof end);
+               n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
+       }
+
+       script_browse(script_opt, act->evsel);
+       free(script_opt);
        return 0;
 }
 
 static int
-add_script_opt(struct hist_browser *browser __maybe_unused,
+do_res_sample_script(struct hist_browser *browser __maybe_unused,
+                    struct popup_action *act)
+{
+       struct hist_entry *he;
+
+       he = hist_browser__selected_entry(browser);
+       res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
+       return 0;
+}
+
+static int
+add_script_opt_2(struct hist_browser *browser __maybe_unused,
               struct popup_action *act, char **optstr,
-              struct thread *thread, struct symbol *sym)
+              struct thread *thread, struct symbol *sym,
+              struct perf_evsel *evsel, const char *tstr)
 {
+
        if (thread) {
-               if (asprintf(optstr, "Run scripts for samples of thread [%s]",
-                            thread__comm_str(thread)) < 0)
+               if (asprintf(optstr, "Run scripts for samples of thread [%s]%s",
+                            thread__comm_str(thread), tstr) < 0)
                        return 0;
        } else if (sym) {
-               if (asprintf(optstr, "Run scripts for samples of symbol [%s]",
-                            sym->name) < 0)
+               if (asprintf(optstr, "Run scripts for samples of symbol [%s]%s",
+                            sym->name, tstr) < 0)
                        return 0;
        } else {
-               if (asprintf(optstr, "Run scripts for all samples") < 0)
+               if (asprintf(optstr, "Run scripts for all samples%s", tstr) < 0)
                        return 0;
        }
 
        act->thread = thread;
        act->ms.sym = sym;
+       act->evsel = evsel;
        act->fn = do_run_script;
        return 1;
 }
 
+static int
+add_script_opt(struct hist_browser *browser,
+              struct popup_action *act, char **optstr,
+              struct thread *thread, struct symbol *sym,
+              struct perf_evsel *evsel)
+{
+       int n, j;
+       struct hist_entry *he;
+
+       n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
+
+       he = hist_browser__selected_entry(browser);
+       if (sort_order && strstr(sort_order, "time")) {
+               char tstr[128];
+
+               optstr++;
+               act++;
+               j = sprintf(tstr, " in ");
+               j += timestamp__scnprintf_usec(he->time, tstr + j,
+                                              sizeof tstr - j);
+               j += sprintf(tstr + j, "-");
+               timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
+                                         tstr + j, sizeof tstr - j);
+               n += add_script_opt_2(browser, act, optstr, thread, sym,
+                                         evsel, tstr);
+               act->time = he->time;
+       }
+       return n;
+}
+
+static int
+add_res_sample_opt(struct hist_browser *browser __maybe_unused,
+                  struct popup_action *act, char **optstr,
+                  struct res_sample *res_sample,
+                  struct perf_evsel *evsel,
+                  enum rstype type)
+{
+       if (!res_sample)
+               return 0;
+
+       if (asprintf(optstr, "Show context for individual samples %s",
+               type == A_ASM ? "with assembler" :
+               type == A_SOURCE ? "with source" : "") < 0)
+               return 0;
+
+       act->fn = do_res_sample_script;
+       act->evsel = evsel;
+       act->rstype = type;
+       return 1;
+}
+
 static int
 do_switch_data(struct hist_browser *browser __maybe_unused,
               struct popup_action *act __maybe_unused)
@@ -3031,7 +3129,7 @@ skip_annotation:
                                nr_options += add_script_opt(browser,
                                                             &actions[nr_options],
                                                             &options[nr_options],
-                                                            thread, NULL);
+                                                            thread, NULL, evsel);
                        }
                        /*
                         * Note that browser->selection != NULL
@@ -3046,11 +3144,24 @@ skip_annotation:
                                nr_options += add_script_opt(browser,
                                                             &actions[nr_options],
                                                             &options[nr_options],
-                                                            NULL, browser->selection->sym);
+                                                            NULL, browser->selection->sym,
+                                                            evsel);
                        }
                }
                nr_options += add_script_opt(browser, &actions[nr_options],
-                                            &options[nr_options], NULL, NULL);
+                                            &options[nr_options], NULL, NULL, evsel);
+               nr_options += add_res_sample_opt(browser, &actions[nr_options],
+                                                &options[nr_options],
+                                hist_browser__selected_entry(browser)->res_samples,
+                                evsel, A_NORMAL);
+               nr_options += add_res_sample_opt(browser, &actions[nr_options],
+                                                &options[nr_options],
+                                hist_browser__selected_entry(browser)->res_samples,
+                                evsel, A_ASM);
+               nr_options += add_res_sample_opt(browser, &actions[nr_options],
+                                                &options[nr_options],
+                                hist_browser__selected_entry(browser)->res_samples,
+                                evsel, A_SOURCE);
                nr_options += add_switch_opt(browser, &actions[nr_options],
                                             &options[nr_options]);
 skip_scripting:
diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c
new file mode 100644 (file)
index 0000000..c0dd731
--- /dev/null
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Display a menu with individual samples to browse with perf script */
+#include "util.h"
+#include "hist.h"
+#include "evsel.h"
+#include "hists.h"
+#include "sort.h"
+#include "config.h"
+#include "time-utils.h"
+#include <linux/time64.h>
+
+static u64 context_len = 10 * NSEC_PER_MSEC;
+
+static int res_sample_config(const char *var, const char *value, void *data __maybe_unused)
+{
+       if (!strcmp(var, "samples.context"))
+               return perf_config_u64(&context_len, var, value);
+       return 0;
+}
+
+void res_sample_init(void)
+{
+       perf_config(res_sample_config, NULL);
+}
+
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+                     struct perf_evsel *evsel, enum rstype rstype)
+{
+       char **names;
+       int i, n;
+       int choice;
+       char *cmd;
+       char pbuf[256], tidbuf[32], cpubuf[32];
+       const char *perf = perf_exe(pbuf, sizeof pbuf);
+       char trange[128], tsample[64];
+       struct res_sample *r;
+       char extra_format[256];
+
+       names = calloc(num_res, sizeof(char *));
+       if (!names)
+               return -1;
+       for (i = 0; i < num_res; i++) {
+               char tbuf[64];
+
+               timestamp__scnprintf_nsec(res_samples[i].time, tbuf, sizeof tbuf);
+               if (asprintf(&names[i], "%s: CPU %d tid %d", tbuf,
+                            res_samples[i].cpu, res_samples[i].tid) < 0) {
+                       while (--i >= 0)
+                               free(names[i]);
+                       free(names);
+                       return -1;
+               }
+       }
+       choice = ui__popup_menu(num_res, names);
+       for (i = 0; i < num_res; i++)
+               free(names[i]);
+       free(names);
+
+       if (choice < 0 || choice >= num_res)
+               return -1;
+       r = &res_samples[choice];
+
+       n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange);
+       trange[n++] = ',';
+       timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n);
+
+       timestamp__scnprintf_nsec(r->time, tsample, sizeof tsample);
+
+       attr_to_script(extra_format, &evsel->attr);
+
+       if (asprintf(&cmd, "%s script %s%s --time %s %s%s %s%s --ns %s %s %s %s %s | less +/%s",
+                    perf,
+                    input_name ? "-i " : "",
+                    input_name ? input_name : "",
+                    trange,
+                    r->cpu >= 0 ? "--cpu " : "",
+                    r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
+                    r->tid ? "--tid " : "",
+                    r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "",
+                    extra_format,
+                    rstype == A_ASM ? "-F +insn --xed" :
+                    rstype == A_SOURCE ? "-F +srcline,+srccode" : "",
+                    symbol_conf.inline_name ? "--inline" : "",
+                    "--show-lost-events ",
+                    r->tid ? "--show-switch-events --show-task-events " : "",
+                    tsample) < 0)
+               return -1;
+       run_script(cmd);
+       free(cmd);
+       return 0;
+}
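res_sample_browse() above ends up handing one chosen sample to perf script, bracketed by the configurable samples.context window (10 ms on each side by default) around its timestamp. A sketch, in Python purely for brevity, of the general shape of the command line the asprintf() call assembles for a made-up sample (CPU 3, tid 22011, t = 1234.567891 s) when the assembler variant is chosen; the exact formatting comes from timestamp__scnprintf_nsec() and the conditional arguments above:

    context_s = 0.010            # samples.context default: 10 ms either side
    t = 1234.567891              # hypothetical sample timestamp, in seconds
    cmd = ("perf script -i perf.data --time %.9f,%.9f --cpu 3 --tid 22011 "
           "--ns -F +insn --xed --show-lost-events "
           "--show-switch-events --show-task-events | less +/%.9f"
           % (t - context_s, t + context_s, t))
    print(cmd)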
index 90a32ac69e76c9eeae87b913008874ececb0f366..27cf3ab88d13f894e1e4597eeec4112c4d00fe9e 100644 (file)
@@ -1,34 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <elf.h>
-#include <inttypes.h>
-#include <sys/ttydefaults.h>
-#include <string.h>
 #include "../../util/sort.h"
 #include "../../util/util.h"
 #include "../../util/hist.h"
 #include "../../util/debug.h"
 #include "../../util/symbol.h"
 #include "../browser.h"
-#include "../helpline.h"
 #include "../libslang.h"
-
-/* 2048 lines should be enough for a script output */
-#define MAX_LINES              2048
-
-/* 160 bytes for one output line */
-#define AVERAGE_LINE_LEN       160
-
-struct script_line {
-       struct list_head node;
-       char line[AVERAGE_LINE_LEN];
-};
-
-struct perf_script_browser {
-       struct ui_browser b;
-       struct list_head entries;
-       const char *script_name;
-       int nr_lines;
-};
+#include "config.h"
 
 #define SCRIPT_NAMELEN 128
 #define SCRIPT_MAX_NO  64
@@ -40,149 +18,169 @@ struct perf_script_browser {
  */
 #define SCRIPT_FULLPATH_LEN    256
 
+struct script_config {
+       const char **names;
+       char **paths;
+       int index;
+       const char *perf;
+       char extra_format[256];
+};
+
+void attr_to_script(char *extra_format, struct perf_event_attr *attr)
+{
+       extra_format[0] = 0;
+       if (attr->read_format & PERF_FORMAT_GROUP)
+               strcat(extra_format, " -F +metric");
+       if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK)
+               strcat(extra_format, " -F +brstackinsn --xed");
+       if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+               strcat(extra_format, " -F +iregs");
+       if (attr->sample_type & PERF_SAMPLE_REGS_USER)
+               strcat(extra_format, " -F +uregs");
+       if (attr->sample_type & PERF_SAMPLE_PHYS_ADDR)
+               strcat(extra_format, " -F +phys_addr");
+}
+
+static int add_script_option(const char *name, const char *opt,
+                            struct script_config *c)
+{
+       c->names[c->index] = name;
+       if (asprintf(&c->paths[c->index],
+                    "%s script %s -F +metric %s %s",
+                    c->perf, opt, symbol_conf.inline_name ? " --inline" : "",
+                    c->extra_format) < 0)
+               return -1;
+       c->index++;
+       return 0;
+}
+
+static int scripts_config(const char *var, const char *value, void *data)
+{
+       struct script_config *c = data;
+
+       if (!strstarts(var, "scripts."))
+               return -1;
+       if (c->index >= SCRIPT_MAX_NO)
+               return -1;
+       c->names[c->index] = strdup(var + 7);
+       if (!c->names[c->index])
+               return -1;
+       if (asprintf(&c->paths[c->index], "%s %s", value,
+                    c->extra_format) < 0)
+               return -1;
+       c->index++;
+       return 0;
+}
+
 /*
  * When success, will copy the full path of the selected script
  * into  the buffer pointed by script_name, and return 0.
  * Return -1 on failure.
  */
-static int list_scripts(char *script_name)
+static int list_scripts(char *script_name, bool *custom,
+                       struct perf_evsel *evsel)
 {
-       char *buf, *names[SCRIPT_MAX_NO], *paths[SCRIPT_MAX_NO];
-       int i, num, choice, ret = -1;
+       char *buf, *paths[SCRIPT_MAX_NO], *names[SCRIPT_MAX_NO];
+       int i, num, choice;
+       int ret = 0;
+       int max_std, custom_perf;
+       char pbuf[256];
+       const char *perf = perf_exe(pbuf, sizeof pbuf);
+       struct script_config scriptc = {
+               .names = (const char **)names,
+               .paths = paths,
+               .perf = perf
+       };
+
+       script_name[0] = 0;
 
        /* Preset the script name to SCRIPT_NAMELEN */
        buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN));
        if (!buf)
-               return ret;
+               return -1;
 
-       for (i = 0; i < SCRIPT_MAX_NO; i++) {
-               names[i] = buf + i * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
+       if (evsel)
+               attr_to_script(scriptc.extra_format, &evsel->attr);
+       add_script_option("Show individual samples", "", &scriptc);
+       add_script_option("Show individual samples with assembler", "-F +insn --xed",
+                         &scriptc);
+       add_script_option("Show individual samples with source", "-F +srcline,+srccode",
+                         &scriptc);
+       perf_config(scripts_config, &scriptc);
+       custom_perf = scriptc.index;
+       add_script_option("Show samples with custom perf script arguments", "", &scriptc);
+       i = scriptc.index;
+       max_std = i;
+
+       for (; i < SCRIPT_MAX_NO; i++) {
+               names[i] = buf + (i - max_std) * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
                paths[i] = names[i] + SCRIPT_NAMELEN;
        }
 
-       num = find_scripts(names, paths);
-       if (num > 0) {
-               choice = ui__popup_menu(num, names);
-               if (choice < num && choice >= 0) {
-                       strcpy(script_name, paths[choice]);
-                       ret = 0;
-               }
+       num = find_scripts(names + max_std, paths + max_std, SCRIPT_MAX_NO - max_std,
+                       SCRIPT_FULLPATH_LEN);
+       if (num < 0)
+               num = 0;
+       choice = ui__popup_menu(num + max_std, (char * const *)names);
+       if (choice < 0) {
+               ret = -1;
+               goto out;
        }
+       if (choice == custom_perf) {
+               char script_args[50];
+               int key = ui_browser__input_window("perf script command",
+                               "Enter perf script command line (without perf script prefix)",
+                               script_args, "", 0);
+               if (key != K_ENTER)
+                       return -1;
+               sprintf(script_name, "%s script %s", perf, script_args);
+       } else if (choice < num + max_std) {
+               strcpy(script_name, paths[choice]);
+       }
+       *custom = choice >= max_std;
 
+out:
        free(buf);
+       for (i = 0; i < max_std; i++)
+               free(paths[i]);
        return ret;
 }
 
-static void script_browser__write(struct ui_browser *browser,
-                                  void *entry, int row)
+void run_script(char *cmd)
 {
-       struct script_line *sline = list_entry(entry, struct script_line, node);
-       bool current_entry = ui_browser__is_current_entry(browser, row);
-
-       ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
-                                                      HE_COLORSET_NORMAL);
-
-       ui_browser__write_nstring(browser, sline->line, browser->width);
+       pr_debug("Running %s\n", cmd);
+       SLang_reset_tty();
+       if (system(cmd) < 0)
+               pr_warning("Cannot run %s\n", cmd);
+       /*
+        * SLang doesn't seem to reset the whole terminal, so be more
+        * forceful to get back to the original state.
+        */
+       printf("\033[c\033[H\033[J");
+       fflush(stdout);
+       SLang_init_tty(0, 0, 0);
+       SLsmg_refresh();
 }
 
-static int script_browser__run(struct perf_script_browser *browser)
+int script_browse(const char *script_opt, struct perf_evsel *evsel)
 {
-       int key;
+       char *cmd, script_name[SCRIPT_FULLPATH_LEN];
+       bool custom = false;
 
-       if (ui_browser__show(&browser->b, browser->script_name,
-                            "Press ESC to exit") < 0)
+       memset(script_name, 0, SCRIPT_FULLPATH_LEN);
+       if (list_scripts(script_name, &custom, evsel))
                return -1;
 
-       while (1) {
-               key = ui_browser__run(&browser->b, 0);
-
-               /* We can add some special key handling here if needed */
-               break;
-       }
-
-       ui_browser__hide(&browser->b);
-       return key;
-}
-
-
-int script_browse(const char *script_opt)
-{
-       char cmd[SCRIPT_FULLPATH_LEN*2], script_name[SCRIPT_FULLPATH_LEN];
-       char *line = NULL;
-       size_t len = 0;
-       ssize_t retlen;
-       int ret = -1, nr_entries = 0;
-       FILE *fp;
-       void *buf;
-       struct script_line *sline;
-
-       struct perf_script_browser script = {
-               .b = {
-                       .refresh    = ui_browser__list_head_refresh,
-                       .seek       = ui_browser__list_head_seek,
-                       .write      = script_browser__write,
-               },
-               .script_name = script_name,
-       };
-
-       INIT_LIST_HEAD(&script.entries);
-
-       /* Save each line of the output in one struct script_line object. */
-       buf = zalloc((sizeof(*sline)) * MAX_LINES);
-       if (!buf)
+       if (asprintf(&cmd, "%s%s %s %s%s 2>&1 | less",
+                       custom ? "perf script -s " : "",
+                       script_name,
+                       script_opt ? script_opt : "",
+                       input_name ? "-i " : "",
+                       input_name ? input_name : "") < 0)
                return -1;
-       sline = buf;
-
-       memset(script_name, 0, SCRIPT_FULLPATH_LEN);
-       if (list_scripts(script_name))
-               goto exit;
-
-       sprintf(cmd, "perf script -s %s ", script_name);
 
-       if (script_opt)
-               strcat(cmd, script_opt);
+       run_script(cmd);
+       free(cmd);
 
-       if (input_name) {
-               strcat(cmd, " -i ");
-               strcat(cmd, input_name);
-       }
-
-       strcat(cmd, " 2>&1");
-
-       fp = popen(cmd, "r");
-       if (!fp)
-               goto exit;
-
-       while ((retlen = getline(&line, &len, fp)) != -1) {
-               strncpy(sline->line, line, AVERAGE_LINE_LEN);
-
-               /* If one output line is very large, just cut it short */
-               if (retlen >= AVERAGE_LINE_LEN) {
-                       sline->line[AVERAGE_LINE_LEN - 1] = '\0';
-                       sline->line[AVERAGE_LINE_LEN - 2] = '\n';
-               }
-               list_add_tail(&sline->node, &script.entries);
-
-               if (script.b.width < retlen)
-                       script.b.width = retlen;
-
-               if (nr_entries++ >= MAX_LINES - 1)
-                       break;
-               sline++;
-       }
-
-       if (script.b.width > AVERAGE_LINE_LEN)
-               script.b.width = AVERAGE_LINE_LEN;
-
-       free(line);
-       pclose(fp);
-
-       script.nr_lines = nr_entries;
-       script.b.nr_entries = nr_entries;
-       script.b.entries = &script.entries;
-
-       ret = script_browser__run(&script);
-exit:
-       free(buf);
-       return ret;
+       return 0;
 }
index 5f6dbbf5d74931dd3379e57717bc92acf93e5472..c8b01176c9e162c4a80ff5264568d699774bb413 100644 (file)
 #include <errno.h>
 #include <inttypes.h>
 #include <libgen.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
 #include "util.h"
 #include "ui/ui.h"
 #include "sort.h"
@@ -24,6 +28,7 @@
 #include "annotate.h"
 #include "evsel.h"
 #include "evlist.h"
+#include "bpf-event.h"
 #include "block-range.h"
 #include "string2.h"
 #include "arch/common.h"
@@ -31,6 +36,7 @@
 #include <pthread.h>
 #include <linux/bitops.h>
 #include <linux/kernel.h>
+#include <bpf/libbpf.h>
 
 /* FIXME: For the HE_COLORSET */
 #include "ui/browser.h"
@@ -1615,6 +1621,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
                          "  --vmlinux vmlinux\n", build_id_msg ?: "");
        }
                break;
+       case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
+               scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
+               break;
        default:
                scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
                break;
@@ -1674,6 +1683,156 @@ fallback:
        return 0;
 }
 
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <dis-asm.h>
+
+static int symbol__disassemble_bpf(struct symbol *sym,
+                                  struct annotate_args *args)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       struct annotation_options *opts = args->options;
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_linfo *prog_linfo = NULL;
+       struct bpf_prog_info_node *info_node;
+       int len = sym->end - sym->start;
+       disassembler_ftype disassemble;
+       struct map *map = args->ms.map;
+       struct disassemble_info info;
+       struct dso *dso = map->dso;
+       int pc = 0, count, sub_id;
+       struct btf *btf = NULL;
+       char tpath[PATH_MAX];
+       size_t buf_size;
+       int nr_skip = 0;
+       int ret = -1;
+       char *buf;
+       bfd *bfdf;
+       FILE *s;
+
+       if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
+               return -1;
+
+       pr_debug("%s: handling sym %s addr %lx len %lx\n", __func__,
+                sym->name, sym->start, sym->end - sym->start);
+
+       memset(tpath, 0, sizeof(tpath));
+       perf_exe(tpath, sizeof(tpath));
+
+       bfdf = bfd_openr(tpath, NULL);
+       assert(bfdf);
+       assert(bfd_check_format(bfdf, bfd_object));
+
+       s = open_memstream(&buf, &buf_size);
+       if (!s)
+               goto out;
+       init_disassemble_info(&info, s,
+                             (fprintf_ftype) fprintf);
+
+       info.arch = bfd_get_arch(bfdf);
+       info.mach = bfd_get_mach(bfdf);
+
+       info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
+                                                dso->bpf_prog.id);
+       if (!info_node)
+               goto out;
+       info_linear = info_node->info_linear;
+       sub_id = dso->bpf_prog.sub_id;
+
+       info.buffer = (void *)(info_linear->info.jited_prog_insns);
+       info.buffer_length = info_linear->info.jited_prog_len;
+
+       if (info_linear->info.nr_line_info)
+               prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+       if (info_linear->info.btf_id) {
+               struct btf_node *node;
+
+               node = perf_env__find_btf(dso->bpf_prog.env,
+                                         info_linear->info.btf_id);
+               if (node)
+                       btf = btf__new((__u8 *)(node->data),
+                                      node->data_size);
+       }
+
+       disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+       disassemble = disassembler(info.arch,
+                                  bfd_big_endian(bfdf),
+                                  info.mach,
+                                  bfdf);
+#else
+       disassemble = disassembler(bfdf);
+#endif
+       assert(disassemble);
+
+       fflush(s);
+       do {
+               const struct bpf_line_info *linfo = NULL;
+               struct disasm_line *dl;
+               size_t prev_buf_size;
+               const char *srcline;
+               u64 addr;
+
+               addr = pc + ((u64 *)(info_linear->info.jited_ksyms))[sub_id];
+               count = disassemble(pc, &info);
+
+               if (prog_linfo)
+                       linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+                                                               addr, sub_id,
+                                                               nr_skip);
+
+               if (linfo && btf) {
+                       srcline = btf__name_by_offset(btf, linfo->line_off);
+                       nr_skip++;
+               } else
+                       srcline = NULL;
+
+               fprintf(s, "\n");
+               prev_buf_size = buf_size;
+               fflush(s);
+
+               if (!opts->hide_src_code && srcline) {
+                       args->offset = -1;
+                       args->line = strdup(srcline);
+                       args->line_nr = 0;
+                       args->ms.sym  = sym;
+                       dl = disasm_line__new(args);
+                       if (dl) {
+                               annotation_line__add(&dl->al,
+                                                    &notes->src->source);
+                       }
+               }
+
+               args->offset = pc;
+               args->line = buf + prev_buf_size;
+               args->line_nr = 0;
+               args->ms.sym  = sym;
+               dl = disasm_line__new(args);
+               if (dl)
+                       annotation_line__add(&dl->al, &notes->src->source);
+
+               pc += count;
+       } while (count > 0 && pc < len);
+
+       ret = 0;
+out:
+       free(prog_linfo);
+       free(btf);
+       fclose(s);
+       bfd_close(bfdf);
+       return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
+                                  struct annotate_args *args __maybe_unused)
+{
+       return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+
 static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 {
        struct annotation_options *opts = args->options;
@@ -1701,7 +1860,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
        pr_debug("annotating [%p] %30s : [%p] %30s\n",
                 dso, dso->long_name, sym, sym->name);
 
-       if (dso__is_kcore(dso)) {
+       if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+               return symbol__disassemble_bpf(sym, args);
+       } else if (dso__is_kcore(dso)) {
                kce.kcore_filename = symfs_filename;
                kce.addr = map__rip_2objdump(map, sym->start);
                kce.offs = sym->start;
index df34fe48316495cce4435ae6f319fe71f87d4384..5bc0cf655d377eb2a352545067e680cb6917c277 100644 (file)
@@ -369,6 +369,7 @@ enum symbol_disassemble_errno {
        __SYMBOL_ANNOTATE_ERRNO__START          = -10000,
 
        SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX       = __SYMBOL_ANNOTATE_ERRNO__START,
+       SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
 
        __SYMBOL_ANNOTATE_ERRNO__END,
 };
diff --git a/tools/perf/util/archinsn.h b/tools/perf/util/archinsn.h
new file mode 100644 (file)
index 0000000..448cbb6
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef INSN_H
+#define INSN_H 1
+
+struct perf_sample;
+struct machine;
+struct thread;
+
+void arch_fetch_insn(struct perf_sample *sample,
+                    struct thread *thread,
+                    struct machine *machine);
+
+#endif
index 028c8ec1f62a9c347b5c493d3b0caddf74145451..2a4a0da35632feeca4d0da640c3f65be2e62532e 100644 (file)
@@ -3,11 +3,17 @@
 #include <stdlib.h>
 #include <bpf/bpf.h>
 #include <bpf/btf.h>
+#include <bpf/libbpf.h>
 #include <linux/btf.h>
+#include <linux/err.h>
 #include "bpf-event.h"
 #include "debug.h"
 #include "symbol.h"
 #include "machine.h"
+#include "env.h"
+#include "session.h"
+#include "map.h"
+#include "evlist.h"
 
 #define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))
 
@@ -21,15 +27,122 @@ static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
        return ret;
 }
 
+static int machine__process_bpf_event_load(struct machine *machine,
+                                          union perf_event *event,
+                                          struct perf_sample *sample __maybe_unused)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info_node *info_node;
+       struct perf_env *env = machine->env;
+       int id = event->bpf_event.id;
+       unsigned int i;
+
+       /* perf-record, no need to handle bpf-event */
+       if (env == NULL)
+               return 0;
+
+       info_node = perf_env__find_bpf_prog_info(env, id);
+       if (!info_node)
+               return 0;
+       info_linear = info_node->info_linear;
+
+       for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
+               u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
+               u64 addr = addrs[i];
+               struct map *map;
+
+               map = map_groups__find(&machine->kmaps, addr);
+
+               if (map) {
+                       map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
+                       map->dso->bpf_prog.id = id;
+                       map->dso->bpf_prog.sub_id = i;
+                       map->dso->bpf_prog.env = env;
+               }
+       }
+       return 0;
+}
+
 int machine__process_bpf_event(struct machine *machine __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused)
 {
        if (dump_trace)
                perf_event__fprintf_bpf_event(event, stdout);
+
+       switch (event->bpf_event.type) {
+       case PERF_BPF_EVENT_PROG_LOAD:
+               return machine__process_bpf_event_load(machine, event, sample);
+
+       case PERF_BPF_EVENT_PROG_UNLOAD:
+               /*
+                * Do not free bpf_prog_info and btf of the program here,
+                * as the annotation code still needs them. They will be freed at
+                * the end of the session.
+                */
+               break;
+       default:
+               pr_debug("unexpected bpf_event type of %d\n",
+                        event->bpf_event.type);
+               break;
+       }
        return 0;
 }
 
+static int perf_env__fetch_btf(struct perf_env *env,
+                              u32 btf_id,
+                              struct btf *btf)
+{
+       struct btf_node *node;
+       u32 data_size;
+       const void *data;
+
+       data = btf__get_raw_data(btf, &data_size);
+
+       node = malloc(data_size + sizeof(struct btf_node));
+       if (!node)
+               return -1;
+
+       node->id = btf_id;
+       node->data_size = data_size;
+       memcpy(node->data, data, data_size);
+
+       perf_env__insert_btf(env, node);
+       return 0;
+}
+
+static int synthesize_bpf_prog_name(char *buf, int size,
+                                   struct bpf_prog_info *info,
+                                   struct btf *btf,
+                                   u32 sub_id)
+{
+       u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
+       void *func_infos = (void *)(uintptr_t)(info->func_info);
+       u32 sub_prog_cnt = info->nr_jited_ksyms;
+       const struct bpf_func_info *finfo;
+       const char *short_name = NULL;
+       const struct btf_type *t;
+       int name_len;
+
+       name_len = snprintf(buf, size, "bpf_prog_");
+       name_len += snprintf_hex(buf + name_len, size - name_len,
+                                prog_tags[sub_id], BPF_TAG_SIZE);
+       if (btf) {
+               finfo = func_infos + sub_id * info->func_info_rec_size;
+               t = btf__type_by_id(btf, finfo->type_id);
+               short_name = btf__name_by_offset(btf, t->name_off);
+       } else if (sub_id == 0 && sub_prog_cnt == 1) {
+               /* no subprog */
+               if (info->name[0])
+                       short_name = info->name;
+       } else
+               short_name = "F";
+       if (short_name)
+               name_len += snprintf(buf + name_len, size - name_len,
+                                    "_%s", short_name);
+       return name_len;
+}
+
 /*
  * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
  * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
@@ -40,7 +153,7 @@ int machine__process_bpf_event(struct machine *machine __maybe_unused,
  *   -1 for failures;
  *   -2 for lack of kernel support.
  */
-static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
+static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
                                               perf_event__handler_t process,
                                               struct machine *machine,
                                               int fd,
@@ -49,102 +162,71 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
 {
        struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
        struct bpf_event *bpf_event = &event->bpf_event;
-       u32 sub_prog_cnt, i, func_info_rec_size = 0;
-       u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
-       struct bpf_prog_info info = { .type = 0, };
-       u32 info_len = sizeof(info);
-       void *func_infos = NULL;
-       u64 *prog_addrs = NULL;
+       struct bpf_prog_info_linear *info_linear;
+       struct perf_tool *tool = session->tool;
+       struct bpf_prog_info_node *info_node;
+       struct bpf_prog_info *info;
        struct btf *btf = NULL;
-       u32 *prog_lens = NULL;
-       bool has_btf = false;
-       char errbuf[512];
+       struct perf_env *env;
+       u32 sub_prog_cnt, i;
        int err = 0;
+       u64 arrays;
+
+       /*
+        * for perf-record and perf-report use header.env;
+        * otherwise, use global perf_env.
+        */
+       env = session->data ? &session->header.env : &perf_env;
 
-       /* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
-       err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+       arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+       arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+       arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
 
-       if (err) {
-               pr_debug("%s: failed to get BPF program info: %s, aborting\n",
-                        __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
+       info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+       if (IS_ERR_OR_NULL(info_linear)) {
+               info_linear = NULL;
+               pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                return -1;
        }
-       if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+
+       if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
                return -2;
        }
 
+       info = &info_linear->info;
+
        /* number of ksyms, func_lengths, and tags should match */
-       sub_prog_cnt = info.nr_jited_ksyms;
-       if (sub_prog_cnt != info.nr_prog_tags ||
-           sub_prog_cnt != info.nr_jited_func_lens)
+       sub_prog_cnt = info->nr_jited_ksyms;
+       if (sub_prog_cnt != info->nr_prog_tags ||
+           sub_prog_cnt != info->nr_jited_func_lens)
                return -1;
 
        /* check BTF func info support */
-       if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
+       if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* btf func info number should be same as sub_prog_cnt */
-               if (sub_prog_cnt != info.nr_func_info) {
+               if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
-                       return -1;
-               }
-               if (btf__get_from_id(info.btf_id, &btf)) {
-                       pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
-                       return -1;
+                       err = -1;
+                       goto out;
                }
-               func_info_rec_size = info.func_info_rec_size;
-               func_infos = calloc(sub_prog_cnt, func_info_rec_size);
-               if (!func_infos) {
-                       pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
-                       return -1;
+               if (btf__get_from_id(info->btf_id, &btf)) {
+                       pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
+                       err = -1;
+                       btf = NULL;
+                       goto out;
                }
-               has_btf = true;
-       }
-
-       /*
-        * We need address, length, and tag for each sub program.
-        * Allocate memory and call bpf_obj_get_info_by_fd() again
-        */
-       prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
-       if (!prog_addrs) {
-               pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
-               goto out;
-       }
-       prog_lens = calloc(sub_prog_cnt, sizeof(u32));
-       if (!prog_lens) {
-               pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
-               goto out;
-       }
-       prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
-       if (!prog_tags) {
-               pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
-               goto out;
-       }
-
-       memset(&info, 0, sizeof(info));
-       info.nr_jited_ksyms = sub_prog_cnt;
-       info.nr_jited_func_lens = sub_prog_cnt;
-       info.nr_prog_tags = sub_prog_cnt;
-       info.jited_ksyms = ptr_to_u64(prog_addrs);
-       info.jited_func_lens = ptr_to_u64(prog_lens);
-       info.prog_tags = ptr_to_u64(prog_tags);
-       info_len = sizeof(info);
-       if (has_btf) {
-               info.nr_func_info = sub_prog_cnt;
-               info.func_info_rec_size = func_info_rec_size;
-               info.func_info = ptr_to_u64(func_infos);
-       }
-
-       err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
-       if (err) {
-               pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
-               goto out;
+               perf_env__fetch_btf(env, info->btf_id, btf);
        }
 
        /* Synthesize PERF_RECORD_KSYMBOL */
        for (i = 0; i < sub_prog_cnt; i++) {
-               const struct bpf_func_info *finfo;
-               const char *short_name = NULL;
-               const struct btf_type *t;
+               __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+               __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
                int name_len;
 
                *ksymbol_event = (struct ksymbol_event){
@@ -157,26 +239,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
                        .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                        .flags = 0,
                };
-               name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
-                                   "bpf_prog_");
-               name_len += snprintf_hex(ksymbol_event->name + name_len,
-                                        KSYM_NAME_LEN - name_len,
-                                        prog_tags[i], BPF_TAG_SIZE);
-               if (has_btf) {
-                       finfo = func_infos + i * info.func_info_rec_size;
-                       t = btf__type_by_id(btf, finfo->type_id);
-                       short_name = btf__name_by_offset(btf, t->name_off);
-               } else if (i == 0 && sub_prog_cnt == 1) {
-                       /* no subprog */
-                       if (info.name[0])
-                               short_name = info.name;
-               } else
-                       short_name = "F";
-               if (short_name)
-                       name_len += snprintf(ksymbol_event->name + name_len,
-                                            KSYM_NAME_LEN - name_len,
-                                            "_%s", short_name);
 
+               name_len = synthesize_bpf_prog_name(ksymbol_event->name,
+                                                   KSYM_NAME_LEN, info, btf, i);
                ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
                                                         sizeof(u64));
 
@@ -186,8 +251,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
                                                     machine, process);
        }
 
-       /* Synthesize PERF_RECORD_BPF_EVENT */
-       if (opts->bpf_event) {
+       if (!opts->no_bpf_event) {
+               /* Synthesize PERF_RECORD_BPF_EVENT */
                *bpf_event = (struct bpf_event){
                        .header = {
                                .type = PERF_RECORD_BPF_EVENT,
@@ -195,25 +260,38 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
                        },
                        .type = PERF_BPF_EVENT_PROG_LOAD,
                        .flags = 0,
-                       .id = info.id,
+                       .id = info->id,
                };
-               memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
+               memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;
+
+               /* save bpf_prog_info to env */
+               info_node = malloc(sizeof(struct bpf_prog_info_node));
+               if (!info_node) {
+                       err = -1;
+                       goto out;
+               }
+
+               info_node->info_linear = info_linear;
+               perf_env__insert_bpf_prog_info(env, info_node);
+               info_linear = NULL;
+
+               /*
+                * process after saving bpf_prog_info to env, so that
+                * required information is ready for look up
+                */
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }
 
 out:
-       free(prog_tags);
-       free(prog_lens);
-       free(prog_addrs);
-       free(func_infos);
+       free(info_linear);
        free(btf);
        return err ? -1 : 0;
 }
 
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts)
@@ -247,7 +325,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
                        continue;
                }
 
-               err = perf_event__synthesize_one_bpf_prog(tool, process,
+               err = perf_event__synthesize_one_bpf_prog(session, process,
                                                          machine, fd,
                                                          event, opts);
                close(fd);
@@ -261,3 +339,142 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
        free(event);
        return err;
 }
+
+static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info_node *info_node;
+       struct btf *btf = NULL;
+       u64 arrays;
+       u32 btf_id;
+       int fd;
+
+       fd = bpf_prog_get_fd_by_id(id);
+       if (fd < 0)
+               return;
+
+       arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+       arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+       arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+       info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+       if (IS_ERR_OR_NULL(info_linear)) {
+               pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+               goto out;
+       }
+
+       btf_id = info_linear->info.btf_id;
+
+       info_node = malloc(sizeof(struct bpf_prog_info_node));
+       if (info_node) {
+               info_node->info_linear = info_linear;
+               perf_env__insert_bpf_prog_info(env, info_node);
+       } else
+               free(info_linear);
+
+       if (btf_id == 0)
+               goto out;
+
+       if (btf__get_from_id(btf_id, &btf)) {
+               pr_debug("%s: failed to get BTF of id %u, aborting\n",
+                        __func__, btf_id);
+               goto out;
+       }
+       perf_env__fetch_btf(env, btf_id, btf);
+
+out:
+       free(btf);
+       close(fd);
+}
+
+static int bpf_event__sb_cb(union perf_event *event, void *data)
+{
+       struct perf_env *env = data;
+
+       if (event->header.type != PERF_RECORD_BPF_EVENT)
+               return -1;
+
+       switch (event->bpf_event.type) {
+       case PERF_BPF_EVENT_PROG_LOAD:
+               perf_env__add_bpf_info(env, event->bpf_event.id);
+
+       case PERF_BPF_EVENT_PROG_UNLOAD:
+               /*
+                * Do not free bpf_prog_info and btf of the program here,
+                * as the annotation code still needs them. They will be freed at
+                * the end of the session.
+                */
+               break;
+       default:
+               pr_debug("unexpected bpf_event type of %d\n",
+                        event->bpf_event.type);
+               break;
+       }
+
+       return 0;
+}
+
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+                           struct perf_env *env)
+{
+       struct perf_event_attr attr = {
+               .type             = PERF_TYPE_SOFTWARE,
+               .config           = PERF_COUNT_SW_DUMMY,
+               .sample_id_all    = 1,
+               .watermark        = 1,
+               .bpf_event        = 1,
+               .size      = sizeof(attr), /* to capture ABI version */
+       };
+
+       /*
+        * Older gcc versions don't support designated initializers, like above,
+        * for unnamed union members, such as the following:
+        */
+       attr.wakeup_watermark = 1;
+
+       return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+}
+
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                   struct perf_env *env,
+                                   FILE *fp)
+{
+       __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+       __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+       char name[KSYM_NAME_LEN];
+       struct btf *btf = NULL;
+       u32 sub_prog_cnt, i;
+
+       sub_prog_cnt = info->nr_jited_ksyms;
+       if (sub_prog_cnt != info->nr_prog_tags ||
+           sub_prog_cnt != info->nr_jited_func_lens)
+               return;
+
+       if (info->btf_id) {
+               struct btf_node *node;
+
+               node = perf_env__find_btf(env, info->btf_id);
+               if (node)
+                       btf = btf__new((__u8 *)(node->data),
+                                      node->data_size);
+       }
+
+       if (sub_prog_cnt == 1) {
+               synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
+               fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
+                       info->id, name, prog_addrs[0], prog_lens[0]);
+               return;
+       }
+
+       fprintf(fp, "# bpf_prog_info %u:\n", info->id);
+       for (i = 0; i < sub_prog_cnt; i++) {
+               synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
+
+               fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
+                       i, name, prog_addrs[i], prog_lens[i]);
+       }
+}
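
A standalone sketch of the naming scheme that synthesize_bpf_prog_name() implements above: "bpf_prog_" plus the program tag in hex, optionally followed by "_<name>" when BTF or the program name supplies one. The tag bytes and names below are invented:

#include <stdio.h>

#define TAG_SIZE        8
#define NAME_LEN        256

static int hex_append(char *buf, int size, const unsigned char *data, int len)
{
        int i, ret = 0;

        for (i = 0; i < len && ret < size; i++)
                ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
        return ret;
}

static void prog_name(char *buf, int size, const unsigned char *tag,
                      const char *short_name)
{
        int len;

        len  = snprintf(buf, size, "bpf_prog_");
        len += hex_append(buf + len, size - len, tag, TAG_SIZE);
        if (short_name)
                snprintf(buf + len, size - len, "_%s", short_name);
}

int main(void)
{
        const unsigned char tag[TAG_SIZE] = {
                0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33
        };
        char buf[NAME_LEN];

        prog_name(buf, sizeof(buf), tag, "my_tracepoint_func");   /* BTF name */
        printf("%s\n", buf);

        prog_name(buf, sizeof(buf), tag, NULL);                   /* tag only */
        printf("%s\n", buf);
        return 0;
}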
index 7890067e1a3781a1cdb629915eab5e3bc6f46b33..04c33b3bfe281055292f512812972babaf91589d 100644 (file)
@@ -3,22 +3,45 @@
 #define __PERF_BPF_EVENT_H
 
 #include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+#include <api/fd/array.h>
 #include "event.h"
+#include <stdio.h>
 
 struct machine;
 union perf_event;
+struct perf_env;
 struct perf_sample;
-struct perf_tool;
 struct record_opts;
+struct evlist;
+struct target;
+
+struct bpf_prog_info_node {
+       struct bpf_prog_info_linear     *info_linear;
+       struct rb_node                  rb_node;
+};
+
+struct btf_node {
+       struct rb_node  rb_node;
+       u32             id;
+       u32             data_size;
+       char            data[];
+};
 
 #ifdef HAVE_LIBBPF_SUPPORT
 int machine__process_bpf_event(struct machine *machine, union perf_event *event,
                               struct perf_sample *sample);
 
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts);
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+                                struct perf_env *env);
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                   struct perf_env *env,
+                                   FILE *fp);
 #else
 static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
                                             union perf_event *event __maybe_unused,
@@ -27,12 +50,25 @@ static inline int machine__process_bpf_event(struct machine *machine __maybe_unu
        return 0;
 }
 
-static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
                                                    perf_event__handler_t process __maybe_unused,
                                                    struct machine *machine __maybe_unused,
                                                    struct record_opts *opts __maybe_unused)
 {
        return 0;
 }
+
+static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
+                                         struct perf_env *env __maybe_unused)
+{
+       return 0;
+}
+
+static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+                                                 struct perf_env *env __maybe_unused,
+                                                 FILE *fp __maybe_unused)
+{
+
+}
 #endif // HAVE_LIBBPF_SUPPORT
 #endif
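
The header keeps perf's usual feature-gating shape: without HAVE_LIBBPF_SUPPORT the same entry points exist as static inline no-ops, so callers stay free of #ifdefs. A toy, compile-it-both-ways version of the pattern with a made-up feature macro:

#include <stdio.h>

/* Build with -DHAVE_SHINY_SUPPORT to get the real implementation. */
#ifdef HAVE_SHINY_SUPPORT
int shiny_process(int value)
{
        printf("processing %d with the shiny backend\n", value);
        return 0;
}
#else
static inline int shiny_process(int value __attribute__((unused)))
{
        return 0;       /* quietly succeed, just like the stubs above */
}
#endif

int main(void)
{
        return shiny_process(42);
}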
index bff0d17920ed7dd6a386f31b6377e2cfb6353165..0c5517a8d0b772bc25acc1e9309fb4e00502965a 100644 (file)
@@ -185,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
        return bf;
 }
 
+/* The caller is responsible for freeing the returned buffer. */
 char *build_id_cache__origname(const char *sbuild_id)
 {
        char *linkname;
index fa092511c52b6af5921e7f8f2e3cf576d3bd01c4..7e3c1b60120c259a7b01f32cf1e109e4af873b89 100644 (file)
@@ -633,11 +633,10 @@ static int collect_config(const char *var, const char *value,
        }
 
        ret = set_value(item, value);
-       return ret;
 
 out_free:
        free(key);
-       return -1;
+       return ret;
 }
 
 int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
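
The collect_config() hunk above frees the strdup()ed key on the success path as well and returns set_value()'s result instead of a hard-coded -1. A minimal sketch of the resulting single-exit cleanup shape, with stand-in helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the real set_value(): just echo the pair. */
static int set_value(const char *key, const char *value)
{
        printf("%s = %s\n", key, value);
        return 0;
}

static int collect(const char *var, const char *value)
{
        char *key = strdup(var);
        int ret = -1;

        if (!key)
                goto out_free;

        ret = set_value(key, value);

out_free:
        free(key);              /* reached on success and on failure alike */
        return ret;
}

int main(void)
{
        return collect("report.children", "false");
}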
index e098e189f93e45e0217bb07758fab03f543a45c0..6a64f713710ddab8fff024831ec766dbca04dd81 100644 (file)
@@ -14,6 +14,7 @@
 #include "data.h"
 #include "util.h"
 #include "debug.h"
+#include "header.h"
 
 static void close_dir(struct perf_data_file *files, int nr)
 {
@@ -34,12 +35,16 @@ int perf_data__create_dir(struct perf_data *data, int nr)
        struct perf_data_file *files = NULL;
        int i, ret = -1;
 
+       if (WARN_ON(!data->is_dir))
+               return -EINVAL;
+
        files = zalloc(nr * sizeof(*files));
        if (!files)
                return -ENOMEM;
 
-       data->dir.files = files;
-       data->dir.nr    = nr;
+       data->dir.version = PERF_DIR_VERSION;
+       data->dir.files   = files;
+       data->dir.nr      = nr;
 
        for (i = 0; i < nr; i++) {
                struct perf_data_file *file = &files[i];
@@ -69,6 +74,13 @@ int perf_data__open_dir(struct perf_data *data)
        DIR *dir;
        int nr = 0;
 
+       if (WARN_ON(!data->is_dir))
+               return -EINVAL;
+
+       /* The version is provided by DIR_FORMAT feature. */
+       if (WARN_ON(data->dir.version != PERF_DIR_VERSION))
+               return -1;
+
        dir = opendir(data->path);
        if (!dir)
                return -EINVAL;
@@ -118,6 +130,26 @@ out_err:
        return ret;
 }
 
+int perf_data__update_dir(struct perf_data *data)
+{
+       int i;
+
+       if (WARN_ON(!data->is_dir))
+               return -EINVAL;
+
+       for (i = 0; i < data->dir.nr; i++) {
+               struct perf_data_file *file = &data->dir.files[i];
+               struct stat st;
+
+               if (fstat(file->fd, &st))
+                       return -1;
+
+               file->size = st.st_size;
+       }
+
+       return 0;
+}
+
 static bool check_pipe(struct perf_data *data)
 {
        struct stat st;
@@ -173,6 +205,16 @@ static int check_backup(struct perf_data *data)
        return 0;
 }
 
+static bool is_dir(struct perf_data *data)
+{
+       struct stat st;
+
+       if (stat(data->path, &st))
+               return false;
+
+       return (st.st_mode & S_IFMT) == S_IFDIR;
+}
+
 static int open_file_read(struct perf_data *data)
 {
        struct stat st;
@@ -254,6 +296,30 @@ static int open_file_dup(struct perf_data *data)
        return open_file(data);
 }
 
+static int open_dir(struct perf_data *data)
+{
+       int ret;
+
+       /*
+        * So far we open only the header, so we can read the data version and
+        * layout.
+        */
+       if (asprintf(&data->file.path, "%s/header", data->path) < 0)
+               return -1;
+
+       if (perf_data__is_write(data) &&
+           mkdir(data->path, S_IRWXU) < 0)
+               return -1;
+
+       ret = open_file(data);
+
+       /* Cleanup whatever we managed to create so far. */
+       if (ret && perf_data__is_write(data))
+               rm_rf_perf_data(data->path);
+
+       return ret;
+}
+
 int perf_data__open(struct perf_data *data)
 {
        if (check_pipe(data))
@@ -265,11 +331,18 @@ int perf_data__open(struct perf_data *data)
        if (check_backup(data))
                return -1;
 
-       return open_file_dup(data);
+       if (perf_data__is_read(data))
+               data->is_dir = is_dir(data);
+
+       return perf_data__is_dir(data) ?
+              open_dir(data) : open_file_dup(data);
 }
 
 void perf_data__close(struct perf_data *data)
 {
+       if (perf_data__is_dir(data))
+               perf_data__close_dir(data);
+
        zfree(&data->file.path);
        close(data->file.fd);
 }
@@ -288,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
 
 int perf_data__switch(struct perf_data *data,
                           const char *postfix,
-                          size_t pos, bool at_exit)
+                          size_t pos, bool at_exit,
+                          char **new_filepath)
 {
-       char *new_filepath;
        int ret;
 
        if (check_pipe(data))
@@ -298,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
        if (perf_data__is_read(data))
                return -EINVAL;
 
-       if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0)
+       if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
                return -ENOMEM;
 
        /*
         * Only fire a warning, don't return error, continue fill
         * original file.
         */
-       if (rename(data->path, new_filepath))
-               pr_warning("Failed to rename %s to %s\n", data->path, new_filepath);
+       if (rename(data->path, *new_filepath))
+               pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
 
        if (!at_exit) {
                close(data->file.fd);
@@ -323,6 +396,22 @@ int perf_data__switch(struct perf_data *data,
        }
        ret = data->file.fd;
 out:
-       free(new_filepath);
        return ret;
 }
+
+unsigned long perf_data__size(struct perf_data *data)
+{
+       u64 size = data->file.size;
+       int i;
+
+       if (!data->is_dir)
+               return size;
+
+       for (i = 0; i < data->dir.nr; i++) {
+               struct perf_data_file *file = &data->dir.files[i];
+
+               size += file->size;
+       }
+
+       return size;
+}
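
perf_data__size() now adds up the per-file sizes that perf_data__update_dir() caches via fstat(). A self-contained sketch of that bookkeeping over whatever files are named on the command line:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        unsigned long long total = 0;
        int i;

        for (i = 1; i < argc; i++) {
                struct stat st;
                int fd = open(argv[i], O_RDONLY);

                if (fd < 0 || fstat(fd, &st)) {
                        perror(argv[i]);
                        if (fd >= 0)
                                close(fd);
                        return 1;
                }

                total += st.st_size;    /* like file->size in perf_data__update_dir() */
                close(fd);
        }

        printf("total size: %llu bytes\n", total);
        return 0;
}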
index 14b47be2bd69b6d5578e74334fb5577594469efd..259868a39019890ce6c91de895fc7cf2048c7e6e 100644 (file)
@@ -19,10 +19,12 @@ struct perf_data {
        const char              *path;
        struct perf_data_file    file;
        bool                     is_pipe;
+       bool                     is_dir;
        bool                     force;
        enum perf_data_mode      mode;
 
        struct {
+               u64                      version;
                struct perf_data_file   *files;
                int                      nr;
        } dir;
@@ -43,14 +45,14 @@ static inline int perf_data__is_pipe(struct perf_data *data)
        return data->is_pipe;
 }
 
-static inline int perf_data__fd(struct perf_data *data)
+static inline bool perf_data__is_dir(struct perf_data *data)
 {
-       return data->file.fd;
+       return data->is_dir;
 }
 
-static inline unsigned long perf_data__size(struct perf_data *data)
+static inline int perf_data__fd(struct perf_data *data)
 {
-       return data->file.size;
+       return data->file.fd;
 }
 
 int perf_data__open(struct perf_data *data);
@@ -68,9 +70,11 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
  */
 int perf_data__switch(struct perf_data *data,
                           const char *postfix,
-                          size_t pos, bool at_exit);
+                          size_t pos, bool at_exit, char **new_filepath);
 
 int perf_data__create_dir(struct perf_data *data, int nr);
 int perf_data__open_dir(struct perf_data *data);
 void perf_data__close_dir(struct perf_data *data);
+int perf_data__update_dir(struct perf_data *data);
+unsigned long perf_data__size(struct perf_data *data);
 #endif /* __PERF_DATA_H */
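
The new dir.version field gives directory data a small compatibility handshake: the writer records PERF_DIR_VERSION, and the reader rejects anything else (the WARN_ON in perf_data__open_dir()). A round-trip sketch of the same check against a scratch file, with an invented version constant:

#include <stdint.h>
#include <stdio.h>

#define DEMO_DIR_VERSION 1ULL

int main(void)
{
        uint64_t version = DEMO_DIR_VERSION, on_disk = 0;
        FILE *f = tmpfile();

        if (!f)
                return 1;

        /* writer side: record the layout version in the header file */
        fwrite(&version, sizeof(version), 1, f);

        /* reader side: refuse anything it does not understand */
        rewind(f);
        if (fread(&on_disk, sizeof(on_disk), 1, f) != 1 ||
            on_disk != DEMO_DIR_VERSION) {
                fprintf(stderr, "unsupported directory data version\n");
                fclose(f);
                return 1;
        }

        printf("directory data version %llu\n", (unsigned long long)on_disk);
        fclose(f);
        return 0;
}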
index ba58ba603b69c800454688d21bbb5d7134d5abac..e059976d9d9365abeb943647b456fbfa560e7775 100644 (file)
@@ -184,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
        case DSO_BINARY_TYPE__KALLSYMS:
        case DSO_BINARY_TYPE__GUEST_KALLSYMS:
        case DSO_BINARY_TYPE__JAVA_JIT:
+       case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__NOT_FOUND:
                ret = -1;
                break;
@@ -1141,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
 
 static void dso__set_basename(struct dso *dso)
 {
-       /*
-        * basename() may modify path buffer, so we must pass
-        * a copy.
-        */
-       char *base, *lname = strdup(dso->long_name);
+       char *base, *lname;
+       int tid;
 
-       if (!lname)
-               return;
-
-       /*
-        * basename() may return a pointer to internal
-        * storage which is reused in subsequent calls
-        * so copy the result.
-        */
-       base = strdup(basename(lname));
+       if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
+               if (asprintf(&base, "[JIT] tid %d", tid) < 0)
+                       return;
+       } else {
+               /*
+                * basename() may modify path buffer, so we must pass
+                * a copy.
+                */
+               lname = strdup(dso->long_name);
+               if (!lname)
+                       return;
 
-       free(lname);
+               /*
+                * basename() may return a pointer to internal
+                * storage which is reused in subsequent calls
+                * so copy the result.
+                */
+               base = strdup(basename(lname));
 
-       if (!base)
-               return;
+               free(lname);
 
-       dso__set_short_name(dso, base, true);
+               if (!base)
+                       return;
+       }
+       dso__set_short_name(dso, base, true);
 }
 
 int dso__name_len(const struct dso *dso)
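
dso__set_basename() now recognizes the /tmp/perf-<pid>.map files emitted by JIT runtimes and shortens them to "[JIT] tid <pid>". A standalone sketch of that decision; a simple strrchr() stands in for the basename() handling of ordinary paths:

#define _GNU_SOURCE             /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *short_name(const char *long_name)
{
        const char *slash;
        char *base;
        int tid;

        if (sscanf(long_name, "/tmp/perf-%d.map", &tid) == 1) {
                if (asprintf(&base, "[JIT] tid %d", tid) < 0)
                        return NULL;
                return base;
        }

        /* ordinary path: keep everything after the last '/' */
        slash = strrchr(long_name, '/');
        return strdup(slash ? slash + 1 : long_name);
}

int main(void)
{
        char *a = short_name("/tmp/perf-1234.map");
        char *b = short_name("/usr/lib/libc.so.6");

        printf("%s\n%s\n", a ? a : "?", b ? b : "?");
        free(a);
        free(b);
        return 0;
}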
index bb417c54c25a3abfe26ac2f35fddd70405ed907d..6e3f63781e51ad2f8294dbbe8645c9ddf39a62cd 100644 (file)
@@ -14,6 +14,7 @@
 
 struct machine;
 struct map;
+struct perf_env;
 
 enum dso_binary_type {
        DSO_BINARY_TYPE__KALLSYMS = 0,
@@ -35,6 +36,7 @@ enum dso_binary_type {
        DSO_BINARY_TYPE__KCORE,
        DSO_BINARY_TYPE__GUEST_KCORE,
        DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+       DSO_BINARY_TYPE__BPF_PROG_INFO,
        DSO_BINARY_TYPE__NOT_FOUND,
 };
 
@@ -189,6 +191,12 @@ struct dso {
                u64              debug_frame_offset;
                u64              eh_frame_hdr_offset;
        } data;
+       /* bpf prog information */
+       struct {
+               u32             id;
+               u32             sub_id;
+               struct perf_env *env;
+       } bpf_prog;
 
        union { /* Tool specific area */
                void     *priv;
index 4c23779e271a31ce66d0deeb527a658e0ae90ae7..c6351b557bb0a9afb70d2ed4330c3496c3266a35 100644 (file)
 #include "env.h"
 #include "sane_ctype.h"
 #include "util.h"
+#include "bpf-event.h"
 #include <errno.h>
 #include <sys/utsname.h>
+#include <bpf/libbpf.h>
 
 struct perf_env perf_env;
 
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+                                   struct bpf_prog_info_node *info_node)
+{
+       __u32 prog_id = info_node->info_linear->info.id;
+       struct bpf_prog_info_node *node;
+       struct rb_node *parent = NULL;
+       struct rb_node **p;
+
+       down_write(&env->bpf_progs.lock);
+       p = &env->bpf_progs.infos.rb_node;
+
+       while (*p != NULL) {
+               parent = *p;
+               node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+               if (prog_id < node->info_linear->info.id) {
+                       p = &(*p)->rb_left;
+               } else if (prog_id > node->info_linear->info.id) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_debug("duplicated bpf prog info %u\n", prog_id);
+                       goto out;
+               }
+       }
+
+       rb_link_node(&info_node->rb_node, parent, p);
+       rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+       env->bpf_progs.infos_cnt++;
+out:
+       up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+                                                       __u32 prog_id)
+{
+       struct bpf_prog_info_node *node = NULL;
+       struct rb_node *n;
+
+       down_read(&env->bpf_progs.lock);
+       n = env->bpf_progs.infos.rb_node;
+
+       while (n) {
+               node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+               if (prog_id < node->info_linear->info.id)
+                       n = n->rb_left;
+               else if (prog_id > node->info_linear->info.id)
+                       n = n->rb_right;
+               else
+                       break;
+       }
+
+       up_read(&env->bpf_progs.lock);
+       return node;
+}
+
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+       struct rb_node *parent = NULL;
+       __u32 btf_id = btf_node->id;
+       struct btf_node *node;
+       struct rb_node **p;
+
+       down_write(&env->bpf_progs.lock);
+       p = &env->bpf_progs.btfs.rb_node;
+
+       while (*p != NULL) {
+               parent = *p;
+               node = rb_entry(parent, struct btf_node, rb_node);
+               if (btf_id < node->id) {
+                       p = &(*p)->rb_left;
+               } else if (btf_id > node->id) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_debug("duplicated btf %u\n", btf_id);
+                       goto out;
+               }
+       }
+
+       rb_link_node(&btf_node->rb_node, parent, p);
+       rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+       env->bpf_progs.btfs_cnt++;
+out:
+       up_write(&env->bpf_progs.lock);
+}
+
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+       struct btf_node *node = NULL;
+       struct rb_node *n;
+
+       down_read(&env->bpf_progs.lock);
+       n = env->bpf_progs.btfs.rb_node;
+
+       while (n) {
+               node = rb_entry(n, struct btf_node, rb_node);
+               if (btf_id < node->id)
+                       n = n->rb_left;
+               else if (btf_id > node->id)
+                       n = n->rb_right;
+               else
+                       break;
+       }
+
+       up_read(&env->bpf_progs.lock);
+       return node;
+}
+
+/* purge data in bpf_progs.infos tree */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+       struct rb_root *root;
+       struct rb_node *next;
+
+       down_write(&env->bpf_progs.lock);
+
+       root = &env->bpf_progs.infos;
+       next = rb_first(root);
+
+       while (next) {
+               struct bpf_prog_info_node *node;
+
+               node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+               next = rb_next(&node->rb_node);
+               rb_erase(&node->rb_node, root);
+               free(node);
+       }
+
+       env->bpf_progs.infos_cnt = 0;
+
+       root = &env->bpf_progs.btfs;
+       next = rb_first(root);
+
+       while (next) {
+               struct btf_node *node;
+
+               node = rb_entry(next, struct btf_node, rb_node);
+               next = rb_next(&node->rb_node);
+               rb_erase(&node->rb_node, root);
+               free(node);
+       }
+
+       env->bpf_progs.btfs_cnt = 0;
+
+       up_write(&env->bpf_progs.lock);
+}
+
 void perf_env__exit(struct perf_env *env)
 {
        int i;
 
+       perf_env__purge_bpf(env);
        zfree(&env->hostname);
        zfree(&env->os_release);
        zfree(&env->version);
@@ -38,6 +186,13 @@ void perf_env__exit(struct perf_env *env)
        zfree(&env->memory_nodes);
 }
 
+void perf_env__init(struct perf_env *env)
+{
+       env->bpf_progs.infos = RB_ROOT;
+       env->bpf_progs.btfs = RB_ROOT;
+       init_rwsem(&env->bpf_progs.lock);
+}
+
 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
 {
        int i;
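
perf_env stores bpf_prog_info and BTF nodes in rbtrees keyed by id so lookups from the annotation code stay logarithmic. The kernel rbtree API is not usable in a standalone program, so this hedged sketch plays the same ordered-map role with POSIX tsearch()/tfind(), keyed on a u32 id:

#include <search.h>
#include <stdint.h>
#include <stdio.h>

struct prog_node {
        uint32_t    id;
        const char *name;
};

static int cmp_id(const void *a, const void *b)
{
        const struct prog_node *pa = a, *pb = b;

        return (pa->id > pb->id) - (pa->id < pb->id);
}

int main(void)
{
        struct prog_node progs[] = {
                { 17, "bpf_prog_deadbeef00112233_sched"  },
                {  3, "bpf_prog_cafebabe44556677_xdp"    },
                { 42, "bpf_prog_0011223344556677_kprobe" },
        };
        struct prog_node key = { .id = 42 };
        struct prog_node **found;
        void *root = NULL;
        unsigned int i;

        for (i = 0; i < sizeof(progs) / sizeof(progs[0]); i++)
                tsearch(&progs[i], &root, cmp_id);      /* insert, keyed by id */

        found = tfind(&key, &root, cmp_id);             /* look up by id only */
        if (found)
                printf("prog %u -> %s\n", (*found)->id, (*found)->name);

        /* tree nodes are simply released at process exit in this sketch */
        return 0;
}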
index d01b8355f4caba9440d0e0d4db4c6f241e801dc7..4f8e2b485c01cb9dab58f12942734d076d35208c 100644 (file)
@@ -3,7 +3,9 @@
 #define __PERF_ENV_H
 
 #include <linux/types.h>
+#include <linux/rbtree.h>
 #include "cpumap.h"
+#include "rwsem.h"
 
 struct cpu_topology_map {
        int     socket_id;
@@ -64,8 +66,23 @@ struct perf_env {
        struct memory_node      *memory_nodes;
        unsigned long long       memory_bsize;
        u64                     clockid_res_ns;
+
+       /*
+        * bpf_progs.lock protects the bpf rbtrees. This is needed because the
+        * trees are accessed by different threads in perf-top.
+        */
+       struct {
+               struct rw_semaphore     lock;
+               struct rb_root          infos;
+               u32                     infos_cnt;
+               struct rb_root          btfs;
+               u32                     btfs_cnt;
+       } bpf_progs;
 };
 
+struct bpf_prog_info_node;
+struct btf_node;
+
 extern struct perf_env perf_env;
 
 void perf_env__exit(struct perf_env *env);
@@ -80,4 +97,11 @@ const char *perf_env__arch(struct perf_env *env);
 const char *perf_env__raw_arch(struct perf_env *env);
 int perf_env__nr_cpus_avail(struct perf_env *env);
 
+void perf_env__init(struct perf_env *env);
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+                                   struct bpf_prog_info_node *info_node);
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+                                                       __u32 prog_id);
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
 #endif /* __PERF_ENV_H */
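
perf's userspace rw_semaphore is built on a pthread rwlock: the side-band thread inserts nodes under the write side while report/annotate code looks them up under the read side. A minimal pthread sketch of that access pattern, with a plain counter standing in for the rbtrees (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int shared_entries;

static void *writer(void *arg __attribute__((unused)))
{
        int i;

        for (i = 0; i < 1000; i++) {
                pthread_rwlock_wrlock(&lock);   /* like down_write() */
                shared_entries++;
                pthread_rwlock_unlock(&lock);
        }
        return NULL;
}

static void *reader(void *arg __attribute__((unused)))
{
        int last = 0;

        while (last < 1000) {
                pthread_rwlock_rdlock(&lock);   /* like down_read() */
                last = shared_entries;
                pthread_rwlock_unlock(&lock);
        }
        printf("reader saw %d entries\n", last);
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}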
index ed20f4379956594a908299f08c9d2ef4ac140dc2..ec78e93085ded81aa9def49640b48495b93ead7c 100644 (file)
@@ -19,6 +19,7 @@
 #include "debug.h"
 #include "units.h"
 #include "asm/bug.h"
+#include "bpf-event.h"
 #include <signal.h>
 #include <unistd.h>
 
@@ -1856,3 +1857,121 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
        }
        return leader;
 }
+
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+                             struct perf_event_attr *attr,
+                             perf_evsel__sb_cb_t cb,
+                             void *data)
+{
+       struct perf_evsel *evsel;
+       bool new_evlist = (*evlist) == NULL;
+
+       if (*evlist == NULL)
+               *evlist = perf_evlist__new();
+       if (*evlist == NULL)
+               return -1;
+
+       if (!attr->sample_id_all) {
+               pr_warning("enabling sample_id_all for all side band events\n");
+               attr->sample_id_all = 1;
+       }
+
+       evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
+       if (!evsel)
+               goto out_err;
+
+       evsel->side_band.cb = cb;
+       evsel->side_band.data = data;
+       perf_evlist__add(*evlist, evsel);
+       return 0;
+
+out_err:
+       if (new_evlist) {
+               perf_evlist__delete(*evlist);
+               *evlist = NULL;
+       }
+       return -1;
+}
+
+static void *perf_evlist__poll_thread(void *arg)
+{
+       struct perf_evlist *evlist = arg;
+       bool draining = false;
+       int i;
+
+       while (draining || !(evlist->thread.done)) {
+               if (draining)
+                       draining = false;
+               else if (evlist->thread.done)
+                       draining = true;
+
+               if (!draining)
+                       perf_evlist__poll(evlist, 1000);
+
+               for (i = 0; i < evlist->nr_mmaps; i++) {
+                       struct perf_mmap *map = &evlist->mmap[i];
+                       union perf_event *event;
+
+                       if (perf_mmap__read_init(map))
+                               continue;
+                       while ((event = perf_mmap__read_event(map)) != NULL) {
+                               struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+                               if (evsel && evsel->side_band.cb)
+                                       evsel->side_band.cb(event, evsel->side_band.data);
+                               else
+                                       pr_warning("cannot locate proper evsel for the side band event\n");
+
+                               perf_mmap__consume(map);
+                       }
+                       perf_mmap__read_done(map);
+               }
+       }
+       return NULL;
+}
+
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+                                struct target *target)
+{
+       struct perf_evsel *counter;
+
+       if (!evlist)
+               return 0;
+
+       if (perf_evlist__create_maps(evlist, target))
+               goto out_delete_evlist;
+
+       evlist__for_each_entry(evlist, counter) {
+               if (perf_evsel__open(counter, evlist->cpus,
+                                    evlist->threads) < 0)
+                       goto out_delete_evlist;
+       }
+
+       if (perf_evlist__mmap(evlist, UINT_MAX))
+               goto out_delete_evlist;
+
+       evlist__for_each_entry(evlist, counter) {
+               if (perf_evsel__enable(counter))
+                       goto out_delete_evlist;
+       }
+
+       evlist->thread.done = 0;
+       if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
+               goto out_delete_evlist;
+
+       return 0;
+
+out_delete_evlist:
+       perf_evlist__delete(evlist);
+       evlist = NULL;
+       return -1;
+}
+
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
+{
+       if (!evlist)
+               return;
+       evlist->thread.done = 1;
+       pthread_join(evlist->thread.th, NULL);
+       perf_evlist__delete(evlist);
+}
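
A compact standalone model of the side-band thread lifecycle introduced above: a worker polls until a done flag is set, with a draining step that mirrors the original's shutdown handling, and the stop routine merely sets the flag and joins. The queue and the names are invented; only the control flow follows perf_evlist__poll_thread():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile int done;

static void drain_once(void)
{
        /* stand-in for reading and consuming the mmap'ed ring buffers */
        printf("drained pending events\n");
}

static void *poll_thread(void *arg __attribute__((unused)))
{
        bool draining = false;

        while (draining || !done) {
                if (draining)
                        draining = false;       /* final pass, exit afterwards */
                else if (done)
                        draining = true;        /* done was just set: drain once more */

                if (!draining)
                        usleep(100 * 1000);     /* stand-in for perf_evlist__poll() */

                drain_once();
        }
        return NULL;
}

int main(void)
{
        pthread_t th;

        if (pthread_create(&th, NULL, poll_thread, NULL))
                return 1;

        sleep(1);               /* the "session" runs here */

        done = 1;               /* what perf_evlist__stop_sb_thread() does */
        pthread_join(th, NULL);
        return 0;
}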
index 744906dd488732a46801fcc50588a32c190b7a45..dcb68f34d2cd1a3cb4460adbda76eef879f9282e 100644 (file)
@@ -54,6 +54,10 @@ struct perf_evlist {
                                       struct perf_sample *sample);
        u64             first_sample_time;
        u64             last_sample_time;
+       struct {
+               pthread_t               th;
+               volatile int            done;
+       } thread;
 };
 
 struct perf_evsel_str_handler {
@@ -87,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
 
 int perf_evlist__add_dummy(struct perf_evlist *evlist);
 
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+                             struct perf_event_attr *attr,
+                             perf_evsel__sb_cb_t cb,
+                             void *data);
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+                                struct target *target);
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
+
 int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler);
 
index 3bbf73e979c00aa4f9c363e9d224440190425457..7835e05f0c0a476c4d782a98dc8de5fb4494d55b 100644 (file)
@@ -1036,7 +1036,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
        attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
        attr->ksymbol = track && !perf_missing_features.ksymbol;
-       attr->bpf_event = track && opts->bpf_event &&
+       attr->bpf_event = track && !opts->no_bpf_event &&
                !perf_missing_features.bpf_event;
 
        if (opts->record_namespaces)
@@ -1292,6 +1292,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
 {
        assert(list_empty(&evsel->node));
        assert(evsel->evlist == NULL);
+       perf_evsel__free_counts(evsel);
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
        perf_evsel__free_config_terms(evsel);
@@ -1342,10 +1343,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
                        count->val = 0;
                } else if (count->run < count->ena) {
                        scaled = 1;
-                       count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
+                       count->val = (u64)((double) count->val * count->ena / count->run);
                }
-       } else
-               count->ena = count->run = 0;
+       }
 
        if (pscaled)
                *pscaled = scaled;
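
The perf_counts_values__scale() change above drops the +0.5 rounding: a multiplexed count is now estimated as raw_count * time_enabled / time_running, truncated. A worked standalone example of that formula, with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical numbers: the event ran for 25 ms out of 100 ms enabled. */
        uint64_t val = 1000000, ena = 100000000, run = 25000000;
        uint64_t scaled;

        if (run == 0 || ena == 0)
                scaled = 0;                             /* never scheduled in */
        else
                scaled = (uint64_t)((double)val * ena / run);

        printf("raw %llu -> scaled estimate %llu\n",
               (unsigned long long)val, (unsigned long long)scaled);
        return 0;
}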
index cc578e02e08fb41a281a0b33d523bcf037efc5b3..0f2c6c93d7215d349585da941f95e5ed1c0a1460 100644 (file)
@@ -73,6 +73,8 @@ struct perf_evsel_config_term {
 
 struct perf_stat_evsel;
 
+typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
+
 /** struct perf_evsel - event selector
  *
  * @evlist - evlist this evsel is in, if it is in one.
@@ -151,6 +153,10 @@ struct perf_evsel {
        bool                    collect_stat;
        bool                    weak_group;
        const char              *pmu_name;
+       struct {
+               perf_evsel__sb_cb_t     *cb;
+               void                    *data;
+       } side_band;
 };
 
 union u64_swap {
index 01b324c275b9d1ba0bd771f30b49952550038295..b9e693825873a8459c055a62cfcf1aefb289c99a 100644 (file)
@@ -18,6 +18,7 @@
 #include <sys/utsname.h>
 #include <linux/time64.h>
 #include <dirent.h>
+#include <bpf/libbpf.h>
 
 #include "evlist.h"
 #include "evsel.h"
@@ -40,6 +41,7 @@
 #include "time-utils.h"
 #include "units.h"
 #include "cputopo.h"
+#include "bpf-event.h"
 
 #include "sane_ctype.h"
 
@@ -861,6 +863,104 @@ static int write_clockid(struct feat_fd *ff,
                        sizeof(ff->ph->env.clockid_res_ns));
 }
 
+static int write_dir_format(struct feat_fd *ff,
+                           struct perf_evlist *evlist __maybe_unused)
+{
+       struct perf_session *session;
+       struct perf_data *data;
+
+       session = container_of(ff->ph, struct perf_session, header);
+       data = session->data;
+
+       if (WARN_ON(!perf_data__is_dir(data)))
+               return -1;
+
+       return do_write(ff, &data->dir.version, sizeof(data->dir.version));
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff,
+                              struct perf_evlist *evlist __maybe_unused)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+       int ret;
+
+       down_read(&env->bpf_progs.lock);
+
+       ret = do_write(ff, &env->bpf_progs.infos_cnt,
+                      sizeof(env->bpf_progs.infos_cnt));
+       if (ret < 0)
+               goto out;
+
+       root = &env->bpf_progs.infos;
+       next = rb_first(root);
+       while (next) {
+               struct bpf_prog_info_node *node;
+               size_t len;
+
+               node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+               next = rb_next(&node->rb_node);
+               len = sizeof(struct bpf_prog_info_linear) +
+                       node->info_linear->data_len;
+
+               /* before writing to file, translate address to offset */
+               bpf_program__bpil_addr_to_offs(node->info_linear);
+               ret = do_write(ff, node->info_linear, len);
+               /*
+                * translate back to address even when do_write() fails,
+                * so that this function never changes the data.
+                */
+               bpf_program__bpil_offs_to_addr(node->info_linear);
+               if (ret < 0)
+                       goto out;
+       }
+out:
+       up_read(&env->bpf_progs.lock);
+       return ret;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
+                              struct perf_evlist *evlist __maybe_unused)
+{
+       return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int write_bpf_btf(struct feat_fd *ff,
+                        struct perf_evlist *evlist __maybe_unused)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+       int ret;
+
+       down_read(&env->bpf_progs.lock);
+
+       ret = do_write(ff, &env->bpf_progs.btfs_cnt,
+                      sizeof(env->bpf_progs.btfs_cnt));
+
+       if (ret < 0)
+               goto out;
+
+       root = &env->bpf_progs.btfs;
+       next = rb_first(root);
+       while (next) {
+               struct btf_node *node;
+
+               node = rb_entry(next, struct btf_node, rb_node);
+               next = rb_next(&node->rb_node);
+               ret = do_write(ff, &node->id,
+                              sizeof(u32) * 2 + node->data_size);
+               if (ret < 0)
+                       goto out;
+       }
+out:
+       up_read(&env->bpf_progs.lock);
+       return ret;
+}
+
 static int cpu_cache_level__sort(const void *a, const void *b)
 {
        struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1341,6 +1441,63 @@ static void print_clockid(struct feat_fd *ff, FILE *fp)
                ff->ph->env.clockid_res_ns * 1000);
 }
 
+static void print_dir_format(struct feat_fd *ff, FILE *fp)
+{
+       struct perf_session *session;
+       struct perf_data *data;
+
+       session = container_of(ff->ph, struct perf_session, header);
+       data = session->data;
+
+       fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
+}
+
+static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+
+       down_read(&env->bpf_progs.lock);
+
+       root = &env->bpf_progs.infos;
+       next = rb_first(root);
+
+       while (next) {
+               struct bpf_prog_info_node *node;
+
+               node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+               next = rb_next(&node->rb_node);
+
+               bpf_event__print_bpf_prog_info(&node->info_linear->info,
+                                              env, fp);
+       }
+
+       up_read(&env->bpf_progs.lock);
+}
+
+static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+
+       down_read(&env->bpf_progs.lock);
+
+       root = &env->bpf_progs.btfs;
+       next = rb_first(root);
+
+       while (next) {
+               struct btf_node *node;
+
+               node = rb_entry(next, struct btf_node, rb_node);
+               next = rb_next(&node->rb_node);
+               fprintf(fp, "# btf info of id %u\n", node->id);
+       }
+
+       up_read(&env->bpf_progs.lock);
+}
+
 static void free_event_desc(struct perf_evsel *events)
 {
        struct perf_evsel *evsel;
@@ -2373,6 +2530,139 @@ static int process_clockid(struct feat_fd *ff,
        return 0;
 }
 
+static int process_dir_format(struct feat_fd *ff,
+                             void *_data __maybe_unused)
+{
+       struct perf_session *session;
+       struct perf_data *data;
+
+       session = container_of(ff->ph, struct perf_session, header);
+       data = session->data;
+
+       if (WARN_ON(!perf_data__is_dir(data)))
+               return -1;
+
+       return do_read_u64(ff, &data->dir.version);
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info_node *info_node;
+       struct perf_env *env = &ff->ph->env;
+       u32 count, i;
+       int err = -1;
+
+       if (ff->ph->needs_swap) {
+               pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
+               return 0;
+       }
+
+       if (do_read_u32(ff, &count))
+               return -1;
+
+       down_write(&env->bpf_progs.lock);
+
+       for (i = 0; i < count; ++i) {
+               u32 info_len, data_len;
+
+               info_linear = NULL;
+               info_node = NULL;
+               if (do_read_u32(ff, &info_len))
+                       goto out;
+               if (do_read_u32(ff, &data_len))
+                       goto out;
+
+               if (info_len > sizeof(struct bpf_prog_info)) {
+                       pr_warning("detected invalid bpf_prog_info\n");
+                       goto out;
+               }
+
+               info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
+                                    data_len);
+               if (!info_linear)
+                       goto out;
+               info_linear->info_len = sizeof(struct bpf_prog_info);
+               info_linear->data_len = data_len;
+               if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
+                       goto out;
+               if (__do_read(ff, &info_linear->info, info_len))
+                       goto out;
+               if (info_len < sizeof(struct bpf_prog_info))
+                       memset(((void *)(&info_linear->info)) + info_len, 0,
+                              sizeof(struct bpf_prog_info) - info_len);
+
+               if (__do_read(ff, info_linear->data, data_len))
+                       goto out;
+
+               info_node = malloc(sizeof(struct bpf_prog_info_node));
+               if (!info_node)
+                       goto out;
+
+               /* after reading from file, translate offset to address */
+               bpf_program__bpil_offs_to_addr(info_linear);
+               info_node->info_linear = info_linear;
+               perf_env__insert_bpf_prog_info(env, info_node);
+       }
+
+       return 0;
+out:
+       free(info_linear);
+       free(info_node);
+       up_write(&env->bpf_progs.lock);
+       return err;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
+{
+       return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+{
+       struct perf_env *env = &ff->ph->env;
+       u32 count, i;
+
+       if (ff->ph->needs_swap) {
+               pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
+               return 0;
+       }
+
+       if (do_read_u32(ff, &count))
+               return -1;
+
+       down_write(&env->bpf_progs.lock);
+
+       for (i = 0; i < count; ++i) {
+               struct btf_node *node;
+               u32 id, data_size;
+
+               if (do_read_u32(ff, &id))
+                       goto out;
+               if (do_read_u32(ff, &data_size))
+                       goto out;
+
+               node = malloc(sizeof(struct btf_node) + data_size);
+               if (!node)
+                       goto out;
+
+               node->id = id;
+               node->data_size = data_size;
+
+               if (__do_read(ff, node->data, data_size)) {
+                       free(node);
+                       goto out;
+               }
+
+               perf_env__insert_btf(env, node);
+       }
+
+       err = 0;
+out:
+       up_write(&env->bpf_progs.lock);
+       return err;
+}
+
 struct feature_ops {
        int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
        void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2432,7 +2722,10 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPN(CACHE,         cache,          true),
        FEAT_OPR(SAMPLE_TIME,   sample_time,    false),
        FEAT_OPR(MEM_TOPOLOGY,  mem_topology,   true),
-       FEAT_OPR(CLOCKID,       clockid,        false)
+       FEAT_OPR(CLOCKID,       clockid,        false),
+       FEAT_OPN(DIR_FORMAT,    dir_format,     false),
+       FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,  false),
+       FEAT_OPR(BPF_BTF,       bpf_btf,        false),
 };
 
 struct header_print_data {
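
For reference, the on-disk layout consumed by process_bpf_prog_info() and process_bpf_btf() above, reconstructed from the do_read_u32()/do_read_u64()/__do_read() calls in this patch (illustrative summary, not a definition copied from it):

        HEADER_BPF_PROG_INFO feature section:
          u32 count
          repeated count times:
            u32 info_len          (bytes of struct bpf_prog_info that follow)
            u32 data_len          (bytes of trailing array data)
            u64 arrays            (bitmask of which bpf_prog_info arrays are present)
            u8  info[info_len]
            u8  data[data_len]

        HEADER_BPF_BTF feature section:
          u32 count
          repeated count times:
            u32 id
            u32 data_size
            u8  data[data_size]   (raw BTF blob)

After each bpf_prog_info record is read, bpf_program__bpil_offs_to_addr() turns the file offsets stored for the arrays back into pointers into the in-memory copy, as noted in the code above.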
index 0d553ddca0a3049f941d96a0ae0d68b71ea7a49c..386da49e1bfa05f7ac546b26d1e05730d8e2f3c9 100644 (file)
@@ -39,6 +39,9 @@ enum {
        HEADER_SAMPLE_TIME,
        HEADER_MEM_TOPOLOGY,
        HEADER_CLOCKID,
+       HEADER_DIR_FORMAT,
+       HEADER_BPF_PROG_INFO,
+       HEADER_BPF_BTF,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
@@ -48,6 +51,10 @@ enum perf_header_version {
        PERF_HEADER_VERSION_2,
 };
 
+enum perf_dir_version {
+       PERF_DIR_VERSION        = 1,
+};
+
 struct perf_file_section {
        u64 offset;
        u64 size;
index f9eb95bf3938b7248a1df567c04d1f31cf7e1f36..7ace7a10054d82da540825ad2eb36d5a85b0a6a4 100644 (file)
@@ -19,6 +19,7 @@
 #include <math.h>
 #include <inttypes.h>
 #include <sys/param.h>
+#include <linux/time64.h>
 
 static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
@@ -192,6 +193,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+       hists__new_col_len(hists, HISTC_TIME, 12);
 
        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
@@ -246,6 +248,14 @@ static void he_stat__add_cpumode_period(struct he_stat *he_stat,
        }
 }
 
+static long hist_time(unsigned long htime)
+{
+       unsigned long time_quantum = symbol_conf.time_quantum;
+       if (time_quantum)
+               return (htime / time_quantum) * time_quantum;
+       return htime;
+}
+
 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
 {
@@ -426,6 +436,13 @@ static int hist_entry__init(struct hist_entry *he,
                        goto err_rawdata;
        }
 
+       if (symbol_conf.res_sample) {
+               he->res_samples = calloc(symbol_conf.res_sample,
+                                        sizeof(struct res_sample));
+               if (!he->res_samples)
+                       goto err_srcline;
+       }
+
        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
@@ -436,6 +453,9 @@ static int hist_entry__init(struct hist_entry *he,
 
        return 0;
 
+err_srcline:
+       free(he->srcline);
+
 err_rawdata:
        free(he->raw_data);
 
@@ -593,6 +613,37 @@ out:
        return he;
 }
 
+static unsigned random_max(unsigned high)
+{
+       unsigned thresh = -high % high;
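+       /* thresh == 2^32 % high; reject r < thresh to limit modulo bias */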
+       for (;;) {
+               unsigned r = random();
+               if (r >= thresh)
+                       return r % high;
+       }
+}
+
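+/*
+ * Keep up to symbol_conf.res_sample (time, cpu, tid) samples per hist entry;
+ * once the array is full, overwrite a randomly chosen existing slot.
+ */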
+static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
+{
+       struct res_sample *r;
+       int j;
+
+       if (he->num_res < symbol_conf.res_sample) {
+               j = he->num_res++;
+       } else {
+               j = random_max(symbol_conf.res_sample);
+       }
+       r = &he->res_samples[j];
+       r->time = sample->time;
+       r->cpu = sample->cpu;
+       r->tid = sample->tid;
+}
+
 static struct hist_entry*
 __hists__add_entry(struct hists *hists,
                   struct addr_location *al,
@@ -635,10 +681,13 @@ __hists__add_entry(struct hists *hists,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
+               .time = hist_time(sample->time),
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
 
        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
+       if (he && symbol_conf.res_sample)
+               hists__res_sample(he, sample);
        return he;
 }
 
@@ -1062,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
 
        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
-       if (err)
+       if (err) {
+               map__put(alm);
                return err;
+       }
 
        err = iter->ops->prepare_entry(iter, al);
        if (err)
@@ -1162,6 +1213,7 @@ void hist_entry__delete(struct hist_entry *he)
                mem_info__zput(he->mem_info);
        }
 
+       zfree(&he->res_samples);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
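
hist_time() above rounds each sample timestamp down to the start of its quantum; symbol_conf.time_quantum defaults to 100 * NSEC_PER_MSEC (see the util/symbol.c hunk later in this diff). A quick worked example, values in nanoseconds:

        hist_time(1234567890)  ->  1200000000   (1.234567890s buckets to 1.200000000s)
        hist_time(99999999)    ->            0
        hist_time(100000000)   ->    100000000

Entries that land in the same 100ms bucket therefore compare equal under the new 'time' sort key added in util/sort.c below.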
index 4af27fbab24f5c3ca3ca0eda289537b506846f20..76ff6c6d03b82f334ea8aa3528d8d817f164a066 100644 (file)
@@ -31,6 +31,7 @@ enum hist_filter {
 
 enum hist_column {
        HISTC_SYMBOL,
+       HISTC_TIME,
        HISTC_DSO,
        HISTC_THREAD,
        HISTC_COMM,
@@ -432,9 +433,18 @@ struct hist_browser_timer {
 };
 
 struct annotation_options;
+struct res_sample;
+
+enum rstype {
+       A_NORMAL,
+       A_ASM,
+       A_SOURCE
+};
 
 #ifdef HAVE_SLANG_SUPPORT
 #include "../ui/keysyms.h"
+void attr_to_script(char *buf, struct perf_event_attr *attr);
+
 int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
                             struct hist_browser_timer *hbt,
                             struct annotation_options *annotation_opts);
@@ -449,7 +459,13 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  struct perf_env *env,
                                  bool warn_lost_event,
                                  struct annotation_options *annotation_options);
-int script_browse(const char *script_opt);
+
+int script_browse(const char *script_opt, struct perf_evsel *evsel);
+
+void run_script(char *cmd);
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+                     struct perf_evsel *evsel, enum rstype rstype);
+void res_sample_init(void);
 #else
 static inline
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
@@ -478,11 +494,22 @@ static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
        return 0;
 }
 
-static inline int script_browse(const char *script_opt __maybe_unused)
+static inline int script_browse(const char *script_opt __maybe_unused,
+                               struct perf_evsel *evsel __maybe_unused)
 {
        return 0;
 }
 
+static inline int res_sample_browse(struct res_sample *res_samples __maybe_unused,
+                                   int num_res __maybe_unused,
+                                   struct perf_evsel *evsel __maybe_unused,
+                                   enum rstype rstype __maybe_unused)
+{
+       return 0;
+}
+
+static inline void res_sample_init(void) {}
+
 #define K_LEFT  -1000
 #define K_RIGHT -2000
 #define K_SWITCH_INPUT_DATA -3000
index fbeb0c6efaa6e5ea05383e7a9227798b313379d4..e32628cd20a7f36e0e06efb3ee5c2e6cae09dabb 100644 (file)
@@ -577,10 +577,25 @@ static void __maps__purge(struct maps *maps)
        }
 }
 
+static void __maps__purge_names(struct maps *maps)
+{
+       struct rb_root *root = &maps->names;
+       struct rb_node *next = rb_first(root);
+
+       while (next) {
+               struct map *pos = rb_entry(next, struct map, rb_node_name);
+
+               next = rb_next(&pos->rb_node_name);
+               rb_erase_init(&pos->rb_node_name, root);
+               map__put(pos);
+       }
+}
+
 static void maps__exit(struct maps *maps)
 {
        down_write(&maps->lock);
        __maps__purge(maps);
+       __maps__purge_names(maps);
        up_write(&maps->lock);
 }
 
@@ -917,6 +932,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
 {
        rb_erase_init(&map->rb_node, &maps->entries);
        map__put(map);
+
+       rb_erase_init(&map->rb_node_name, &maps->names);
+       map__put(map);
 }
 
 void maps__remove(struct maps *maps, struct map *map)
index ea523d3b248fe9bdfb2ded5bca9f88bc966b31b1..989fed6f43b5a5f34d485c39b7b8fa30790e6a2d 100644 (file)
@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
                "FINAL",
                "ROUND",
                "HALF ",
+               "TOP  ",
+               "TIME ",
        };
        int err;
        bool show_progress = false;
index 4dcc01b2532c842dddc720d31b528f6be5532c02..5ef4939408f2a5b2394943f5313607ec7e73e4a4 100644 (file)
@@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config)
                perf_evsel__delete(evsel);
        }
 
+       thread_map__put(tmap);
        return ret;
 }
 
@@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
                                printf("  %-50s [%s]\n", buf, "SDT event");
                                free(buf);
                        }
+                       free(path);
                } else
                        printf("  %-50s [%s]\n", nd->s, "SDT event");
                if (nd2) {
index a1b8d9649ca737abf09c24934cb8af8b78821e82..198e09ff611e48cecce9ae3795f0a7c8a63c2c27 100644 (file)
@@ -160,8 +160,10 @@ static struct map *kernel_get_module_map(const char *module)
        if (module && strchr(module, '/'))
                return dso__new_map(module);
 
-       if (!module)
-               module = "kernel";
+       if (!module) {
+               pos = machine__kernel_map(host_machine);
+               return map__get(pos);
+       }
 
        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
                /* short_name is "[module]" */
index db643f3c2b9544d5bc12ce439b1a86817de6af5d..b17f1c9bc9651d620810825ab6014663890006f7 100644 (file)
@@ -132,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);
 
+       perf_env__init(&session->header.env);
        if (data) {
                if (perf_data__open(data))
                        goto out_delete;
@@ -152,6 +153,10 @@ struct perf_session *perf_session__new(struct perf_data *data,
                        }
 
                        perf_evlist__init_trace_event_sample_raw(session->evlist);
+
+                       /* Open the directory data. */
+                       if (data->is_dir && perf_data__open_dir(data))
+                               goto out_delete;
                }
        } else  {
                session->machines.host.env = &perf_env;
@@ -1843,10 +1848,17 @@ fetch_mmaped_event(struct perf_session *session,
 #define NUM_MMAPS 128
 #endif
 
+struct reader;
+
+typedef s64 (*reader_cb_t)(struct perf_session *session,
+                          union perf_event *event,
+                          u64 file_offset);
+
 struct reader {
-       int     fd;
-       u64     data_size;
-       u64     data_offset;
+       int              fd;
+       u64              data_size;
+       u64              data_offset;
+       reader_cb_t      process;
 };
 
 static int
@@ -1917,7 +1929,7 @@ more:
        size = event->header.size;
 
        if (size < sizeof(struct perf_event_header) ||
-           (skip = perf_session__process_event(session, event, file_pos)) < 0) {
+           (skip = rd->process(session, event, file_pos)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       file_offset + head, event->header.size,
                       event->header.type);
@@ -1943,12 +1955,20 @@ out:
        return err;
 }
 
+static s64 process_simple(struct perf_session *session,
+                         union perf_event *event,
+                         u64 file_offset)
+{
+       return perf_session__process_event(session, event, file_offset);
+}
+
 static int __perf_session__process_events(struct perf_session *session)
 {
        struct reader rd = {
                .fd             = perf_data__fd(session->data),
                .data_size      = session->header.data_size,
                .data_offset    = session->header.data_offset,
+               .process        = process_simple,
        };
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
index d2299e912e591ac569ef62d2b034421798ca50d1..5d2518e89fc49f21ac6a85c69283c07dc05042ef 100644 (file)
@@ -3,6 +3,7 @@
 #include <inttypes.h>
 #include <regex.h>
 #include <linux/mman.h>
+#include <linux/time64.h>
 #include "sort.h"
 #include "hist.h"
 #include "comm.h"
 #include "evsel.h"
 #include "evlist.h"
 #include "strlist.h"
+#include "strbuf.h"
 #include <traceevent/event-parse.h>
 #include "mem-events.h"
 #include "annotate.h"
+#include "time-utils.h"
 #include <linux/kernel.h>
 
 regex_t                parent_regex;
@@ -654,6 +657,42 @@ struct sort_entry sort_socket = {
        .se_width_idx   = HISTC_SOCKET,
 };
 
+/* --sort time */
+
+static int64_t
+sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       return right->time - left->time;
+}
+
+static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
+                                   size_t size, unsigned int width)
+{
+       unsigned long secs;
+       unsigned long long nsecs;
+       char he_time[32];
+
+       nsecs = he->time;
+       secs = nsecs / NSEC_PER_SEC;
+       nsecs -= secs * NSEC_PER_SEC;
+
+       if (symbol_conf.nanosecs)
+               snprintf(he_time, sizeof he_time, "%5lu.%09llu: ",
+                        secs, nsecs);
+       else
+               timestamp__scnprintf_usec(he->time, he_time,
+                                         sizeof(he_time));
+
+       return repsep_snprintf(bf, size, "%-.*s", width, he_time);
+}
+
+struct sort_entry sort_time = {
+       .se_header      = "Time",
+       .se_cmp         = sort__time_cmp,
+       .se_snprintf    = hist_entry__time_snprintf,
+       .se_width_idx   = HISTC_TIME,
+};
+
 /* --sort trace */
 
 static char *get_trace_output(struct hist_entry *he)
@@ -1634,6 +1673,7 @@ static struct sort_dimension common_sort_dimensions[] = {
        DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
        DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
        DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
+       DIM(SORT_TIME, "time", sort_time),
 };
 
 #undef DIM
@@ -3068,3 +3108,54 @@ void reset_output_field(void)
        reset_dimensions();
        perf_hpp__reset_output_field(&perf_hpp_list);
 }
+
+#define INDENT (3*8 + 1)
+
+static void add_key(struct strbuf *sb, const char *str, int *llen)
+{
+       if (*llen >= 75) {
+               strbuf_addstr(sb, "\n\t\t\t ");
+               *llen = INDENT;
+       }
+       strbuf_addf(sb, " %s", str);
+       *llen += strlen(str) + 1;
+}
+
+static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
+                           int *llen)
+{
+       int i;
+
+       for (i = 0; i < n; i++)
+               add_key(sb, s[i].name, llen);
+}
+
+static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
+                               int *llen)
+{
+       int i;
+
+       for (i = 0; i < n; i++)
+               add_key(sb, s[i].name, llen);
+}
+
+const char *sort_help(const char *prefix)
+{
+       struct strbuf sb;
+       char *s;
+       int len = strlen(prefix) + INDENT;
+
+       strbuf_init(&sb, 300);
+       strbuf_addstr(&sb, prefix);
+       add_hpp_sort_string(&sb, hpp_sort_dimensions,
+                           ARRAY_SIZE(hpp_sort_dimensions), &len);
+       add_sort_string(&sb, common_sort_dimensions,
+                           ARRAY_SIZE(common_sort_dimensions), &len);
+       add_sort_string(&sb, bstack_sort_dimensions,
+                           ARRAY_SIZE(bstack_sort_dimensions), &len);
+       add_sort_string(&sb, memory_sort_dimensions,
+                           ARRAY_SIZE(memory_sort_dimensions), &len);
+       s = strbuf_detach(&sb, NULL);
+       strbuf_release(&sb);
+       return s;
+}
index 2fbee0b1011c6a68d4cafefe9d0fc13d67f9c226..ce376a73f964dc83dc9a222b212cfcf3bad82aff 100644 (file)
@@ -47,6 +47,12 @@ extern struct sort_entry sort_srcline;
 extern enum sort_type sort__first_dimension;
 extern const char default_mem_sort_order[];
 
+struct res_sample {
+       u64 time;
+       int cpu;
+       int tid;
+};
+
 struct he_stat {
        u64                     period;
        u64                     period_sys;
@@ -135,10 +141,13 @@ struct hist_entry {
        char                    *srcfile;
        struct symbol           *parent;
        struct branch_info      *branch_info;
+       long                    time;
        struct hists            *hists;
        struct mem_info         *mem_info;
        void                    *raw_data;
        u32                     raw_size;
+       int                     num_res;
+       struct res_sample       *res_samples;
        void                    *trace_output;
        struct perf_hpp_list    *hpp_list;
        struct hist_entry       *parent_he;
@@ -231,6 +240,7 @@ enum sort_type {
        SORT_DSO_SIZE,
        SORT_CGROUP_ID,
        SORT_SYM_IPC_NULL,
+       SORT_TIME,
 
        /* branch stack specific sort keys */
        __SORT_BRANCH_STACK,
@@ -286,6 +296,8 @@ void reset_output_field(void);
 void sort__setup_elide(FILE *fp);
 void perf_hpp__set_elide(int idx, bool elide);
 
+const char *sort_help(const char *prefix);
+
 int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
 
 bool is_strict_order(const char *order);
index 4d40515307b8024a4b438edfc8a7d1f2bfc97c9a..2856cc9d5a31e8aa55a631aa78b7875a12bd2c5b 100644 (file)
@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
                break;
        case AGGR_GLOBAL:
                aggr->val += count->val;
-               if (config->scale) {
-                       aggr->ena += count->ena;
-                       aggr->run += count->run;
-               }
+               aggr->ena += count->ena;
+               aggr->run += count->run;
        case AGGR_UNSET:
        default:
                break;
@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
        struct perf_event_attr *attr = &evsel->attr;
        struct perf_evsel *leader = evsel->leader;
 
-       if (config->scale) {
-               attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
-                                   PERF_FORMAT_TOTAL_TIME_RUNNING;
-       }
+       attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+                           PERF_FORMAT_TOTAL_TIME_RUNNING;
 
        /*
         * The event is part of non trivial group, let's enable
index 758bf5f74e6ee91a7ff09ac667bf6ac6d18a9d73..5cbad55cd99dfbee9729b64331ea384da46cb2a8 100644 (file)
@@ -6,6 +6,7 @@
 #include <string.h>
 #include <linux/kernel.h>
 #include <linux/mman.h>
+#include <linux/time64.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/param.h>
@@ -39,15 +40,18 @@ int vmlinux_path__nr_entries;
 char **vmlinux_path;
 
 struct symbol_conf symbol_conf = {
+       .nanosecs               = false,
        .use_modules            = true,
        .try_vmlinux_path       = true,
        .demangle               = true,
        .demangle_kernel        = false,
        .cumulate_callchain     = true,
+       .time_quantum           = 100 * NSEC_PER_MSEC, /* 100ms */
        .show_hist_headers      = true,
        .symfs                  = "",
        .event_group            = true,
        .inline_name            = true,
+       .res_sample             = 0,
 };
 
 static enum dso_binary_type binary_type_symtab[] = {
@@ -1451,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
        case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
                return true;
 
+       case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__NOT_FOUND:
        default:
                return false;
index fffea68c12035da068cb6a121867cab65596e1b2..6c55fa6fccec151fce220bf0d0489a6e3dfca606 100644 (file)
@@ -8,6 +8,7 @@ struct strlist;
 struct intlist;
 
 struct symbol_conf {
+       bool            nanosecs;
        unsigned short  priv_size;
        bool            try_vmlinux_path,
                        init_annotation,
@@ -55,6 +56,7 @@ struct symbol_conf {
                        *sym_list_str,
                        *col_width_list_str,
                        *bt_stop_list_str;
+       unsigned long   time_quantum;
        struct strlist  *dso_list,
                        *comm_list,
                        *sym_list,
@@ -66,6 +68,7 @@ struct symbol_conf {
        struct intlist  *pid_list,
                        *tid_list;
        const char      *symfs;
+       int             res_sample;
 };
 
 extern struct symbol_conf symbol_conf;
index 0f53baec660e2494db6f3e506ddff91c05d89c36..20663a460df34d63d9d575ebea8a7e7681099bc7 100644 (file)
@@ -453,6 +453,14 @@ int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
        return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
 }
 
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz)
+{
+       u64 sec  = timestamp / NSEC_PER_SEC,
+           nsec = timestamp % NSEC_PER_SEC;
+
+       return scnprintf(buf, sz, "%" PRIu64 ".%09" PRIu64, sec, nsec);
+}
+
 int fetch_current_timestamp(char *buf, size_t sz)
 {
        struct timeval tv;
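
A small usage illustration of the new nanosecond formatter next to the existing microsecond one (arbitrary value; resulting buffer contents shown as comments):

        char buf[32];

        timestamp__scnprintf_usec(1234567890ULL, buf, sizeof(buf));  /* "1.234567"    */
        timestamp__scnprintf_nsec(1234567890ULL, buf, sizeof(buf));  /* "1.234567890" */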
index b923de44e36f9b350c9b94c021ccee732bc5f7df..72a42ea1d513af55f54896da02ce66bc6d037a82 100644 (file)
@@ -30,6 +30,7 @@ int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
                                int *range_size, int *range_num);
 
 int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz);
 
 int fetch_current_timestamp(char *buf, size_t sz);
 
index 9327c0ddc3a59c6424d33b6278b9cf7e51155ce7..c3fad065c89c085b39da83de4a751041a99ae3d6 100644 (file)
@@ -5077,6 +5077,9 @@ int fork_it(char **argv)
                signal(SIGQUIT, SIG_IGN);
                if (waitpid(child_pid, &status, 0) == -1)
                        err(status, "waitpid");
+
+               if (WIFEXITED(status))
+                       status = WEXITSTATUS(status);
        }
        /*
         * n.b. fork_it() does not check for errors from for_all_cpus()
index c9433a496d548daf9417a1f19af71753f93547c0..c81fc350f7ad46ad60d53ac3dd8121059020f9a6 100644 (file)
@@ -180,6 +180,8 @@ static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
        (void *) BPF_FUNC_sk_fullsock;
 static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
        (void *) BPF_FUNC_tcp_sock;
+static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
+       (void *) BPF_FUNC_get_listener_sock;
 static int (*bpf_skb_ecn_set_ce)(void *ctx) =
        (void *) BPF_FUNC_skb_ecn_set_ce;
 
index 90f8a206340ab4daa61c401144d45cbec6b15a63..ee99368c595ca0b0768ad7938212bc80977bacf1 100644 (file)
@@ -37,7 +37,7 @@ void test_map_lock(void)
        const char *file = "./test_map_lock.o";
        int prog_fd, map_fd[2], vars[17] = {};
        pthread_t thread_id[6];
-       struct bpf_object *obj;
+       struct bpf_object *obj = NULL;
        int err = 0, key = 0, i;
        void *ret;
 
index 9a573a9675d74beee07fc6025f1504682c394c21..114ebe6a438e562d864971a5a5d174b1e0936f8a 100644 (file)
@@ -5,7 +5,7 @@ void test_spinlock(void)
 {
        const char *file = "./test_spin_lock.o";
        pthread_t thread_id[4];
-       struct bpf_object *obj;
+       struct bpf_object *obj = NULL;
        int prog_fd;
        int err = 0, i;
        void *ret;
index de1a43e8f61070220c511b9aae698e4de7fb0659..37328f1485384b756c3120f428a3348206a9eb47 100644 (file)
@@ -8,38 +8,51 @@
 #include "bpf_helpers.h"
 #include "bpf_endian.h"
 
-enum bpf_array_idx {
-       SRV_IDX,
-       CLI_IDX,
-       __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+       ADDR_SRV_IDX,
+       ADDR_CLI_IDX,
+       __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+       EGRESS_SRV_IDX,
+       EGRESS_CLI_IDX,
+       INGRESS_LISTEN_IDX,
+       __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+       EGRESS_LINUM_IDX,
+       INGRESS_LINUM_IDX,
+       __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") addr_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct sockaddr_in6),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_ADDR_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") sock_result_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct bpf_sock),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") tcp_sock_result_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct bpf_tcp_sock),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") linum_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u32),
-       .max_entries = 1,
+       .max_entries = __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 static bool is_loopback6(__u32 *a6)
@@ -100,18 +113,20 @@ static void tpcpy(struct bpf_tcp_sock *dst,
 
 #define RETURN {                                               \
        linum = __LINE__;                                       \
-       bpf_map_update_elem(&linum_map, &idx0, &linum, 0);      \
+       bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \
        return 1;                                               \
 }
 
 SEC("cgroup_skb/egress")
-int read_sock_fields(struct __sk_buff *skb)
+int egress_read_sock_fields(struct __sk_buff *skb)
 {
-       __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx;
+       __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
        struct sockaddr_in6 *srv_sa6, *cli_sa6;
        struct bpf_tcp_sock *tp, *tp_ret;
        struct bpf_sock *sk, *sk_ret;
-       __u32 linum, idx0 = 0;
+       __u32 linum, linum_idx;
+
+       linum_idx = EGRESS_LINUM_IDX;
 
        sk = skb->sk;
        if (!sk || sk->state == 10)
@@ -132,14 +147,56 @@ int read_sock_fields(struct __sk_buff *skb)
                RETURN;
 
        if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
-               idx = srv_idx;
+               result_idx = EGRESS_SRV_IDX;
        else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
-               idx = cli_idx;
+               result_idx = EGRESS_CLI_IDX;
        else
                RETURN;
 
-       sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx);
-       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx);
+       sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
+       if (!sk_ret || !tp_ret)
+               RETURN;
+
+       skcpy(sk_ret, sk);
+       tpcpy(tp_ret, tp);
+
+       RETURN;
+}
+
+SEC("cgroup_skb/ingress")
+int ingress_read_sock_fields(struct __sk_buff *skb)
+{
+       __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX;
+       struct bpf_tcp_sock *tp, *tp_ret;
+       struct bpf_sock *sk, *sk_ret;
+       struct sockaddr_in6 *srv_sa6;
+       __u32 linum, linum_idx;
+
+       linum_idx = INGRESS_LINUM_IDX;
+
+       sk = skb->sk;
+       if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
+               RETURN;
+
+       srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+       if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
+               RETURN;
+
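+       /* 10 == BPF_TCP_LISTEN, 12 == BPF_TCP_NEW_SYN_RECV */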
+       if (sk->state != 10 && sk->state != 12)
+               RETURN;
+
+       sk = bpf_get_listener_sock(sk);
+       if (!sk)
+               RETURN;
+
+       tp = bpf_tcp_sock(sk);
+       if (!tp)
+               RETURN;
+
+       sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
        if (!sk_ret || !tp_ret)
                RETURN;
 
index 38797aa627a732f31d333aaa6cf8020ceb3a211d..23e3b314ca603956ce88ed4ac8f1512eeddfa34f 100644 (file)
@@ -5874,6 +5874,50 @@ const struct btf_dedup_test dedup_tests[] = {
                .dont_resolve_fwds = false,
        },
 },
+{
+       .descr = "dedup: enum fwd resolution",
+       .input = {
+               .raw_types = {
+                       /* [1] fwd enum 'e1' before full enum */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+                       /* [2] full enum 'e1' after fwd */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 123),
+                       /* [3] full enum 'e2' before fwd */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(4), 456),
+                       /* [4] fwd enum 'e2' after full enum */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+                       /* [5] incompatible fwd enum with different size */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+                       /* [6] incompatible full enum with different value */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 321),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+       },
+       .expect = {
+               .raw_types = {
+                       /* [1] full enum 'e1' */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 123),
+                       /* [2] full enum 'e2' */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(4), 456),
+                       /* [3] incompatible fwd enum with different size */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+                       /* [4] incompatible full enum with different value */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 321),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+       },
+       .opts = {
+               .dont_resolve_fwds = false,
+       },
+},
 
 };
 
index bc8943938bf53933577c9be075c2f35314708eb3..dcae7f664dce0827f5ddda4bc5f693d191def9b0 100644 (file)
 #include "cgroup_helpers.h"
 #include "bpf_rlimit.h"
 
-enum bpf_array_idx {
-       SRV_IDX,
-       CLI_IDX,
-       __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+       ADDR_SRV_IDX,
+       ADDR_CLI_IDX,
+       __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+       EGRESS_SRV_IDX,
+       EGRESS_CLI_IDX,
+       INGRESS_LISTEN_IDX,
+       __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+       EGRESS_LINUM_IDX,
+       INGRESS_LINUM_IDX,
+       __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 #define CHECK(condition, tag, format...) ({                            \
@@ -41,8 +54,16 @@ static int linum_map_fd;
 static int addr_map_fd;
 static int tp_map_fd;
 static int sk_map_fd;
-static __u32 srv_idx = SRV_IDX;
-static __u32 cli_idx = CLI_IDX;
+
+static __u32 addr_srv_idx = ADDR_SRV_IDX;
+static __u32 addr_cli_idx = ADDR_CLI_IDX;
+
+static __u32 egress_srv_idx = EGRESS_SRV_IDX;
+static __u32 egress_cli_idx = EGRESS_CLI_IDX;
+static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX;
+
+static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
+static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
 
 static void init_loopback6(struct sockaddr_in6 *sa6)
 {
@@ -93,29 +114,46 @@ static void print_tp(const struct bpf_tcp_sock *tp)
 
 static void check_result(void)
 {
-       struct bpf_tcp_sock srv_tp, cli_tp;
-       struct bpf_sock srv_sk, cli_sk;
-       __u32 linum, idx0 = 0;
+       struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
+       struct bpf_sock srv_sk, cli_sk, listen_sk;
+       __u32 ingress_linum, egress_linum;
        int err;
 
-       err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum);
+       err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
+                                 &egress_linum);
        CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
              "err:%d errno:%d", err, errno);
 
-       err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk);
-       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)",
+       err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
+                                 &ingress_linum);
+       CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+             "err:%d errno:%d", err, errno);
+
+       err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)",
+             "err:%d errno:%d", err, errno);
+       err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)",
+             "err:%d errno:%d", err, errno);
+
+       err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)",
              "err:%d errno:%d", err, errno);
-       err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp);
-       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)",
+       err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)",
              "err:%d errno:%d", err, errno);
 
-       err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk);
-       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)",
+       err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)",
              "err:%d errno:%d", err, errno);
-       err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp);
-       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)",
+       err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)",
              "err:%d errno:%d", err, errno);
 
+       printf("listen_sk: ");
+       print_sk(&listen_sk);
+       printf("\n");
+
        printf("srv_sk: ");
        print_sk(&srv_sk);
        printf("\n");
@@ -124,6 +162,10 @@ static void check_result(void)
        print_sk(&cli_sk);
        printf("\n");
 
+       printf("listen_tp: ");
+       print_tp(&listen_tp);
+       printf("\n");
+
        printf("srv_tp: ");
        print_tp(&srv_tp);
        printf("\n");
@@ -132,6 +174,19 @@ static void check_result(void)
        print_tp(&cli_tp);
        printf("\n");
 
+       CHECK(listen_sk.state != 10 ||
+             listen_sk.family != AF_INET6 ||
+             listen_sk.protocol != IPPROTO_TCP ||
+             memcmp(listen_sk.src_ip6, &in6addr_loopback,
+                    sizeof(listen_sk.src_ip6)) ||
+             listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
+             listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
+             listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+             listen_sk.dst_port,
+             "Unexpected listen_sk",
+             "Check listen_sk output. ingress_linum:%u",
+             ingress_linum);
+
        CHECK(srv_sk.state == 10 ||
              !srv_sk.state ||
              srv_sk.family != AF_INET6 ||
@@ -142,7 +197,8 @@ static void check_result(void)
                     sizeof(srv_sk.dst_ip6)) ||
              srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
              srv_sk.dst_port != cli_sa6.sin6_port,
-             "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum);
+             "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u",
+             egress_linum);
 
        CHECK(cli_sk.state == 10 ||
              !cli_sk.state ||
@@ -154,21 +210,31 @@ static void check_result(void)
                     sizeof(cli_sk.dst_ip6)) ||
              cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
              cli_sk.dst_port != srv_sa6.sin6_port,
-             "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum);
+             "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u",
+             egress_linum);
+
+       CHECK(listen_tp.data_segs_out ||
+             listen_tp.data_segs_in ||
+             listen_tp.total_retrans ||
+             listen_tp.bytes_acked,
+             "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
+             ingress_linum);
 
        CHECK(srv_tp.data_segs_out != 1 ||
              srv_tp.data_segs_in ||
              srv_tp.snd_cwnd != 10 ||
              srv_tp.total_retrans ||
              srv_tp.bytes_acked != DATA_LEN,
-             "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum);
+             "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
+             egress_linum);
 
        CHECK(cli_tp.data_segs_out ||
              cli_tp.data_segs_in != 1 ||
              cli_tp.snd_cwnd != 10 ||
              cli_tp.total_retrans ||
              cli_tp.bytes_received != DATA_LEN,
-             "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum);
+             "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
+             egress_linum);
 }
 
 static void test(void)
@@ -211,10 +277,10 @@ static void test(void)
              err, errno);
 
        /* Update addr_map with srv_sa6 and cli_sa6 */
-       err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
+       err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0);
        CHECK(err, "map_update", "err:%d errno:%d", err, errno);
 
-       err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
+       err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0);
        CHECK(err, "map_update", "err:%d errno:%d", err, errno);
 
        /* Connect from cli_sa6 to srv_sa6 */
@@ -273,9 +339,9 @@ int main(int argc, char **argv)
        struct bpf_prog_load_attr attr = {
                .file = "test_sock_fields_kern.o",
                .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
-               .expected_attach_type = BPF_CGROUP_INET_EGRESS,
        };
-       int cgroup_fd, prog_fd, err;
+       int cgroup_fd, egress_fd, ingress_fd, err;
+       struct bpf_program *ingress_prog;
        struct bpf_object *obj;
        struct bpf_map *map;
 
@@ -293,12 +359,24 @@ int main(int argc, char **argv)
        err = join_cgroup(TEST_CGROUP);
        CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
 
-       err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+       err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
        CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
 
-       err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
+       ingress_prog = bpf_object__find_program_by_title(obj,
+                                                        "cgroup_skb/ingress");
+       CHECK(!ingress_prog,
+             "bpf_object__find_program_by_title(cgroup_skb/ingress)",
+             "not found");
+       ingress_fd = bpf_program__fd(ingress_prog);
+
+       err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
        CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)",
              "err:%d errno%d", err, errno);
+
+       err = bpf_prog_attach(ingress_fd, cgroup_fd,
+                             BPF_CGROUP_INET_INGRESS, 0);
+       CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_INGRESS)",
+             "err:%d errno:%d", err, errno);
        close(cgroup_fd);
 
        map = bpf_object__find_map_by_name(obj, "addr_map");
index 4004891afa9c3dd7969419bcd8d10d4b9b9541c8..f2ccae39ee66b32c8b60890dcacff0bd89c8abd0 100644 (file)
        .errstr = "!read_ok",
        .result = REJECT,
 },
+{
+       "calls: cross frame pruning - liveness propagation",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_IMM(BPF_REG_8, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_8, 1),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_IMM(BPF_REG_9, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_9, 1),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+       .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+       .errstr = "!read_ok",
+       .result = REJECT,
+},
index 3ed3593bd8b61f4301b03fc9f06b97af4e8be17f..923f2110072d6f1f4a124824228c082d43f094d8 100644 (file)
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
 },
+{
+       "reference tracking: use ptr from bpf_tcp_sock() after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_sk_fullsock() after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use sk after bpf_sk_release(tp)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+},
+{
+       "reference tracking: bpf_sk_release(listen_sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "reference has not been acquired before",
+},
+{
+       /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
+       "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
index 0ddfdf76aba5a56f387f9f43f567cc69009c672b..416436231fab011aeebfbed0f0f253ad25bef912 100644 (file)
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "type=sock_common expected=sock",
+       .errstr = "reference has not been acquired before",
 },
 {
        "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "type=tcp_sock expected=sock",
+       .errstr = "reference has not been acquired before",
 },
index 5970cee6d05f26fd9be36a5b1385d62083ab3cfe..b074ea9b6fe864b25720729ff1a8c18ef03dedfd 100644 (file)
         "teardown": [
             "$TC action flush action bpf"
         ]
+    },
+    {
+        "id": "b8a1",
+        "name": "Replace bpf action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "bpf"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action bpf",
+                0,
+                1,
+                255
+            ],
+            "$TC action add action bpf bytecode '1,6 0 0 4294967295' pass index 90"
+        ],
+        "cmdUnderTest": "$TC action replace action bpf bytecode '1,6 0 0 4294967295' goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC action list action bpf",
+        "matchPattern": "action order [0-9]*: bpf.* default-action pass.*index 90",
+        "matchCount": "1",
+        "teardown": [
+            "$TC action flush action bpf"
+        ]
     }
 ]
index 13147a1f5731444abc28bd53950b3702c71835a5..cadde8f41fcd3db8cdaa21afd117cda720802b05 100644 (file)
         "teardown": [
             "$TC actions flush action connmark"
         ]
+    },
+    {
+        "id": "c506",
+        "name": "Replace connmark with invalid goto chain control",
+        "category": [
+            "actions",
+            "connmark"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action connmark",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action connmark pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action connmark goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action connmark index 90",
+        "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action connmark"
+        ]
     }
 ]
index a022792d392a9c93bf4c33d049ea7c817b85eded..ddabb2fbb7c72b4fa49787636c0b3502fb807aeb 100644 (file)
         "matchPattern": "^[ \t]+index [0-9]+ ref",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "d128",
+        "name": "Replace csum action with invalid goto chain control",
+        "category": [
+            "actions",
+            "csum"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action csum",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action csum iph index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action csum iph goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action csum index 90",
+        "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action csum"
+        ]
     }
 ]
index 89189a03ce3d431b817fcbdaef4eb3a89219fcf9..814b7a8a478be8c9283c8d9426b829d00b9350a4 100644 (file)
         "teardown": [
             "$TC actions flush action gact"
         ]
+    },
+    {
+        "id": "ca89",
+        "name": "Replace gact action with invalid goto chain control",
+        "category": [
+            "actions",
+            "gact"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action gact",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action pass random determ drop 2 index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action goto chain 42 random determ drop 5 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action gact",
+        "matchPattern": "action order [0-9]*: gact action pass.*random type determ drop val 2.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action gact"
+        ]
     }
 ]
index 0da3545cabdb6239190c8e916d28e25a882b78d2..c13a68b98fc775086d2087205ccefeb5304a4e7a 100644 (file)
         "matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "a0e2",
+        "name": "Replace ife encode action with invalid goto chain control",
+        "category": [
+            "actions",
+            "ife"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action ife",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action ife encode allow mark pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action ife encode allow mark goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action ife index 90",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E .*allow mark.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action ife"
+        ]
     }
 ]
index db49fd0f84459fdfa844bfea2fceb8723ad7b1d1..6e5fb3d256811a9c606505072d2cd1e2db085088 100644 (file)
         "teardown": [
             "$TC actions flush action mirred"
         ]
+    },
+    {
+        "id": "2a9a",
+        "name": "Replace mirred action with invalid goto chain control",
+        "category": [
+            "actions",
+            "mirred"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action mirred",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action mirred ingress mirror dev lo drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action mirred ingress mirror dev lo goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action mirred index 90",
+        "matchPattern": "action order [0-9]*: mirred \\(Ingress Mirror to device lo\\) drop.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action mirred"
+        ]
     }
 ]
index 0080dc2fd41c4542ac21f5f655a80c4c6225644a..bc12c1ccad30e9660c5ab19abfc8064aece57f05 100644 (file)
         "teardown": [
             "$TC actions flush action nat"
         ]
+    },
+    {
+        "id": "4b12",
+        "name": "Replace nat action with invalid goto chain control",
+        "category": [
+            "actions",
+            "nat"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action nat",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action nat ingress 1.18.1.1 1.18.2.2 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action nat index 90",
+        "matchPattern": "action order [0-9]+:  nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action nat"
+        ]
     }
 ]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
new file mode 100644
index 0000000..b73ceb9
--- /dev/null
@@ -0,0 +1,51 @@
+[
+    {
+        "id": "319a",
+        "name": "Add pedit action that mangles IP TTL",
+        "category": [
+            "actions",
+            "pedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action pedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action pedit ex munge ip ttl set 10",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions ls action pedit",
+        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*index 1 ref.*key #0  at ipv4\\+8: val 0a000000 mask 00ffffff",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action pedit"
+        ]
+    },
+    {
+        "id": "7e67",
+        "name": "Replace pedit action with invalid goto chain",
+        "category": [
+            "actions",
+            "pedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action pedit",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action pedit ex munge ip ttl set 10 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action pedit ex munge ip ttl set 10 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action pedit",
+        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*index 90 ref.*key #0  at ipv4\\+8: val 0a000000 mask 00ffffff",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action pedit"
+        ]
+    }
+]
index 4086a50a670ecba9cc46cb9872061e9151a24e42..b8268da5adaaa77a1cf4a3a7092b6beb4fffeced 100644
         "teardown": [
             "$TC actions flush action police"
         ]
+    },
+    {
+        "id": "689e",
+        "name": "Replace police action with invalid goto chain control",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action police rate 3mbit burst 250k drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action police rate 3mbit burst 250k goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action police index 90",
+        "matchPattern": "action order [0-9]*:  police 0x5a rate 3Mbit burst 250Kb mtu 2Kb action drop",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
     }
 ]
index 3aca33c00039615eb4687fbabf64c75b7e6bb388..27f0acaed880e765e9829306b1cc3a28e2755cbb 100644
         "teardown": [
             "$TC actions flush action sample"
         ]
+    },
+    {
+        "id": "0a6e",
+        "name": "Replace sample action with invalid goto chain control",
+        "category": [
+            "actions",
+            "sample"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action sample",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action sample rate 1024 group 4 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action sample rate 1024 group 7 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action sample",
+        "matchPattern": "action order [0-9]+: sample rate 1/1024 group 4 pass.*index 90",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action sample"
+        ]
     }
 ]
index e89a7aa4012d1664ef5a3f1d7263aaf70b3bf785..8e8c1ae12260877fea635022a4d3a194a8f3c65c 100644
         "teardown": [
             ""
         ]
+    },
+    {
+        "id": "b776",
+        "name": "Replace simple action with invalid goto chain control",
+        "category": [
+            "actions",
+            "simple"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action simple",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action simple sdata \"hello\" pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action simple sdata \"world\" goto chain 42 index  90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action simple",
+        "matchPattern": "action order [0-9]*: Simple <hello>.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action simple"
+        ]
     }
 ]
index 5aaf593b914a3646d8e62922b6dba210724fa8d9..ecd96eda7f6a1044996b2afaa37e325008acfa0e 100644
         "teardown": [
             "$TC actions flush action skbedit"
         ]
+    },
+    {
+        "id": "1b2b",
+        "name": "Replace skbedit action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action skbedit ptype host pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action skbedit ptype host goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit  ptype host pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbedit"
+        ]
     }
 ]
index fe3326e939c1b11bc008b46f452c336218906460..6eb4c4f97060fd3116a77537d8566f9e66d14060 100644
         "teardown": [
             "$TC actions flush action skbmod"
         ]
+    },
+    {
+        "id": "b651",
+        "name": "Replace skbmod action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "skbmod"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbmod",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action skbmod set etype 0x1111 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action skbmod set etype 0x1111 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action skbmod",
+        "matchPattern": "action order [0-9]*: skbmod pass set etype 0x1111\\s+index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbmod"
+        ]
     }
 ]
index e7e15a7336b6dfd1516e0276343f4afa735b41dd..28453a445fdb7e0af074dd26daaff90e8d8a120b 100644
         "teardown": [
            "$TC actions flush action tunnel_key"
        ]
+    },
+    {
+        "id": "8242",
+        "name": "Replace tunnel_key set action with invalid goto chain",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 10.10.10.2 dst_ip 20.20.20.1 dst_port 3129 id 2 csum goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 90",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*csum pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
     }
 ]
index 69ea09eefffc27290b5c16b084f8ff3798c184e8..cc7c7d75800809115bc22d62dbcd4c646a7f35d3 100644
         "teardown": [
             "$TC actions flush action vlan"
         ]
+    },
+    {
+        "id": "e394",
+        "name": "Replace vlan push action with invalid goto chain control",
+        "category": [
+            "actions",
+            "vlan"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action vlan",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action vlan push id 500 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action vlan push id 500 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action vlan index 90",
+        "matchPattern": "action order [0-9]+: vlan.*push id 500 protocol 802.1Q priority 0 pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action vlan"
+        ]
     }
 ]
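
The hunks above all extend the tdc action test files with cases of the same JSON shape (id, name, category, setup, cmdUnderTest, expExitCode, verifyCmd, matchPattern, matchCount, teardown). As a minimal sketch only, assuming a kernel source checkout and reusing the pedit.json path added in this series, the cases can be inspected with Python's json module:

import json

# Illustrative sketch: load one tdc case file touched by this merge and list
# each case with its expected exit code. The path assumes the current working
# directory is the root of a kernel source tree.
path = "tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json"
with open(path) as f:
    cases = json.load(f)

for case in cases:
    print(case["id"], case["name"], "-> expected exit code", case["expExitCode"])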