--- /dev/null
+From 3543d7ddd55fe12c37e8a9db846216c51846015b Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 23 Jan 2020 14:51:12 +0000
+Subject: arm64: dts: fast models: Fix FVP PCI interrupt-map property
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 3543d7ddd55fe12c37e8a9db846216c51846015b upstream.
+
+The interrupt map for the FVP's PCI node is missing the
+parent-unit-address cells for each of the INTx entries, leading to the
+kernel code failing to parse the entries correctly.
+
+Add the missing zero cells, which are pretty useless as far as the GIC
+is concerned, but that the spec requires. This allows INTx to be usable
+on the model, and VFIO to work correctly.
+
+Fixes: fa083b99eb28 ("arm64: dts: fast models: Add DTS fo Base RevC FVP")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/arm/fvp-base-revc.dts | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
++++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+@@ -161,10 +161,10 @@
+ bus-range = <0x0 0x1>;
+ reg = <0x0 0x40000000 0x0 0x10000000>;
+ ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
+- interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ msi-map = <0x0 &its 0x0 0x10000>;
+ iommu-map = <0x0 &smmu 0x0 0x10000>;
--- /dev/null
+From 3bf3c9744694803bd2d6f0ee70a6369b980530fd Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <marek.behun@nic.cz>
+Date: Sat, 15 Feb 2020 15:21:30 +0100
+Subject: bus: moxtet: fix potential stack buffer overflow
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Behún <marek.behun@nic.cz>
+
+commit 3bf3c9744694803bd2d6f0ee70a6369b980530fd upstream.
+
+The input_read function declares the size of the hex array relative to
+sizeof(buf), but buf is a pointer argument of the function. The hex
+array is meant to contain hexadecimal representation of the bin array.
+
+Link: https://lore.kernel.org/r/20200215142130.22743-1-marek.behun@nic.cz
+Fixes: 5bc7f990cd98 ("bus: Add support for Moxtet bus")
+Signed-off-by: Marek Behún <marek.behun@nic.cz>
+Reported-by: sohu0106 <sohu0106@126.com>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bus/moxtet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/bus/moxtet.c
++++ b/drivers/bus/moxtet.c
+@@ -466,7 +466,7 @@ static ssize_t input_read(struct file *f
+ {
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+- u8 hex[sizeof(buf) * 2 + 1];
++ u8 hex[sizeof(bin) * 2 + 1];
+ int ret, n;
+
+ ret = moxtet_spi_read(moxtet, bin);
--- /dev/null
+From 3b20bc2fe4c0cfd82d35838965dc7ff0b93415c6 Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Tue, 11 Feb 2020 01:53:16 -0500
+Subject: ceph: noacl mount option is effectively ignored
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit 3b20bc2fe4c0cfd82d35838965dc7ff0b93415c6 upstream.
+
+For the old mount API, the module parameters parsing function will
+be called in ceph_mount() and also just after the default posix acl
+flag set, so we can control to enable/disable it via the mount option.
+
+But for the new mount API, it will call the module parameters
+parsing function before ceph_get_tree(), so the posix acl will always
+be enabled.
+
+Fixes: 82995cc6c5ae ("libceph, rbd, ceph: convert to use the new mount API")
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/super.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1020,10 +1020,6 @@ static int ceph_get_tree(struct fs_conte
+ if (!fc->source)
+ return invalf(fc, "ceph: No source");
+
+-#ifdef CONFIG_CEPH_FS_POSIX_ACL
+- fc->sb_flags |= SB_POSIXACL;
+-#endif
+-
+ /* create client (which we may/may not use) */
+ fsc = create_fs_client(pctx->opts, pctx->copts);
+ pctx->opts = NULL;
+@@ -1141,6 +1137,10 @@ static int ceph_init_fs_context(struct f
+ fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
+ fsopt->congestion_kb = default_congestion_kb();
+
++#ifdef CONFIG_CEPH_FS_POSIX_ACL
++ fc->sb_flags |= SB_POSIXACL;
++#endif
++
+ fc->fs_private = pctx;
+ fc->ops = &ceph_context_ops;
+ return 0;
--- /dev/null
+From 3f6166aaf19902f2f3124b5426405e292e8974dd Mon Sep 17 00:00:00 2001
+From: Petr Pavlu <petr.pavlu@suse.com>
+Date: Mon, 10 Feb 2020 10:38:14 +0100
+Subject: cifs: fix mount option display for sec=krb5i
+
+From: Petr Pavlu <petr.pavlu@suse.com>
+
+commit 3f6166aaf19902f2f3124b5426405e292e8974dd upstream.
+
+Fix display for sec=krb5i which was wrongly interleaved by cruid,
+resulting in string "sec=krb5,cruid=<...>i" instead of
+"sec=krb5i,cruid=<...>".
+
+Fixes: 96281b9e46eb ("smb3: for kerberos mounts display the credential uid used")
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsfs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, s
+ seq_puts(s, "ntlm");
+ break;
+ case Kerberos:
+- seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
++ seq_puts(s, "krb5");
+ break;
+ case RawNTLMSSP:
+ seq_puts(s, "ntlmssp");
+@@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, s
+
+ if (ses->sign)
+ seq_puts(s, "i");
++
++ if (ses->sectype == Kerberos)
++ seq_printf(s, ",cruid=%u",
++ from_kuid_munged(&init_user_ns, ses->cred_uid));
+ }
+
+ static void
--- /dev/null
+From e0354d147e5889b5faa12e64fa38187aed39aad4 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 14 Jan 2020 14:40:31 +0000
+Subject: drivers: ipmi: fix off-by-one bounds check that leads to a out-of-bounds write
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit e0354d147e5889b5faa12e64fa38187aed39aad4 upstream.
+
+The end of buffer check is off-by-one since the check is against
+an index that is pre-incremented before a store to buf[]. Fix this
+adjusting the bounds check appropriately.
+
+Addresses-Coverity: ("Out-of-bounds write")
+Fixes: 51bd6f291583 ("Add support for IPMB driver")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Message-Id: <20200114144031.358003-1-colin.king@canonical.com>
+Reviewed-by: Asmaa Mnebhi <asmaa@mellanox.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmb_dev_int.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/char/ipmi/ipmb_dev_int.c
++++ b/drivers/char/ipmi/ipmb_dev_int.c
+@@ -253,7 +253,7 @@ static int ipmb_slave_cb(struct i2c_clie
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+- if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
++ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
+ break;
+
+ buf[++ipmb_dev->msg_idx] = *val;
--- /dev/null
+From cf2b012c90e74e85d8aea7d67e48868069cfee0c Mon Sep 17 00:00:00 2001
+From: Mike Jones <michael-a1.jones@analog.com>
+Date: Tue, 28 Jan 2020 10:59:59 -0700
+Subject: hwmon: (pmbus/ltc2978) Fix PMBus polling of MFR_COMMON definitions.
+
+From: Mike Jones <michael-a1.jones@analog.com>
+
+commit cf2b012c90e74e85d8aea7d67e48868069cfee0c upstream.
+
+Change 21537dc driver PMBus polling of MFR_COMMON from bits 5/4 to
+bits 6/5. This fixes a LTC297X family bug where polling always returns
+not busy even when the part is busy. This fixes a LTC388X and
+LTM467X bug where polling used PEND and NOT_IN_TRANS, and BUSY was
+not polled, which can lead to NACKing of commands. LTC388X and
+LTM467X modules now poll BUSY and PEND, increasing reliability by
+eliminating NACKing of commands.
+
+Signed-off-by: Mike Jones <michael-a1.jones@analog.com>
+Link: https://lore.kernel.org/r/1580234400-2829-2-git-send-email-michael-a1.jones@analog.com
+Fixes: e04d1ce9bbb49 ("hwmon: (ltc2978) Add polling for chips requiring it")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/pmbus/ltc2978.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/hwmon/pmbus/ltc2978.c
++++ b/drivers/hwmon/pmbus/ltc2978.c
+@@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977,
+
+ #define LTC_POLL_TIMEOUT 100 /* in milli-seconds */
+
+-#define LTC_NOT_BUSY BIT(5)
+-#define LTC_NOT_PENDING BIT(4)
++#define LTC_NOT_BUSY BIT(6)
++#define LTC_NOT_PENDING BIT(5)
+
+ /*
+ * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
--- /dev/null
+From a70ed0f2e6262e723ae8d70accb984ba309eacc2 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 10 Feb 2020 08:10:26 -0500
+Subject: IB/hfi1: Acquire lock to release TID entries when user file is closed
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit a70ed0f2e6262e723ae8d70accb984ba309eacc2 upstream.
+
+Each user context is allocated a certain number of RcvArray (TID)
+entries and these entries are managed through TID groups. These groups
+are put into one of three lists in each user context: tid_group_list,
+tid_used_list, and tid_full_list, depending on the number of used TID
+entries within each group. When TID packets are expected, one or more
+TID groups will be allocated. After the packets are received, the TID
+groups will be freed. Since multiple user threads may access the TID
+groups simultaneously, a mutex exp_mutex is used to synchronize the
+access. However, when the user file is closed, it tries to release
+all TID groups without acquiring the mutex first, which risks a race
+condition with another thread that may be releasing its TID groups,
+leading to data corruption.
+
+This patch addresses the issue by acquiring the mutex first before
+releasing the TID groups when the file is closed.
+
+Fixes: 3abb33ac6521 ("staging/hfi1: Add TID cache receive init and free funcs")
+Link: https://lore.kernel.org/r/20200210131026.87408.86853.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/user_exp_rcv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -142,10 +142,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_
+ {
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
++ mutex_lock(&uctxt->exp_mutex);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
++ mutex_unlock(&uctxt->exp_mutex);
+
+ kfree(fd->invalid_tids);
+ fd->invalid_tids = NULL;
--- /dev/null
+From be8638344c70bf492963ace206a9896606b6922d Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Mon, 10 Feb 2020 08:10:33 -0500
+Subject: IB/hfi1: Close window for pq and request coliding
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+commit be8638344c70bf492963ace206a9896606b6922d upstream.
+
+Cleaning up a pq can result in the following warning and panic:
+
+ WARNING: CPU: 52 PID: 77418 at lib/list_debug.c:53 __list_del_entry+0x63/0xd0
+ list_del corruption, ffff88cb2c6ac068->next is LIST_POISON1 (dead000000000100)
+ Modules linked in: mmfs26(OE) mmfslinux(OE) tracedev(OE) 8021q garp mrp ib_isert iscsi_target_mod target_core_mod crc_t10dif crct10dif_generic opa_vnic rpcrdma ib_iser libiscsi scsi_transport_iscsi ib_ipoib(OE) bridge stp llc iTCO_wdt iTCO_vendor_support intel_powerclamp coretemp intel_rapl iosf_mbi kvm_intel kvm irqbypass crct10dif_pclmul crct10dif_common crc32_pclmul ghash_clmulni_intel ast aesni_intel ttm lrw gf128mul glue_helper ablk_helper drm_kms_helper cryptd syscopyarea sysfillrect sysimgblt fb_sys_fops drm pcspkr joydev lpc_ich mei_me drm_panel_orientation_quirks i2c_i801 mei wmi ipmi_si ipmi_devintf ipmi_msghandler nfit libnvdimm acpi_power_meter acpi_pad hfi1(OE) rdmavt(OE) rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_core binfmt_misc numatools(OE) xpmem(OE) ip_tables
+ nfsv3 nfs_acl nfs lockd grace sunrpc fscache igb ahci i2c_algo_bit libahci dca ptp libata pps_core crc32c_intel [last unloaded: i2c_algo_bit]
+ CPU: 52 PID: 77418 Comm: pvbatch Kdump: loaded Tainted: G OE ------------ 3.10.0-957.38.3.el7.x86_64 #1
+ Hardware name: HPE.COM HPE SGI 8600-XA730i Gen10/X11DPT-SB-SG007, BIOS SBED1229 01/22/2019
+ Call Trace:
+ [<ffffffff90365ac0>] dump_stack+0x19/0x1b
+ [<ffffffff8fc98b78>] __warn+0xd8/0x100
+ [<ffffffff8fc98bff>] warn_slowpath_fmt+0x5f/0x80
+ [<ffffffff8ff970c3>] __list_del_entry+0x63/0xd0
+ [<ffffffff8ff9713d>] list_del+0xd/0x30
+ [<ffffffff8fddda70>] kmem_cache_destroy+0x50/0x110
+ [<ffffffffc0328130>] hfi1_user_sdma_free_queues+0xf0/0x200 [hfi1]
+ [<ffffffffc02e2350>] hfi1_file_close+0x70/0x1e0 [hfi1]
+ [<ffffffff8fe4519c>] __fput+0xec/0x260
+ [<ffffffff8fe453fe>] ____fput+0xe/0x10
+ [<ffffffff8fcbfd1b>] task_work_run+0xbb/0xe0
+ [<ffffffff8fc2bc65>] do_notify_resume+0xa5/0xc0
+ [<ffffffff90379134>] int_signal+0x12/0x17
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000010
+ IP: [<ffffffff8fe1f93e>] kmem_cache_close+0x7e/0x300
+ PGD 2cdab19067 PUD 2f7bfdb067 PMD 0
+ Oops: 0000 [#1] SMP
+ Modules linked in: mmfs26(OE) mmfslinux(OE) tracedev(OE) 8021q garp mrp ib_isert iscsi_target_mod target_core_mod crc_t10dif crct10dif_generic opa_vnic rpcrdma ib_iser libiscsi scsi_transport_iscsi ib_ipoib(OE) bridge stp llc iTCO_wdt iTCO_vendor_support intel_powerclamp coretemp intel_rapl iosf_mbi kvm_intel kvm irqbypass crct10dif_pclmul crct10dif_common crc32_pclmul ghash_clmulni_intel ast aesni_intel ttm lrw gf128mul glue_helper ablk_helper drm_kms_helper cryptd syscopyarea sysfillrect sysimgblt fb_sys_fops drm pcspkr joydev lpc_ich mei_me drm_panel_orientation_quirks i2c_i801 mei wmi ipmi_si ipmi_devintf ipmi_msghandler nfit libnvdimm acpi_power_meter acpi_pad hfi1(OE) rdmavt(OE) rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_core binfmt_misc numatools(OE) xpmem(OE) ip_tables
+ nfsv3 nfs_acl nfs lockd grace sunrpc fscache igb ahci i2c_algo_bit libahci dca ptp libata pps_core crc32c_intel [last unloaded: i2c_algo_bit]
+ CPU: 52 PID: 77418 Comm: pvbatch Kdump: loaded Tainted: G W OE ------------ 3.10.0-957.38.3.el7.x86_64 #1
+ Hardware name: HPE.COM HPE SGI 8600-XA730i Gen10/X11DPT-SB-SG007, BIOS SBED1229 01/22/2019
+ task: ffff88cc26db9040 ti: ffff88b5393a8000 task.ti: ffff88b5393a8000
+ RIP: 0010:[<ffffffff8fe1f93e>] [<ffffffff8fe1f93e>] kmem_cache_close+0x7e/0x300
+ RSP: 0018:ffff88b5393abd60 EFLAGS: 00010287
+ RAX: 0000000000000000 RBX: ffff88cb2c6ac000 RCX: 0000000000000003
+ RDX: 0000000000000400 RSI: 0000000000000400 RDI: ffffffff9095b800
+ RBP: ffff88b5393abdb0 R08: ffffffff9095b808 R09: ffffffff8ff77c19
+ R10: ffff88b73ce1f160 R11: ffffddecddde9800 R12: ffff88cb2c6ac000
+ R13: 000000000000000c R14: ffff88cf3fdca780 R15: 0000000000000000
+ FS: 00002aaaaab52500(0000) GS:ffff88b73ce00000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000010 CR3: 0000002d27664000 CR4: 00000000007607e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ PKRU: 55555554
+ Call Trace:
+ [<ffffffff8fe20d44>] __kmem_cache_shutdown+0x14/0x80
+ [<ffffffff8fddda78>] kmem_cache_destroy+0x58/0x110
+ [<ffffffffc0328130>] hfi1_user_sdma_free_queues+0xf0/0x200 [hfi1]
+ [<ffffffffc02e2350>] hfi1_file_close+0x70/0x1e0 [hfi1]
+ [<ffffffff8fe4519c>] __fput+0xec/0x260
+ [<ffffffff8fe453fe>] ____fput+0xe/0x10
+ [<ffffffff8fcbfd1b>] task_work_run+0xbb/0xe0
+ [<ffffffff8fc2bc65>] do_notify_resume+0xa5/0xc0
+ [<ffffffff90379134>] int_signal+0x12/0x17
+ Code: 00 00 ba 00 04 00 00 0f 4f c2 3d 00 04 00 00 89 45 bc 0f 84 e7 01 00 00 48 63 45 bc 49 8d 04 c4 48 89 45 b0 48 8b 80 c8 00 00 00 <48> 8b 78 10 48 89 45 c0 48 83 c0 10 48 89 45 d0 48 8b 17 48 39
+ RIP [<ffffffff8fe1f93e>] kmem_cache_close+0x7e/0x300
+ RSP <ffff88b5393abd60>
+ CR2: 0000000000000010
+
+The panic is the result of slab entries being freed during the destruction
+of the pq slab.
+
+The code attempts to quiesce the pq, but looking for n_req == 0 doesn't
+account for new requests.
+
+Fix the issue by using SRCU to get a pq pointer and adjust the pq free
+logic to NULL the fd pq pointer prior to the quiesce.
+
+Fixes: e87473bc1b6c ("IB/hfi1: Only set fd pointer when base context is completely initialized")
+Link: https://lore.kernel.org/r/20200210131033.87408.81174.stgit@awfm-01.aw.intel.com
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/file_ops.c | 52 ++++++++++++++++++------------
+ drivers/infiniband/hw/hfi1/hfi.h | 5 ++
+ drivers/infiniband/hw/hfi1/user_exp_rcv.c | 3 -
+ drivers/infiniband/hw/hfi1/user_sdma.c | 17 ++++++---
+ 4 files changed, 48 insertions(+), 29 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *
+
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+
+- if (fd) {
+- fd->rec_cpu_num = -1; /* no cpu affinity by default */
+- fd->mm = current->mm;
+- mmgrab(fd->mm);
+- fd->dd = dd;
+- kobject_get(&fd->dd->kobj);
+- fp->private_data = fd;
+- } else {
+- fp->private_data = NULL;
+-
+- if (atomic_dec_and_test(&dd->user_refcount))
+- complete(&dd->user_comp);
+-
+- return -ENOMEM;
+- }
+-
++ if (!fd || init_srcu_struct(&fd->pq_srcu))
++ goto nomem;
++ spin_lock_init(&fd->pq_rcu_lock);
++ spin_lock_init(&fd->tid_lock);
++ spin_lock_init(&fd->invalid_lock);
++ fd->rec_cpu_num = -1; /* no cpu affinity by default */
++ fd->mm = current->mm;
++ mmgrab(fd->mm);
++ fd->dd = dd;
++ kobject_get(&fd->dd->kobj);
++ fp->private_data = fd;
+ return 0;
++nomem:
++ kfree(fd);
++ fp->private_data = NULL;
++ if (atomic_dec_and_test(&dd->user_refcount))
++ complete(&dd->user_comp);
++ return -ENOMEM;
+ }
+
+ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file
+ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ {
+ struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
+- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
++ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
+ int done = 0, reqs = 0;
+ unsigned long dim = from->nr_segs;
++ int idx;
+
+- if (!cq || !pq)
++ idx = srcu_read_lock(&fd->pq_srcu);
++ pq = srcu_dereference(fd->pq, &fd->pq_srcu);
++ if (!cq || !pq) {
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -EIO;
++ }
+
+- if (!iter_is_iovec(from) || !dim)
++ if (!iter_is_iovec(from) || !dim) {
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -EINVAL;
++ }
+
+ trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
+
+- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
++ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -ENOSPC;
++ }
+
+ while (dim) {
+ int ret;
+@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct ki
+ reqs++;
+ }
+
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return reqs;
+ }
+
+@@ -707,6 +718,7 @@ done:
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
++ cleanup_srcu_struct(&fdata->pq_srcu);
+ kfree(fdata);
+ return 0;
+ }
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1436,10 +1436,13 @@ struct mmu_rb_handler;
+
+ /* Private data for file operations */
+ struct hfi1_filedata {
++ struct srcu_struct pq_srcu;
+ struct hfi1_devdata *dd;
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_user_sdma_comp_q *cq;
+- struct hfi1_user_sdma_pkt_q *pq;
++ /* update side lock for SRCU */
++ spinlock_t pq_rcu_lock;
++ struct hfi1_user_sdma_pkt_q __rcu *pq;
+ u16 subctxt;
+ /* for cpu affinity; -1 if none */
+ int rec_cpu_num;
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -87,9 +87,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_f
+ {
+ int ret = 0;
+
+- spin_lock_init(&fd->tid_lock);
+- spin_lock_init(&fd->invalid_lock);
+-
+ fd->entry_to_rb = kcalloc(uctxt->expected_count,
+ sizeof(struct rb_node *),
+ GFP_KERNEL);
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct h
+ pq = kzalloc(sizeof(*pq), GFP_KERNEL);
+ if (!pq)
+ return -ENOMEM;
+-
+ pq->dd = dd;
+ pq->ctxt = uctxt->ctxt;
+ pq->subctxt = fd->subctxt;
+@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct h
+ goto pq_mmu_fail;
+ }
+
+- fd->pq = pq;
++ rcu_assign_pointer(fd->pq, pq);
+ fd->cq = cq;
+
+ return 0;
+@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hf
+
+ trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
+
+- pq = fd->pq;
++ spin_lock(&fd->pq_rcu_lock);
++ pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
++ lockdep_is_held(&fd->pq_rcu_lock));
+ if (pq) {
++ rcu_assign_pointer(fd->pq, NULL);
++ spin_unlock(&fd->pq_rcu_lock);
++ synchronize_srcu(&fd->pq_srcu);
++ /* at this point there can be no more new requests */
+ if (pq->handler)
+ hfi1_mmu_rb_unregister(pq->handler);
+ iowait_sdma_drain(&pq->busy);
+@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hf
+ kfree(pq->req_in_use);
+ kmem_cache_destroy(pq->txreq_cache);
+ kfree(pq);
+- fd->pq = NULL;
++ } else {
++ spin_unlock(&fd->pq_rcu_lock);
+ }
+ if (fd->cq) {
+ vfree(fd->cq->comps);
+@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struc
+ {
+ int ret = 0, i;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
++ struct hfi1_user_sdma_pkt_q *pq =
++ srcu_dereference(fd->pq, &fd->pq_srcu);
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
+ struct hfi1_devdata *dd = pq->dd;
+ unsigned long idx = 0;
--- /dev/null
+From 10189e8e6fe8dcde13435f9354800429c4474fb1 Mon Sep 17 00:00:00 2001
+From: Mark Zhang <markz@mellanox.com>
+Date: Sun, 26 Jan 2020 19:17:08 +0200
+Subject: IB/mlx5: Return failure when rts2rts_qp_counters_set_id is not supported
+
+From: Mark Zhang <markz@mellanox.com>
+
+commit 10189e8e6fe8dcde13435f9354800429c4474fb1 upstream.
+
+When binding a QP with a counter and the QP state is not RESET, return
+failure if the rts2rts_qp_counters_set_id is not supported by the
+device.
+
+This is to prevent cases like manual bind for Connect-IB devices from
+returning success when the feature is not supported.
+
+Fixes: d14133dd4161 ("IB/mlx5: Support set qp counter")
+Link: https://lore.kernel.org/r/20200126171708.5167-1-leon@kernel.org
+Signed-off-by: Mark Zhang <markz@mellanox.com>
+Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/qp.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3394,9 +3394,6 @@ static int __mlx5_ib_qp_set_counter(stru
+ struct mlx5_ib_qp_base *base;
+ u32 set_id;
+
+- if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
+- return 0;
+-
+ if (counter)
+ set_id = counter->id;
+ else
+@@ -6529,6 +6526,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
+ */
+ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
+ {
++ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ int err = 0;
+
+@@ -6538,6 +6536,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp
+ goto out;
+ }
+
++ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
++
+ if (mqp->state == IB_QPS_RTS) {
+ err = __mlx5_ib_qp_set_counter(qp, counter);
+ if (!err)
--- /dev/null
+From f92e48718889b3d49cee41853402aa88cac84a6b Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 10 Feb 2020 08:10:40 -0500
+Subject: IB/rdmavt: Reset all QPs when the device is shut down
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit f92e48718889b3d49cee41853402aa88cac84a6b upstream.
+
+When the hfi1 device is shut down during a system reboot, it is possible
+that some QPs might have not been freed by ULPs. More requests could be
+post sent and a lingering timer could be triggered to schedule more packet
+sends, leading to a crash:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000102
+ IP: [ffffffff810a65f2] __queue_work+0x32/0x3c0
+ PGD 0
+ Oops: 0000 1 SMP
+ Modules linked in: nvmet_rdma(OE) nvmet(OE) nvme(OE) dm_round_robin nvme_rdma(OE) nvme_fabrics(OE) nvme_core(OE) pal_raw(POE) pal_pmt(POE) pal_cache(POE) pal_pile(POE) pal(POE) pal_compatible(OE) rpcrdma sunrpc ib_isert iscsi_target_mod target_core_mod ib_iser libiscsi scsi_transport_iscsi ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm mlx4_ib sb_edac edac_core intel_powerclamp coretemp intel_rapl iosf_mbi kvm irqbypass crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul glue_helper ablk_helper cryptd iTCO_wdt iTCO_vendor_support mxm_wmi ipmi_ssif pcspkr ses enclosure joydev scsi_transport_sas i2c_i801 sg mei_me lpc_ich mei ioatdma shpchp ipmi_si ipmi_devintf ipmi_msghandler wmi acpi_power_meter acpi_pad dm_multipath hangcheck_timer ip_tables ext4 mbcache jbd2 mlx4_en
+ sd_mod crc_t10dif crct10dif_generic mgag200 drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm mlx4_core crct10dif_pclmul crct10dif_common hfi1(OE) igb crc32c_intel rdmavt(OE) ahci ib_core libahci libata ptp megaraid_sas pps_core dca i2c_algo_bit i2c_core devlink dm_mirror dm_region_hash dm_log dm_mod
+ CPU: 23 PID: 0 Comm: swapper/23 Tainted: P OE ------------ 3.10.0-693.el7.x86_64 #1
+ Hardware name: Intel Corporation S2600CWR/S2600CWR, BIOS SE5C610.86B.01.01.0028.121720182203 12/17/2018
+ task: ffff8808f4ec4f10 ti: ffff8808f4ed8000 task.ti: ffff8808f4ed8000
+ RIP: 0010:[ffffffff810a65f2] [ffffffff810a65f2] __queue_work+0x32/0x3c0
+ RSP: 0018:ffff88105df43d48 EFLAGS: 00010046
+ RAX: 0000000000000086 RBX: 0000000000000086 RCX: 0000000000000000
+ RDX: ffff880f74e758b0 RSI: 0000000000000000 RDI: 000000000000001f
+ RBP: ffff88105df43d80 R08: ffff8808f3c583c8 R09: ffff8808f3c58000
+ R10: 0000000000000002 R11: ffff88105df43da8 R12: ffff880f74e758b0
+ R13: 000000000000001f R14: 0000000000000000 R15: ffff88105a300000
+ FS: 0000000000000000(0000) GS:ffff88105df40000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000102 CR3: 00000000019f2000 CR4: 00000000001407e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+ Stack:
+ ffff88105b6dd708 0000001f00000286 0000000000000086 ffff88105a300000
+ ffff880f74e75800 0000000000000000 ffff88105a300000 ffff88105df43d98
+ ffffffff810a6b85 ffff88105a301e80 ffff88105df43dc8 ffffffffc0224cde
+ Call Trace:
+ IRQ
+
+ [ffffffff810a6b85] queue_work_on+0x45/0x50
+ [ffffffffc0224cde] _hfi1_schedule_send+0x6e/0xc0 [hfi1]
+ [ffffffffc0170570] ? get_map_page+0x60/0x60 [rdmavt]
+ [ffffffffc0224d62] hfi1_schedule_send+0x32/0x70 [hfi1]
+ [ffffffffc0170644] rvt_rc_timeout+0xd4/0x120 [rdmavt]
+ [ffffffffc0170570] ? get_map_page+0x60/0x60 [rdmavt]
+ [ffffffff81097316] call_timer_fn+0x36/0x110
+ [ffffffffc0170570] ? get_map_page+0x60/0x60 [rdmavt]
+ [ffffffff8109982d] run_timer_softirq+0x22d/0x310
+ [ffffffff81090b3f] __do_softirq+0xef/0x280
+ [ffffffff816b6a5c] call_softirq+0x1c/0x30
+ [ffffffff8102d3c5] do_softirq+0x65/0xa0
+ [ffffffff81090ec5] irq_exit+0x105/0x110
+ [ffffffff816b76c2] smp_apic_timer_interrupt+0x42/0x50
+ [ffffffff816b5c1d] apic_timer_interrupt+0x6d/0x80
+ EOI
+
+ [ffffffff81527a02] ? cpuidle_enter_state+0x52/0xc0
+ [ffffffff81527b48] cpuidle_idle_call+0xd8/0x210
+ [ffffffff81034fee] arch_cpu_idle+0xe/0x30
+ [ffffffff810e7bca] cpu_startup_entry+0x14a/0x1c0
+ [ffffffff81051af6] start_secondary+0x1b6/0x230
+ Code: 89 e5 41 57 41 56 49 89 f6 41 55 41 89 fd 41 54 49 89 d4 53 48 83 ec 10 89 7d d4 9c 58 0f 1f 44 00 00 f6 c4 02 0f 85 be 02 00 00 41 f6 86 02 01 00 00 01 0f 85 58 02 00 00 49 c7 c7 28 19 01 00
+ RIP [ffffffff810a65f2] __queue_work+0x32/0x3c0
+ RSP ffff88105df43d48
+ CR2: 0000000000000102
+
+The solution is to reset the QPs before the device resources are freed.
+This reset will change the QP state to prevent post sends and delete
+timers to prevent callbacks.
+
+Fixes: 0acb0cc7ecc1 ("IB/rdmavt: Initialize and teardown of qpn table")
+Link: https://lore.kernel.org/r/20200210131040.87408.38161.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/sw/rdmavt/qp.c | 84 +++++++++++++++++++++++---------------
+ 1 file changed, 51 insertions(+), 33 deletions(-)
+
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -61,6 +61,8 @@
+ #define RVT_RWQ_COUNT_THRESHOLD 16
+
+ static void rvt_rc_timeout(struct timer_list *t);
++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++ enum ib_qp_type type);
+
+ /*
+ * Convert the AETH RNR timeout code into the number of microseconds.
+@@ -452,40 +454,41 @@ no_qp_table:
+ }
+
+ /**
+- * free_all_qps - check for QPs still in use
++ * rvt_free_qp_cb - callback function to reset a qp
++ * @qp: the qp to reset
++ * @v: a 64-bit value
++ *
++ * This function resets the qp and removes it from the
++ * qp hash table.
++ */
++static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
++{
++ unsigned int *qp_inuse = (unsigned int *)v;
++ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
++
++ /* Reset the qp and remove it from the qp hash list */
++ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
++
++ /* Increment the qp_inuse count */
++ (*qp_inuse)++;
++}
++
++/**
++ * rvt_free_all_qps - check for QPs still in use
+ * @rdi: rvt device info structure
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
++ * Return the number of QPs still in use.
+ */
+ static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
+ {
+- unsigned long flags;
+- struct rvt_qp *qp;
+- unsigned n, qp_inuse = 0;
+- spinlock_t *ql; /* work around too long line below */
+-
+- if (rdi->driver_f.free_all_qps)
+- qp_inuse = rdi->driver_f.free_all_qps(rdi);
++ unsigned int qp_inuse = 0;
+
+ qp_inuse += rvt_mcast_tree_empty(rdi);
+
+- if (!rdi->qp_dev)
+- return qp_inuse;
++ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
+
+- ql = &rdi->qp_dev->qpt_lock;
+- spin_lock_irqsave(ql, flags);
+- for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
+- qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
+- lockdep_is_held(ql));
+- RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
+-
+- for (; qp; qp = rcu_dereference_protected(qp->next,
+- lockdep_is_held(ql)))
+- qp_inuse++;
+- }
+- spin_unlock_irqrestore(ql, flags);
+- synchronize_rcu();
+ return qp_inuse;
+ }
+
+@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_i
+ }
+
+ /**
+- * rvt_reset_qp - initialize the QP state to the reset state
++ * _rvt_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * r_lock, s_hlock, and s_lock are required to be held by the caller
+ */
+-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+- enum ib_qp_type type)
++static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++ enum ib_qp_type type)
+ __must_hold(&qp->s_lock)
+ __must_hold(&qp->s_hlock)
+ __must_hold(&qp->r_lock)
+@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_
+ lockdep_assert_held(&qp->s_lock);
+ }
+
++/**
++ * rvt_reset_qp - initialize the QP state to the reset state
++ * @rdi: the device info
++ * @qp: the QP to reset
++ * @type: the QP type
++ *
++ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
++ * before calling _rvt_reset_qp().
++ */
++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++ enum ib_qp_type type)
++{
++ spin_lock_irq(&qp->r_lock);
++ spin_lock(&qp->s_hlock);
++ spin_lock(&qp->s_lock);
++ _rvt_reset_qp(rdi, qp, type);
++ spin_unlock(&qp->s_lock);
++ spin_unlock(&qp->s_hlock);
++ spin_unlock_irq(&qp->r_lock);
++}
++
+ /** rvt_free_qpn - Free a qpn from the bit map
+ * @qpt: QP table
+ * @qpn: queue pair number to free
+@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, st
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET)
+- rvt_reset_qp(rdi, qp, ibqp->qp_type);
++ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ break;
+
+ case IB_QPS_RTR:
+@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, s
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+- spin_lock_irq(&qp->r_lock);
+- spin_lock(&qp->s_hlock);
+- spin_lock(&qp->s_lock);
+ rvt_reset_qp(rdi, qp, ibqp->qp_type);
+- spin_unlock(&qp->s_lock);
+- spin_unlock(&qp->s_hlock);
+- spin_unlock_irq(&qp->r_lock);
+
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ /* qpn is now available for use again */
--- /dev/null
+From 9ea04d0df6e6541c6736b43bff45f1e54875a1db Mon Sep 17 00:00:00 2001
+From: Yonatan Cohen <yonatanc@mellanox.com>
+Date: Wed, 12 Feb 2020 09:26:34 +0200
+Subject: IB/umad: Fix kernel crash while unloading ib_umad
+
+From: Yonatan Cohen <yonatanc@mellanox.com>
+
+commit 9ea04d0df6e6541c6736b43bff45f1e54875a1db upstream.
+
+When disassociating a device from umad we must ensure that the sysfs
+access is prevented before blocking the fops, otherwise assumptions in
+sysfs don't hold:
+
+ CPU0 CPU1
+ ib_umad_kill_port() ibdev_show()
+ port->ib_dev = NULL
+ dev_name(port->ib_dev)
+
+The prior patch made an error in moving the device_destroy(), it should
+have been split into device_del() (above) and put_device() (below). At
+this point we already have the split, so move the device_del() back to its
+original place.
+
+ kernel stack
+ PF: error_code(0x0000) - not-present page
+ Oops: 0000 [#1] SMP DEBUG_PAGEALLOC PTI
+ RIP: 0010:ibdev_show+0x18/0x50 [ib_umad]
+ RSP: 0018:ffffc9000097fe40 EFLAGS: 00010282
+ RAX: 0000000000000000 RBX: ffffffffa0441120 RCX: ffff8881df514000
+ RDX: ffff8881df514000 RSI: ffffffffa0441120 RDI: ffff8881df1e8870
+ RBP: ffffffff81caf000 R08: ffff8881df1e8870 R09: 0000000000000000
+ R10: 0000000000001000 R11: 0000000000000003 R12: ffff88822f550b40
+ R13: 0000000000000001 R14: ffffc9000097ff08 R15: ffff8882238bad58
+ FS: 00007f1437ff3740(0000) GS:ffff888236940000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00000000000004e8 CR3: 00000001e0dfc001 CR4: 00000000001606e0
+ Call Trace:
+ dev_attr_show+0x15/0x50
+ sysfs_kf_seq_show+0xb8/0x1a0
+ seq_read+0x12d/0x350
+ vfs_read+0x89/0x140
+ ksys_read+0x55/0xd0
+ do_syscall_64+0x55/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9:
+
+Fixes: cf7ad3030271 ("IB/umad: Avoid destroying device while it is accessed")
+Link: https://lore.kernel.org/r/20200212072635.682689-9-leon@kernel.org
+Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/user_mad.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_
+ struct ib_umad_file *file;
+ int id;
+
++ cdev_device_del(&port->sm_cdev, &port->sm_dev);
++ cdev_device_del(&port->cdev, &port->dev);
++
+ mutex_lock(&port->file_mutex);
+
+ /* Mark ib_dev NULL and block ioctl or other file ops to progress
+@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_
+
+ mutex_unlock(&port->file_mutex);
+
+- cdev_device_del(&port->sm_cdev, &port->sm_dev);
+- cdev_device_del(&port->cdev, &port->dev);
+ ida_free(&umad_ida, port->dev_num);
+
+ /* balances device_initialize() */
--- /dev/null
+From fbd1ec000213c8b457dd4fb15b6de9ba02ec5482 Mon Sep 17 00:00:00 2001
+From: Luca Weiss <luca@z3ntu.xyz>
+Date: Sun, 9 Feb 2020 14:42:36 -0800
+Subject: Input: ili210x - fix return value of is_visible function
+
+From: Luca Weiss <luca@z3ntu.xyz>
+
+commit fbd1ec000213c8b457dd4fb15b6de9ba02ec5482 upstream.
+
+The is_visible function expects the permissions associated with an
+attribute of the sysfs group or 0 if an attribute is not visible.
+
+Change the code to return the attribute permissions when the attribute
+should be visible which resolves the warning:
+
+ Attribute calibrate: Invalid permissions 01
+
+Fixes: cc12ba1872c6 ("Input: ili210x - optionally show calibrate sysfs attribute")
+Signed-off-by: Luca Weiss <luca@z3ntu.xyz>
+Reviewed-by: Sven Van Asbroeck <TheSven73@gmail.com>
+Link: https://lore.kernel.org/r/20200209145628.649409-1-luca@z3ntu.xyz
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/touchscreen/ili210x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -321,7 +321,7 @@ static umode_t ili210x_calibrate_visible
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+
+- return priv->chip->has_calibrate_reg;
++ return priv->chip->has_calibrate_reg ? attr->mode : 0;
+ }
+
+ static const struct attribute_group ili210x_attr_group = {
--- /dev/null
+From 684c0422da71da0cd81319c90b8099b563b13da4 Mon Sep 17 00:00:00 2001
+From: Oliver Upton <oupton@google.com>
+Date: Fri, 7 Feb 2020 02:36:05 -0800
+Subject: KVM: nVMX: Handle pending #DB when injecting INIT VM-exit
+
+From: Oliver Upton <oupton@google.com>
+
+commit 684c0422da71da0cd81319c90b8099b563b13da4 upstream.
+
+SDM 27.3.4 states that the 'pending debug exceptions' VMCS field will
+be populated if a VM-exit caused by an INIT signal takes priority over a
+debug-trap. Emulate this behavior when synthesizing an INIT signal
+VM-exit into L1.
+
+Fixes: 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states")
+Signed-off-by: Oliver Upton <oupton@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/nested.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3583,6 +3583,33 @@ static void nested_vmx_inject_exception_
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
+ }
+
++/*
++ * Returns true if a debug trap is pending delivery.
++ *
++ * In KVM, debug traps bear an exception payload. As such, the class of a #DB
++ * exception may be inferred from the presence of an exception payload.
++ */
++static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.exception.pending &&
++ vcpu->arch.exception.nr == DB_VECTOR &&
++ vcpu->arch.exception.payload;
++}
++
++/*
++ * Certain VM-exits set the 'pending debug exceptions' field to indicate a
++ * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
++ * represents these debug traps with a payload that is said to be compatible
++ * with the 'pending debug exceptions' field, write the payload to the VMCS
++ * field if a VM-exit is delivered before the debug trap.
++ */
++static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
++{
++ if (vmx_pending_dbg_trap(vcpu))
++ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
++ vcpu->arch.exception.payload);
++}
++
+ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -3595,6 +3622,7 @@ static int vmx_check_nested_events(struc
+ test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+ if (block_nested_events)
+ return -EBUSY;
++ nested_vmx_update_pending_dbg(vcpu);
+ clear_bit(KVM_APIC_INIT, &apic->pending_events);
+ nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
+ return 0;
--- /dev/null
+From 307f1cfa269657c63cfe2c932386fcc24684d9dd Mon Sep 17 00:00:00 2001
+From: Oliver Upton <oupton@google.com>
+Date: Fri, 7 Feb 2020 02:36:04 -0800
+Subject: KVM: x86: Mask off reserved bit from #DB exception payload
+
+From: Oliver Upton <oupton@google.com>
+
+commit 307f1cfa269657c63cfe2c932386fcc24684d9dd upstream.
+
+KVM defines the #DB payload as compatible with the 'pending debug
+exceptions' field under VMX, not DR6. Mask off bit 12 when applying the
+payload to DR6, as it is reserved on DR6 but not the 'pending debug
+exceptions' field.
+
+Fixes: f10c729ff965 ("kvm: vmx: Defer setting of DR6 until #DB delivery")
+Signed-off-by: Oliver Upton <oupton@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -437,6 +437,14 @@ void kvm_deliver_exception_payload(struc
+ * for #DB exceptions under VMX.
+ */
+ vcpu->arch.dr6 ^= payload & DR6_RTM;
++
++ /*
++ * The #DB payload is defined as compatible with the 'pending
++ * debug exceptions' field under VMX, not DR6. While bit 12 is
++ * defined in the 'pending debug exceptions' field (enabled
++ * breakpoint), it is reserved and must be zero in DR6.
++ */
++ vcpu->arch.dr6 &= ~BIT(12);
+ break;
+ case PF_VECTOR:
+ vcpu->arch.cr2 = payload;
--- /dev/null
+From 2bf973ff9b9aeceb8acda629ae65341820d4b35b Mon Sep 17 00:00:00 2001
+From: Sara Sharon <sara.sharon@intel.com>
+Date: Fri, 31 Jan 2020 13:12:51 +0200
+Subject: mac80211: fix quiet mode activation in action frames
+
+From: Sara Sharon <sara.sharon@intel.com>
+
+commit 2bf973ff9b9aeceb8acda629ae65341820d4b35b upstream.
+
+Previously I intended to ignore quiet mode in probe response, however
+I ended up ignoring it instead for action frames. As a matter of fact,
+this path isn't invoked for probe responses to start with. Just revert
+this patch.
+
+Signed-off-by: Sara Sharon <sara.sharon@intel.com>
+Fixes: 7976b1e9e3bf ("mac80211: ignore quiet mode in probe")
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Link: https://lore.kernel.org/r/20200131111300.891737-15-luca@coelho.fi
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/mlme.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -8,7 +8,7 @@
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2019 Intel Corporation
++ * Copyright (C) 2018 - 2020 Intel Corporation
+ */
+
+ #include <linux/delay.h>
+@@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct
+ if (!res) {
+ ch_switch.timestamp = timestamp;
+ ch_switch.device_timestamp = device_timestamp;
+- ch_switch.block_tx = beacon ? csa_ie.mode : 0;
++ ch_switch.block_tx = csa_ie.mode;
+ ch_switch.chandef = csa_ie.chandef;
+ ch_switch.count = csa_ie.count;
+ ch_switch.delay = csa_ie.max_switch_time;
+@@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct
+
+ sdata->vif.csa_active = true;
+ sdata->csa_chandef = csa_ie.chandef;
+- sdata->csa_block_tx = ch_switch.block_tx;
++ sdata->csa_block_tx = csa_ie.mode;
+ ifmgd->csa_ignored_same_chan = false;
+
+ if (sdata->csa_block_tx)
+@@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct
+ * reset when the disconnection worker runs.
+ */
+ sdata->vif.csa_active = true;
+- sdata->csa_block_tx = ch_switch.block_tx;
++ sdata->csa_block_tx = csa_ie.mode;
+
+ ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
+ mutex_unlock(&local->chanctx_mtx);
--- /dev/null
+From f2b18baca9539c6a3116d48b70972c7a2ba5d766 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Wed, 15 Jan 2020 12:25:50 +0100
+Subject: mac80211: use more bits for ack_frame_id
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit f2b18baca9539c6a3116d48b70972c7a2ba5d766 upstream.
+
+It turns out that this wasn't a good idea, I hit a test failure in
+hwsim due to this. That particular failure was easily worked around,
+but it raised questions: if an AP needs to, for example, send action
+frames to each connected station, the current limit is nowhere near
+enough (especially if those stations are sleeping and the frames are
+queued for a while.)
+
+Shuffle around some bits to make more room for ack_frame_id to allow
+up to 8192 queued up frames, that's enough for queueing 4 frames to
+each connected station, even at the maximum of 2007 stations on a
+single AP.
+
+We take the bits from band (which currently only 2 but I leave 3 in
+case we add another band) and from the hw_queue, which can only need
+4 since it has a limit of 16 queues.
+
+Fixes: 6912daed05e1 ("mac80211: Shrink the size of ack_frame_id to make room for tx_time_est")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Acked-by: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+Link: https://lore.kernel.org/r/20200115122549.b9a4ef9f4980.Ied52ed90150220b83a280009c590b65d125d087c@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/mac80211.h | 11 +++++------
+ net/mac80211/cfg.c | 2 +-
+ net/mac80211/tx.c | 2 +-
+ 3 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1004,12 +1004,11 @@ ieee80211_rate_get_vht_nss(const struct
+ struct ieee80211_tx_info {
+ /* common information */
+ u32 flags;
+- u8 band;
+-
+- u8 hw_queue;
+-
+- u16 ack_frame_id:6;
+- u16 tx_time_est:10;
++ u32 band:3,
++ ack_frame_id:13,
++ hw_queue:4,
++ tx_time_est:10;
++ /* 2 free bits */
+
+ union {
+ struct {
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3450,7 +3450,7 @@ int ieee80211_attach_ack_skb(struct ieee
+
+ spin_lock_irqsave(&local->ack_status_lock, spin_flags);
+ id = idr_alloc(&local->ack_status_frames, ack_skb,
+- 1, 0x40, GFP_ATOMIC);
++ 1, 0x2000, GFP_ATOMIC);
+ spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
+
+ if (id < 0) {
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2442,7 +2442,7 @@ static int ieee80211_store_ack_skb(struc
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ id = idr_alloc(&local->ack_status_frames, ack_skb,
+- 1, 0x40, GFP_ATOMIC);
++ 1, 0x2000, GFP_ATOMIC);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ if (id >= 0) {
--- /dev/null
+From 5d63944f8206a80636ae8cb4b9107d3b49f43d37 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Thu, 13 Feb 2020 14:51:07 -0500
+Subject: NFSv4: Ensure the delegation cred is pinned when we call delegreturn
+
+From: Trond Myklebust <trondmy@gmail.com>
+
+commit 5d63944f8206a80636ae8cb4b9107d3b49f43d37 upstream.
+
+Ensure we don't release the delegation cred during the call to
+nfs4_proc_delegreturn().
+
+Fixes: ee05f456772d ("NFSv4: Fix races between open and delegreturn")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/delegation.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -222,13 +222,18 @@ void nfs_inode_reclaim_delegation(struct
+
+ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
+ {
++ const struct cred *cred;
+ int res = 0;
+
+- if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+- res = nfs4_proc_delegreturn(inode,
+- delegation->cred,
++ if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
++ spin_lock(&delegation->lock);
++ cred = get_cred(delegation->cred);
++ spin_unlock(&delegation->lock);
++ res = nfs4_proc_delegreturn(inode, cred,
+ &delegation->stateid,
+ issync);
++ put_cred(cred);
++ }
+ return res;
+ }
+
--- /dev/null
+From cd1b659d8ce7697ee9799b64f887528315b9097b Mon Sep 17 00:00:00 2001
+From: Olga Kornievskaia <kolga@netapp.com>
+Date: Wed, 12 Feb 2020 17:32:12 -0500
+Subject: NFSv4.1 make cachethis=no for writes
+
+From: Olga Kornievskaia <kolga@netapp.com>
+
+commit cd1b659d8ce7697ee9799b64f887528315b9097b upstream.
+
+Turning caching off for writes on the server should improve performance.
+
+Fixes: fba83f34119a ("NFS: Pass "privileged" value to nfs4_init_sequence()")
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Reviewed-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5295,7 +5295,7 @@ static void nfs4_proc_write_setup(struct
+ hdr->timestamp = jiffies;
+
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+- nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0);
++ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+ nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
+ }
+
--- /dev/null
+From f25372ffc3f6c2684b57fb718219137e6ee2b64c Mon Sep 17 00:00:00 2001
+From: Yi Zhang <yi.zhang@redhat.com>
+Date: Fri, 14 Feb 2020 18:48:02 +0800
+Subject: nvme: fix the parameter order for nvme_get_log in nvme_get_fw_slot_info
+
+From: Yi Zhang <yi.zhang@redhat.com>
+
+commit f25372ffc3f6c2684b57fb718219137e6ee2b64c upstream.
+
+nvme fw-activate operation will get the below warning log;
+fix it by updating the parameter order
+
+[ 113.231513] nvme nvme0: Get FW SLOT INFO log error
+
+Fixes: 0e98719b0e4b ("nvme: simplify the API for getting log pages")
+Reported-by: Sujith Pandel <sujith_pandel@dell.com>
+Reviewed-by: David Milburn <dmilburn@redhat.com>
+Signed-off-by: Yi Zhang <yi.zhang@redhat.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3867,7 +3867,7 @@ static void nvme_get_fw_slot_info(struct
+ if (!log)
+ return;
+
+- if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
++ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
+ sizeof(*log), 0))
+ dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+ kfree(log);
--- /dev/null
+From 80cc7bb6c104d733bff60ddda09f19139c61507c Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Fri, 7 Feb 2020 17:06:11 -0600
+Subject: perf stat: Don't report a null stalled cycles per insn metric
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit 80cc7bb6c104d733bff60ddda09f19139c61507c upstream.
+
+For data collected on machines with front end stalled cycles supported,
+such as found on modern AMD CPU families, commit 146540fb545b ("perf
+stat: Always separate stalled cycles per insn") introduces a new line in
+CSV output with a leading comma that upsets some automated scripts.
+Scripts have to use "-e ex_ret_instr" to work around this issue, after
+upgrading to a version of perf with that commit.
+
+We could add "if (have_frontend_stalled && !config->csv_sep)" to the not
+(total && avg) else clause, to emphasize that CSV users are usually
+scripts, and are written to do only what is needed, i.e., they wouldn't
+typically invoke "perf stat" without specifying an explicit event list.
+
+But - let alone CSV output - why should users now tolerate a constant
+0-reporting extra line in regular terminal output?:
+
+BEFORE:
+
+$ sudo perf stat --all-cpus -einstructions,cycles -- sleep 1
+
+ Performance counter stats for 'system wide':
+
+ 181,110,981 instructions # 0.58 insn per cycle
+ # 0.00 stalled cycles per insn
+ 309,876,469 cycles
+
+ 1.002202582 seconds time elapsed
+
+The user would not like to see the now permanent:
+
+ "0.00 stalled cycles per insn"
+
+line fixture, as it gives no useful information.
+
+So this patch removes the printing of the zeroed stalled cycles line
+altogether, almost reverting the very original commit fb4605ba47e7
+("perf stat: Check for frontend stalled for metrics"), which seems like
+it was written to normalize --metric-only column output of common Intel
+machines at the time: modern Intel machines have ceased to support the
+genericised frontend stalled metrics AFAICT.
+
+AFTER:
+
+$ sudo perf stat --all-cpus -einstructions,cycles -- sleep 1
+
+ Performance counter stats for 'system wide':
+
+ 244,071,432 instructions # 0.69 insn per cycle
+ 355,353,490 cycles
+
+ 1.001862516 seconds time elapsed
+
+Output behaviour when stalled cycles is indeed measured is not affected
+(BEFORE == AFTER):
+
+$ sudo perf stat --all-cpus -einstructions,cycles,stalled-cycles-frontend -- sleep 1
+
+ Performance counter stats for 'system wide':
+
+ 247,227,799 instructions # 0.63 insn per cycle
+ # 0.26 stalled cycles per insn
+ 394,745,636 cycles
+ 63,194,485 stalled-cycles-frontend # 16.01% frontend cycles idle
+
+ 1.002079770 seconds time elapsed
+
+Fixes: 146540fb545b ("perf stat: Always separate stalled cycles per insn")
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Acked-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Cong Wang <xiyou.wangcong@gmail.com>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Jin Yao <yao.jin@linux.intel.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20200207230613.26709-1-kim.phillips@amd.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/stat-shadow.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/tools/perf/util/stat-shadow.c
++++ b/tools/perf/util/stat-shadow.c
+@@ -18,7 +18,6 @@
+ * AGGR_NONE: Use matching CPU
+ * AGGR_THREAD: Not supported?
+ */
+-static bool have_frontend_stalled;
+
+ struct runtime_stat rt_stat;
+ struct stats walltime_nsecs_stats;
+@@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_s
+
+ void perf_stat__init_shadow_stats(void)
+ {
+- have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
+ runtime_stat__init(&rt_stat);
+ }
+
+@@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struc
+ print_metric(config, ctxp, NULL, "%7.2f ",
+ "stalled cycles per insn",
+ ratio);
+- } else if (have_frontend_stalled) {
+- out->new_line(config, ctxp);
+- print_metric(config, ctxp, NULL, "%7.2f ",
+- "stalled cycles per insn", 0);
+ }
+ } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
+ if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
--- /dev/null
+From f861854e1b435b27197417f6f90d87188003cb24 Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Tue, 21 Jan 2020 11:01:25 -0800
+Subject: perf/x86/intel: Fix inaccurate period in context switch for auto-reload
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit f861854e1b435b27197417f6f90d87188003cb24 upstream.
+
+Perf doesn't take the left period into account when auto-reload is
+enabled with fixed period sampling mode in context switch.
+
+Here is the MSR trace of the perf command as below.
+(The MSR trace is simplified from a ftrace log.)
+
+ #perf record -e cycles:p -c 2000000 -- ./triad_loop
+
+ //The MSR trace of task schedule out
+ //perf disable all counters, disable PEBS, disable GP counter 0,
+ //read GP counter 0, and re-enable all counters.
+ //The counter 0 stops at 0xfffffff82840
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 0
+ write_msr: MSR_IA32_PEBS_ENABLE(3f1), value 0
+ write_msr: MSR_P6_EVNTSEL0(186), value 40003003c
+ rdpmc: 0, value fffffff82840
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value f000000ff
+
+ //The MSR trace of the same task schedule in again
+ //perf disable all counters, enable and set GP counter 0,
+ //enable PEBS, and re-enable all counters.
+ //0xffffffe17b80 (-2000000) is written to GP counter 0.
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 0
+ write_msr: MSR_IA32_PMC0(4c1), value ffffffe17b80
+ write_msr: MSR_P6_EVNTSEL0(186), value 40043003c
+ write_msr: MSR_IA32_PEBS_ENABLE(3f1), value 1
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value f000000ff
+
+When the same task schedule in again, the counter should starts from
+previous left. However, it starts from the fixed period -2000000 again.
+
+A special variant of intel_pmu_save_and_restart() is used for
+auto-reload, which doesn't update the hwc->period_left.
+When the monitored task schedules in again, perf doesn't know the left
+period. The fixed period is used, which is inaccurate.
+
+With auto-reload, the counter always has a negative counter value. So
+the left period is -value. Update the period_left in
+intel_pmu_save_and_restart_reload().
+
+With the patch:
+
+ //The MSR trace of task schedule out
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 0
+ write_msr: MSR_IA32_PEBS_ENABLE(3f1), value 0
+ write_msr: MSR_P6_EVNTSEL0(186), value 40003003c
+ rdpmc: 0, value ffffffe25cbc
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value f000000ff
+
+ //The MSR trace of the same task schedule in again
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 0
+ write_msr: MSR_IA32_PMC0(4c1), value ffffffe25cbc
+ write_msr: MSR_P6_EVNTSEL0(186), value 40043003c
+ write_msr: MSR_IA32_PEBS_ENABLE(3f1), value 1
+ write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value f000000ff
+
+Fixes: d31fc13fdcb2 ("perf/x86/intel: Fix event update for auto-reload")
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20200121190125.3389-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/ds.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1713,6 +1713,8 @@ intel_pmu_save_and_restart_reload(struct
+ old = ((s64)(prev_raw_count << shift) >> shift);
+ local64_add(new - old + count * period, &event->count);
+
++ local64_set(&hwc->period_left, -new);
++
+ perf_event_update_userpage(event);
+
+ return 0;
--- /dev/null
+From a72f4ac1d778f7bde93dfee69bfc23377ec3d74f Mon Sep 17 00:00:00 2001
+From: Avihai Horon <avihaih@mellanox.com>
+Date: Sun, 26 Jan 2020 19:15:00 +0200
+Subject: RDMA/core: Fix invalid memory access in spec_filter_size
+
+From: Avihai Horon <avihaih@mellanox.com>
+
+commit a72f4ac1d778f7bde93dfee69bfc23377ec3d74f upstream.
+
+Add a check that the size specified in the flow spec header doesn't cause
+an overflow when calculating the filter size, and thus prevent access to
+invalid memory. The following crash from syzkaller revealed it.
+
+ kasan: CONFIG_KASAN_INLINE enabled
+ kasan: GPF could be caused by NULL-ptr deref or user memory access
+ general protection fault: 0000 [#1] SMP KASAN PTI
+ CPU: 1 PID: 17834 Comm: syz-executor.3 Not tainted 5.5.0-rc5 #2
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+ rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+ RIP: 0010:memchr_inv+0xd3/0x330
+ Code: 89 f9 89 f5 83 e1 07 0f 85 f9 00 00 00 49 89 d5 49 c1 ed 03 45 85
+ ed 74 6f 48 89 d9 48 b8 00 00 00 00 00 fc ff df 48 c1 e9 03 <80> 3c 01
+ 00 0f 85 0d 02 00 00 44 0f b6 e5 48 b8 01 01 01 01 01 01
+ RSP: 0018:ffffc9000a13fa50 EFLAGS: 00010202
+ RAX: dffffc0000000000 RBX: 7fff88810de9d820 RCX: 0ffff11021bd3b04
+ RDX: 000000000000fff8 RSI: 0000000000000000 RDI: 7fff88810de9d820
+ RBP: 0000000000000000 R08: ffff888110d69018 R09: 0000000000000009
+ R10: 0000000000000001 R11: ffffed10236267cc R12: 0000000000000004
+ R13: 0000000000001fff R14: ffff88810de9d820 R15: 0000000000000040
+ FS: 00007f9ee0e51700(0000) GS:ffff88811b100000(0000)
+ knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000000 CR3: 0000000115ea0006 CR4: 0000000000360ee0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ spec_filter_size.part.16+0x34/0x50
+ ib_uverbs_kern_spec_to_ib_spec_filter+0x691/0x770
+ ib_uverbs_ex_create_flow+0x9ea/0x1b40
+ ib_uverbs_write+0xaa5/0xdf0
+ __vfs_write+0x7c/0x100
+ vfs_write+0x168/0x4a0
+ ksys_write+0xc8/0x200
+ do_syscall_64+0x9c/0x390
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x465b49
+ Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89
+ f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01
+ f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
+ RSP: 002b:00007f9ee0e50c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+ RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000465b49
+ RDX: 00000000000003a0 RSI: 00000000200007c0 RDI: 0000000000000004
+ RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000246 R12: 00007f9ee0e516bc
+ R13: 00000000004ca2da R14: 000000000070deb8 R15: 00000000ffffffff
+ Modules linked in:
+ Dumping ftrace buffer:
+ (ftrace buffer empty)
+
+Fixes: 94e03f11ad1f ("IB/uverbs: Add support for flow tag")
+Link: https://lore.kernel.org/r/20200126171500.4623-1-leon@kernel.org
+Signed-off-by: Avihai Horon <avihaih@mellanox.com>
+Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/uverbs_cmd.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2720,12 +2720,6 @@ static int kern_spec_to_ib_spec_action(s
+ return 0;
+ }
+
+-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
+-{
+- /* Returns user space filter size, includes padding */
+- return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
+-}
+-
+ static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
+ u16 ib_real_filter_sz)
+ {
+@@ -2869,11 +2863,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filte
+ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
+ union ib_flow_spec *ib_spec)
+ {
+- ssize_t kern_filter_sz;
++ size_t kern_filter_sz;
+ void *kern_spec_mask;
+ void *kern_spec_val;
+
+- kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
++ if (check_sub_overflow((size_t)kern_spec->hdr.size,
++ sizeof(struct ib_uverbs_flow_spec_hdr),
++ &kern_filter_sz))
++ return -EINVAL;
++
++ kern_filter_sz /= 2;
+
+ kern_spec_val = (void *)kern_spec +
+ sizeof(struct ib_uverbs_flow_spec_hdr);
--- /dev/null
+From 1dd017882e01d2fcd9c5dbbf1eb376211111c393 Mon Sep 17 00:00:00 2001
+From: Leon Romanovsky <leon@kernel.org>
+Date: Wed, 12 Feb 2020 10:06:51 +0200
+Subject: RDMA/core: Fix protection fault in get_pkey_idx_qp_list
+
+From: Leon Romanovsky <leonro@mellanox.com>
+
+commit 1dd017882e01d2fcd9c5dbbf1eb376211111c393 upstream.
+
+We don't need to mark the pkey as valid if the user set only one of the
+pkey index or port number; otherwise it will result in a NULL pointer
+dereference while accessing the uninitialized pkey list. The following
+crash from Syzkaller revealed it.
+
+ kasan: CONFIG_KASAN_INLINE enabled
+ kasan: GPF could be caused by NULL-ptr deref or user memory access
+ general protection fault: 0000 [#1] SMP KASAN PTI
+ CPU: 1 PID: 14753 Comm: syz-executor.2 Not tainted 5.5.0-rc5 #2
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+ rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+ RIP: 0010:get_pkey_idx_qp_list+0x161/0x2d0
+ Code: 01 00 00 49 8b 5e 20 4c 39 e3 0f 84 b9 00 00 00 e8 e4 42 6e fe 48
+ 8d 7b 10 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04
+ 02 84 c0 74 08 3c 01 0f 8e d0 00 00 00 48 8d 7d 04 48 b8
+ RSP: 0018:ffffc9000bc6f950 EFLAGS: 00010202
+ RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff82c8bdec
+ RDX: 0000000000000002 RSI: ffffc900030a8000 RDI: 0000000000000010
+ RBP: ffff888112c8ce80 R08: 0000000000000004 R09: fffff5200178df1f
+ R10: 0000000000000001 R11: fffff5200178df1f R12: ffff888115dc4430
+ R13: ffff888115da8498 R14: ffff888115dc4410 R15: ffff888115da8000
+ FS: 00007f20777de700(0000) GS:ffff88811b100000(0000)
+ knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000001b2f721000 CR3: 00000001173ca002 CR4: 0000000000360ee0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ port_pkey_list_insert+0xd7/0x7c0
+ ib_security_modify_qp+0x6fa/0xfc0
+ _ib_modify_qp+0x8c4/0xbf0
+ modify_qp+0x10da/0x16d0
+ ib_uverbs_modify_qp+0x9a/0x100
+ ib_uverbs_write+0xaa5/0xdf0
+ __vfs_write+0x7c/0x100
+ vfs_write+0x168/0x4a0
+ ksys_write+0xc8/0x200
+ do_syscall_64+0x9c/0x390
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fixes: d291f1a65232 ("IB/core: Enforce PKey security on QPs")
+Link: https://lore.kernel.org/r/20200212080651.GB679970@unreal
+Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Message-Id: <20200212080651.GB679970@unreal>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/security.c | 24 +++++++++---------------
+ 1 file changed, 9 insertions(+), 15 deletions(-)
+
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pp
+ if (!new_pps)
+ return NULL;
+
+- if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
+- if (!qp_pps) {
+- new_pps->main.port_num = qp_attr->port_num;
+- new_pps->main.pkey_index = qp_attr->pkey_index;
+- } else {
+- new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
+- qp_attr->port_num :
+- qp_pps->main.port_num;
+-
+- new_pps->main.pkey_index =
+- (qp_attr_mask & IB_QP_PKEY_INDEX) ?
+- qp_attr->pkey_index :
+- qp_pps->main.pkey_index;
+- }
++ if (qp_attr_mask & IB_QP_PORT)
++ new_pps->main.port_num =
++ (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
++ if (qp_attr_mask & IB_QP_PKEY_INDEX)
++ new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
++ qp_attr->pkey_index;
++ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+ new_pps->main.state = IB_PORT_PKEY_VALID;
+- } else if (qp_pps) {
++
++	if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
+ new_pps->main.port_num = qp_pps->main.port_num;
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+ if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
--- /dev/null
+From 8a4f300b978edbbaa73ef9eca660e45eb9f13873 Mon Sep 17 00:00:00 2001
+From: Kamal Heib <kamalheib1@gmail.com>
+Date: Wed, 5 Feb 2020 13:05:30 +0200
+Subject: RDMA/hfi1: Fix memory leak in _dev_comp_vect_mappings_create
+
+From: Kamal Heib <kamalheib1@gmail.com>
+
+commit 8a4f300b978edbbaa73ef9eca660e45eb9f13873 upstream.
+
+Make sure to free the allocated cpumask_var_t's to avoid the following
+reported memory leak by kmemleak:
+
+$ cat /sys/kernel/debug/kmemleak
+unreferenced object 0xffff8897f812d6a8 (size 8):
+ comm "kworker/1:1", pid 347, jiffies 4294751400 (age 101.703s)
+ hex dump (first 8 bytes):
+ 00 00 00 00 00 00 00 00 ........
+ backtrace:
+ [<00000000bff49664>] alloc_cpumask_var_node+0x4c/0xb0
+ [<0000000075d3ca81>] hfi1_comp_vectors_set_up+0x20f/0x800 [hfi1]
+ [<0000000098d420df>] hfi1_init_dd+0x3311/0x4960 [hfi1]
+ [<0000000071be7e52>] init_one+0x25e/0xf10 [hfi1]
+ [<000000005483d4c2>] local_pci_probe+0xd4/0x180
+ [<000000007c3cbc6e>] work_for_cpu_fn+0x51/0xa0
+ [<000000001d626905>] process_one_work+0x8f0/0x17b0
+ [<000000007e569e7e>] worker_thread+0x536/0xb50
+ [<00000000fd39a4a5>] kthread+0x30c/0x3d0
+ [<0000000056f2edb3>] ret_from_fork+0x3a/0x50
+
+Fixes: 5d18ee67d4c1 ("IB/{hfi1, rdmavt, qib}: Implement CQ completion vector support")
+Link: https://lore.kernel.org/r/20200205110530.12129-1-kamalheib1@gmail.com
+Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/affinity.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_creat
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
+ }
+
++ free_cpumask_var(available_cpus);
++ free_cpumask_var(non_intr_cpus);
+ return 0;
+
+ fail:
--- /dev/null
+From d219face9059f38ad187bde133451a2a308fdb7c Mon Sep 17 00:00:00 2001
+From: Krishnamraju Eraparaju <krishna2@chelsio.com>
+Date: Tue, 4 Feb 2020 14:42:30 +0530
+Subject: RDMA/iw_cxgb4: initiate CLOSE when entering TERM
+
+From: Krishnamraju Eraparaju <krishna2@chelsio.com>
+
+commit d219face9059f38ad187bde133451a2a308fdb7c upstream.
+
+As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3, always initiate a CLOSE
+when entering into TERM state.
+
+In c4iw_modify_qp(), the disconnect operation should only be performed
+when the modify_qp call is invoked from ib_core. All other internal
+modify_qp calls (invoked within iw_cxgb4) that need 'disconnect' should
+call c4iw_ep_disconnect() explicitly after modify_qp. Otherwise, deadlocks
+like below can occur:
+
+ Call Trace:
+ schedule+0x2f/0xa0
+ schedule_preempt_disabled+0xa/0x10
+ __mutex_lock.isra.5+0x2d0/0x4a0
+ c4iw_ep_disconnect+0x39/0x430 => tries to reacquire ep lock again
+ c4iw_modify_qp+0x468/0x10d0
+ rx_data+0x218/0x570 => acquires ep lock
+ process_work+0x5f/0x70
+ process_one_work+0x1a7/0x3b0
+ worker_thread+0x30/0x390
+ kthread+0x112/0x130
+ ret_from_fork+0x35/0x40
+
+Fixes: d2c33370ae73 ("RDMA/iw_cxgb4: Always disconnect when QP is transitioning to TERMINATE state")
+Link: https://lore.kernel.org/r/20200204091230.7210-1-krishna2@chelsio.com
+Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 4 ++++
+ drivers/infiniband/hw/cxgb4/qp.c | 4 ++--
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *de
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+
++ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
++ * when entering the TERM state the RNIC MUST initiate a CLOSE.
++ */
++ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+ } else
+ pr_warn("TERM received tid %u no ep/qp\n", tid);
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp,
+ qhp->attr.layer_etype = attrs->layer_etype;
+ qhp->attr.ecode = attrs->ecode;
+ ep = qhp->ep;
+- c4iw_get_ep(&ep->com);
+- disconnect = 1;
+ if (!internal) {
++ c4iw_get_ep(&ep->com);
+ terminate = 1;
++ disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
+ ret = rdma_fini(rhp, qhp, ep);
--- /dev/null
+From 8ac0e6641c7ca14833a2a8c6f13d8e0a435e535c Mon Sep 17 00:00:00 2001
+From: Zhu Yanjun <yanjunz@mellanox.com>
+Date: Wed, 12 Feb 2020 09:26:33 +0200
+Subject: RDMA/rxe: Fix soft lockup problem due to using tasklets in softirq
+
+From: Zhu Yanjun <yanjunz@mellanox.com>
+
+commit 8ac0e6641c7ca14833a2a8c6f13d8e0a435e535c upstream.
+
+When run stress tests with RXE, the following Call Traces often occur
+
+ watchdog: BUG: soft lockup - CPU#2 stuck for 22s! [swapper/2:0]
+ ...
+ Call Trace:
+ <IRQ>
+ create_object+0x3f/0x3b0
+ kmem_cache_alloc_node_trace+0x129/0x2d0
+ __kmalloc_reserve.isra.52+0x2e/0x80
+ __alloc_skb+0x83/0x270
+ rxe_init_packet+0x99/0x150 [rdma_rxe]
+ rxe_requester+0x34e/0x11a0 [rdma_rxe]
+ rxe_do_task+0x85/0xf0 [rdma_rxe]
+ tasklet_action_common.isra.21+0xeb/0x100
+ __do_softirq+0xd0/0x298
+ irq_exit+0xc5/0xd0
+ smp_apic_timer_interrupt+0x68/0x120
+ apic_timer_interrupt+0xf/0x20
+ </IRQ>
+ ...
+
+The root cause is that tasklet is actually a softirq. In a tasklet
+handler, another softirq handler is triggered. Usually these softirq
+handlers run on the same cpu core. So this will cause "soft lockup Bug".
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Link: https://lore.kernel.org/r/20200212072635.682689-8-leon@kernel.org
+Signed-off-by: Zhu Yanjun <yanjunz@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/sw/rxe/rxe_comp.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(
+ qp->comp.psn = pkt->psn;
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+ }
+ return COMPST_ERROR_RETRY;
+@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *q
+ */
+ if (qp->req.wait_fence) {
+ qp->req.wait_fence = 0;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+ }
+
+@@ -479,7 +479,7 @@ static inline enum comp_state complete_a
+ if (qp->req.need_rd_atomic) {
+ qp->comp.timeout_retry = 0;
+ qp->req.need_rd_atomic = 0;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+ }
+
+@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
+ RXE_CNT_COMP_RETRY);
+ qp->req.need_retry = 1;
+ qp->comp.started_retry = 1;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+
+ if (pkt) {
--- /dev/null
+From 0f8a206df7c920150d2aa45574fba0ab7ff6be4f Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <natechancellor@gmail.com>
+Date: Sat, 8 Feb 2020 07:08:59 -0700
+Subject: s390/time: Fix clk type in get_tod_clock
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+commit 0f8a206df7c920150d2aa45574fba0ab7ff6be4f upstream.
+
+Clang warns:
+
+In file included from ../arch/s390/boot/startup.c:3:
+In file included from ../include/linux/elf.h:5:
+In file included from ../arch/s390/include/asm/elf.h:132:
+In file included from ../include/linux/compat.h:10:
+In file included from ../include/linux/time.h:74:
+In file included from ../include/linux/time32.h:13:
+In file included from ../include/linux/timex.h:65:
+../arch/s390/include/asm/timex.h:160:20: warning: passing 'unsigned char
+[16]' to parameter of type 'char *' converts between pointers to integer
+types with different sign [-Wpointer-sign]
+ get_tod_clock_ext(clk);
+ ^~~
+../arch/s390/include/asm/timex.h:149:44: note: passing argument to
+parameter 'clk' here
+static inline void get_tod_clock_ext(char *clk)
+ ^
+
+Change clk's type to just be char so that it matches what happens in
+get_tod_clock_ext.
+
+Fixes: 57b28f66316d ("[S390] s390_hypfs: Add new attributes")
+Link: https://github.com/ClangBuiltLinux/linux/issues/861
+Link: http://lkml.kernel.org/r/20200208140858.47970-1-natechancellor@gmail.com
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/timex.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(cha
+
+ static inline unsigned long long get_tod_clock(void)
+ {
+- unsigned char clk[STORE_CLOCK_EXT_SIZE];
++ char clk[STORE_CLOCK_EXT_SIZE];
+
+ get_tod_clock_ext(clk);
+ return *((unsigned long long *)&clk[1]);
--- /dev/null
+From b562d140649966d4daedd0483a8fe59ad3bb465a Mon Sep 17 00:00:00 2001
+From: Qais Yousef <qais.yousef@arm.com>
+Date: Tue, 14 Jan 2020 21:09:47 +0000
+Subject: sched/uclamp: Reject negative values in cpu_uclamp_write()
+
+From: Qais Yousef <qais.yousef@arm.com>
+
+commit b562d140649966d4daedd0483a8fe59ad3bb465a upstream.
+
+The check to ensure that the new written value into cpu.uclamp.{min,max}
+is within range, [0:100], wasn't working because of the signed
+comparison
+
+ 7301 if (req.percent > UCLAMP_PERCENT_SCALE) {
+ 7302 req.ret = -ERANGE;
+ 7303 return req;
+ 7304 }
+
+ # echo -1 > cpu.uclamp.min
+ # cat cpu.uclamp.min
+ 42949671.96
+
+Cast req.percent into u64 to force the comparison to be unsigned and
+work as intended in capacity_from_percent().
+
+ # echo -1 > cpu.uclamp.min
+ sh: write error: Numerical result out of range
+
+Fixes: 2480c093130f ("sched/uclamp: Extend CPU's cgroup controller")
+Signed-off-by: Qais Yousef <qais.yousef@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20200114210947.14083-1-qais.yousef@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7260,7 +7260,7 @@ capacity_from_percent(char *buf)
+ &req.percent);
+ if (req.ret)
+ return req;
+- if (req.percent > UCLAMP_PERCENT_SCALE) {
++ if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
+ req.ret = -ERANGE;
+ return req;
+ }
drm-panfrost-make-sure-the-shrinker-does-not-reclaim-referenced-bos.patch
drm-amdgpu-update-smu_v11_0_pptable.h.patch
drm-amdgpu-navi10-use-the-odcap-enum-to-index-the-caps-array.patch
+bus-moxtet-fix-potential-stack-buffer-overflow.patch
+nvme-fix-the-parameter-order-for-nvme_get_log-in-nvme_get_fw_slot_info.patch
+drivers-ipmi-fix-off-by-one-bounds-check-that-leads-to-a-out-of-bounds-write.patch
+ib-mlx5-return-failure-when-rts2rts_qp_counters_set_id-is-not-supported.patch
+ib-hfi1-acquire-lock-to-release-tid-entries-when-user-file-is-closed.patch
+ib-hfi1-close-window-for-pq-and-request-coliding.patch
+ib-rdmavt-reset-all-qps-when-the-device-is-shut-down.patch
+ib-umad-fix-kernel-crash-while-unloading-ib_umad.patch
+rdma-core-fix-invalid-memory-access-in-spec_filter_size.patch
+rdma-iw_cxgb4-initiate-close-when-entering-term.patch
+rdma-hfi1-fix-memory-leak-in-_dev_comp_vect_mappings_create.patch
+rdma-rxe-fix-soft-lockup-problem-due-to-using-tasklets-in-softirq.patch
+rdma-core-fix-protection-fault-in-get_pkey_idx_qp_list.patch
+s390-time-fix-clk-type-in-get_tod_clock.patch
+input-ili210x-fix-return-value-of-is_visible-function.patch
+sched-uclamp-reject-negative-values-in-cpu_uclamp_write.patch
+mac80211-use-more-bits-for-ack_frame_id.patch
+spmi-pmic-arb-set-lockdep-class-for-hierarchical-irq-domains.patch
+perf-x86-intel-fix-inaccurate-period-in-context-switch-for-auto-reload.patch
+hwmon-pmbus-ltc2978-fix-pmbus-polling-of-mfr_common-definitions.patch
+mac80211-fix-quiet-mode-activation-in-action-frames.patch
+cifs-fix-mount-option-display-for-sec-krb5i.patch
+ceph-noacl-mount-option-is-effectively-ignored.patch
+arm64-dts-fast-models-fix-fvp-pci-interrupt-map-property.patch
+kvm-x86-mask-off-reserved-bit-from-db-exception-payload.patch
+kvm-nvmx-handle-pending-db-when-injecting-init-vm-exit.patch
+perf-stat-don-t-report-a-null-stalled-cycles-per-insn-metric.patch
+nfsv4.1-make-cachethis-no-for-writes.patch
+nfsv4-ensure-the-delegation-cred-is-pinned-when-we-call-delegreturn.patch
--- /dev/null
+From 2d5a2f913b658a7ae984773a63318ed4daadf4af Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <swboyd@chromium.org>
+Date: Tue, 21 Jan 2020 10:37:48 -0800
+Subject: spmi: pmic-arb: Set lockdep class for hierarchical irq domains
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+commit 2d5a2f913b658a7ae984773a63318ed4daadf4af upstream.
+
+I see the following lockdep splat in the qcom pinctrl driver when
+attempting to suspend the device.
+
+ WARNING: possible recursive locking detected
+ 5.4.11 #3 Tainted: G W
+ --------------------------------------------
+ cat/3074 is trying to acquire lock:
+ ffffff81f49804c0 (&irq_desc_lock_class){-.-.}, at: __irq_get_desc_lock+0x64/0x94
+
+ but task is already holding lock:
+ ffffff81f1cc10c0 (&irq_desc_lock_class){-.-.}, at: __irq_get_desc_lock+0x64/0x94
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&irq_desc_lock_class);
+ lock(&irq_desc_lock_class);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+ 6 locks held by cat/3074:
+ #0: ffffff81f01d9420 (sb_writers#7){.+.+}, at: vfs_write+0xd0/0x1a4
+ #1: ffffff81bd7d2080 (&of->mutex){+.+.}, at: kernfs_fop_write+0x12c/0x1fc
+ #2: ffffff81f4c322f0 (kn->count#337){.+.+}, at: kernfs_fop_write+0x134/0x1fc
+ #3: ffffffe411a41d60 (system_transition_mutex){+.+.}, at: pm_suspend+0x108/0x348
+ #4: ffffff81f1c5e970 (&dev->mutex){....}, at: __device_suspend+0x168/0x41c
+ #5: ffffff81f1cc10c0 (&irq_desc_lock_class){-.-.}, at: __irq_get_desc_lock+0x64/0x94
+
+ stack backtrace:
+ CPU: 5 PID: 3074 Comm: cat Tainted: G W 5.4.11 #3
+ Hardware name: Google Cheza (rev3+) (DT)
+ Call trace:
+ dump_backtrace+0x0/0x174
+ show_stack+0x20/0x2c
+ dump_stack+0xc8/0x124
+ __lock_acquire+0x460/0x2388
+ lock_acquire+0x1cc/0x210
+ _raw_spin_lock_irqsave+0x64/0x80
+ __irq_get_desc_lock+0x64/0x94
+ irq_set_irq_wake+0x40/0x144
+ qpnpint_irq_set_wake+0x28/0x34
+ set_irq_wake_real+0x40/0x5c
+ irq_set_irq_wake+0x70/0x144
+ pm8941_pwrkey_suspend+0x34/0x44
+ platform_pm_suspend+0x34/0x60
+ dpm_run_callback+0x64/0xcc
+ __device_suspend+0x310/0x41c
+ dpm_suspend+0xf8/0x298
+ dpm_suspend_start+0x84/0xb4
+ suspend_devices_and_enter+0xbc/0x620
+ pm_suspend+0x210/0x348
+ state_store+0xb0/0x108
+ kobj_attr_store+0x14/0x24
+ sysfs_kf_write+0x4c/0x64
+ kernfs_fop_write+0x15c/0x1fc
+ __vfs_write+0x54/0x18c
+ vfs_write+0xe4/0x1a4
+ ksys_write+0x7c/0xe4
+ __arm64_sys_write+0x20/0x2c
+ el0_svc_common+0xa8/0x160
+ el0_svc_handler+0x7c/0x98
+ el0_svc+0x8/0xc
+
+Set a lockdep class when we map the irq so that irq_set_wake() doesn't
+warn about a lockdep bug that doesn't exist.
+
+Fixes: 12a9eeaebba3 ("spmi: pmic-arb: convert to v2 irq interfaces to support hierarchical IRQ chips")
+Cc: Douglas Anderson <dianders@chromium.org>
+Cc: Brian Masney <masneyb@onstation.org>
+Cc: Lina Iyer <ilina@codeaurora.org>
+Cc: Maulik Shah <mkshah@codeaurora.org>
+Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/20200121183748.68662-1-swboyd@chromium.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spmi/spmi-pmic-arb.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(
+ return 0;
+ }
+
++static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
+
+ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
+ struct irq_domain *domain, unsigned int virq,
+@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struc
+ else
+ handler = handle_level_irq;
+
++
++ irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
++ &qpnpint_irq_request_class);
+ irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
+ handler, NULL, NULL);
+ }