--- /dev/null
+From 72aad489f992871e908ff6d9055b26c6366fb864 Mon Sep 17 00:00:00 2001
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+Date: Wed, 8 Jun 2022 22:51:07 +0300
+Subject: ata: libata-transport: fix {dma|pio|xfer}_mode sysfs files
+
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+
+commit 72aad489f992871e908ff6d9055b26c6366fb864 upstream.
+
+The {dma|pio}_mode sysfs files are incorrectly documented as having a
+list of the supported DMA/PIO transfer modes, while the corresponding
+fields of the *struct* ata_device hold the transfer mode IDs, not masks.
+
+To match these docs, the {dma|pio}_mode (and even xfer_mode!) sysfs
+files are handled by the ata_bitfield_name_match() macro, which leads
+to reading this kind of nonsense from them:
+
+$ cat /sys/class/ata_device/dev3.0/pio_mode
+XFER_UDMA_7, XFER_UDMA_6, XFER_UDMA_5, XFER_UDMA_4, XFER_MW_DMA_4,
+XFER_PIO_6, XFER_PIO_5, XFER_PIO_4, XFER_PIO_3, XFER_PIO_2, XFER_PIO_1,
+XFER_PIO_0
+
+Using the correct ata_bitfield_name_search() macro fixes that:
+
+$ cat /sys/class/ata_device/dev3.0/pio_mode
+XFER_PIO_4
+
+While fixing the file documentation, also reword the {dma|pio}_mode
+file docs and add a note to the xfer_mode file doc that it is mostly
+useful for PATA devices.
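+
+For reference, a simplified sketch of what the two macro families
+generate (loosely based on their definitions in libata-transport.c;
+this is an illustration, not the literal macro expansion):
+
+  /* ata_bitfield_name_match(): treats 'value' as a bit mask and
+   * prints a name for every set bit, hence the bogus list above. */
+  for (i = 0; i < ARRAY_SIZE(table); i++)
+          if (table[i].value & value)
+                  len += sprintf(buf + len, "%s, ", table[i].name);
+
+  /* ata_bitfield_name_search(): treats 'value' as a single mode ID
+   * and prints only the exact match, e.g. XFER_PIO_4. */
+  for (i = 0; i < ARRAY_SIZE(table); i++)
+          if (table[i].value == value)
+                  len = sprintf(buf, "%s", table[i].name);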
+
+Fixes: d9027470b886 ("[libata] Add ATA transport class")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-ata | 11 ++++++-----
+ drivers/ata/libata-transport.c | 2 +-
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-ata
++++ b/Documentation/ABI/testing/sysfs-ata
+@@ -107,13 +107,14 @@ Description:
+ described in ATA8 7.16 and 7.17. Only valid if
+ the device is not a PM.
+
+- pio_mode: (RO) Transfer modes supported by the device when
+- in PIO mode. Mostly used by PATA device.
++ pio_mode: (RO) PIO transfer mode used by the device.
++ Mostly used by PATA devices.
+
+- xfer_mode: (RO) Current transfer mode
++ xfer_mode: (RO) Current transfer mode. Mostly used by
++ PATA devices.
+
+- dma_mode: (RO) Transfer modes supported by the device when
+- in DMA mode. Mostly used by PATA device.
++ dma_mode: (RO) DMA transfer mode used by the device.
++ Mostly used by PATA devices.
+
+ class: (RO) Device class. Can be "ata" for disk,
+ "atapi" for packet device, "pmp" for PM, or
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -196,7 +196,7 @@ static struct {
+ { XFER_PIO_0, "XFER_PIO_0" },
+ { XFER_PIO_SLOW, "XFER_PIO_SLOW" }
+ };
+-ata_bitfield_name_match(xfer,ata_xfer_names)
++ata_bitfield_name_search(xfer, ata_xfer_names)
+
+ /*
+ * ATA Port attributes
--- /dev/null
+From dcfa24ba68991ab69a48254a18377b45180ae664 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Wed, 25 May 2022 14:23:45 -0400
+Subject: filemap: Cache the value of vm_flags
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit dcfa24ba68991ab69a48254a18377b45180ae664 upstream.
+
+After we have unlocked the mmap_lock for I/O, the file is pinned, but
+the VMA is not. Checking vma->vm_flags after that can be a
+use-after-free.
+It's not a terribly interesting use-after-free as it can only read one
+bit, and it's used to decide whether to read 2MB or 4MB. But it
+upsets the automated tools and it's generally bad practice anyway,
+so let's fix it.
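+
+The fix follows the usual pattern for this class of bug (sketched,
+simplified from the hunk below): copy what is needed out of the VMA
+while the mmap_lock is still held, then use only the local copy once
+the lock may have been dropped:
+
+  unsigned long vm_flags = vmf->vma->vm_flags; /* mmap_lock held */
+
+  fpin = maybe_unlock_mmap_for_io(vmf, fpin);  /* may drop mmap_lock */
+
+  if (vm_flags & VM_HUGEPAGE)                  /* safe: local copy */
+          ...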
+
+Reported-by: syzbot+5b96d55e5b54924c77ad@syzkaller.appspotmail.com
+Fixes: 4687fdbb805a ("mm/filemap: Support VM_HUGEPAGE for file mappings")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/filemap.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahe
+ struct address_space *mapping = file->f_mapping;
+ DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
+ struct file *fpin = NULL;
++ unsigned long vm_flags = vmf->vma->vm_flags;
+ unsigned int mmap_miss;
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /* Use the readahead code, even if readahead is disabled */
+- if (vmf->vma->vm_flags & VM_HUGEPAGE) {
++ if (vm_flags & VM_HUGEPAGE) {
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
+ ra->size = HPAGE_PMD_NR;
+@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahe
+ * Fetch two PMD folios, so we get the chance to actually
+ * readahead, unless we've been told not to.
+ */
+- if (!(vmf->vma->vm_flags & VM_RAND_READ))
++ if (!(vm_flags & VM_RAND_READ))
+ ra->size *= 2;
+ ra->async_size = HPAGE_PMD_NR;
+ page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
+@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahe
+ #endif
+
+ /* If we don't want any read-ahead, don't bother */
+- if (vmf->vma->vm_flags & VM_RAND_READ)
++ if (vm_flags & VM_RAND_READ)
+ return fpin;
+ if (!ra->ra_pages)
+ return fpin;
+
+- if (vmf->vma->vm_flags & VM_SEQ_READ) {
++ if (vm_flags & VM_SEQ_READ) {
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ page_cache_sync_ra(&ractl, ra->ra_pages);
+ return fpin;
--- /dev/null
+From dda5384313a40ecbaafd8a9a80f47483255e4c4d Mon Sep 17 00:00:00 2001
+From: David Safford <david.safford@gmail.com>
+Date: Tue, 7 Jun 2022 14:07:57 -0400
+Subject: KEYS: trusted: tpm2: Fix migratable logic
+
+From: David Safford <david.safford@gmail.com>
+
+commit dda5384313a40ecbaafd8a9a80f47483255e4c4d upstream.
+
+When creating (sealing) a new trusted key, migratable
+trusted keys have the FIXED_TPM and FIXED_PARENT attributes
+set, and non-migratable keys don't. This is backwards, and
+also causes creation to fail when creating a migratable key
+under a migratable parent. (The TPM thinks you are trying to
+seal a non-migratable blob under a migratable parent.)
+
+The following simple patch fixes the logic, and has been
+tested for all four combinations of migratable and non-migratable
+trusted keys and parent storage keys. With this logic, you will
+get a proper failure if you try to create a non-migratable
+trusted key under a migratable parent storage key, and all other
+combinations work correctly.
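+
+For reference, the behavior with the corrected logic, as described
+above:
+
+  parent storage key   trusted key      result
+  ------------------   --------------   ------------------
+  migratable           migratable       works
+  migratable           non-migratable   fails (correctly)
+  non-migratable       migratable       works
+  non-migratable       non-migratable   works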
+
+Cc: stable@vger.kernel.org # v5.13+
+Fixes: e5fb5d2c5a03 ("security: keys: trusted: Make sealed key properly interoperable")
+Signed-off-by: David Safford <david.safford@gmail.com>
+Reviewed-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/keys/trusted-keys/trusted_tpm2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -283,8 +283,8 @@ int tpm2_seal_trusted(struct tpm_chip *c
+ /* key properties */
+ flags = 0;
+ flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH;
+- flags |= payload->migratable ? (TPM2_OA_FIXED_TPM |
+- TPM2_OA_FIXED_PARENT) : 0;
++ flags |= payload->migratable ? 0 : (TPM2_OA_FIXED_TPM |
++ TPM2_OA_FIXED_PARENT);
+ tpm_buf_append_u32(&buf, flags);
+
+ /* policy */
--- /dev/null
+From 11d39e8cc43e1c6737af19ca9372e590061b5ad2 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Mon, 6 Jun 2022 21:11:49 +0300
+Subject: KVM: SVM: fix tsc scaling cache logic
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 11d39e8cc43e1c6737af19ca9372e590061b5ad2 upstream.
+
+SVM uses a per-CPU variable to cache the current value of the
+TSC scaling multiplier MSR on each CPU.
+
+Commit 1ab9287add5e2
+("KVM: X86: Add vendor callbacks for writing the TSC multiplier")
+broke this caching logic.
+
+Refactor the code so that all TSC scaling multiplier writes go through
+a single function which checks and updates the cache.
+
+This fixes the following scenario:
+
+1. A CPU runs a guest with some tsc scaling ratio.
+
+2. New guest with different tsc scaling ratio starts on this CPU
+ and terminates almost immediately.
+
+   The short-running guest sets the TSC scaling ratio only once, when
+   it is configured via KVM_SET_TSC_KHZ. Due to the bug, the per-CPU
+   cache is not updated.
+
+3. The original guest continues to run. Because the cache still matches
+   its ratio, it does not restore the MSR to its own value and thus
+   keeps running with the wrong TSC scaling ratio.
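+
+Sketched as a timeline (illustrative only):
+
+  guest A runs:      wrmsrl(MSR_AMD64_TSC_RATIO, A); cache = A
+  guest B runs:      wrmsrl(MSR_AMD64_TSC_RATIO, B); cache not updated
+  guest A resumes:   ratio A == cached A, so wrmsrl() is skipped,
+                     but the hardware register still holds B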
+
+Fixes: 1ab9287add5e2 ("KVM: X86: Add vendor callbacks for writing the TSC multiplier")
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20220606181149.103072-1-mlevitsk@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 4 ++--
+ arch/x86/kvm/svm/svm.c | 32 ++++++++++++++++++++------------
+ arch/x86/kvm/svm/svm.h | 2 +-
+ 3 files changed, 23 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -896,7 +896,7 @@ int nested_svm_vmexit(struct vcpu_svm *s
+ if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
+ WARN_ON(!svm->tsc_scaling_enabled);
+ vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+- svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
++ __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ }
+
+ svm->nested.ctl.nested_cr3 = 0;
+@@ -1293,7 +1293,7 @@ void nested_svm_update_tsc_ratio_msr(str
+ vcpu->arch.tsc_scaling_ratio =
+ kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
+ svm->tsc_ratio_msr);
+- svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
++ __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ }
+
+ /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -463,11 +463,24 @@ static int has_svm(void)
+ return 1;
+ }
+
++void __svm_write_tsc_multiplier(u64 multiplier)
++{
++ preempt_disable();
++
++ if (multiplier == __this_cpu_read(current_tsc_ratio))
++ goto out;
++
++ wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
++ __this_cpu_write(current_tsc_ratio, multiplier);
++out:
++ preempt_enable();
++}
++
+ static void svm_hardware_disable(void)
+ {
+ /* Make sure we clean up behind us */
+ if (tsc_scaling)
+- wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
++ __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
+
+ cpu_svm_disable();
+
+@@ -513,8 +526,7 @@ static int svm_hardware_enable(void)
+ * Set the default value, even if we don't use TSC scaling
+ * to avoid having stale value in the msr
+ */
+- wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
+- __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
++ __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
+ }
+
+
+@@ -915,11 +927,12 @@ static void svm_write_tsc_offset(struct
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ }
+
+-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
++static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+ {
+- wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
++ __svm_write_tsc_multiplier(multiplier);
+ }
+
++
+ /* Evaluate instruction intercepts that depend on guest CPUID features. */
+ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
+ struct vcpu_svm *svm)
+@@ -1276,13 +1289,8 @@ static void svm_prepare_switch_to_guest(
+ sev_es_prepare_switch_to_guest(hostsa);
+ }
+
+- if (tsc_scaling) {
+- u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+- if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
+- __this_cpu_write(current_tsc_ratio, tsc_ratio);
+- wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
+- }
+- }
++ if (tsc_scaling)
++ __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+
+ if (likely(tsc_aux_uret_slot >= 0))
+ kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -558,7 +558,7 @@ int nested_svm_check_exception(struct vc
+ bool has_error_code, u32 error_code);
+ int nested_svm_exit_special(struct vcpu_svm *svm);
+ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
+-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
++void __svm_write_tsc_multiplier(u64 multiplier);
+ void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
+ struct vmcb_control_area *control);
+ void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
--- /dev/null
+From cf4a8693d97a51dccf5a1557248d12d6d8be4b9e Mon Sep 17 00:00:00 2001
+From: Shaoqin Huang <shaoqin.huang@intel.com>
+Date: Mon, 6 Jun 2022 18:59:05 -0600
+Subject: KVM: x86/mmu: Check every prev_roots in __kvm_mmu_free_obsolete_roots()
+
+From: Shaoqin Huang <shaoqin.huang@intel.com>
+
+commit cf4a8693d97a51dccf5a1557248d12d6d8be4b9e upstream.
+
+When freeing obsolete previous roots, check prev_roots as intended, not
+the current root.
+
+Signed-off-by: Shaoqin Huang <shaoqin.huang@intel.com>
+Fixes: 527d5cd7eece ("KVM: x86/mmu: Zap only obsolete roots if a root shadow page is zapped")
+Message-Id: <20220607005905.2933378-1-shaoqin.huang@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5168,7 +5168,7 @@ static void __kvm_mmu_free_obsolete_root
+ roots_to_free |= KVM_MMU_ROOT_CURRENT;
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+- if (is_obsolete_root(kvm, mmu->root.hpa))
++ if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+ }
+
--- /dev/null
+From c745dfc541e78428ba3986f1d17fe1dfdaca8184 Mon Sep 17 00:00:00 2001
+From: Tyler Erickson <tyler.erickson@seagate.com>
+Date: Thu, 2 Jun 2022 16:51:11 -0600
+Subject: libata: fix reading concurrent positioning ranges log
+
+From: Tyler Erickson <tyler.erickson@seagate.com>
+
+commit c745dfc541e78428ba3986f1d17fe1dfdaca8184 upstream.
+
+The concurrent positioning ranges log is not a fixed size and depends
+on how many ranges the device supports. This patch uses the size
+reported in the GPL log directory to determine the number of 512B log
+pages supported by the device before attempting to read this log.
+
+This resolves this error from the dmesg output:
+ ata6.00: Read log 0x47 page 0x00 failed, Emask 0x1
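+
+As a worked example (assuming a device supporting the spec maximum):
+the log directory reports a 512B page count per log, so a device
+supporting the full 255 ranges reports 17 pages, i.e. buf_len =
+17 << 9 = 8704 bytes, which matches the old hard-coded
+(64 + 255 * 32 + 511) & ~511 = 8704. A device supporting fewer ranges
+reports fewer pages, and reading only that many avoids the error above.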
+
+Cc: stable@vger.kernel.org
+Fixes: fe22e1c2f705 ("libata: support concurrent positioning ranges log")
+Signed-off-by: Tyler Erickson <tyler.erickson@seagate.com>
+Reviewed-by: Muhammad Ahmad <muhammad.ahmad@seagate.com>
+Tested-by: Michael English <michael.english@seagate.com>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-core.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2003,16 +2003,16 @@ retry:
+ return err_mask;
+ }
+
+-static bool ata_log_supported(struct ata_device *dev, u8 log)
++static int ata_log_supported(struct ata_device *dev, u8 log)
+ {
+ struct ata_port *ap = dev->link->ap;
+
+ if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+- return false;
++ return 0;
+
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
+- return false;
+- return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
++ return 0;
++ return get_unaligned_le16(&ap->sector_buf[log * 2]);
+ }
+
+ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
+@@ -2448,15 +2448,20 @@ static void ata_dev_config_cpr(struct at
+ struct ata_cpr_log *cpr_log = NULL;
+ u8 *desc, *buf = NULL;
+
+- if (ata_id_major_version(dev->id) < 11 ||
+- !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
++ if (ata_id_major_version(dev->id) < 11)
++ goto out;
++
++ buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
++ if (buf_len == 0)
+ goto out;
+
+ /*
+ * Read the concurrent positioning ranges log (0x47). We can have at
+- * most 255 32B range descriptors plus a 64B header.
++ * most 255 32B range descriptors plus a 64B header. This log varies in
++ * size, so use the size reported in the GPL directory. Reading beyond
++ * the supported length will result in an error.
+ */
+- buf_len = (64 + 255 * 32 + 511) & ~511;
++ buf_len <<= 9;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ goto out;
--- /dev/null
+From 6d11acd452fd885ef6ace184c9c70bc863a8c72f Mon Sep 17 00:00:00 2001
+From: Tyler Erickson <tyler.erickson@seagate.com>
+Date: Thu, 2 Jun 2022 16:51:12 -0600
+Subject: libata: fix translation of concurrent positioning ranges
+
+From: Tyler Erickson <tyler.erickson@seagate.com>
+
+commit 6d11acd452fd885ef6ace184c9c70bc863a8c72f upstream.
+
+Fix the page length in the SCSI translation for the Concurrent
+Positioning Ranges VPD page. The length was written starting at
+offset 3 rather than offset 2, where the MSB of the VPD page length
+is supposed to start.
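+
+For reference, the standard VPD page header layout (per SPC/SBC):
+
+  byte 0:    peripheral qualifier / device type
+  byte 1:    page code (B9h here)
+  bytes 2-3: page length (big-endian, MSB first)
+
+so the 16-bit length written with put_unaligned_be16() must start at
+&rbuf[2], not &rbuf[3].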
+
+Cc: stable@vger.kernel.org
+Fixes: fe22e1c2f705 ("libata: support concurrent positioning ranges log")
+Signed-off-by: Tyler Erickson <tyler.erickson@seagate.com>
+Reviewed-by: Muhammad Ahmad <muhammad.ahmad@seagate.com>
+Tested-by: Michael English <michael.english@seagate.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-scsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2101,7 +2101,7 @@ static unsigned int ata_scsiop_inq_b9(st
+
+ /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
+ rbuf[1] = 0xb9;
+- put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
++ put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
+
+ for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
+ desc[0] = cpr_log->cpr[i].num;
--- /dev/null
+From a051246b786af7e4a9d9219cc7038a6e8a411531 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Tue, 31 May 2022 20:19:22 +0300
+Subject: mmc: block: Fix CQE recovery reset success
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit a051246b786af7e4a9d9219cc7038a6e8a411531 upstream.
+
+The intention of the use of mmc_blk_reset_success() in
+mmc_blk_cqe_recovery() was to prevent repeated resets when retrying and
+getting the same error. However, that may not be the case - any amount
+of time and I/O may pass before another recovery is needed, in which
+case there would be no reason to deny it the opportunity to recover via
+a reset if necessary. CQE recovery is expected to be rare, and failure
+to recover (if the clear tasks command fails) rarer still, so it is
+better to always allow the reset, which can be done by always calling
+mmc_blk_reset_success().
+
+Fixes: 1e8e55b67030c6 ("mmc: block: Add CQE support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20220531171922.76080-1-adrian.hunter@intel.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/block.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1482,8 +1482,7 @@ void mmc_blk_cqe_recovery(struct mmc_que
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+- else
+- mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
++ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+ }
--- /dev/null
+From 291e7d52d19f114cad6cbf802f3f19ef12a011f8 Mon Sep 17 00:00:00 2001
+From: Ben Chuang <benchuanggli@gmail.com>
+Date: Fri, 20 May 2022 19:42:42 +0800
+Subject: mmc: sdhci-pci-gli: Fix GL9763E runtime PM when the system resumes from suspend
+
+From: Ben Chuang <benchuanggli@gmail.com>
+
+commit 291e7d52d19f114cad6cbf802f3f19ef12a011f8 upstream.
+
+When the system resumes from suspend (S3 or S4), the power mode is
+MMC_POWER_OFF. In this state, gl9763e_runtime_resume() should not
+enable the PLL. Add a condition to this function to enable the PLL
+only when the power mode is MMC_POWER_ON.
+
+Fixes: d607667bb8fa ("mmc: sdhci-pci-gli: Add runtime PM for GL9763E")
+Signed-off-by: Ben Chuang <benchuanggli@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220520114242.150235-1-benchuanggli@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-pci-gli.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -972,6 +972,9 @@ static int gl9763e_runtime_resume(struct
+ struct sdhci_host *host = slot->host;
+ u16 clock;
+
++ if (host->mmc->ios.power_mode != MMC_POWER_ON)
++ return 0;
++
+ clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ clock |= SDHCI_CLOCK_PLL_EN;
--- /dev/null
+From 2061ecfdf2350994e5b61c43e50e98a7a70e95ee Mon Sep 17 00:00:00 2001
+From: Ilya Maximets <i.maximets@ovn.org>
+Date: Tue, 7 Jun 2022 00:11:40 +0200
+Subject: net: openvswitch: fix misuse of the cached connection on tuple changes
+
+From: Ilya Maximets <i.maximets@ovn.org>
+
+commit 2061ecfdf2350994e5b61c43e50e98a7a70e95ee upstream.
+
+If the packet headers have changed, the cached nfct is no longer
+relevant for the packet, and an attempt to re-use it leads to incorrect
+packet classification.
+
+This issue is causing broken connectivity in OpenStack deployments
+with OVS/OVN due to hairpin traffic being unexpectedly dropped.
+
+The setup has datapath flows with several conntrack actions and tuple
+changes between them:
+
+ actions:ct(commit,zone=8,mark=0/0x1,nat(src)),
+ set(eth(src=00:00:00:00:00:01,dst=00:00:00:00:00:06)),
+ set(ipv4(src=172.18.2.10,dst=192.168.100.6,ttl=62)),
+ ct(zone=8),recirc(0x4)
+
+After the first ct() action the packet headers are almost fully
+re-written. The next ct() tries to re-use the existing nfct entry
+and marks the packet as invalid, so it gets dropped later in the
+pipeline.
+
+Clear the cached conntrack entry whenever the packet tuple changes to
+avoid the issue.
+
+The flow key should not be cleared though, because we should still
+be able to match on the ct_state if the recirculation happens after
+the tuple change but before the next ct() action.
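+
+In other words, a minimal sketch of the intended call after any header
+rewrite (using the updated ovs_ct_clear() from this patch):
+
+  /* Drop the stale nfct, but pass a NULL key so the flow key's
+   * ct_state is left intact for recirculation. */
+  ovs_ct_clear(skb, NULL);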
+
+Cc: stable@vger.kernel.org
+Fixes: 7f8a436eaa2c ("openvswitch: Add conntrack action")
+Reported-by: Frode Nordahl <frode.nordahl@canonical.com>
+Link: https://mail.openvswitch.org/pipermail/ovs-discuss/2022-May/051829.html
+Link: https://bugs.launchpad.net/ubuntu/+source/ovn/+bug/1967856
+Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+Link: https://lore.kernel.org/r/20220606221140.488984-1-i.maximets@ovn.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/actions.c | 6 ++++++
+ net/openvswitch/conntrack.c | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -373,6 +373,7 @@ static void set_ip_addr(struct sk_buff *
+ update_ip_l4_checksum(skb, nh, *addr, new_addr);
+ csum_replace4(&nh->check, *addr, new_addr);
+ skb_clear_hash(skb);
++ ovs_ct_clear(skb, NULL);
+ *addr = new_addr;
+ }
+
+@@ -420,6 +421,7 @@ static void set_ipv6_addr(struct sk_buff
+ update_ipv6_checksum(skb, l4_proto, addr, new_addr);
+
+ skb_clear_hash(skb);
++ ovs_ct_clear(skb, NULL);
+ memcpy(addr, new_addr, sizeof(__be32[4]));
+ }
+
+@@ -660,6 +662,7 @@ static int set_nsh(struct sk_buff *skb,
+ static void set_tp_port(struct sk_buff *skb, __be16 *port,
+ __be16 new_port, __sum16 *check)
+ {
++ ovs_ct_clear(skb, NULL);
+ inet_proto_csum_replace2(check, skb, *port, new_port, false);
+ *port = new_port;
+ }
+@@ -699,6 +702,7 @@ static int set_udp(struct sk_buff *skb,
+ uh->dest = dst;
+ flow_key->tp.src = src;
+ flow_key->tp.dst = dst;
++ ovs_ct_clear(skb, NULL);
+ }
+
+ skb_clear_hash(skb);
+@@ -761,6 +765,8 @@ static int set_sctp(struct sk_buff *skb,
+ sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
+
+ skb_clear_hash(skb);
++ ovs_ct_clear(skb, NULL);
++
+ flow_key->tp.src = sh->source;
+ flow_key->tp.dst = sh->dest;
+
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1342,7 +1342,9 @@ int ovs_ct_clear(struct sk_buff *skb, st
+
+ nf_ct_put(ct);
+ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+- ovs_ct_fill_key(skb, key, false);
++
++ if (key)
++ ovs_ct_fill_key(skb, key, false);
+
+ return 0;
+ }
--- /dev/null
+From c76acfb7e19dcc3a0964e0563770b1d11b8d4540 Mon Sep 17 00:00:00 2001
+From: Tan Tee Min <tee.min.tan@linux.intel.com>
+Date: Thu, 26 May 2022 17:03:47 +0800
+Subject: net: phy: dp83867: retrigger SGMII AN when link change
+
+From: Tan Tee Min <tee.min.tan@linux.intel.com>
+
+commit c76acfb7e19dcc3a0964e0563770b1d11b8d4540 upstream.
+
+There is a limitation in the TI DP83867 PHY device where SGMII AN is
+only triggered once after the device boots up. Even after the PHY TPI
+goes down and up again, SGMII AN is not triggered, and hence no new
+in-band message is sent from the PHY to the MAC side SGMII.
+
+This could cause an issue during power up, when the PHY comes up prior
+to the MAC. In this condition, once the MAC side SGMII is up, it
+wouldn't receive a new in-band message from the TI PHY with the correct
+link status, speed and duplex info.
+
+As suggested by TI, implement a SW solution here to retrigger SGMII
+Auto-Neg whenever there is a link change.
+
+v2: Add Fixes tag in commit message.
+
+Fixes: 2a10154abcb7 ("net: phy: dp83867: Add TI dp83867 phy")
+Cc: <stable@vger.kernel.org> # 5.4.x
+Signed-off-by: Sit, Michael Wei Hong <michael.wei.hong.sit@intel.com>
+Reviewed-by: Voon Weifeng <weifeng.voon@intel.com>
+Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20220526090347.128742-1-tee.min.tan@linux.intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83867.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -137,6 +137,7 @@
+ #define DP83867_DOWNSHIFT_2_COUNT 2
+ #define DP83867_DOWNSHIFT_4_COUNT 4
+ #define DP83867_DOWNSHIFT_8_COUNT 8
++#define DP83867_SGMII_AUTONEG_EN BIT(7)
+
+ /* CFG3 bits */
+ #define DP83867_CFG3_INT_OE BIT(7)
+@@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_
+ DP83867_PHYCR_FORCE_LINK_GOOD, 0);
+ }
+
++static void dp83867_link_change_notify(struct phy_device *phydev)
++{
++ /* There is a limitation in DP83867 PHY device where SGMII AN is
++ * only triggered once after the device is booted up. Even after the
++ * PHY TPI is down and up again, SGMII AN is not triggered and
++ * hence no new in-band message from PHY to MAC side SGMII.
++ * This could cause an issue during power up, when PHY is up prior
++ * to MAC. At this condition, once MAC side SGMII is up, MAC side
++ * SGMII wouldn`t receive new in-band message from TI PHY with
++ * correct link status, speed and duplex info.
++ * Thus, implemented a SW solution here to retrigger SGMII Auto-Neg
++ * whenever there is a link change.
++ */
++ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
++ int val = 0;
++
++ val = phy_clear_bits(phydev, DP83867_CFG2,
++ DP83867_SGMII_AUTONEG_EN);
++ if (val < 0)
++ return;
++
++ phy_set_bits(phydev, DP83867_CFG2,
++ DP83867_SGMII_AUTONEG_EN);
++ }
++}
++
+ static struct phy_driver dp83867_driver[] = {
+ {
+ .phy_id = DP83867_PHY_ID,
+@@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
++
++ .link_change_notify = dp83867_link_change_notify,
+ },
+ };
+ module_phy_driver(dp83867_driver);
--- /dev/null
+From 6f808bd78e8296b4ded813b7182988d57e1f6176 Mon Sep 17 00:00:00 2001
+From: James Smart <jsmart2021@gmail.com>
+Date: Fri, 3 Jun 2022 10:43:24 -0700
+Subject: scsi: lpfc: Address NULL pointer dereference after starget_to_rport()
+
+From: James Smart <jsmart2021@gmail.com>
+
+commit 6f808bd78e8296b4ded813b7182988d57e1f6176 upstream.
+
+Calls to starget_to_rport() may return NULL. Add a check for a NULL
+rport before dereferencing it.
+
+Link: https://lore.kernel.org/r/20220603174329.63777-5-jsmart2021@gmail.com
+Fixes: bb21fc9911ee ("scsi: lpfc: Use fc_block_rport()")
+Cc: <stable@vger.kernel.org> # v5.18
+Co-developed-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_scsi.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -6316,6 +6316,9 @@ lpfc_device_reset_handler(struct scsi_cm
+ int status;
+ u32 logit = LOG_FCP;
+
++ if (!rport)
++ return FAILED;
++
+ rdata = rport->dd_data;
+ if (!rdata || !rdata->pnode) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+@@ -6394,6 +6397,9 @@ lpfc_target_reset_handler(struct scsi_cm
+ unsigned long flags;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
++ if (!rport)
++ return FAILED;
++
+ rdata = rport->dd_data;
+ if (!rdata || !rdata->pnode) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
--- /dev/null
+From 24e1f056677eefe834d5dcf61905cce857ca4b19 Mon Sep 17 00:00:00 2001
+From: James Smart <jsmart2021@gmail.com>
+Date: Fri, 3 Jun 2022 10:43:22 -0700
+Subject: scsi: lpfc: Resolve some cleanup issues following abort path refactoring
+
+From: James Smart <jsmart2021@gmail.com>
+
+commit 24e1f056677eefe834d5dcf61905cce857ca4b19 upstream.
+
+Refactoring and consolidation of abort paths:
+
+ - lpfc_sli4_abort_fcp_cmpl() and lpfc_sli_abort_fcp_cmpl() are combined
+   into a single generic lpfc_sli_abort_fcp_cmpl() routine. Thus, remove
+   the extraneous lpfc_sli4_abort_fcp_cmpl() prototype declaration.
+
+ - lpfc_nvme_abort_fcreq_cmpl() abort completion routine is called with a
+ mismatched argument type. This may result in misleading log message
+ content. Update to the correct argument type of lpfc_iocbq instead of
+ lpfc_wcqe_complete. The lpfc_wcqe_complete should be derived from the
+ lpfc_iocbq structure.
+
+Link: https://lore.kernel.org/r/20220603174329.63777-3-jsmart2021@gmail.com
+Fixes: 31a59f75702f ("scsi: lpfc: SLI path split: Refactor Abort paths")
+Cc: <stable@vger.kernel.org> # v5.18
+Co-developed-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_crtn.h | 4 +---
+ drivers/scsi/lpfc/lpfc_nvme.c | 6 ++++--
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -418,8 +418,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc
+ uint32_t);
+ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+-void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
+- struct lpfc_wcqe_complete *w);
+
+ void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
+
+@@ -627,7 +625,7 @@ void lpfc_nvmet_invalidate_host(struct l
+ struct lpfc_nodelist *ndlp);
+ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb,
+- struct lpfc_wcqe_complete *abts_cmpl);
++ struct lpfc_iocbq *rspiocb);
+ void lpfc_create_multixri_pools(struct lpfc_hba *phba);
+ void lpfc_create_destroy_pools(struct lpfc_hba *phba);
+ void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -1741,7 +1741,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_l
+ * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+- * @abts_cmpl: Pointer to wcqe complete object.
++ * @rspiocb: Pointer to response iocb object.
+ *
+ * This is the callback function for any NVME FCP IO that was aborted.
+ *
+@@ -1750,8 +1750,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_l
+ **/
+ void
+ lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+- struct lpfc_wcqe_complete *abts_cmpl)
++ struct lpfc_iocbq *rspiocb)
+ {
++ struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
++
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+ "6145 ABORT_XRI_CN completing on rpi x%x "
+ "original iotag x%x, abort cmd iotag x%x "
--- /dev/null
+From e27f05147bff21408c1b8410ad8e90cd286e7952 Mon Sep 17 00:00:00 2001
+From: James Smart <jsmart2021@gmail.com>
+Date: Fri, 3 Jun 2022 10:43:23 -0700
+Subject: scsi: lpfc: Resolve some cleanup issues following SLI path refactoring
+
+From: James Smart <jsmart2021@gmail.com>
+
+commit e27f05147bff21408c1b8410ad8e90cd286e7952 upstream.
+
+Following refactoring and consolidation in SLI processing, fix up some
+minor issues related to the SLI path:
+
+ - Correct the setting of LPFC_EXCHANGE_BUSY flag in response IOCB.
+
+ - Fix some typographical errors.
+
+ - Fix duplicate log messages.
+
+Link: https://lore.kernel.org/r/20220603174329.63777-4-jsmart2021@gmail.com
+Fixes: 1b64aa9eae28 ("scsi: lpfc: SLI path split: Refactor fast and slow paths to native SLI4")
+Cc: <stable@vger.kernel.org> # v5.18
+Co-developed-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 +-
+ drivers/scsi/lpfc/lpfc_sli.c | 25 ++++++++++++-------------
+ 2 files changed, 13 insertions(+), 14 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -12063,7 +12063,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phb
+ rc = pci_enable_msi(phba->pcidev);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0462 PCI enable MSI mode success.\n");
++ "0012 PCI enable MSI mode success.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0471 PCI enable MSI mode failed (%d)\n", rc);
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba
+ sync_buf = __lpfc_sli_get_iocbq(phba);
+ if (!sync_buf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+- "6213 No available WQEs for CMF_SYNC_WQE\n");
++ "6244 No available WQEs for CMF_SYNC_WQE\n");
+ ret_val = ENOMEM;
+ goto out_unlock;
+ }
+@@ -3816,7 +3816,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hb
+ set_job_ulpword4(cmdiocbp,
+ IOERR_ABORT_REQUESTED);
+ /*
+- * For SLI4, irsiocb contains
++ * For SLI4, irspiocb contains
+ * NO_XRI in sli_xritag, it
+ * shall not affect releasing
+ * sgl (xri) process.
+@@ -3834,7 +3834,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hb
+ }
+ }
+ }
+- (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq);
++ cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
+ } else
+ lpfc_sli_release_iocbq(phba, cmdiocbp);
+ } else {
+@@ -4074,8 +4074,7 @@ lpfc_sli_handle_fast_ring_event(struct l
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->cmd_cmpl) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+- (cmdiocbq->cmd_cmpl)(phba, cmdiocbq,
+- &rspiocbq);
++ cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+ break;
+@@ -10304,7 +10303,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
+- * send an iocb command to an HBA with SLI-4 interface spec.
++ * send an iocb command to an HBA with SLI-3 interface spec.
+ *
+ * This function takes the hbalock before invoking the lockless version.
+ * The function will return success after it successfully submit the wqe to
+@@ -12741,7 +12740,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba
+ cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
+ cmdiocbq->wait_cmd_cmpl = NULL;
+ if (cmdiocbq->cmd_cmpl)
+- (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL);
++ cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
+ else
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return;
+@@ -12755,9 +12754,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba
+
+ /* Set the exchange busy flag for task management commands */
+ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+- !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
++ !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
+ lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
+- cur_iocbq);
++ cur_iocbq);
+ if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+@@ -13897,7 +13896,7 @@ void lpfc_sli4_els_xri_abort_event_proc(
+ * @irspiocbq: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event and construct
+- * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
++ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
+ * discovery engine to handle.
+ *
+ * Return: Pointer to the receive IOCBQ, NULL otherwise.
+@@ -13941,7 +13940,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct
+
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+- cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
++ irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
+@@ -14800,7 +14799,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc
+ /* Pass the cmd_iocb and the wcqe to the upper layer */
+ memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
+ sizeof(struct lpfc_wcqe_complete));
+- (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq);
++ cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0375 FCP cmdiocb not callback function "
+@@ -18963,7 +18962,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vp
+
+ /* Free iocb created in lpfc_prep_seq */
+ list_for_each_entry_safe(curr_iocb, next_iocb,
+- &iocbq->list, list) {
++ &iocbq->list, list) {
+ list_del_init(&curr_iocb->list);
+ lpfc_sli_release_iocbq(phba, curr_iocb);
+ }
--- /dev/null
+From f92de9d110429e39929a49240d823251c2fe903e Mon Sep 17 00:00:00 2001
+From: Tyler Erickson <tyler.erickson@seagate.com>
+Date: Thu, 2 Jun 2022 16:51:13 -0600
+Subject: scsi: sd: Fix interpretation of VPD B9h length
+
+From: Tyler Erickson <tyler.erickson@seagate.com>
+
+commit f92de9d110429e39929a49240d823251c2fe903e upstream.
+
+Fix the interpretation of the length of the B9h VPD page (Concurrent
+Positioning Ranges). Adding 4 is necessary because the first 4 bytes of
+the page are the header with the page number and length information.
+Adding 3 was likely a misinterpretation of the SBC-5 specification,
+which numbers all offsets starting at zero.
+
+This fixes the error in dmesg:
+
+[ 9.014456] sd 1:0:0:0: [sda] Invalid Concurrent Positioning Ranges VPD page
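+
+As a worked example: a drive exposing a single range reports a page
+length field of 92 (a 64B header plus one 32B descriptor, minus the 4
+header bytes the field excludes). Adding 4 gives vpd_len = 96, which
+passes the minimum-size and multiple-of-32 sanity checks, while the
+old "+ 3" gave 95 and failed both, producing the error above.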
+
+Link: https://lore.kernel.org/r/20220602225113.10218-4-tyler.erickson@seagate.com
+Fixes: e815d36548f0 ("scsi: sd: add concurrent positioning ranges support")
+Cc: stable@vger.kernel.org
+Tested-by: Michael English <michael.english@seagate.com>
+Reviewed-by: Muhammad Ahmad <muhammad.ahmad@seagate.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Tyler Erickson <tyler.erickson@seagate.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/sd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3067,7 +3067,7 @@ static void sd_read_cpr(struct scsi_disk
+ goto out;
+
+ /* We must have at least a 64B header and one 32B range descriptor */
+- vpd_len = get_unaligned_be16(&buffer[2]) + 3;
++ vpd_len = get_unaligned_be16(&buffer[2]) + 4;
+ if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Ranges VPD page\n");
cifs-return-errors-during-session-setup-during-reconnects.patch
cifs-fix-reconnect-on-smb3-mount-types.patch
cifs-populate-empty-hostnames-for-extra-channels.patch
+scsi-sd-fix-interpretation-of-vpd-b9h-length.patch
+scsi-lpfc-resolve-some-cleanup-issues-following-abort-path-refactoring.patch
+scsi-lpfc-resolve-some-cleanup-issues-following-sli-path-refactoring.patch
+scsi-lpfc-address-null-pointer-dereference-after-starget_to_rport.patch
+kvm-x86-mmu-check-every-prev_roots-in-__kvm_mmu_free_obsolete_roots.patch
+kvm-svm-fix-tsc-scaling-cache-logic.patch
+filemap-cache-the-value-of-vm_flags.patch
+keys-trusted-tpm2-fix-migratable-logic.patch
+libata-fix-reading-concurrent-positioning-ranges-log.patch
+libata-fix-translation-of-concurrent-positioning-ranges.patch
+ata-libata-transport-fix-dma-pio-xfer-_mode-sysfs-files.patch
+mmc-sdhci-pci-gli-fix-gl9763e-runtime-pm-when-the-system-resumes-from-suspend.patch
+mmc-block-fix-cqe-recovery-reset-success.patch
+net-phy-dp83867-retrigger-sgmii-an-when-link-change.patch
+net-openvswitch-fix-misuse-of-the-cached-connection-on-tuple-changes.patch
+writeback-fix-inode-i_io_list-not-be-protected-by-inode-i_lock-error.patch
--- /dev/null
+From 10e14073107dd0b6d97d9516a02845a8e501c2c9 Mon Sep 17 00:00:00 2001
+From: Jchao Sun <sunjunchao2870@gmail.com>
+Date: Tue, 24 May 2022 08:05:40 -0700
+Subject: writeback: Fix inode->i_io_list not be protected by inode->i_lock error
+
+From: Jchao Sun <sunjunchao2870@gmail.com>
+
+commit 10e14073107dd0b6d97d9516a02845a8e501c2c9 upstream.
+
+Commit b35250c0816c ("writeback: Protect inode->i_io_list with
+inode->i_lock") made inode->i_io_list protected not only by
+wb->list_lock but also by inode->i_lock, but inode_io_list_move_locked()
+was missed. Add the locking there and also update the comment
+describing the things protected by inode->i_lock. This also fixes a
+race where __mark_inode_dirty() could move an inode under the flush
+worker's hands and thus sync(2) could miss writing some inodes.
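+
+The resulting locking rule for moving an inode between writeback lists
+(as assumed from the hunks below) is: take wb->list_lock first, then
+inode->i_lock, then do the list move. Sketched:
+
+  wb = locked_inode_to_wb_and_lock_list(inode); /* takes wb->list_lock */
+  spin_lock(&inode->i_lock);
+  ...
+  inode_io_list_move_locked(inode, wb, dirty_list);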
+
+Fixes: b35250c0816c ("writeback: Protect inode->i_io_list with inode->i_lock")
+Link: https://lore.kernel.org/r/20220524150540.12552-1-sunjunchao2870@gmail.com
+CC: stable@vger.kernel.org
+Signed-off-by: Jchao Sun <sunjunchao2870@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fs-writeback.c | 37 ++++++++++++++++++++++++++++---------
+ fs/inode.c | 2 +-
+ 2 files changed, 29 insertions(+), 10 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(st
+ struct list_head *head)
+ {
+ assert_spin_locked(&wb->list_lock);
++ assert_spin_locked(&inode->i_lock);
+
+ list_move(&inode->i_io_list, head);
+
+@@ -1365,9 +1366,9 @@ static int move_expired_inodes(struct li
+ inode = wb_inode(delaying_queue->prev);
+ if (inode_dirtied_after(inode, dirtied_before))
+ break;
++ spin_lock(&inode->i_lock);
+ list_move(&inode->i_io_list, &tmp);
+ moved++;
+- spin_lock(&inode->i_lock);
+ inode->i_state |= I_SYNC_QUEUED;
+ spin_unlock(&inode->i_lock);
+ if (sb_is_blkdev_sb(inode->i_sb))
+@@ -1383,7 +1384,12 @@ static int move_expired_inodes(struct li
+ goto out;
+ }
+
+- /* Move inodes from one superblock together */
++ /*
++ * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
++ * we don't take inode->i_lock here because it is just a pointless overhead.
++ * Inode is already marked as I_SYNC_QUEUED so writeback list handling is
++ * fully under our control.
++ */
+ while (!list_empty(&tmp)) {
+ sb = wb_inode(tmp.prev)->i_sb;
+ list_for_each_prev_safe(pos, node, &tmp) {
+@@ -1826,8 +1832,8 @@ static long writeback_sb_inodes(struct s
+ * We'll have another go at writing back this inode
+ * when we completed a full scan of b_io.
+ */
+- spin_unlock(&inode->i_lock);
+ requeue_io(inode, wb);
++ spin_unlock(&inode->i_lock);
+ trace_writeback_sb_inodes_requeue(inode);
+ continue;
+ }
+@@ -2358,6 +2364,7 @@ void __mark_inode_dirty(struct inode *in
+ {
+ struct super_block *sb = inode->i_sb;
+ int dirtytime = 0;
++ struct bdi_writeback *wb = NULL;
+
+ trace_writeback_mark_inode_dirty(inode, flags);
+
+@@ -2410,13 +2417,24 @@ void __mark_inode_dirty(struct inode *in
+ inode->i_state |= flags;
+
+ /*
++ * Grab inode's wb early because it requires dropping i_lock and we
++ * need to make sure following checks happen atomically with dirty
++ * list handling so that we don't move inodes under flush worker's
++ * hands.
++ */
++ if (!was_dirty) {
++ wb = locked_inode_to_wb_and_lock_list(inode);
++ spin_lock(&inode->i_lock);
++ }
++
++ /*
+ * If the inode is queued for writeback by flush worker, just
+ * update its dirty state. Once the flush worker is done with
+ * the inode it will place it on the appropriate superblock
+ * list, based upon its state.
+ */
+ if (inode->i_state & I_SYNC_QUEUED)
+- goto out_unlock_inode;
++ goto out_unlock;
+
+ /*
+ * Only add valid (hashed) inodes to the superblock's
+@@ -2424,22 +2442,19 @@ void __mark_inode_dirty(struct inode *in
+ */
+ if (!S_ISBLK(inode->i_mode)) {
+ if (inode_unhashed(inode))
+- goto out_unlock_inode;
++ goto out_unlock;
+ }
+ if (inode->i_state & I_FREEING)
+- goto out_unlock_inode;
++ goto out_unlock;
+
+ /*
+ * If the inode was already on b_dirty/b_io/b_more_io, don't
+ * reposition it (that would break b_dirty time-ordering).
+ */
+ if (!was_dirty) {
+- struct bdi_writeback *wb;
+ struct list_head *dirty_list;
+ bool wakeup_bdi = false;
+
+- wb = locked_inode_to_wb_and_lock_list(inode);
+-
+ inode->dirtied_when = jiffies;
+ if (dirtytime)
+ inode->dirtied_time_when = jiffies;
+@@ -2453,6 +2468,7 @@ void __mark_inode_dirty(struct inode *in
+ dirty_list);
+
+ spin_unlock(&wb->list_lock);
++ spin_unlock(&inode->i_lock);
+ trace_writeback_dirty_inode_enqueue(inode);
+
+ /*
+@@ -2467,6 +2483,9 @@ void __mark_inode_dirty(struct inode *in
+ return;
+ }
+ }
++out_unlock:
++ if (wb)
++ spin_unlock(&wb->list_lock);
+ out_unlock_inode:
+ spin_unlock(&inode->i_lock);
+ }
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -27,7 +27,7 @@
+ * Inode locking rules:
+ *
+ * inode->i_lock protects:
+- * inode->i_state, inode->i_hash, __iget()
++ * inode->i_state, inode->i_hash, __iget(), inode->i_io_list
+ * Inode LRU list locks protect:
+ * inode->i_sb->s_inode_lru, inode->i_lru
+ * inode->i_sb->s_inode_list_lock protects: