--- /dev/null
+From 72aad489f992871e908ff6d9055b26c6366fb864 Mon Sep 17 00:00:00 2001
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+Date: Wed, 8 Jun 2022 22:51:07 +0300
+Subject: ata: libata-transport: fix {dma|pio|xfer}_mode sysfs files
+
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+
+commit 72aad489f992871e908ff6d9055b26c6366fb864 upstream.
+
+The {dma|pio}_mode sysfs files are incorrectly documented as having a
+list of the supported DMA/PIO transfer modes, while the corresponding
+fields of the *struct* ata_device hold the transfer mode IDs, not masks.
+
+To match those docs, the {dma|pio}_mode (and even xfer_mode!) sysfs
+files are handled by the ata_bitfield_name_match() macro, which leads
+to nonsense like the following being read from them:
+
+$ cat /sys/class/ata_device/dev3.0/pio_mode
+XFER_UDMA_7, XFER_UDMA_6, XFER_UDMA_5, XFER_UDMA_4, XFER_MW_DMA_4,
+XFER_PIO_6, XFER_PIO_5, XFER_PIO_4, XFER_PIO_3, XFER_PIO_2, XFER_PIO_1,
+XFER_PIO_0
+
+Using the correct ata_bitfield_name_search() macro fixes that:
+
+$ cat /sys/class/ata_device/dev3.0/pio_mode
+XFER_PIO_4
+
+While fixing the file documentation, also reword the {dma|pio}_mode
+file doc somewhat and add a note to the xfer_mode file doc that it is
+mostly useful for PATA devices...
+
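+The difference between the two macros can be sketched in plain
+userspace C (show_match() and show_search() are simplified stand-ins
+for ata_bitfield_name_match() and ata_bitfield_name_search(); the
+three-entry table is only an illustrative subset of the xfer mode
+table):
+
+  #include <stdio.h>
+
+  /* a tiny, illustrative subset of the xfer mode table */
+  static const struct {
+          unsigned int value;
+          const char *name;
+  } xfer_names[] = {
+          { 0x44, "XFER_UDMA_4" },
+          { 0x0c, "XFER_PIO_4"  },
+          { 0x0b, "XFER_PIO_3"  },
+  };
+
+  #define N (sizeof(xfer_names) / sizeof(xfer_names[0]))
+
+  /* "match": treats the value as a bit mask, prints every overlapping name */
+  static void show_match(unsigned int mode)
+  {
+          for (unsigned int i = 0; i < N; i++)
+                  if (xfer_names[i].value & mode)
+                          printf("%s, ", xfer_names[i].name);
+          printf("\n");
+  }
+
+  /* "search": treats the value as a single mode ID, prints the exact match */
+  static void show_search(unsigned int mode)
+  {
+          for (unsigned int i = 0; i < N; i++)
+                  if (xfer_names[i].value == mode)
+                          printf("%s\n", xfer_names[i].name);
+  }
+
+  int main(void)
+  {
+          show_match(0x0c);       /* several unrelated names (nonsense) */
+          show_search(0x0c);      /* only XFER_PIO_4 */
+          return 0;
+  }
+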
+Fixes: d9027470b886 ("[libata] Add ATA transport class")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-ata | 11 ++++++-----
+ drivers/ata/libata-transport.c | 2 +-
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-ata
++++ b/Documentation/ABI/testing/sysfs-ata
+@@ -107,13 +107,14 @@ Description:
+ described in ATA8 7.16 and 7.17. Only valid if
+ the device is not a PM.
+
+- pio_mode: (RO) Transfer modes supported by the device when
+- in PIO mode. Mostly used by PATA device.
++ pio_mode: (RO) PIO transfer mode used by the device.
++ Mostly used by PATA devices.
+
+- xfer_mode: (RO) Current transfer mode
++ xfer_mode: (RO) Current transfer mode. Mostly used by
++ PATA devices.
+
+- dma_mode: (RO) Transfer modes supported by the device when
+- in DMA mode. Mostly used by PATA device.
++ dma_mode: (RO) DMA transfer mode used by the device.
++ Mostly used by PATA devices.
+
+ class: (RO) Device class. Can be "ata" for disk,
+ "atapi" for packet device, "pmp" for PM, or
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -196,7 +196,7 @@ static struct {
+ { XFER_PIO_0, "XFER_PIO_0" },
+ { XFER_PIO_SLOW, "XFER_PIO_SLOW" }
+ };
+-ata_bitfield_name_match(xfer,ata_xfer_names)
++ata_bitfield_name_search(xfer, ata_xfer_names)
+
+ /*
+ * ATA Port attributes
--- /dev/null
+From dda5384313a40ecbaafd8a9a80f47483255e4c4d Mon Sep 17 00:00:00 2001
+From: David Safford <david.safford@gmail.com>
+Date: Tue, 7 Jun 2022 14:07:57 -0400
+Subject: KEYS: trusted: tpm2: Fix migratable logic
+
+From: David Safford <david.safford@gmail.com>
+
+commit dda5384313a40ecbaafd8a9a80f47483255e4c4d upstream.
+
+When creating (sealing) a new trusted key, migratable
+trusted keys have the FIXED_TPM and FIXED_PARENT attributes
+set, and non-migratable keys don't. This is backwards, and
+also causes creation to fail when creating a migratable key
+under a migratable parent. (The TPM thinks you are trying to
+seal a non-migratable blob under a migratable parent.)
+
+The following simple patch fixes the logic, and has been
+tested for all four combinations of migratable and non-migratable
+trusted keys and parent storage keys. With this logic, you will
+get a proper failure if you try to create a non-migratable
+trusted key under a migratable parent storage key, and all other
+combinations work correctly.
+
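+For reference, the corrected attribute computation can be sketched in
+isolation as a small userspace program (OA_FIXED_TPM/OA_FIXED_PARENT
+are illustrative stand-ins for the kernel's TPM2_OA_* constants, with
+bit positions per the TPM 2.0 TPMA_OBJECT layout):
+
+  #include <stdio.h>
+
+  /* TPMA_OBJECT bits: fixedTPM is bit 1, fixedParent is bit 4 */
+  #define OA_FIXED_TPM    (1u << 1)
+  #define OA_FIXED_PARENT (1u << 4)
+
+  static unsigned int seal_flags(int migratable)
+  {
+          /* non-migratable keys are the ones pinned to this TPM and parent */
+          return migratable ? 0 : (OA_FIXED_TPM | OA_FIXED_PARENT);
+  }
+
+  int main(void)
+  {
+          printf("migratable:     0x%08x\n", seal_flags(1)); /* 0x00000000 */
+          printf("non-migratable: 0x%08x\n", seal_flags(0)); /* 0x00000012 */
+          return 0;
+  }
+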
+Cc: stable@vger.kernel.org # v5.13+
+Fixes: e5fb5d2c5a03 ("security: keys: trusted: Make sealed key properly interoperable")
+Signed-off-by: David Safford <david.safford@gmail.com>
+Reviewed-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/keys/trusted-keys/trusted_tpm2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -283,8 +283,8 @@ int tpm2_seal_trusted(struct tpm_chip *c
+ /* key properties */
+ flags = 0;
+ flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH;
+- flags |= payload->migratable ? (TPM2_OA_FIXED_TPM |
+- TPM2_OA_FIXED_PARENT) : 0;
++ flags |= payload->migratable ? 0 : (TPM2_OA_FIXED_TPM |
++ TPM2_OA_FIXED_PARENT);
+ tpm_buf_append_u32(&buf, flags);
+
+ /* policy */
--- /dev/null
+From c745dfc541e78428ba3986f1d17fe1dfdaca8184 Mon Sep 17 00:00:00 2001
+From: Tyler Erickson <tyler.erickson@seagate.com>
+Date: Thu, 2 Jun 2022 16:51:11 -0600
+Subject: libata: fix reading concurrent positioning ranges log
+
+From: Tyler Erickson <tyler.erickson@seagate.com>
+
+commit c745dfc541e78428ba3986f1d17fe1dfdaca8184 upstream.
+
+The concurrent positioning ranges log is not a fixed size and may depend
+on how many ranges are supported by the device. This patch uses the size
+reported in the GPL directory to determine the number of pages the device
+supports for this log before attempting to read it.
+
+This resolves this error from the dmesg output:
+ ata6.00: Read log 0x47 page 0x00 failed, Emask 0x1
+
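+A minimal userspace sketch of the size lookup this patch relies on
+(log_pages() is an illustrative stand-in for the log directory read,
+not the kernel helper):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  /* The log directory holds, per log, a 16-bit little-endian count of
+   * 512-byte pages (cf. get_unaligned_le16(&buf[log * 2])). */
+  static unsigned int log_pages(const uint8_t *log_dir, unsigned int log)
+  {
+          return log_dir[log * 2] | (log_dir[log * 2 + 1] << 8);
+  }
+
+  int main(void)
+  {
+          uint8_t log_dir[512] = { 0 };
+          unsigned int pages;
+          size_t buf_len;
+
+          log_dir[0x47 * 2] = 2;          /* pretend the device reports a 2-page 0x47 log */
+
+          pages = log_pages(log_dir, 0x47);
+          buf_len = (size_t)pages << 9;   /* pages -> bytes, as the patch does */
+
+          printf("log 0x47: %u pages, %zu byte buffer\n", pages, buf_len);
+          return 0;
+  }
+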
+Cc: stable@vger.kernel.org
+Fixes: fe22e1c2f705 ("libata: support concurrent positioning ranges log")
+Signed-off-by: Tyler Erickson <tyler.erickson@seagate.com>
+Reviewed-by: Muhammad Ahmad <muhammad.ahmad@seagate.com>
+Tested-by: Michael English <michael.english@seagate.com>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-core.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2003,16 +2003,16 @@ retry:
+ return err_mask;
+ }
+
+-static bool ata_log_supported(struct ata_device *dev, u8 log)
++static int ata_log_supported(struct ata_device *dev, u8 log)
+ {
+ struct ata_port *ap = dev->link->ap;
+
+ if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+- return false;
++ return 0;
+
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
+- return false;
+- return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
++ return 0;
++ return get_unaligned_le16(&ap->sector_buf[log * 2]);
+ }
+
+ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
+@@ -2448,15 +2448,20 @@ static void ata_dev_config_cpr(struct at
+ struct ata_cpr_log *cpr_log = NULL;
+ u8 *desc, *buf = NULL;
+
+- if (ata_id_major_version(dev->id) < 11 ||
+- !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
++ if (ata_id_major_version(dev->id) < 11)
++ goto out;
++
++ buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
++ if (buf_len == 0)
+ goto out;
+
+ /*
+ * Read the concurrent positioning ranges log (0x47). We can have at
+- * most 255 32B range descriptors plus a 64B header.
++ * most 255 32B range descriptors plus a 64B header. This log varies in
++ * size, so use the size reported in the GPL directory. Reading beyond
++ * the supported length will result in an error.
+ */
+- buf_len = (64 + 255 * 32 + 511) & ~511;
++ buf_len <<= 9;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ goto out;
--- /dev/null
+From 6d11acd452fd885ef6ace184c9c70bc863a8c72f Mon Sep 17 00:00:00 2001
+From: Tyler Erickson <tyler.erickson@seagate.com>
+Date: Thu, 2 Jun 2022 16:51:12 -0600
+Subject: libata: fix translation of concurrent positioning ranges
+
+From: Tyler Erickson <tyler.erickson@seagate.com>
+
+commit 6d11acd452fd885ef6ace184c9c70bc863a8c72f upstream.
+
+Fix the page length in the SCSI translation for the concurrent
+positioning ranges VPD page. It was written starting at offset 3
+rather than offset 2, where the MSB of the VPD page length is
+supposed to start.
+
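+A minimal sketch of why the offset matters (put_be16() is an
+illustrative stand-in for put_unaligned_be16(), not the kernel helper):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  /* store a 16-bit value big-endian at the given offset */
+  static void put_be16(uint8_t *buf, unsigned int off, unsigned int val)
+  {
+          buf[off]     = val >> 8;        /* MSB */
+          buf[off + 1] = val & 0xff;      /* LSB */
+  }
+
+  int main(void)
+  {
+          uint8_t vpd[4] = { 0x00, 0xb9 };        /* byte 1: page code B9h */
+          unsigned int page_len = 64 + 4 * 32 - 4; /* e.g. four 32B descriptors */
+
+          /* The MSB of PAGE LENGTH belongs in byte 2, so write at offset 2. */
+          put_be16(vpd, 2, page_len);
+
+          printf("bytes 2..3: %02x %02x (page length %u)\n",
+                 vpd[2], vpd[3], page_len);
+          return 0;
+  }
+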
+Cc: stable@vger.kernel.org
+Fixes: fe22e1c2f705 ("libata: support concurrent positioning ranges log")
+Signed-off-by: Tyler Erickson <tyler.erickson@seagate.com>
+Reviewed-by: Muhammad Ahmad <muhammad.ahmad@seagate.com>
+Tested-by: Michael English <michael.english@seagate.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-scsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2119,7 +2119,7 @@ static unsigned int ata_scsiop_inq_b9(st
+
+ /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
+ rbuf[1] = 0xb9;
+- put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
++ put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
+
+ for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
+ desc[0] = cpr_log->cpr[i].num;
--- /dev/null
+From a051246b786af7e4a9d9219cc7038a6e8a411531 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Tue, 31 May 2022 20:19:22 +0300
+Subject: mmc: block: Fix CQE recovery reset success
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit a051246b786af7e4a9d9219cc7038a6e8a411531 upstream.
+
+The intention of the use of mmc_blk_reset_success() in
+mmc_blk_cqe_recovery() was to prevent repeated resets when retrying and
+getting the same error. However, that may not be the case - any amount
+of time and I/O may pass before another recovery is needed, in which
+case there would be no reason to deny it the opportunity to recover via
+a reset if necessary. CQE recovery is expected to be rare, and failure
+to recover (if the clear tasks command fails) rarer still, so it is
+better to always allow the reset, which can be done by calling
+mmc_blk_reset_success() unconditionally.
+
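+A toy model of the reset-throttling idea referred to above (this is a
+simplified sketch, not the actual mmc_blk_reset() implementation):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Toy latch: a reset is performed at most once until it is marked
+   * successful again. */
+  static bool reset_pending;
+
+  static int do_reset(void)
+  {
+          if (reset_pending)
+                  return -1;              /* previous reset never marked successful */
+          reset_pending = true;
+          printf("reset performed\n");
+          return 0;
+  }
+
+  static void reset_success(void)
+  {
+          reset_pending = false;          /* allow a future recovery to reset again */
+  }
+
+  int main(void)
+  {
+          /* first recovery: reset, then (with the fix) always record success */
+          do_reset();
+          reset_success();
+
+          /* much later, an unrelated recovery may still reset if it needs to */
+          do_reset();
+          return 0;
+  }
+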
+Fixes: 1e8e55b67030c6 ("mmc: block: Add CQE support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20220531171922.76080-1-adrian.hunter@intel.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/block.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1482,8 +1482,7 @@ void mmc_blk_cqe_recovery(struct mmc_que
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+- else
+- mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
++ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+ }
--- /dev/null
+From 2061ecfdf2350994e5b61c43e50e98a7a70e95ee Mon Sep 17 00:00:00 2001
+From: Ilya Maximets <i.maximets@ovn.org>
+Date: Tue, 7 Jun 2022 00:11:40 +0200
+Subject: net: openvswitch: fix misuse of the cached connection on tuple changes
+
+From: Ilya Maximets <i.maximets@ovn.org>
+
+commit 2061ecfdf2350994e5b61c43e50e98a7a70e95ee upstream.
+
+If the packet headers changed, the cached nfct is no longer relevant
+for the packet, and an attempt to re-use it leads to incorrect packet
+classification.
+
+This issue is causing broken connectivity in OpenStack deployments
+with OVS/OVN due to hairpin traffic being unexpectedly dropped.
+
+The setup has datapath flows with several conntrack actions and tuple
+changes between them:
+
+ actions:ct(commit,zone=8,mark=0/0x1,nat(src)),
+ set(eth(src=00:00:00:00:00:01,dst=00:00:00:00:00:06)),
+ set(ipv4(src=172.18.2.10,dst=192.168.100.6,ttl=62)),
+ ct(zone=8),recirc(0x4)
+
+After the first ct() action the packet headers are almost fully
+re-written. The next ct() tries to re-use the existing nfct entry
+and marks the packet as invalid, so it gets dropped later in the
+pipeline.
+
+Clear the cached conntrack entry whenever the packet tuple is changed
+to avoid the issue.
+
+The flow key should not be cleared though, because we should still
+be able to match on the ct_state if the recirculation happens after
+the tuple change but before the next ct() action.
+
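+The intended behaviour can be sketched with a small userspace toy model
+(struct packet and ct_clear() are illustrative stand-ins, not the
+kernel data structures):
+
+  #include <stdio.h>
+  #include <stddef.h>
+
+  struct conn     { int zone; };
+  struct flow_key { unsigned int ct_state; };
+
+  struct packet {
+          struct conn *cached_ct;         /* stand-in for the skb's cached nfct */
+          struct flow_key key;
+  };
+
+  /* Drop the stale cached connection; only wipe the flow key if the
+   * caller passed one (the set_*() actions pass NULL, as in the patch). */
+  static void ct_clear(struct packet *pkt, struct flow_key *key)
+  {
+          pkt->cached_ct = NULL;
+          if (key)
+                  key->ct_state = 0;
+  }
+
+  int main(void)
+  {
+          struct conn ct = { .zone = 8 };
+          struct packet pkt = { .cached_ct = &ct, .key = { .ct_state = 0x21 } };
+
+          /* a header rewrite such as set(ipv4(...)) invalidates the cache,
+           * but the already-extracted key stays usable for ct_state matching */
+          ct_clear(&pkt, NULL);
+
+          printf("cached ct: %p, key ct_state: %#x\n",
+                 (void *)pkt.cached_ct, pkt.key.ct_state);
+          return 0;
+  }
+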
+Cc: stable@vger.kernel.org
+Fixes: 7f8a436eaa2c ("openvswitch: Add conntrack action")
+Reported-by: Frode Nordahl <frode.nordahl@canonical.com>
+Link: https://mail.openvswitch.org/pipermail/ovs-discuss/2022-May/051829.html
+Link: https://bugs.launchpad.net/ubuntu/+source/ovn/+bug/1967856
+Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+Link: https://lore.kernel.org/r/20220606221140.488984-1-i.maximets@ovn.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/actions.c | 6 ++++++
+ net/openvswitch/conntrack.c | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -373,6 +373,7 @@ static void set_ip_addr(struct sk_buff *
+ update_ip_l4_checksum(skb, nh, *addr, new_addr);
+ csum_replace4(&nh->check, *addr, new_addr);
+ skb_clear_hash(skb);
++ ovs_ct_clear(skb, NULL);
+ *addr = new_addr;
+ }
+
+@@ -420,6 +421,7 @@ static void set_ipv6_addr(struct sk_buff
+ update_ipv6_checksum(skb, l4_proto, addr, new_addr);
+
+ skb_clear_hash(skb);
++ ovs_ct_clear(skb, NULL);
+ memcpy(addr, new_addr, sizeof(__be32[4]));
+ }
+
+@@ -660,6 +662,7 @@ static int set_nsh(struct sk_buff *skb,
+ static void set_tp_port(struct sk_buff *skb, __be16 *port,
+ __be16 new_port, __sum16 *check)
+ {
++ ovs_ct_clear(skb, NULL);
+ inet_proto_csum_replace2(check, skb, *port, new_port, false);
+ *port = new_port;
+ }
+@@ -699,6 +702,7 @@ static int set_udp(struct sk_buff *skb,
+ uh->dest = dst;
+ flow_key->tp.src = src;
+ flow_key->tp.dst = dst;
++ ovs_ct_clear(skb, NULL);
+ }
+
+ skb_clear_hash(skb);
+@@ -761,6 +765,8 @@ static int set_sctp(struct sk_buff *skb,
+ sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
+
+ skb_clear_hash(skb);
++ ovs_ct_clear(skb, NULL);
++
+ flow_key->tp.src = sh->source;
+ flow_key->tp.dst = sh->dest;
+
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1342,7 +1342,9 @@ int ovs_ct_clear(struct sk_buff *skb, st
+
+ nf_ct_put(ct);
+ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+- ovs_ct_fill_key(skb, key, false);
++
++ if (key)
++ ovs_ct_fill_key(skb, key, false);
+
+ return 0;
+ }
--- /dev/null
+From c76acfb7e19dcc3a0964e0563770b1d11b8d4540 Mon Sep 17 00:00:00 2001
+From: Tan Tee Min <tee.min.tan@linux.intel.com>
+Date: Thu, 26 May 2022 17:03:47 +0800
+Subject: net: phy: dp83867: retrigger SGMII AN when link change
+
+From: Tan Tee Min <tee.min.tan@linux.intel.com>
+
+commit c76acfb7e19dcc3a0964e0563770b1d11b8d4540 upstream.
+
+There is a limitation in the TI DP83867 PHY device where SGMII AN is
+only triggered once after the device is booted up. Even after the PHY
+TPI is down and up again, SGMII AN is not triggered and hence no new
+in-band message is sent from the PHY to the MAC side SGMII.
+
+This could cause an issue during power up, when the PHY is up prior to
+the MAC. In this condition, once the MAC side SGMII is up, it wouldn't
+receive a new in-band message from the TI PHY with the correct link
+status, speed and duplex info.
+
+As suggested by TI, implement a SW solution here to retrigger SGMII
+Auto-Neg whenever there is a link change.
+
+v2: Add Fixes tag in commit message.
+
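+A minimal sketch of the clear-then-set toggle that
+dp83867_link_change_notify() performs (cfg2 is only an illustrative
+stand-in for the PHY register, not real MDIO access):
+
+  #include <stdio.h>
+
+  #define SGMII_AUTONEG_EN (1u << 7)      /* same bit position the patch defines */
+
+  static unsigned int cfg2 = SGMII_AUTONEG_EN;    /* stand-in for the CFG2 register */
+
+  /* Clearing and re-setting the enable bit restarts SGMII auto-negotiation,
+   * which the driver does via phy_clear_bits()/phy_set_bits() on link change. */
+  static void retrigger_sgmii_an(void)
+  {
+          cfg2 &= ~SGMII_AUTONEG_EN;
+          cfg2 |= SGMII_AUTONEG_EN;
+  }
+
+  int main(void)
+  {
+          retrigger_sgmii_an();
+          printf("CFG2 = 0x%04x\n", cfg2);
+          return 0;
+  }
+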
+Fixes: 2a10154abcb7 ("net: phy: dp83867: Add TI dp83867 phy")
+Cc: <stable@vger.kernel.org> # 5.4.x
+Signed-off-by: Sit, Michael Wei Hong <michael.wei.hong.sit@intel.com>
+Reviewed-by: Voon Weifeng <weifeng.voon@intel.com>
+Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20220526090347.128742-1-tee.min.tan@linux.intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83867.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -137,6 +137,7 @@
+ #define DP83867_DOWNSHIFT_2_COUNT 2
+ #define DP83867_DOWNSHIFT_4_COUNT 4
+ #define DP83867_DOWNSHIFT_8_COUNT 8
++#define DP83867_SGMII_AUTONEG_EN BIT(7)
+
+ /* CFG3 bits */
+ #define DP83867_CFG3_INT_OE BIT(7)
+@@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_
+ DP83867_PHYCR_FORCE_LINK_GOOD, 0);
+ }
+
++static void dp83867_link_change_notify(struct phy_device *phydev)
++{
++ /* There is a limitation in DP83867 PHY device where SGMII AN is
++ * only triggered once after the device is booted up. Even after the
++ * PHY TPI is down and up again, SGMII AN is not triggered and
++ * hence no new in-band message from PHY to MAC side SGMII.
++ * This could cause an issue during power up, when PHY is up prior
++ * to MAC. At this condition, once MAC side SGMII is up, MAC side
++ * SGMII wouldn't receive new in-band message from TI PHY with
++ * correct link status, speed and duplex info.
++ * Thus, implemented a SW solution here to retrigger SGMII Auto-Neg
++ * whenever there is a link change.
++ */
++ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
++ int val = 0;
++
++ val = phy_clear_bits(phydev, DP83867_CFG2,
++ DP83867_SGMII_AUTONEG_EN);
++ if (val < 0)
++ return;
++
++ phy_set_bits(phydev, DP83867_CFG2,
++ DP83867_SGMII_AUTONEG_EN);
++ }
++}
++
+ static struct phy_driver dp83867_driver[] = {
+ {
+ .phy_id = DP83867_PHY_ID,
+@@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
++
++ .link_change_notify = dp83867_link_change_notify,
+ },
+ };
+ module_phy_driver(dp83867_driver);
--- /dev/null
+From f92de9d110429e39929a49240d823251c2fe903e Mon Sep 17 00:00:00 2001
+From: Tyler Erickson <tyler.erickson@seagate.com>
+Date: Thu, 2 Jun 2022 16:51:13 -0600
+Subject: scsi: sd: Fix interpretation of VPD B9h length
+
+From: Tyler Erickson <tyler.erickson@seagate.com>
+
+commit f92de9d110429e39929a49240d823251c2fe903e upstream.
+
+Fix the interpretation of the length of the B9h VPD page (Concurrent
+Positioning Ranges). Adding 4 is necessary as the first 4 bytes of the
+page are the header with the page number and length information. Adding
+3 was likely a misinterpretation of the SBC-5 specification, which sets
+all offsets starting at zero.
+
+This fixes the error in dmesg:
+
+[ 9.014456] sd 1:0:0:0: [sda] Invalid Concurrent Positioning Ranges VPD page
+
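+A minimal sketch of the length arithmetic (vpd_total_len() is an
+illustrative helper, not the kernel code):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  /* A VPD page starts with a 4-byte header: byte 1 is the page code and
+   * bytes 2..3 are PAGE LENGTH (big-endian), counting the bytes *after*
+   * the header.  The full allocation is therefore PAGE LENGTH + 4. */
+  static unsigned int vpd_total_len(const uint8_t *page)
+  {
+          return ((page[2] << 8) | page[3]) + 4;
+  }
+
+  int main(void)
+  {
+          /* 64B log header + one 32B range descriptor => PAGE LENGTH 92 */
+          uint8_t b9[4] = { 0x00, 0xb9, 0x00, 92 };
+
+          printf("total VPD length: %u bytes\n", vpd_total_len(b9)); /* 96 */
+          return 0;
+  }
+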
+Link: https://lore.kernel.org/r/20220602225113.10218-4-tyler.erickson@seagate.com
+Fixes: e815d36548f0 ("scsi: sd: add concurrent positioning ranges support")
+Cc: stable@vger.kernel.org
+Tested-by: Michael English <michael.english@seagate.com>
+Reviewed-by: Muhammad Ahmad <muhammad.ahmad@seagate.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Tyler Erickson <tyler.erickson@seagate.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/sd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3171,7 +3171,7 @@ static void sd_read_cpr(struct scsi_disk
+ goto out;
+
+ /* We must have at least a 64B header and one 32B range descriptor */
+- vpd_len = get_unaligned_be16(&buffer[2]) + 3;
++ vpd_len = get_unaligned_be16(&buffer[2]) + 4;
+ if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Ranges VPD page\n");
cifs-return-errors-during-session-setup-during-reconnects.patch
cifs-fix-reconnect-on-smb3-mount-types.patch
cifs-populate-empty-hostnames-for-extra-channels.patch
+scsi-sd-fix-interpretation-of-vpd-b9h-length.patch
+keys-trusted-tpm2-fix-migratable-logic.patch
+libata-fix-reading-concurrent-positioning-ranges-log.patch
+libata-fix-translation-of-concurrent-positioning-ranges.patch
+ata-libata-transport-fix-dma-pio-xfer-_mode-sysfs-files.patch
+mmc-block-fix-cqe-recovery-reset-success.patch
+net-phy-dp83867-retrigger-sgmii-an-when-link-change.patch
+net-openvswitch-fix-misuse-of-the-cached-connection-on-tuple-changes.patch
+writeback-fix-inode-i_io_list-not-be-protected-by-inode-i_lock-error.patch
--- /dev/null
+From 10e14073107dd0b6d97d9516a02845a8e501c2c9 Mon Sep 17 00:00:00 2001
+From: Jchao Sun <sunjunchao2870@gmail.com>
+Date: Tue, 24 May 2022 08:05:40 -0700
+Subject: writeback: Fix inode->i_io_list not be protected by inode->i_lock error
+
+From: Jchao Sun <sunjunchao2870@gmail.com>
+
+commit 10e14073107dd0b6d97d9516a02845a8e501c2c9 upstream.
+
+Commit b35250c0816c ("writeback: Protect inode->i_io_list with
+inode->i_lock") made inode->i_io_list protected not only by
+wb->list_lock but also by inode->i_lock, but inode_io_list_move_locked()
+was missed. Add the lock there and also update the comment describing
+the things protected by inode->i_lock. This also fixes a race where
+__mark_inode_dirty() could move an inode under the flush worker's hands
+and thus sync(2) could miss writing some inodes.
+
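+A toy model of the locking rule being enforced (the structs below are
+illustrative stand-ins, not the kernel types):
+
+  #include <assert.h>
+  #include <stdbool.h>
+
+  /* The i_io_list hand-off may only happen while holding both
+   * wb->list_lock and inode->i_lock. */
+  struct wb    { bool list_lock_held; };
+  struct inode { bool i_lock_held; int io_list; };
+
+  static void inode_io_list_move_locked(struct wb *wb, struct inode *inode, int head)
+  {
+          assert(wb->list_lock_held);     /* existing assert_spin_locked() */
+          assert(inode->i_lock_held);     /* the assertion this patch adds */
+          inode->io_list = head;
+  }
+
+  int main(void)
+  {
+          struct wb wb = { false };
+          struct inode inode = { false, 0 };
+
+          wb.list_lock_held = true;       /* callers must take both locks first, */
+          inode.i_lock_held = true;       /* which __mark_inode_dirty() now does */
+          inode_io_list_move_locked(&wb, &inode, 1);
+          return 0;
+  }
+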
+Fixes: b35250c0816c ("writeback: Protect inode->i_io_list with inode->i_lock")
+Link: https://lore.kernel.org/r/20220524150540.12552-1-sunjunchao2870@gmail.com
+CC: stable@vger.kernel.org
+Signed-off-by: Jchao Sun <sunjunchao2870@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fs-writeback.c | 37 ++++++++++++++++++++++++++++---------
+ fs/inode.c | 2 +-
+ 2 files changed, 29 insertions(+), 10 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(st
+ struct list_head *head)
+ {
+ assert_spin_locked(&wb->list_lock);
++ assert_spin_locked(&inode->i_lock);
+
+ list_move(&inode->i_io_list, head);
+
+@@ -1402,9 +1403,9 @@ static int move_expired_inodes(struct li
+ inode = wb_inode(delaying_queue->prev);
+ if (inode_dirtied_after(inode, dirtied_before))
+ break;
++ spin_lock(&inode->i_lock);
+ list_move(&inode->i_io_list, &tmp);
+ moved++;
+- spin_lock(&inode->i_lock);
+ inode->i_state |= I_SYNC_QUEUED;
+ spin_unlock(&inode->i_lock);
+ if (sb_is_blkdev_sb(inode->i_sb))
+@@ -1420,7 +1421,12 @@ static int move_expired_inodes(struct li
+ goto out;
+ }
+
+- /* Move inodes from one superblock together */
++ /*
++ * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
++ * we don't take inode->i_lock here because it is just a pointless overhead.
++ * Inode is already marked as I_SYNC_QUEUED so writeback list handling is
++ * fully under our control.
++ */
+ while (!list_empty(&tmp)) {
+ sb = wb_inode(tmp.prev)->i_sb;
+ list_for_each_prev_safe(pos, node, &tmp) {
+@@ -1863,8 +1869,8 @@ static long writeback_sb_inodes(struct s
+ * We'll have another go at writing back this inode
+ * when we completed a full scan of b_io.
+ */
+- spin_unlock(&inode->i_lock);
+ requeue_io(inode, wb);
++ spin_unlock(&inode->i_lock);
+ trace_writeback_sb_inodes_requeue(inode);
+ continue;
+ }
+@@ -2400,6 +2406,7 @@ void __mark_inode_dirty(struct inode *in
+ {
+ struct super_block *sb = inode->i_sb;
+ int dirtytime = 0;
++ struct bdi_writeback *wb = NULL;
+
+ trace_writeback_mark_inode_dirty(inode, flags);
+
+@@ -2452,13 +2459,24 @@ void __mark_inode_dirty(struct inode *in
+ inode->i_state |= flags;
+
+ /*
++ * Grab inode's wb early because it requires dropping i_lock and we
++ * need to make sure following checks happen atomically with dirty
++ * list handling so that we don't move inodes under flush worker's
++ * hands.
++ */
++ if (!was_dirty) {
++ wb = locked_inode_to_wb_and_lock_list(inode);
++ spin_lock(&inode->i_lock);
++ }
++
++ /*
+ * If the inode is queued for writeback by flush worker, just
+ * update its dirty state. Once the flush worker is done with
+ * the inode it will place it on the appropriate superblock
+ * list, based upon its state.
+ */
+ if (inode->i_state & I_SYNC_QUEUED)
+- goto out_unlock_inode;
++ goto out_unlock;
+
+ /*
+ * Only add valid (hashed) inodes to the superblock's
+@@ -2466,22 +2484,19 @@ void __mark_inode_dirty(struct inode *in
+ */
+ if (!S_ISBLK(inode->i_mode)) {
+ if (inode_unhashed(inode))
+- goto out_unlock_inode;
++ goto out_unlock;
+ }
+ if (inode->i_state & I_FREEING)
+- goto out_unlock_inode;
++ goto out_unlock;
+
+ /*
+ * If the inode was already on b_dirty/b_io/b_more_io, don't
+ * reposition it (that would break b_dirty time-ordering).
+ */
+ if (!was_dirty) {
+- struct bdi_writeback *wb;
+ struct list_head *dirty_list;
+ bool wakeup_bdi = false;
+
+- wb = locked_inode_to_wb_and_lock_list(inode);
+-
+ inode->dirtied_when = jiffies;
+ if (dirtytime)
+ inode->dirtied_time_when = jiffies;
+@@ -2495,6 +2510,7 @@ void __mark_inode_dirty(struct inode *in
+ dirty_list);
+
+ spin_unlock(&wb->list_lock);
++ spin_unlock(&inode->i_lock);
+ trace_writeback_dirty_inode_enqueue(inode);
+
+ /*
+@@ -2509,6 +2525,9 @@ void __mark_inode_dirty(struct inode *in
+ return;
+ }
+ }
++out_unlock:
++ if (wb)
++ spin_unlock(&wb->list_lock);
+ out_unlock_inode:
+ spin_unlock(&inode->i_lock);
+ }
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -27,7 +27,7 @@
+ * Inode locking rules:
+ *
+ * inode->i_lock protects:
+- * inode->i_state, inode->i_hash, __iget()
++ * inode->i_state, inode->i_hash, __iget(), inode->i_io_list
+ * Inode LRU list locks protect:
+ * inode->i_sb->s_inode_lru, inode->i_lru
+ * inode->i_sb->s_inode_list_lock protects: