Fixes for 5.4
author Sasha Levin <sashal@kernel.org>
Thu, 16 Apr 2020 12:44:21 +0000 (08:44 -0400)
committer Sasha Levin <sashal@kernel.org>
Thu, 16 Apr 2020 12:44:21 +0000 (08:44 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
16 files changed:
queue-5.4/dm-clone-add-missing-casts-to-prevent-overflows-and-.patch [new file with mode: 0644]
queue-5.4/dm-clone-fix-handling-of-partial-region-discards.patch [new file with mode: 0644]
queue-5.4/dm-clone-replace-spin_lock_irqsave-with-spin_lock_ir.patch [new file with mode: 0644]
queue-5.4/dm-zoned-remove-duplicate-nr_rnd_zones-increase-in-d.patch [new file with mode: 0644]
queue-5.4/drm-amdgpu-fix-gfx-hang-during-suspend-with-video-pl.patch [new file with mode: 0644]
queue-5.4/drm-dp_mst-fix-clearing-payload-state-on-topology-di.patch [new file with mode: 0644]
queue-5.4/drm-i915-icl-don-t-enable-ddi-io-power-on-a-typec-po.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-convert-sdhci_set_timeout_irq-to-non-stati.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-refactor-sdhci_set_timeout.patch [new file with mode: 0644]
queue-5.4/powerpc-kasan-fix-kasan_remap_early_shadow_ro.patch [new file with mode: 0644]
queue-5.4/revert-drm-dp_mst-remove-vcpi-while-disabling-topolo.patch [new file with mode: 0644]
queue-5.4/scsi-lpfc-add-registration-for-cpu-offline-online-ev.patch [new file with mode: 0644]
queue-5.4/scsi-lpfc-fix-broken-credit-recovery-after-driver-lo.patch [new file with mode: 0644]
queue-5.4/scsi-lpfc-fix-configuration-of-bb-credit-recovery-in.patch [new file with mode: 0644]
queue-5.4/scsi-lpfc-fix-fabric-hostname-registration-if-system.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/dm-clone-add-missing-casts-to-prevent-overflows-and-.patch b/queue-5.4/dm-clone-add-missing-casts-to-prevent-overflows-and-.patch
new file mode 100644 (file)
index 0000000..9eaece7
--- /dev/null
@@ -0,0 +1,67 @@
+From 6ac3fe1e68ec31d5df454cfb51d72927125b67e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Mar 2020 16:01:10 +0200
+Subject: dm clone: Add missing casts to prevent overflows and data corruption
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+[ Upstream commit 9fc06ff56845cc5ccafec52f545fc2e08d22f849 ]
+
+Add missing casts when converting from regions to sectors.
+
+In case BITS_PER_LONG == 32, the lack of the appropriate casts can lead
+to overflows and miscalculation of the device sector.
+
+As a result, we could end up discarding and/or copying the wrong parts
+of the device, thus corrupting the device's data.
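+
+A minimal illustration (hypothetical numbers, assuming a 32-bit
+unsigned long and region_shift == 9):
+
+    unsigned long region_nr = 10000000;       /* ~10M regions */
+    sector_t bad  = region_nr << 9;           /* shift done in 32 bits */
+    sector_t good = (sector_t)region_nr << 9; /* widened first */
+
+10000000 << 9 is 5120000000, which exceeds 2^32, so 'bad' wraps around
+and yields the wrong device sector, while 'good' does not.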
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-clone-target.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
+index 315d3bca59792..eb7a5d3ba81a2 100644
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -282,7 +282,7 @@ static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
+ /* Get the address of the region in sectors */
+ static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
+ {
+-      return (region_nr << clone->region_shift);
++      return ((sector_t)region_nr << clone->region_shift);
+ }
+ /* Get the region number of the bio */
+@@ -471,7 +471,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
+       if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
+               remap_to_dest(clone, bio);
+               bio_region_range(clone, bio, &rs, &nr_regions);
+-              trim_bio(bio, rs << clone->region_shift,
++              trim_bio(bio, region_to_sector(clone, rs),
+                        nr_regions << clone->region_shift);
+               generic_make_request(bio);
+       } else
+@@ -798,11 +798,14 @@ static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr
+       struct dm_io_region from, to;
+       struct clone *clone = hd->clone;
++      if (WARN_ON(!nr_regions))
++              return;
++
+       region_size = clone->region_size;
+       region_start = hd->region_nr;
+       region_end = region_start + nr_regions - 1;
+-      total_size = (nr_regions - 1) << clone->region_shift;
++      total_size = region_to_sector(clone, nr_regions - 1);
+       if (region_end == clone->nr_regions - 1) {
+               /*
+-- 
+2.20.1
+
diff --git a/queue-5.4/dm-clone-fix-handling-of-partial-region-discards.patch b/queue-5.4/dm-clone-fix-handling-of-partial-region-discards.patch
new file mode 100644 (file)
index 0000000..c765695
--- /dev/null
@@ -0,0 +1,206 @@
+From 092a98a63f0b18de9e15e60b0a6a608b4da7c341 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Mar 2020 16:01:08 +0200
+Subject: dm clone: Fix handling of partial region discards
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+[ Upstream commit 4b5142905d4ff58a4b93f7c8eaa7ba829c0a53c9 ]
+
+There is a bug in the way dm-clone handles discards, which can lead to
+discarding the wrong blocks or trying to discard blocks beyond the end
+of the device.
+
+This could lead to data corruption, if the destination device indeed
+discards the underlying blocks, i.e., if the discard operation results
+in the original contents of a block being lost.
+
+The root of the problem is the code that calculates the range of regions
+covered by a discard request and decides which regions to discard.
+
+Since dm-clone handles the device in units of regions, we don't discard
+parts of a region, only whole regions.
+
+The range is calculated as:
+
+    rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
+    re = bio_end_sector(bio) >> clone->region_shift;
+
+, where 'rs' is the first region to discard and (re - rs) is the number
+of regions to discard.
+
+The bug manifests when we try to discard part of a single region, i.e.,
+when we try to discard a block with size < region_size, and the discard
+request both starts at an offset with respect to the beginning of that
+region and ends before the end of the region.
+
+The root cause is the following comparison:
+
+  if (rs == re)
+    // skip discard and complete original bio immediately
+
+, which doesn't take into account that 'rs' might be greater than 're'.
+
+Thus, we end up issuing a discard request for the wrong blocks,
+instead of skipping the discard altogether.
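+
+For example, with a region size of 8 sectors (region_shift == 3), a
+4-sector discard starting at sector 2 lies entirely inside region 0,
+yet:
+
+    rs = dm_sector_div_up(2, 8) = 1
+    re = bio_end_sector(bio) >> region_shift = 6 >> 3 = 0
+
+so 'rs' is greater than 're', the 'rs == re' check doesn't fire, and
+the computed number of regions (re - rs) underflows.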
+
+Fix the check to also take into account the above case, so we don't end
+up discarding the wrong blocks.
+
+Also, add some range checks to dm_clone_set_region_hydrated() and
+dm_clone_cond_set_range(), which update dm-clone's region bitmap.
+
+Note that the aforementioned bug doesn't cause invalid memory accesses,
+because dm_clone_is_range_hydrated() returns True for this case, so the
+checks are just precautionary.
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-clone-metadata.c | 13 ++++++++++
+ drivers/md/dm-clone-target.c   | 43 +++++++++++++++++++++++-----------
+ 2 files changed, 42 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
+index 581d11250333a..17712456fa634 100644
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -850,6 +850,12 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
+       struct dirty_map *dmap;
+       unsigned long word, flags;
++      if (unlikely(region_nr >= cmd->nr_regions)) {
++              DMERR("Region %lu out of range (total number of regions %lu)",
++                    region_nr, cmd->nr_regions);
++              return -ERANGE;
++      }
++
+       word = region_nr / BITS_PER_LONG;
+       spin_lock_irqsave(&cmd->bitmap_lock, flags);
+@@ -879,6 +885,13 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
+       struct dirty_map *dmap;
+       unsigned long word, region_nr;
++      if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start ||
++                   (start + nr_regions) > cmd->nr_regions)) {
++              DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)",
++                    start, nr_regions, cmd->nr_regions);
++              return -ERANGE;
++      }
++
+       spin_lock_irq(&cmd->bitmap_lock);
+       if (cmd->read_only) {
+diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
+index ad5dca5d20707..315d3bca59792 100644
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -293,10 +293,17 @@ static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
+ /* Get the region range covered by the bio */
+ static void bio_region_range(struct clone *clone, struct bio *bio,
+-                           unsigned long *rs, unsigned long *re)
++                           unsigned long *rs, unsigned long *nr_regions)
+ {
++      unsigned long end;
++
+       *rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
+-      *re = bio_end_sector(bio) >> clone->region_shift;
++      end = bio_end_sector(bio) >> clone->region_shift;
++
++      if (*rs >= end)
++              *nr_regions = 0;
++      else
++              *nr_regions = end - *rs;
+ }
+ /* Check whether a bio overwrites a region */
+@@ -454,7 +461,7 @@ static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
+ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
+ {
+-      unsigned long rs, re;
++      unsigned long rs, nr_regions;
+       /*
+        * If the destination device supports discards, remap and trim the
+@@ -463,9 +470,9 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
+        */
+       if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
+               remap_to_dest(clone, bio);
+-              bio_region_range(clone, bio, &rs, &re);
++              bio_region_range(clone, bio, &rs, &nr_regions);
+               trim_bio(bio, rs << clone->region_shift,
+-                       (re - rs) << clone->region_shift);
++                       nr_regions << clone->region_shift);
+               generic_make_request(bio);
+       } else
+               bio_endio(bio);
+@@ -473,12 +480,21 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
+ static void process_discard_bio(struct clone *clone, struct bio *bio)
+ {
+-      unsigned long rs, re;
++      unsigned long rs, nr_regions;
+-      bio_region_range(clone, bio, &rs, &re);
+-      BUG_ON(re > clone->nr_regions);
++      bio_region_range(clone, bio, &rs, &nr_regions);
++      if (!nr_regions) {
++              bio_endio(bio);
++              return;
++      }
+-      if (unlikely(rs == re)) {
++      if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
++                  (rs + nr_regions) > clone->nr_regions)) {
++              DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
++                    clone_device_name(clone), rs, nr_regions,
++                    clone->nr_regions,
++                    (unsigned long long)bio->bi_iter.bi_sector,
++                    bio_sectors(bio));
+               bio_endio(bio);
+               return;
+       }
+@@ -487,7 +503,7 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
+        * The covered regions are already hydrated so we just need to pass
+        * down the discard.
+        */
+-      if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) {
++      if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
+               complete_discard_bio(clone, bio, true);
+               return;
+       }
+@@ -1165,7 +1181,7 @@ static void process_deferred_discards(struct clone *clone)
+       int r = -EPERM;
+       struct bio *bio;
+       struct blk_plug plug;
+-      unsigned long rs, re;
++      unsigned long rs, nr_regions;
+       struct bio_list discards = BIO_EMPTY_LIST;
+       spin_lock_irq(&clone->lock);
+@@ -1181,14 +1197,13 @@ static void process_deferred_discards(struct clone *clone)
+       /* Update the metadata */
+       bio_list_for_each(bio, &discards) {
+-              bio_region_range(clone, bio, &rs, &re);
++              bio_region_range(clone, bio, &rs, &nr_regions);
+               /*
+                * A discard request might cover regions that have been already
+                * hydrated. There is no need to update the metadata for these
+                * regions.
+                */
+-              r = dm_clone_cond_set_range(clone->cmd, rs, re - rs);
+-
++              r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
+               if (unlikely(r))
+                       break;
+       }
+-- 
+2.20.1
+
diff --git a/queue-5.4/dm-clone-replace-spin_lock_irqsave-with-spin_lock_ir.patch b/queue-5.4/dm-clone-replace-spin_lock_irqsave-with-spin_lock_ir.patch
new file mode 100644 (file)
index 0000000..d9b61f3
--- /dev/null
@@ -0,0 +1,245 @@
+From 4465861b083f2fa0fc311b198a3c4b0c1edd5f7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2019 10:17:37 -0400
+Subject: dm clone: replace spin_lock_irqsave with spin_lock_irq
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit 6ca43ed8376a51afec790dd484a51804ade4352a ]
+
+If we are in a place where it is known that interrupts are enabled,
+functions spin_lock_irq/spin_unlock_irq should be used instead of
+spin_lock_irqsave/spin_unlock_irqrestore.
+
+spin_lock_irq and spin_unlock_irq are faster because they don't need to
+push and pop the flags register.
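+
+The conversion pattern applied throughout this patch, in outline:
+
+    unsigned long flags;
+
+    spin_lock_irqsave(&lock, flags);      /* saves the IRQ state */
+    /* ... critical section ... */
+    spin_unlock_irqrestore(&lock, flags); /* restores the IRQ state */
+
+becomes, in contexts where interrupts are known to be enabled:
+
+    spin_lock_irq(&lock);                 /* just disables IRQs */
+    /* ... critical section ... */
+    spin_unlock_irq(&lock);               /* just re-enables IRQs */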
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-clone-metadata.c | 29 ++++++++++++-----------------
+ drivers/md/dm-clone-metadata.h |  4 +++-
+ drivers/md/dm-clone-target.c   | 28 ++++++++++++----------------
+ 3 files changed, 27 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
+index 2460cc6e0ef1d..581d11250333a 100644
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -748,7 +748,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd)
+ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
+ {
+       int r;
+-      unsigned long word, flags;
++      unsigned long word;
+       word = 0;
+       do {
+@@ -772,9 +772,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
+               return r;
+       /* Update the changed flag */
+-      spin_lock_irqsave(&cmd->bitmap_lock, flags);
++      spin_lock_irq(&cmd->bitmap_lock);
+       dmap->changed = 0;
+-      spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
++      spin_unlock_irq(&cmd->bitmap_lock);
+       return 0;
+ }
+@@ -782,7 +782,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
+ int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
+ {
+       int r = 0;
+-      unsigned long flags;
+       struct dirty_map *dmap, *next_dmap;
+       down_write(&cmd->lock);
+@@ -808,9 +807,9 @@ int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
+       }
+       /* Swap dirty bitmaps */
+-      spin_lock_irqsave(&cmd->bitmap_lock, flags);
++      spin_lock_irq(&cmd->bitmap_lock);
+       cmd->current_dmap = next_dmap;
+-      spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
++      spin_unlock_irq(&cmd->bitmap_lock);
+       /* Set old dirty bitmap as currently committing */
+       cmd->committing_dmap = dmap;
+@@ -878,9 +877,9 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
+ {
+       int r = 0;
+       struct dirty_map *dmap;
+-      unsigned long word, region_nr, flags;
++      unsigned long word, region_nr;
+-      spin_lock_irqsave(&cmd->bitmap_lock, flags);
++      spin_lock_irq(&cmd->bitmap_lock);
+       if (cmd->read_only) {
+               r = -EPERM;
+@@ -898,7 +897,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
+               }
+       }
+ out:
+-      spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
++      spin_unlock_irq(&cmd->bitmap_lock);
+       return r;
+ }
+@@ -965,13 +964,11 @@ int dm_clone_metadata_abort(struct dm_clone_metadata *cmd)
+ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
+ {
+-      unsigned long flags;
+-
+       down_write(&cmd->lock);
+-      spin_lock_irqsave(&cmd->bitmap_lock, flags);
++      spin_lock_irq(&cmd->bitmap_lock);
+       cmd->read_only = 1;
+-      spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
++      spin_unlock_irq(&cmd->bitmap_lock);
+       if (!cmd->fail_io)
+               dm_bm_set_read_only(cmd->bm);
+@@ -981,13 +978,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
+ void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
+ {
+-      unsigned long flags;
+-
+       down_write(&cmd->lock);
+-      spin_lock_irqsave(&cmd->bitmap_lock, flags);
++      spin_lock_irq(&cmd->bitmap_lock);
+       cmd->read_only = 0;
+-      spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
++      spin_unlock_irq(&cmd->bitmap_lock);
+       if (!cmd->fail_io)
+               dm_bm_set_read_write(cmd->bm);
+diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
+index 6a217f5ea98c0..d848b8799c07d 100644
+--- a/drivers/md/dm-clone-metadata.h
++++ b/drivers/md/dm-clone-metadata.h
+@@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
+  * @start: Starting region number
+  * @nr_regions: Number of regions in the range
+  *
+- * This function doesn't block, so it's safe to call it from interrupt context.
++ * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq()
++ * it's NOT safe to call it from any context where interrupts are disabled, e.g.,
++ * from interrupt context.
+  */
+ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
+                           unsigned long nr_regions);
+diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
+index 2addf611ef3d8..ad5dca5d20707 100644
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -338,8 +338,6 @@ static void submit_bios(struct bio_list *bios)
+  */
+ static void issue_bio(struct clone *clone, struct bio *bio)
+ {
+-      unsigned long flags;
+-
+       if (!bio_triggers_commit(clone, bio)) {
+               generic_make_request(bio);
+               return;
+@@ -358,9 +356,9 @@ static void issue_bio(struct clone *clone, struct bio *bio)
+        * Batch together any bios that trigger commits and then issue a single
+        * commit for them in process_deferred_flush_bios().
+        */
+-      spin_lock_irqsave(&clone->lock, flags);
++      spin_lock_irq(&clone->lock);
+       bio_list_add(&clone->deferred_flush_bios, bio);
+-      spin_unlock_irqrestore(&clone->lock, flags);
++      spin_unlock_irq(&clone->lock);
+       wake_worker(clone);
+ }
+@@ -475,7 +473,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
+ static void process_discard_bio(struct clone *clone, struct bio *bio)
+ {
+-      unsigned long rs, re, flags;
++      unsigned long rs, re;
+       bio_region_range(clone, bio, &rs, &re);
+       BUG_ON(re > clone->nr_regions);
+@@ -507,9 +505,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
+       /*
+        * Defer discard processing.
+        */
+-      spin_lock_irqsave(&clone->lock, flags);
++      spin_lock_irq(&clone->lock);
+       bio_list_add(&clone->deferred_discard_bios, bio);
+-      spin_unlock_irqrestore(&clone->lock, flags);
++      spin_unlock_irq(&clone->lock);
+       wake_worker(clone);
+ }
+@@ -1167,13 +1165,13 @@ static void process_deferred_discards(struct clone *clone)
+       int r = -EPERM;
+       struct bio *bio;
+       struct blk_plug plug;
+-      unsigned long rs, re, flags;
++      unsigned long rs, re;
+       struct bio_list discards = BIO_EMPTY_LIST;
+-      spin_lock_irqsave(&clone->lock, flags);
++      spin_lock_irq(&clone->lock);
+       bio_list_merge(&discards, &clone->deferred_discard_bios);
+       bio_list_init(&clone->deferred_discard_bios);
+-      spin_unlock_irqrestore(&clone->lock, flags);
++      spin_unlock_irq(&clone->lock);
+       if (bio_list_empty(&discards))
+               return;
+@@ -1203,13 +1201,12 @@ static void process_deferred_discards(struct clone *clone)
+ static void process_deferred_bios(struct clone *clone)
+ {
+-      unsigned long flags;
+       struct bio_list bios = BIO_EMPTY_LIST;
+-      spin_lock_irqsave(&clone->lock, flags);
++      spin_lock_irq(&clone->lock);
+       bio_list_merge(&bios, &clone->deferred_bios);
+       bio_list_init(&clone->deferred_bios);
+-      spin_unlock_irqrestore(&clone->lock, flags);
++      spin_unlock_irq(&clone->lock);
+       if (bio_list_empty(&bios))
+               return;
+@@ -1220,7 +1217,6 @@ static void process_deferred_bios(struct clone *clone)
+ static void process_deferred_flush_bios(struct clone *clone)
+ {
+       struct bio *bio;
+-      unsigned long flags;
+       bool dest_dev_flushed;
+       struct bio_list bios = BIO_EMPTY_LIST;
+       struct bio_list bio_completions = BIO_EMPTY_LIST;
+@@ -1229,13 +1225,13 @@ static void process_deferred_flush_bios(struct clone *clone)
+        * If there are any deferred flush bios, we must commit the metadata
+        * before issuing them or signaling their completion.
+        */
+-      spin_lock_irqsave(&clone->lock, flags);
++      spin_lock_irq(&clone->lock);
+       bio_list_merge(&bios, &clone->deferred_flush_bios);
+       bio_list_init(&clone->deferred_flush_bios);
+       bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
+       bio_list_init(&clone->deferred_flush_completions);
+-      spin_unlock_irqrestore(&clone->lock, flags);
++      spin_unlock_irq(&clone->lock);
+       if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
+           !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
+-- 
+2.20.1
+
diff --git a/queue-5.4/dm-zoned-remove-duplicate-nr_rnd_zones-increase-in-d.patch b/queue-5.4/dm-zoned-remove-duplicate-nr_rnd_zones-increase-in-d.patch
new file mode 100644 (file)
index 0000000..ab3e1fa
--- /dev/null
@@ -0,0 +1,41 @@
+From 951cc9bbec9960c10c2d8ec5732f338ec3a1b5ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 21:22:45 +0800
+Subject: dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
+
+From: Bob Liu <bob.liu@oracle.com>
+
+[ Upstream commit b8fdd090376a7a46d17db316638fe54b965c2fb0 ]
+
+zmd->nr_rnd_zones was increased twice by mistake. The other increase,
+in dmz_init_zone(), is the only one needed:
+
+1131                 zmd->nr_useable_zones++;
+1132                 if (dmz_is_rnd(zone)) {
+1133                         zmd->nr_rnd_zones++;
+                                       ^^^
+Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bob Liu <bob.liu@oracle.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-zoned-metadata.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index 5205cf9bbfd92..e0a6cf9239f1c 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1107,7 +1107,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
+       if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+               set_bit(DMZ_RND, &zone->flags);
+-              zmd->nr_rnd_zones++;
+       } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
+                  blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
+               set_bit(DMZ_SEQ, &zone->flags);
+-- 
+2.20.1
+
diff --git a/queue-5.4/drm-amdgpu-fix-gfx-hang-during-suspend-with-video-pl.patch b/queue-5.4/drm-amdgpu-fix-gfx-hang-during-suspend-with-video-pl.patch
new file mode 100644 (file)
index 0000000..ad894f6
--- /dev/null
@@ -0,0 +1,52 @@
+From 35c573431be64785693392145b40596041f5b9f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2020 20:21:26 +0800
+Subject: drm/amdgpu: fix gfx hang during suspend with video playback (v2)
+
+From: Prike Liang <Prike.Liang@amd.com>
+
+[ Upstream commit 487eca11a321ef33bcf4ca5adb3c0c4954db1b58 ]
+
+The system hangs during S3 suspend because the SMU is stuck waiting
+for the GC to respond to a CP_HQD_ACTIVE register access request. The
+root cause is accessing that GC register after GFX CGPG has been
+entered; fix it by disabling GFX CGPG before performing the suspend.
+
+v2: Disable GFX CGPG instead of using the RLC safe mode guard.
+
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+Tested-by: Mengbing Wang <Mengbing.Wang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 13694d5eba474..f423b53847051 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2176,8 +2176,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+       int i, r;
+-      amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+-      amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+       for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+               if (!adev->ip_blocks[i].status.valid)
+@@ -3070,6 +3068,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+               }
+       }
++      amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
++      amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
++
+       amdgpu_amdkfd_suspend(adev);
+       amdgpu_ras_suspend(adev);
+-- 
+2.20.1
+
diff --git a/queue-5.4/drm-dp_mst-fix-clearing-payload-state-on-topology-di.patch b/queue-5.4/drm-dp_mst-fix-clearing-payload-state-on-topology-di.patch
new file mode 100644 (file)
index 0000000..bfdf5d2
--- /dev/null
@@ -0,0 +1,94 @@
+From 7fb9e469eed2479a11766a2415cc26b6b028a15b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2020 14:43:20 -0500
+Subject: drm/dp_mst: Fix clearing payload state on topology disable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lyude Paul <lyude@redhat.com>
+
+[ Upstream commit 8732fe46b20c951493bfc4dba0ad08efdf41de81 ]
+
+The issues caused by:
+
+commit 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology
+mgr")
+
+Prompted me to take a closer look at how we clear the payload state in
+general when disabling the topology, and it turns out there's actually
+two subtle issues here.
+
+The first is that we're not grabbing &mgr.payload_lock when clearing the
+payloads in drm_dp_mst_topology_mgr_set_mst(). Seeing as the canonical
+lock order is &mgr.payload_lock -> &mgr.lock (because we always want
+&mgr.lock to be the inner-most lock so topology validation always
+works), this makes perfect sense. It also means that -technically- there
+could be racing between someone calling
+drm_dp_mst_topology_mgr_set_mst() to disable the topology, along with a
+modeset occurring that's modifying the payload state at the same time.
+
+The second is the more obvious issue that Wayne Lin discovered, that
+we're not clearing proposed_payloads when disabling the topology.
+
+I actually can't see any obvious places where the racing caused by the
+first issue would break something, and it could be that some of our
+higher-level locks already prevent this by happenstance, but better safe
+than sorry. So, let's make it so that drm_dp_mst_topology_mgr_set_mst()
+first grabs &mgr.payload_lock followed by &mgr.lock so that we never
+race when modifying the payload state. Then, we also clear
+proposed_payloads to fix the original issue of enabling a new topology
+with a dirty payload state. This doesn't clear any of the drm_dp_vcpi
+structures, but those are getting destroyed along with the ports anyway.
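+
+The lock nesting this patch establishes in
+drm_dp_mst_topology_mgr_set_mst(), in outline:
+
+    mutex_lock(&mgr->payload_lock);  /* outer: payload state */
+    mutex_lock(&mgr->lock);          /* inner-most: topology */
+    /* ... modify payload/topology state ... */
+    mutex_unlock(&mgr->lock);
+    mutex_unlock(&mgr->payload_lock);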
+
+Changes since v1:
+* Use sizeof(mgr->payloads[0])/sizeof(mgr->proposed_vcpis[0]) instead -
+  vsyrjala
+
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200122194321.14953-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index a48a4c21b1b38..4b7aaad074233 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2696,6 +2696,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+       int ret = 0;
+       struct drm_dp_mst_branch *mstb = NULL;
++      mutex_lock(&mgr->payload_lock);
+       mutex_lock(&mgr->lock);
+       if (mst_state == mgr->mst_state)
+               goto out_unlock;
+@@ -2754,7 +2755,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+               /* this can fail if the device is gone */
+               drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+               ret = 0;
+-              memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
++              memset(mgr->payloads, 0,
++                     mgr->max_payloads * sizeof(mgr->payloads[0]));
++              memset(mgr->proposed_vcpis, 0,
++                     mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
+               mgr->payload_mask = 0;
+               set_bit(0, &mgr->payload_mask);
+               mgr->vcpi_mask = 0;
+@@ -2762,6 +2766,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ out_unlock:
+       mutex_unlock(&mgr->lock);
++      mutex_unlock(&mgr->payload_lock);
+       if (mstb)
+               drm_dp_mst_topology_put_mstb(mstb);
+       return ret;
+-- 
+2.20.1
+
diff --git a/queue-5.4/drm-i915-icl-don-t-enable-ddi-io-power-on-a-typec-po.patch b/queue-5.4/drm-i915-icl-don-t-enable-ddi-io-power-on-a-typec-po.patch
new file mode 100644 (file)
index 0000000..7dbf0c7
--- /dev/null
@@ -0,0 +1,49 @@
+From 37190010cac966d1e181ffab327c00d9a96b1440 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2020 18:22:44 +0300
+Subject: drm/i915/icl+: Don't enable DDI IO power on a TypeC port in TBT mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+The DDI IO power well must not be enabled for a TypeC port in TBT mode,
+ensure this during driver loading/system resume.
+
+This gets rid of error messages like
+[drm] *ERROR* power well DDI E TC2 IO state mismatch (refcount 1/enabled 0)
+
+and avoids leaking the power ref when disabling the output.
+
+Cc: <stable@vger.kernel.org> # v5.4+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200330152244.11316-1-imre.deak@intel.com
+(cherry picked from commit f77a2db27f26c3ccba0681f7e89fef083718f07f)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_ddi.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 8eb2b3ec01edd..b3c77c988d1cd 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -2124,7 +2124,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+               return;
+       dig_port = enc_to_dig_port(&encoder->base);
+-      intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
++
++      if (!intel_phy_is_tc(dev_priv, phy) ||
++          dig_port->tc_mode != TC_PORT_TBT_ALT)
++              intel_display_power_get(dev_priv,
++                                      dig_port->ddi_io_power_domain);
+       /*
+        * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+-- 
+2.20.1
+
diff --git a/queue-5.4/mmc-sdhci-convert-sdhci_set_timeout_irq-to-non-stati.patch b/queue-5.4/mmc-sdhci-convert-sdhci_set_timeout_irq-to-non-stati.patch
new file mode 100644 (file)
index 0000000..df0be85
--- /dev/null
@@ -0,0 +1,56 @@
+From f875fe3874e6cf5575a934e3bb8de40cecd1901f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jan 2020 16:21:49 +0530
+Subject: mmc: sdhci: Convert sdhci_set_timeout_irq() to non-static
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+[ Upstream commit 7907ebe741a7f14ed12889ebe770438a4ff47613 ]
+
+Export sdhci_set_data_timeout_irq() so that it is accessible from
+platform drivers.
+
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20200116105154.7685-6-faiz_abbas@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci.c | 3 ++-
+ drivers/mmc/host/sdhci.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 4478b94d47915..4c40fd4ba21b1 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -981,7 +981,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
+       sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
++void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+ {
+       if (enable)
+               host->ier |= SDHCI_INT_DATA_TIMEOUT;
+@@ -990,6 +990,7 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+       sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+       sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
+ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index fe83ece6965b1..4613d71b3cd6e 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -795,5 +795,6 @@ void sdhci_end_tuning(struct sdhci_host *host);
+ void sdhci_reset_tuning(struct sdhci_host *host);
+ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
+ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
++void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+ #endif /* __SDHCI_HW_H */
+-- 
+2.20.1
+
diff --git a/queue-5.4/mmc-sdhci-refactor-sdhci_set_timeout.patch b/queue-5.4/mmc-sdhci-refactor-sdhci_set_timeout.patch
new file mode 100644 (file)
index 0000000..69e2faf
--- /dev/null
@@ -0,0 +1,89 @@
+From 3bc30206161348240b7d2827248f05fac43d9ca6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jan 2020 16:21:50 +0530
+Subject: mmc: sdhci: Refactor sdhci_set_timeout()
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+[ Upstream commit 7d76ed77cfbd39468ae58d419f537d35ca892d83 ]
+
+Refactor sdhci_set_timeout() such that platform drivers can perform
+their own handling in a set_timeout() callback and then call
+__sdhci_set_timeout() to complete the operation.
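+
+A platform driver can then hook in along these lines (a sketch only;
+the driver and callback names are hypothetical):
+
+    static void my_sdhci_set_timeout(struct sdhci_host *host,
+                                     struct mmc_command *cmd)
+    {
+            /* platform-specific timeout handling first ... */
+
+            /* ... then finish with the generic path */
+            __sdhci_set_timeout(host, cmd);
+    }
+
+wired up via the .set_timeout member of its sdhci_ops.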
+
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20200116105154.7685-7-faiz_abbas@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci.c | 38 ++++++++++++++++++++------------------
+ drivers/mmc/host/sdhci.h |  1 +
+ 2 files changed, 21 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 4c40fd4ba21b1..50514fedbc76f 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -992,27 +992,29 @@ void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+ }
+ EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
+-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
++void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ {
+-      u8 count;
+-
+-      if (host->ops->set_timeout) {
+-              host->ops->set_timeout(host, cmd);
+-      } else {
+-              bool too_big = false;
+-
+-              count = sdhci_calc_timeout(host, cmd, &too_big);
++      bool too_big = false;
++      u8 count = sdhci_calc_timeout(host, cmd, &too_big);
++
++      if (too_big &&
++          host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
++              sdhci_calc_sw_timeout(host, cmd);
++              sdhci_set_data_timeout_irq(host, false);
++      } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
++              sdhci_set_data_timeout_irq(host, true);
++      }
+-              if (too_big &&
+-                  host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+-                      sdhci_calc_sw_timeout(host, cmd);
+-                      sdhci_set_data_timeout_irq(host, false);
+-              } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+-                      sdhci_set_data_timeout_irq(host, true);
+-              }
++      sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
++}
++EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
+-              sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+-      }
++static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
++{
++      if (host->ops->set_timeout)
++              host->ops->set_timeout(host, cmd);
++      else
++              __sdhci_set_timeout(host, cmd);
+ }
+ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 4613d71b3cd6e..76e69288632db 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -796,5 +796,6 @@ void sdhci_reset_tuning(struct sdhci_host *host);
+ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
+ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+ void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
++void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
+ #endif /* __SDHCI_HW_H */
+-- 
+2.20.1
+
diff --git a/queue-5.4/powerpc-kasan-fix-kasan_remap_early_shadow_ro.patch b/queue-5.4/powerpc-kasan-fix-kasan_remap_early_shadow_ro.patch
new file mode 100644 (file)
index 0000000..4eed205
--- /dev/null
@@ -0,0 +1,41 @@
+From 61662102c94d7670eac425c769e98ac60f9b3aad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2020 15:09:40 +0000
+Subject: powerpc/kasan: Fix kasan_remap_early_shadow_ro()
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+[ Upstream commit af92bad615be75c6c0d1b1c5b48178360250a187 ]
+
+At the moment kasan_remap_early_shadow_ro() does nothing, because
+k_end is 0 and k_cur < k_end is always false.
+
+Change the test to k_cur != k_end, as done in
+kasan_init_shadow_page_tables().
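+
+Because k_end is 0, the unsigned comparison 'k_cur < k_end' can never
+be true and the loop body never runs; 'k_cur != k_end' instead
+terminates correctly once k_cur wraps around to 0 past the last page.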
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Fixes: cbd18991e24f ("powerpc/mm: Fix an Oops in kasan_mmu_init()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/4e7b56865e01569058914c991143f5961b5d4719.1583507333.git.christophe.leroy@c-s.fr
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/mm/kasan/kasan_init_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
+index 0e6ed4413eeac..1cfe57b51d7e3 100644
+--- a/arch/powerpc/mm/kasan/kasan_init_32.c
++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
+@@ -117,7 +117,7 @@ static void __init kasan_remap_early_shadow_ro(void)
+       kasan_populate_pte(kasan_early_shadow_pte, prot);
+-      for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
++      for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) {
+               pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+               pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+-- 
+2.20.1
+
diff --git a/queue-5.4/revert-drm-dp_mst-remove-vcpi-while-disabling-topolo.patch b/queue-5.4/revert-drm-dp_mst-remove-vcpi-while-disabling-topolo.patch
new file mode 100644 (file)
index 0000000..9f81770
--- /dev/null
@@ -0,0 +1,173 @@
+From 0432ffd6af118de5edd59df28a223ead97ffe9dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Apr 2020 22:08:22 -0400
+Subject: Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
+
+[ Upstream commit a86675968e2300fb567994459da3dbc4cd1b322a ]
+
+This reverts commit 64e62bdf04ab8529f45ed0a85122c703035dec3a.
+
+This commit ends up causing some lockdep splats due to trying to grab the
+payload lock while holding the mgr's lock:
+
+[   54.010099]
+[   54.011765] ======================================================
+[   54.018670] WARNING: possible circular locking dependency detected
+[   54.025577] 5.5.0-rc6-02274-g77381c23ee63 #47 Not tainted
+[   54.031610] ------------------------------------------------------
+[   54.038516] kworker/1:6/1040 is trying to acquire lock:
+[   54.044354] ffff888272af3228 (&mgr->payload_lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[   54.054957]
+[   54.054957] but task is already holding lock:
+[   54.061473] ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[   54.071193]
+[   54.071193] which lock already depends on the new lock.
+[   54.071193]
+[   54.080334]
+[   54.080334] the existing dependency chain (in reverse order) is:
+[   54.088697]
+[   54.088697] -> #1 (&mgr->lock){+.+.}:
+[   54.094440]        __mutex_lock+0xc3/0x498
+[   54.099015]        drm_dp_mst_topology_get_port_validated+0x25/0x80
+[   54.106018]        drm_dp_update_payload_part1+0xa2/0x2e2
+[   54.112051]        intel_mst_pre_enable_dp+0x144/0x18f
+[   54.117791]        intel_encoders_pre_enable+0x63/0x70
+[   54.123532]        hsw_crtc_enable+0xa1/0x722
+[   54.128396]        intel_update_crtc+0x50/0x194
+[   54.133455]        skl_commit_modeset_enables+0x40c/0x540
+[   54.139485]        intel_atomic_commit_tail+0x5f7/0x130d
+[   54.145418]        intel_atomic_commit+0x2c8/0x2d8
+[   54.150770]        drm_atomic_helper_set_config+0x5a/0x70
+[   54.156801]        drm_mode_setcrtc+0x2ab/0x833
+[   54.161862]        drm_ioctl+0x2e5/0x424
+[   54.166242]        vfs_ioctl+0x21/0x2f
+[   54.170426]        do_vfs_ioctl+0x5fb/0x61e
+[   54.175096]        ksys_ioctl+0x55/0x75
+[   54.179377]        __x64_sys_ioctl+0x1a/0x1e
+[   54.184146]        do_syscall_64+0x5c/0x6d
+[   54.188721]        entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[   54.194946]
+[   54.194946] -> #0 (&mgr->payload_lock){+.+.}:
+[   54.201463]
+[   54.201463] other info that might help us debug this:
+[   54.201463]
+[   54.210410]  Possible unsafe locking scenario:
+[   54.210410]
+[   54.217025]        CPU0                    CPU1
+[   54.222082]        ----                    ----
+[   54.227138]   lock(&mgr->lock);
+[   54.230643]                                lock(&mgr->payload_lock);
+[   54.237742]                                lock(&mgr->lock);
+[   54.244062]   lock(&mgr->payload_lock);
+[   54.248346]
+[   54.248346]  *** DEADLOCK ***
+[   54.248346]
+[   54.254959] 7 locks held by kworker/1:6/1040:
+[   54.259822]  #0: ffff888275c4f528 ((wq_completion)events){+.+.},
+at: worker_thread+0x455/0x6e2
+[   54.269451]  #1: ffffc9000119beb0
+((work_completion)(&(&dev_priv->hotplug.hotplug_work)->work)){+.+.},
+at: worker_thread+0x455/0x6e2
+[   54.282768]  #2: ffff888272a403f0 (&dev->mode_config.mutex){+.+.},
+at: i915_hotplug_work_func+0x4b/0x2be
+[   54.293368]  #3: ffffffff824fc6c0 (drm_connector_list_iter){.+.+},
+at: i915_hotplug_work_func+0x17e/0x2be
+[   54.304061]  #4: ffffc9000119bc58 (crtc_ww_class_acquire){+.+.},
+at: drm_helper_probe_detect_ctx+0x40/0xfd
+[   54.314855]  #5: ffff888272a40470 (crtc_ww_class_mutex){+.+.}, at:
+drm_modeset_lock+0x74/0xe2
+[   54.324385]  #6: ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[   54.334597]
+[   54.334597] stack backtrace:
+[   54.339464] CPU: 1 PID: 1040 Comm: kworker/1:6 Not tainted
+5.5.0-rc6-02274-g77381c23ee63 #47
+[   54.348893] Hardware name: Google Fizz/Fizz, BIOS
+Google_Fizz.10139.39.0 01/04/2018
+[   54.357451] Workqueue: events i915_hotplug_work_func
+[   54.362995] Call Trace:
+[   54.365724]  dump_stack+0x71/0x9c
+[   54.369427]  check_noncircular+0x91/0xbc
+[   54.373809]  ? __lock_acquire+0xc9e/0xf66
+[   54.378286]  ? __lock_acquire+0xc9e/0xf66
+[   54.382763]  ? lock_acquire+0x175/0x1ac
+[   54.387048]  ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[   54.393177]  ? __mutex_lock+0xc3/0x498
+[   54.397362]  ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[   54.403492]  ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[   54.409620]  ? drm_dp_dpcd_access+0xd9/0x101
+[   54.414390]  ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[   54.420517]  ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[   54.426645]  ? intel_digital_port_connected+0x34d/0x35c
+[   54.432482]  ? intel_dp_detect+0x227/0x44e
+[   54.437056]  ? ww_mutex_lock+0x49/0x9a
+[   54.441242]  ? drm_helper_probe_detect_ctx+0x75/0xfd
+[   54.446789]  ? intel_encoder_hotplug+0x4b/0x97
+[   54.451752]  ? intel_ddi_hotplug+0x61/0x2e0
+[   54.456423]  ? mark_held_locks+0x53/0x68
+[   54.460803]  ? _raw_spin_unlock_irqrestore+0x3a/0x51
+[   54.466347]  ? lockdep_hardirqs_on+0x187/0x1a4
+[   54.471310]  ? drm_connector_list_iter_next+0x89/0x9a
+[   54.476953]  ? i915_hotplug_work_func+0x206/0x2be
+[   54.482208]  ? worker_thread+0x4d5/0x6e2
+[   54.486587]  ? worker_thread+0x455/0x6e2
+[   54.490966]  ? queue_work_on+0x64/0x64
+[   54.495151]  ? kthread+0x1e9/0x1f1
+[   54.498946]  ? queue_work_on+0x64/0x64
+[   54.503130]  ? kthread_unpark+0x5e/0x5e
+[   54.507413]  ? ret_from_fork+0x3a/0x50
+
+The proper fix for this is probably to clean up the VCPI allocations
+when we're enabling the topology, or on the first payload allocation.
+For now though, let's just revert.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology mgr")
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Reviewed-by: Sean Paul <sean@poorly.run>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200117205149.97262-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index c5e9e2305fffc..a48a4c21b1b38 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2694,7 +2694,6 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
+ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+ {
+       int ret = 0;
+-      int i = 0;
+       struct drm_dp_mst_branch *mstb = NULL;
+       mutex_lock(&mgr->lock);
+@@ -2755,21 +2754,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+               /* this can fail if the device is gone */
+               drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+               ret = 0;
+-              mutex_lock(&mgr->payload_lock);
+               memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+               mgr->payload_mask = 0;
+               set_bit(0, &mgr->payload_mask);
+-              for (i = 0; i < mgr->max_payloads; i++) {
+-                      struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+-
+-                      if (vcpi) {
+-                              vcpi->vcpi = 0;
+-                              vcpi->num_slots = 0;
+-                      }
+-                      mgr->proposed_vcpis[i] = NULL;
+-              }
+               mgr->vcpi_mask = 0;
+-              mutex_unlock(&mgr->payload_lock);
+       }
+ out_unlock:
+-- 
+2.20.1
+
diff --git a/queue-5.4/scsi-lpfc-add-registration-for-cpu-offline-online-ev.patch b/queue-5.4/scsi-lpfc-add-registration-for-cpu-offline-online-ev.patch
new file mode 100644 (file)
index 0000000..1953e4e
--- /dev/null
@@ -0,0 +1,646 @@
+From 0473e4e020557fe3eb16c3d1cf6e583b1850e2a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Nov 2019 16:57:05 -0800
+Subject: scsi: lpfc: Add registration for CPU Offline/Online events
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit 93a4d6f40198dffcca35d9a928c409f9290f1fe0 ]
+
+The recent affinitization didn't address cpu offlining/onlining.  If an
+interrupt vector is shared and the low order cpu owning the vector is
+offlined then, as interrupts are managed, the vector is taken offline
+too. This causes the other CPUs sharing the vector to hang, as they
+can't get io completions.
+
+Correct by registering callbacks with the system for Offline/Online
+events. When a cpu is taken offline, its eq, which is tied to an
+interrupt vector, is found. If the cpu is the "owner" of the vector and
+if the eq/vector is shared by other CPUs, the eq is placed into polled
+mode.
+Additionally, code paths that perform io submission on the "sharing CPUs"
+will check the eq state and poll for completion after submission of new io
+to a wq that uses the eq.
+
+Similarly, when a cpu comes back online and owns an offlined vector,
+the eq is taken out of polled mode and rearmed to start driving
+interrupts for the eq.
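+
+In outline, the registration uses the multi-instance cpuhp API (the
+dynamic state name here is illustrative; see the full patch for the
+exact setup):
+
+    /* once, at module init time */
+    lpfc_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+                                               "lpfc/sli4:online",
+                                               lpfc_cpu_online,
+                                               lpfc_cpu_offline);
+
+    /* per adapter, as it comes online */
+    cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);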
+
+Link: https://lore.kernel.org/r/20191105005708.7399-9-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc.h      |   7 ++
+ drivers/scsi/lpfc/lpfc_crtn.h |   6 +
+ drivers/scsi/lpfc/lpfc_init.c | 202 +++++++++++++++++++++++++++++++++-
+ drivers/scsi/lpfc/lpfc_sli.c  | 164 ++++++++++++++++++++++++++-
+ drivers/scsi/lpfc/lpfc_sli4.h |  21 +++-
+ 5 files changed, 388 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index 691acbdcc46df..84b0f0ac26e7e 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -1209,6 +1209,13 @@ struct lpfc_hba {
+       uint64_t ktime_seg10_min;
+       uint64_t ktime_seg10_max;
+ #endif
++
++      struct hlist_node cpuhp;        /* used for cpuhp per hba callback */
++      struct timer_list cpuhp_poll_timer;
++      struct list_head poll_list;     /* slowpath eq polling list */
++#define LPFC_POLL_HB  1               /* slowpath heartbeat */
++#define LPFC_POLL_FASTPATH    0       /* called from fastpath */
++#define LPFC_POLL_SLOWPATH    1       /* called from slowpath */
+ };
+ static inline struct Scsi_Host *
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index b2ad8c7504862..9e477d766ce98 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+ irqreturn_t lpfc_sli4_intr_handler(int, void *);
+ irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
++inline void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
++int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
++void lpfc_sli4_poll_hbtimer(struct timer_list *t);
++void lpfc_sli4_start_polling(struct lpfc_queue *q);
++void lpfc_sli4_stop_polling(struct lpfc_queue *q);
++
+ void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index e8813d26e5941..1bf79445c15bf 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -40,6 +40,7 @@
+ #include <linux/irq.h>
+ #include <linux/bitops.h>
+ #include <linux/crash_dump.h>
++#include <linux/cpuhotplug.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_device.h>
+@@ -66,9 +67,13 @@
+ #include "lpfc_version.h"
+ #include "lpfc_ids.h"
++static enum cpuhp_state lpfc_cpuhp_state;
+ /* Used when mapping IRQ vectors in a driver centric manner */
+ static uint32_t lpfc_present_cpu;
++static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
++static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
++static void lpfc_cpuhp_add(struct lpfc_hba *phba);
+ static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+ static int lpfc_post_rcv_buf(struct lpfc_hba *);
+ static int lpfc_sli4_queue_verify(struct lpfc_hba *);
+@@ -3387,6 +3392,8 @@ lpfc_online(struct lpfc_hba *phba)
+       if (phba->cfg_xri_rebalancing)
+               lpfc_create_multixri_pools(phba);
++      lpfc_cpuhp_add(phba);
++
+       lpfc_unblock_mgmt_io(phba);
+       return 0;
+ }
+@@ -3545,6 +3552,7 @@ lpfc_offline(struct lpfc_hba *phba)
+                       spin_unlock_irq(shost->host_lock);
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
++      __lpfc_cpuhp_remove(phba);
+       if (phba->cfg_xri_rebalancing)
+               lpfc_destroy_multixri_pools(phba);
+@@ -9160,6 +9168,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+       }
+       spin_unlock_irq(&phba->hbalock);
++      lpfc_sli4_cleanup_poll_list(phba);
++
+       /* Release HBA eqs */
+       if (phba->sli4_hba.hdwq)
+               lpfc_sli4_release_hdwq(phba);
+@@ -10962,6 +10972,170 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
+       return;
+ }
++/**
++ * lpfc_cpuhp_get_eq
++ *
++ * @phba:   pointer to lpfc hba data structure.
++ * @cpu:    cpu going offline
++ * @eqlist:
++ */
++static void
++lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
++                struct list_head *eqlist)
++{
++      struct lpfc_vector_map_info *map;
++      const struct cpumask *maskp;
++      struct lpfc_queue *eq;
++      unsigned int i;
++      cpumask_t tmp;
++      u16 idx;
++
++      for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
++              maskp = pci_irq_get_affinity(phba->pcidev, idx);
++              if (!maskp)
++                      continue;
++              /*
++               * if irq is not affinitized to the cpu going
++               * then we don't need to poll the eq attached
++               * to it.
++               */
++              if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
++                      continue;
++              /* get the cpus that are online and are affini-
++               * tized to this irq vector.  If the count is
++               * more than 1 then cpuhp is not going to shut-
++               * down this vector.  Since this cpu has not
++               * gone offline yet, we need >1.
++               */
++              cpumask_and(&tmp, maskp, cpu_online_mask);
++              if (cpumask_weight(&tmp) > 1)
++                      continue;
++
++              /* Now that we have an irq to shutdown, get the eq
++               * mapped to this irq.  Note: multiple hdwq's in
++               * the software can share an eq, but eventually
++               * only eq will be mapped to this vector
++               */
++              for_each_possible_cpu(i) {
++                      map = &phba->sli4_hba.cpu_map[i];
++                      if (!(map->irq == pci_irq_vector(phba->pcidev, idx)))
++                              continue;
++                      eq = phba->sli4_hba.hdwq[map->hdwq].hba_eq;
++                      list_add(&eq->_poll_list, eqlist);
++                      /* 1 is good enough. others will be a copy of this */
++                      break;
++              }
++      }
++}
++
++static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
++{
++      if (phba->sli_rev != LPFC_SLI_REV4)
++              return;
++
++      cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
++                                          &phba->cpuhp);
++      /*
++       * Unregistering the instance doesn't stop the polling
++       * timer.  Wait for the poll timer to retire.
++       */
++      synchronize_rcu();
++      del_timer_sync(&phba->cpuhp_poll_timer);
++}
++
++static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
++{
++      if (phba->pport->fc_flag & FC_OFFLINE_MODE)
++              return;
++
++      __lpfc_cpuhp_remove(phba);
++}
++
++static void lpfc_cpuhp_add(struct lpfc_hba *phba)
++{
++      if (phba->sli_rev != LPFC_SLI_REV4)
++              return;
++
++      rcu_read_lock();
++
++      if (!list_empty(&phba->poll_list)) {
++              timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
++              mod_timer(&phba->cpuhp_poll_timer,
++                        jiffies + msecs_to_jiffies(LPFC_POLL_HB));
++      }
++
++      rcu_read_unlock();
++
++      cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
++                                       &phba->cpuhp);
++}
++
++static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
++{
++      if (phba->pport->load_flag & FC_UNLOADING) {
++              *retval = -EAGAIN;
++              return true;
++      }
++
++      if (phba->sli_rev != LPFC_SLI_REV4) {
++              *retval = 0;
++              return true;
++      }
++
++      /* proceed with the hotplug */
++      return false;
++}
++
++static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
++{
++      struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
++      struct lpfc_queue *eq, *next;
++      LIST_HEAD(eqlist);
++      int retval;
++
++      if (!phba) {
++              WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
++              return 0;
++      }
++
++      if (__lpfc_cpuhp_checks(phba, &retval))
++              return retval;
++
++      lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
++
++      /* start polling on these eq's */
++      list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
++              list_del_init(&eq->_poll_list);
++              lpfc_sli4_start_polling(eq);
++      }
++
++      return 0;
++}
++
++static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
++{
++      struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
++      struct lpfc_queue *eq, *next;
++      unsigned int n;
++      int retval;
++
++      if (!phba) {
++              WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
++              return 0;
++      }
++
++      if (__lpfc_cpuhp_checks(phba, &retval))
++              return retval;
++
++      list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
++              n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
++              if (n == cpu)
++                      lpfc_sli4_stop_polling(eq);
++      }
++
++      return 0;
++}
++
+ /**
+  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+  * @phba: pointer to lpfc hba data structure.
+@@ -11367,6 +11541,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+       /* Wait for completion of device XRI exchange busy */
+       lpfc_sli4_xri_exchange_busy_wait(phba);
++      /* per-phba callback de-registration for hotplug event */
++      lpfc_cpuhp_remove(phba);
++
+       /* Disable PCI subsystem interrupt */
+       lpfc_sli4_disable_intr(phba);
+@@ -12632,6 +12809,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
+       /* Enable RAS FW log support */
+       lpfc_sli4_ras_setup(phba);
++      INIT_LIST_HEAD(&phba->poll_list);
++      cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
++
+       return 0;
+ out_free_sysfs_attr:
+@@ -13450,11 +13630,24 @@ lpfc_init(void)
+       /* Initialize in case vector mapping is needed */
+       lpfc_present_cpu = num_present_cpus();
++      error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
++                                      "lpfc/sli4:online",
++                                      lpfc_cpu_online, lpfc_cpu_offline);
++      if (error < 0)
++              goto cpuhp_failure;
++      lpfc_cpuhp_state = error;
++
+       error = pci_register_driver(&lpfc_driver);
+-      if (error) {
+-              fc_release_transport(lpfc_transport_template);
+-              fc_release_transport(lpfc_vport_transport_template);
+-      }
++      if (error)
++              goto unwind;
++
++      return error;
++
++unwind:
++      cpuhp_remove_multi_state(lpfc_cpuhp_state);
++cpuhp_failure:
++      fc_release_transport(lpfc_transport_template);
++      fc_release_transport(lpfc_vport_transport_template);
+       return error;
+ }
+@@ -13471,6 +13664,7 @@ lpfc_exit(void)
+ {
+       misc_deregister(&lpfc_mgmt_dev);
+       pci_unregister_driver(&lpfc_driver);
++      cpuhp_remove_multi_state(lpfc_cpuhp_state);
+       fc_release_transport(lpfc_transport_template);
+       fc_release_transport(lpfc_vport_transport_template);
+       idr_destroy(&lpfc_hba_index);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index e2cec1f6e659b..fb2b0dc52d9bc 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -485,7 +485,8 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+ }
+ static int
+-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
++lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
++                   uint8_t rearm)
+ {
+       struct lpfc_eqe *eqe;
+       int count = 0, consumed = 0;
+@@ -519,8 +520,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+       eq->queue_claimed = 0;
+ rearm_and_exit:
+-      /* Always clear and re-arm the EQ */
+-      phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
++      /* Always clear the EQ. */
++      phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
+       return count;
+ }
+@@ -7894,7 +7895,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+       if (mbox_pending)
+               /* process and rearm the EQ */
+-              lpfc_sli4_process_eq(phba, fpeq);
++              lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+       else
+               /* Always clear and re-arm the EQ */
+               sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
+@@ -10055,10 +10056,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+                   struct lpfc_iocbq *piocb, uint32_t flag)
+ {
+       struct lpfc_sli_ring *pring;
++      struct lpfc_queue *eq;
+       unsigned long iflags;
+       int rc;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
++              eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
++
+               pring = lpfc_sli4_calc_ring(phba, piocb);
+               if (unlikely(pring == NULL))
+                       return IOCB_ERROR;
+@@ -10066,6 +10070,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+               spin_lock_irqsave(&pring->ring_lock, iflags);
+               rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+               spin_unlock_irqrestore(&pring->ring_lock, iflags);
++
++              lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
+       } else {
+               /* For now, SLI2/3 will still use hbalock */
+               spin_lock_irqsave(&phba->hbalock, iflags);
+@@ -14245,7 +14251,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
+               lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+       /* process and rearm the EQ */
+-      ecount = lpfc_sli4_process_eq(phba, fpeq);
++      ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+       if (unlikely(ecount == 0)) {
+               fpeq->EQ_no_entry++;
+@@ -14305,6 +14311,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
+       return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
+ } /* lpfc_sli4_intr_handler */
++void lpfc_sli4_poll_hbtimer(struct timer_list *t)
++{
++      struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
++      struct lpfc_queue *eq;
++      int i = 0;
++
++      rcu_read_lock();
++
++      list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
++              i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
++      if (!list_empty(&phba->poll_list))
++              mod_timer(&phba->cpuhp_poll_timer,
++                        jiffies + msecs_to_jiffies(LPFC_POLL_HB));
++
++      rcu_read_unlock();
++}
++
++inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
++{
++      struct lpfc_hba *phba = eq->phba;
++      int i = 0;
++
++      /*
++       * Unlocking an irq is one of the entry points to check
++       * for re-schedule, but we are good for the io submission
++       * path as the midlayer does a get_cpu to glue us in. Flush
++       * out the invalidate queue so we can see the updated
++       * value of the flag.
++       */
++      smp_rmb();
++
++      if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
++              /* We will not likely get the completion for the caller
++               * during this iteration, but that is fine.
++               * Future io's coming on this eq should be able to
++               * pick it up.  As for the case of single io's, they
++               * will be handled through a sched from the polling
++               * timer function, which currently fires every 1 msec.
++               */
++              i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
++
++      return i;
++}
++
++static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
++{
++      struct lpfc_hba *phba = eq->phba;
++
++      if (list_empty(&phba->poll_list)) {
++              timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
++              /* kickstart slowpath processing for this eq */
++              mod_timer(&phba->cpuhp_poll_timer,
++                        jiffies + msecs_to_jiffies(LPFC_POLL_HB));
++      }
++
++      list_add_rcu(&eq->_poll_list, &phba->poll_list);
++      synchronize_rcu();
++}
++
++static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
++{
++      struct lpfc_hba *phba = eq->phba;
++
++      /* Disable slowpath processing for this eq.  Kick start the eq
++       * by RE-ARMING it ASAP.
++       */
++      list_del_rcu(&eq->_poll_list);
++      synchronize_rcu();
++
++      if (list_empty(&phba->poll_list))
++              del_timer_sync(&phba->cpuhp_poll_timer);
++}
++
++inline void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
++{
++      struct lpfc_queue *eq, *next;
++
++      list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
++              list_del(&eq->_poll_list);
++
++      INIT_LIST_HEAD(&phba->poll_list);
++      synchronize_rcu();
++}
++
++static inline void
++__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
++{
++      if (mode == eq->mode)
++              return;
++      /*
++       * Currently this function is only called during a hotplug
++       * event and the cpu on which this function is executing
++       * is going offline.  By now the hotplug has instructed
++       * the scheduler to remove this cpu from the cpu active mask.
++       * So we don't need to worry about being put aside by the
++       * scheduler for a high priority process.  Yes, interrupts
++       * could still come in, but they are known to retire ASAP.
++       */
++
++      /* Disable polling in the fastpath */
++      WRITE_ONCE(eq->mode, mode);
++      /* flush out the store buffer */
++      smp_wmb();
++
++      /*
++       * Add this eq to the polling list and start polling. For
++       * a grace period both the interrupt handler and the poller
++       * will try to process the eq _but_ that's fine.  We have a
++       * synchronization mechanism in place (queue_claimed) to
++       * deal with it.  This is just a draining phase for the
++       * interrupt handler (not the eq's), as we have guaranteed
++       * through the barrier that all the CPUs have seen the new
++       * CQ_POLLED state, which effectively disables the REARMING
++       * of the EQ.  The whole idea is that the eq's die off
++       * eventually, as we are not rearming them anymore.
++       */
++      mode ? lpfc_sli4_add_to_poll_list(eq) :
++             lpfc_sli4_remove_from_poll_list(eq);
++}
++
++void lpfc_sli4_start_polling(struct lpfc_queue *eq)
++{
++      __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
++}
++
++void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
++{
++      struct lpfc_hba *phba = eq->phba;
++
++      __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
++
++      /* Kick start the pending io's in h/w.
++       * Once we switch back to interrupt processing on an eq,
++       * the io completion path will only rearm the eq when it
++       * receives a completion.  But since the eq is in the
++       * disarmed state, it never receives a completion.  This
++       * creates a deadlock scenario, so rearm it here once.
++       */
++      phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
++}
++
+ /**
+  * lpfc_sli4_queue_free - free a queue structure and associated memory
+  * @queue: The queue structure to free.
+@@ -14379,6 +14526,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+               return NULL;
+       INIT_LIST_HEAD(&queue->list);
++      INIT_LIST_HEAD(&queue->_poll_list);
+       INIT_LIST_HEAD(&queue->wq_list);
+       INIT_LIST_HEAD(&queue->wqfull_list);
+       INIT_LIST_HEAD(&queue->page_list);
+@@ -19698,6 +19846,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
+               lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+               spin_unlock_irqrestore(&pring->ring_lock, iflags);
++
++              lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+               return 0;
+       }
+@@ -19718,6 +19868,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
+               }
+               lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+               spin_unlock_irqrestore(&pring->ring_lock, iflags);
++
++              lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+               return 0;
+       }
+@@ -19746,6 +19898,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
+               }
+               lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+               spin_unlock_irqrestore(&pring->ring_lock, iflags);
++
++              lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+               return 0;
+       }
+       return WQE_ERROR;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index 0d4882a9e634c..c60a636a88949 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -133,6 +133,23 @@ struct lpfc_rqb {
+ struct lpfc_queue {
+       struct list_head list;
+       struct list_head wq_list;
++
++      /*
++       * If interrupts are in effect on _all_ the eq's the footprint
++       * of polling code is zero (except mode). This memory is chec-
++       * ked for every io to see if the io needs to be polled and
++       * while completion to check if the eq's needs to be rearmed.
++       * Keep in same cacheline as the queue ptr to avoid cpu fetch
++       * stalls. Using 1B memory will leave us with 7B hole. Fill
++       * it with other frequently used members.
++       */
++      uint16_t last_cpu;      /* most recent cpu */
++      uint16_t hdwq;
++      uint8_t  qe_valid;
++      uint8_t  mode;  /* interrupt or polling */
++#define LPFC_EQ_INTERRUPT     0
++#define LPFC_EQ_POLL          1
++
+       struct list_head wqfull_list;
+       enum lpfc_sli4_queue_type type;
+       enum lpfc_sli4_queue_subtype subtype;
+@@ -239,10 +256,8 @@ struct lpfc_queue {
+       struct delayed_work     sched_spwork;
+       uint64_t isr_timestamp;
+-      uint16_t hdwq;
+-      uint16_t last_cpu;      /* most recent cpu */
+-      uint8_t qe_valid;
+       struct lpfc_queue *assoc_qp;
++      struct list_head _poll_list;
+       void **q_pgs;   /* array to index entries per page */
+ };
+-- 
+2.20.1
+
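The registration flow in the lpfc_init.c hunks above is the kernel's multi-instance CPU hotplug API: one dynamic state is allocated at module init with cpuhp_setup_state_multi(), and each HBA then attaches itself as an instance through an embedded hlist_node. A minimal sketch of that pattern follows; my_dev, "my/driver:online" and the callback bodies are illustrative stand-ins, not lpfc symbols:

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/module.h>

static enum cpuhp_state my_cpuhp_state;

struct my_dev {
        struct hlist_node cpuhp;        /* links this device into the state */
};

static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct my_dev *dev = hlist_entry_safe(node, struct my_dev, cpuhp);

        if (!dev)
                return 0;
        /* cpu came back: hand its queues back to interrupt processing */
        return 0;
}

static int my_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct my_dev *dev = hlist_entry_safe(node, struct my_dev, cpuhp);

        if (!dev)
                return 0;
        /* cpu is leaving: take over its queues, e.g. start polling them */
        return 0;
}

static int __init my_init(void)
{
        int ret;

        /* CPUHP_AP_ONLINE_DYN asks the core to allocate a state number */
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my/driver:online",
                                      my_cpu_online, my_cpu_offline);
        if (ret < 0)
                return ret;
        my_cpuhp_state = ret;
        return 0;
}

static void __exit my_exit(void)
{
        cpuhp_remove_multi_state(my_cpuhp_state);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Each probed device then calls cpuhp_state_add_instance_nocalls(my_cpuhp_state, &dev->cpuhp), with a matching cpuhp_state_remove_instance_nocalls() on teardown. The _nocalls variants register and unregister without invoking the callbacks on the currently online cpus, which is how the patch uses them in lpfc_pci_probe_one_s4() and lpfc_cpuhp_remove().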
diff --git a/queue-5.4/scsi-lpfc-fix-broken-credit-recovery-after-driver-lo.patch b/queue-5.4/scsi-lpfc-fix-broken-credit-recovery-after-driver-lo.patch
new file mode 100644 (file)
index 0000000..5cb7a84
--- /dev/null
@@ -0,0 +1,152 @@
+From 92ae8a184a93f2133885a511a8aa6e5c5a196d2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Jan 2020 16:23:03 -0800
+Subject: scsi: lpfc: Fix broken Credit Recovery after driver load
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit 835214f5d5f516a38069bc077c879c7da00d6108 ]
+
+When the driver is set to enable bb credit recovery, the switch displays
+the setting as Inactive.  If the link bounces, it switches to Active.
+
+During link up processing, the driver currently does a MBX_READ_SPARAM
+followed by a MBX_CONFIG_LINK. These mbox commands are queued to be
+executed one at a time, and the completion is processed by the worker
+thread.  Since the MBX_READ_SPARAM is done BEFORE the MBX_CONFIG_LINK, the
+BB_SC_N bit is never set in the returned values. BB Credit recovery status
+only gets set after the driver requests the feature in CONFIG_LINK, which
+is done after link up. Thus the ordering of READ_SPARAM needs to follow
+the CONFIG_LINK.
+
+Fix by reordering so that READ_SPARAM is done after CONFIG_LINK.  Added a
+HBA_DEFER_FLOGI flag so that any FLOGI handling waits until READ_SPARAM
+completes, ensuring the proper BB credit value is set in the FLOGI
+payload.
+
+Fixes: 6bfb16208298 ("scsi: lpfc: Fix configuration of BB credit recovery in service parameters")
+Cc: <stable@vger.kernel.org> # v5.4+
+Link: https://lore.kernel.org/r/20200128002312.16346-4-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc.h         |  1 +
+ drivers/scsi/lpfc/lpfc_hbadisc.c | 59 +++++++++++++++++++++-----------
+ 2 files changed, 40 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index e492ca2d0b8be..8943d42fc406e 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -742,6 +742,7 @@ struct lpfc_hba {
+                                        * capability
+                                        */
+ #define HBA_FLOGI_ISSUED      0x100000 /* FLOGI was issued */
++#define HBA_DEFER_FLOGI               0x800000 /* Defer FLOGI till read_sparm cmpl */
+       uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
+       struct lpfc_dmabuf slim2p;
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 3f7df471106e9..799db8a785c21 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -1163,13 +1163,16 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+       }
+       /* Start discovery by sending a FLOGI. port_state is identically
+-       * LPFC_FLOGI while waiting for FLOGI cmpl
++       * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
++       * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
+        */
+-      if (vport->port_state != LPFC_FLOGI)
+-              lpfc_initial_flogi(vport);
+-      else if (vport->fc_flag & FC_PT2PT)
+-              lpfc_disc_start(vport);
+-
++      if (vport->port_state != LPFC_FLOGI) {
++              if (!(phba->hba_flag & HBA_DEFER_FLOGI))
++                      lpfc_initial_flogi(vport);
++      } else {
++              if (vport->fc_flag & FC_PT2PT)
++                      lpfc_disc_start(vport);
++      }
+       return;
+ out:
+@@ -3094,6 +3097,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mempool_free(pmb, phba->mbox_mem_pool);
++
++      /* Check if sending the FLOGI is being deferred to after we get
++       * up to date CSPs from MBX_READ_SPARAM.
++       */
++      if (phba->hba_flag & HBA_DEFER_FLOGI) {
++              lpfc_initial_flogi(vport);
++              phba->hba_flag &= ~HBA_DEFER_FLOGI;
++      }
+       return;
+ out:
+@@ -3224,6 +3235,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
+       }
+       lpfc_linkup(phba);
++      sparam_mbox = NULL;
++
++      if (!(phba->hba_flag & HBA_FCOE_MODE)) {
++              cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++              if (!cfglink_mbox)
++                      goto out;
++              vport->port_state = LPFC_LOCAL_CFG_LINK;
++              lpfc_config_link(phba, cfglink_mbox);
++              cfglink_mbox->vport = vport;
++              cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
++              rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
++              if (rc == MBX_NOT_FINISHED) {
++                      mempool_free(cfglink_mbox, phba->mbox_mem_pool);
++                      goto out;
++              }
++      }
++
+       sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!sparam_mbox)
+               goto out;
+@@ -3244,20 +3272,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
+               goto out;
+       }
+-      if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+-              cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+-              if (!cfglink_mbox)
+-                      goto out;
+-              vport->port_state = LPFC_LOCAL_CFG_LINK;
+-              lpfc_config_link(phba, cfglink_mbox);
+-              cfglink_mbox->vport = vport;
+-              cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+-              rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+-              if (rc == MBX_NOT_FINISHED) {
+-                      mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+-                      goto out;
+-              }
+-      } else {
++      if (phba->hba_flag & HBA_FCOE_MODE) {
+               vport->port_state = LPFC_VPORT_UNKNOWN;
+               /*
+                * Add the driver's default FCF record at FCF index 0 now. This
+@@ -3314,6 +3329,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
+               }
+               /* Reset FCF roundrobin bmask for new discovery */
+               lpfc_sli4_clear_fcf_rr_bmask(phba);
++      } else {
++              if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
++                  !(phba->link_flag & LS_LOOPBACK_MODE))
++                      phba->hba_flag |= HBA_DEFER_FLOGI;
+       }
+       /* Prepare for LINK up registrations */
+-- 
+2.20.1
+
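The mechanism here is a defer flag bridging two asynchronous mailbox completions. A rough sketch of the idea, with hypothetical my_hba and issue_* names standing in for the driver's structures and queued mailbox commands (in the actual patch the flag is only set when bb credit recovery is enabled and the link is not in loopback):

#define MY_DEFER_FLOGI  0x1

struct my_hba {
        unsigned int flags;
        bool bbcredit_support;
};

/* illustrative stubs for the queued mailbox commands */
static void issue_config_link(struct my_hba *hba) { (void)hba; }
static void issue_read_sparam(struct my_hba *hba) { (void)hba; }
static void issue_initial_flogi(struct my_hba *hba) { (void)hba; }

/* Link up: queue CONFIG_LINK before READ_SPARAM so the returned
 * service parameters reflect the negotiated BB_SC_N value.
 */
static void my_link_up(struct my_hba *hba)
{
        issue_config_link(hba);
        if (hba->bbcredit_support)
                hba->flags |= MY_DEFER_FLOGI;   /* hold FLOGI for fresh CSPs */
        issue_read_sparam(hba);
}

/* READ_SPARAM completion: the service parameters are now current,
 * so the deferred FLOGI carries the proper BB credit value.
 */
static void my_read_sparam_cmpl(struct my_hba *hba)
{
        if (hba->flags & MY_DEFER_FLOGI) {
                hba->flags &= ~MY_DEFER_FLOGI;
                issue_initial_flogi(hba);
        }
}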
diff --git a/queue-5.4/scsi-lpfc-fix-configuration-of-bb-credit-recovery-in.patch b/queue-5.4/scsi-lpfc-fix-configuration-of-bb-credit-recovery-in.patch
new file mode 100644 (file)
index 0000000..ea46002
--- /dev/null
@@ -0,0 +1,67 @@
+From 3d551ed3e6ce20eeb3d7b5e8e7fc4854af173d2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Nov 2019 16:56:59 -0800
+Subject: scsi: lpfc: Fix configuration of BB credit recovery in service
+ parameters
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit 6bfb1620829825c01e1dcdd63b6a7700352babd9 ]
+
+The driver today reads service parameters from the firmware and then
+overwrites the firmware-provided values with values of its own.  There are
+some switch features that require preliminary, switch-specific FLOGIs
+done prior to the actual fabric FLOGI for traffic.  The fw performs those
+FLOGIs and revises the service parameters for the features configured.
+By later overwriting those values with its own, the driver misconfigures
+things like BBSCN use.
+
+Correct by eliminating the driver-overwrite of firmware values. The driver
+correctly re-reads the service parameters after each link up to obtain the
+latest values from firmware.
+
+Link: https://lore.kernel.org/r/20191105005708.7399-3-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_hbadisc.c | 13 +++----------
+ 1 file changed, 3 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 39ca541935342..3f7df471106e9 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -1139,7 +1139,6 @@ void
+ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ {
+       struct lpfc_vport *vport = pmb->vport;
+-      uint8_t bbscn = 0;
+       if (pmb->u.mb.mbxStatus)
+               goto out;
+@@ -1166,17 +1165,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+       /* Start discovery by sending a FLOGI. port_state is identically
+        * LPFC_FLOGI while waiting for FLOGI cmpl
+        */
+-      if (vport->port_state != LPFC_FLOGI) {
+-              if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+-                      bbscn = bf_get(lpfc_bbscn_def,
+-                                     &phba->sli4_hba.bbscn_params);
+-                      vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+-                      vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
+-              }
++      if (vport->port_state != LPFC_FLOGI)
+               lpfc_initial_flogi(vport);
+-      } else if (vport->fc_flag & FC_PT2PT) {
++      else if (vport->fc_flag & FC_PT2PT)
+               lpfc_disc_start(vport);
+-      }
++
+       return;
+ out:
+-- 
+2.20.1
+
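For context on what the removed code was touching: the common service parameter byte bbRcvSizeMsb carries the BB receive size MSB in its low nibble and the BBSCN value in its high nibble, so the driver was rewriting a field the firmware now owns. A hypothetical helper showing the packing the deleted lines performed:

#include <linux/types.h>

/* low nibble: BB receive size MSB; high nibble: BB_SC_N (bbscn) */
static inline u8 pack_bbscn(u8 bb_rcv_size_msb, u8 bbscn)
{
        return (bb_rcv_size_msb & 0xf) | (bbscn << 4);
}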
diff --git a/queue-5.4/scsi-lpfc-fix-fabric-hostname-registration-if-system.patch b/queue-5.4/scsi-lpfc-fix-fabric-hostname-registration-if-system.patch
new file mode 100644 (file)
index 0000000..9df5b3c
--- /dev/null
@@ -0,0 +1,194 @@
+From d664075922d676ac03ecbd9ec19882258f827d0e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Dec 2019 15:58:02 -0800
+Subject: scsi: lpfc: Fix Fabric hostname registration if system hostname
+ changes
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit e3ba04c9bad1d1c7f15df43da25e878045150777 ]
+
+There are reports of multiple ports on the same system displaying different
+hostnames in fabric FDMI displays.
+
+Currently, the driver registers the hostname at initialization and obtains
+the hostname via init_utsname()->nodename queried at the time the FC link
+comes up. Unfortunately, if the machine hostname is updated after
+initialization, such as via DHCP or admin command, the value registered
+initially will be incorrect.
+
+Fix by having the driver save the hostname that was registered with FDMI.
+The driver then runs a heartbeat action that checks the hostname.  If
+the name changes, it reregisters the FDMI data.
+
+The hostname is used in RSNN_NN, FDMI RPA and FDMI RHBA.
+
+Link: https://lore.kernel.org/r/20191218235808.31922-5-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc.h         |  2 ++
+ drivers/scsi/lpfc/lpfc_crtn.h    |  2 +-
+ drivers/scsi/lpfc/lpfc_ct.c      | 48 ++++++++++++++++++++++++--------
+ drivers/scsi/lpfc/lpfc_hbadisc.c |  5 ++++
+ drivers/scsi/lpfc/lpfc_init.c    |  2 +-
+ 5 files changed, 46 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index 84b0f0ac26e7e..e492ca2d0b8be 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -1216,6 +1216,8 @@ struct lpfc_hba {
+ #define LPFC_POLL_HB  1               /* slowpath heartbeat */
+ #define LPFC_POLL_FASTPATH    0       /* called from fastpath */
+ #define LPFC_POLL_SLOWPATH    1       /* called from slowpath */
++
++      char os_host_name[MAXHOSTNAMELEN];
+ };
+ static inline struct Scsi_Host *
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index 9e477d766ce98..a03efe9ad2a42 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
+ int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
+ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
+ int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
+-void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
++void lpfc_fdmi_change_check(struct lpfc_vport *vport);
+ void lpfc_delayed_disc_tmo(struct timer_list *);
+ void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index f81d1453eefbd..85f77c1ed23c8 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -1495,7 +1495,7 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
+       if (strlcat(symbol, tmp, size) >= size)
+               goto buffer_done;
+-      scnprintf(tmp, sizeof(tmp), " HN:%s", init_utsname()->nodename);
++      scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
+       if (strlcat(symbol, tmp, size) >= size)
+               goto buffer_done;
+@@ -1984,14 +1984,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ /**
+- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
++ * lpfc_fdmi_change_check - Check for changed FDMI parameters
+  * @vport: pointer to a host virtual N_Port data structure.
+  *
+- * Called from hbeat timeout routine to check if the number of discovered
+- * ports has changed. If so, re-register thar port Attribute.
++ * Check how many mapped NPorts we are connected to
++ * Check if our hostname changed
++ * Called from hbeat timeout routine to check if any FDMI parameters
++ * changed. If so, re-register those Attributes.
+  */
+ void
+-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
++lpfc_fdmi_change_check(struct lpfc_vport *vport)
+ {
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_nodelist *ndlp;
+@@ -2004,17 +2006,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+       if (!(vport->fc_flag & FC_FABRIC))
+               return;
++      ndlp = lpfc_findnode_did(vport, FDMI_DID);
++      if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
++              return;
++
++      /* Check if system hostname changed */
++      if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
++              memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
++              scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
++                        init_utsname()->nodename);
++              lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
++
++              /* Since this affects multiple HBA and PORT attributes, we need
++               * to de-register and go thru the whole FDMI registration cycle.
++               * DHBA -> DPRT -> RHBA -> RPA  (physical port)
++               * DPRT -> RPRT (vports)
++               */
++              if (vport->port_type == LPFC_PHYSICAL_PORT)
++                      lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
++              else
++                      lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
++
++              /* Since this code path registers all the port attributes
++               * we can just return without further checking.
++               */
++              return;
++      }
++
+       if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
+               return;
++      /* Check if the number of mapped NPorts changed */
+       cnt = lpfc_find_map_node(vport);
+       if (cnt == vport->fdmi_num_disc)
+               return;
+-      ndlp = lpfc_findnode_did(vport, FDMI_DID);
+-      if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+-              return;
+-
+       if (vport->port_type == LPFC_PHYSICAL_PORT) {
+               lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
+                             LPFC_FDMI_PORT_ATTR_num_disc);
+@@ -2602,8 +2628,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
+       ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+       memset(ae, 0, 256);
+-      snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+-               init_utsname()->nodename);
++      scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
++                vport->phba->os_host_name);
+       len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+       len += (len & 3) ? (4 - (len & 3)) : 4;
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index ee70d14e7a9dc..39ca541935342 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -28,6 +28,7 @@
+ #include <linux/kthread.h>
+ #include <linux/interrupt.h>
+ #include <linux/lockdep.h>
++#include <linux/utsname.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_device.h>
+@@ -3322,6 +3323,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
+               lpfc_sli4_clear_fcf_rr_bmask(phba);
+       }
++      /* Prepare for LINK up registrations */
++      memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
++      scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
++                init_utsname()->nodename);
+       return;
+ out:
+       lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 1bf79445c15bf..14d9f41977f1c 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -1370,7 +1370,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
+       if (vports != NULL)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+                       lpfc_rcv_seq_check_edtov(vports[i]);
+-                      lpfc_fdmi_num_disc_check(vports[i]);
++                      lpfc_fdmi_change_check(vports[i]);
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+-- 
+2.20.1
+
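The heartbeat-driven check above boils down to comparing a cached copy of the registered name against the live uts nodename. A minimal sketch, assuming a hypothetical my_hba structure and re-registration hook:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/utsname.h>

#define MY_HOST_NAME_LEN        64      /* assumed; lpfc uses MAXHOSTNAMELEN */

struct my_hba {
        char os_host_name[MY_HOST_NAME_LEN];    /* name last registered */
};

static void my_reregister_fdmi(struct my_hba *hba) { (void)hba; }

/* called from a heartbeat timer, like lpfc_fdmi_change_check() */
static void my_check_hostname(struct my_hba *hba)
{
        if (!strcmp(hba->os_host_name, init_utsname()->nodename))
                return;                         /* unchanged */

        scnprintf(hba->os_host_name, sizeof(hba->os_host_name), "%s",
                  init_utsname()->nodename);
        /* redo registrations: DHBA/DPRT, then RHBA/RPA with the new name */
        my_reregister_fdmi(hba);
}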
diff --git a/queue-5.4/series b/queue-5.4/series
index 9437daca3cfc697143668bd613f1aca8df5792c7..6717b883f94b75283193bb6d44a93ef31bdd6c68 100644 (file)
@@ -215,3 +215,18 @@ powerpc-64-prevent-stack-protection-in-early-boot.patch
 scsi-mpt3sas-fix-kernel-panic-observed-on-soft-hba-unplug.patch
 powerpc-make-setjmp-longjmp-signature-standard.patch
 arm64-always-force-a-branch-protection-mode-when-the-compiler-has-one.patch
+dm-zoned-remove-duplicate-nr_rnd_zones-increase-in-d.patch
+dm-clone-replace-spin_lock_irqsave-with-spin_lock_ir.patch
+dm-clone-fix-handling-of-partial-region-discards.patch
+dm-clone-add-missing-casts-to-prevent-overflows-and-.patch
+scsi-lpfc-add-registration-for-cpu-offline-online-ev.patch
+scsi-lpfc-fix-fabric-hostname-registration-if-system.patch
+scsi-lpfc-fix-configuration-of-bb-credit-recovery-in.patch
+scsi-lpfc-fix-broken-credit-recovery-after-driver-lo.patch
+revert-drm-dp_mst-remove-vcpi-while-disabling-topolo.patch
+drm-dp_mst-fix-clearing-payload-state-on-topology-di.patch
+drm-amdgpu-fix-gfx-hang-during-suspend-with-video-pl.patch
+drm-i915-icl-don-t-enable-ddi-io-power-on-a-typec-po.patch
+powerpc-kasan-fix-kasan_remap_early_shadow_ro.patch
+mmc-sdhci-convert-sdhci_set_timeout_irq-to-non-stati.patch
+mmc-sdhci-refactor-sdhci_set_timeout.patch