4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 20 Aug 2020 08:43:12 +0000 (10:43 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 20 Aug 2020 08:43:12 +0000 (10:43 +0200)
added patches:
arm64-dts-marvell-espressobin-add-ethernet-alias.patch
dm-cache-pass-cache-structure-to-mode-functions.patch
dm-cache-remove-all-obsolete-writethrough-specific-code.patch
dm-cache-submit-writethrough-writes-in-parallel-to-origin-and-cache.patch
drm-radeon-fix-fb_div-check-in-ni_init_smc_spll_table.patch
genirq-affinity-handle-affinity-setting-on-inactive-interrupts-correctly.patch
genirq-affinity-make-affinity-setting-if-activated-opt-in.patch
khugepaged-retract_page_tables-remember-to-test-exit.patch

queue-4.14/arm64-dts-marvell-espressobin-add-ethernet-alias.patch [new file with mode: 0644]
queue-4.14/dm-cache-pass-cache-structure-to-mode-functions.patch [new file with mode: 0644]
queue-4.14/dm-cache-remove-all-obsolete-writethrough-specific-code.patch [new file with mode: 0644]
queue-4.14/dm-cache-submit-writethrough-writes-in-parallel-to-origin-and-cache.patch [new file with mode: 0644]
queue-4.14/drm-radeon-fix-fb_div-check-in-ni_init_smc_spll_table.patch [new file with mode: 0644]
queue-4.14/genirq-affinity-handle-affinity-setting-on-inactive-interrupts-correctly.patch [new file with mode: 0644]
queue-4.14/genirq-affinity-make-affinity-setting-if-activated-opt-in.patch [new file with mode: 0644]
queue-4.14/khugepaged-retract_page_tables-remember-to-test-exit.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/arm64-dts-marvell-espressobin-add-ethernet-alias.patch b/queue-4.14/arm64-dts-marvell-espressobin-add-ethernet-alias.patch
new file mode 100644 (file)
index 0000000..2af03f6
--- /dev/null
@@ -0,0 +1,37 @@
+From 5253cb8c00a6f4356760efb38bca0e0393aa06de Mon Sep 17 00:00:00 2001
+From: Tomasz Maciej Nowak <tmn505@gmail.com>
+Date: Thu, 27 Feb 2020 17:52:32 +0100
+Subject: arm64: dts: marvell: espressobin: add ethernet alias
+
+From: Tomasz Maciej Nowak <tmn505@gmail.com>
+
+commit 5253cb8c00a6f4356760efb38bca0e0393aa06de upstream.
+
+The maker of this board and its variants stores the MAC address in the
+U-Boot environment. Add an alias so the bootloader can recognise which
+ethernet node to inject the factory MAC address into.
+
+Signed-off-by: Tomasz Maciej Nowak <tmn505@gmail.com>
+Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+[pali: Backported to 5.4 and older versions]
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+@@ -52,6 +52,12 @@
+       model = "Globalscale Marvell ESPRESSOBin Board";
+       compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
++      aliases {
++              ethernet0 = &eth0;
++              serial0 = &uart0;
++              serial1 = &uart1;
++      };
++
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
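
For context on how the new alias is consumed: at boot, the firmware resolves the "ethernet0" alias from /aliases and writes the factory MAC address into that node before handing the device tree to the kernel. Below is a minimal libfdt-based sketch of such a fixup -- an illustration only, not the actual U-Boot code; the function name and error handling are made up.

#include <stdint.h>
#include <libfdt.h>

/* Sketch: write a factory MAC into the node that the "ethernet0" alias
 * points at.  Mirrors what a bootloader fixup conceptually does. */
static int fixup_ethernet0_mac(void *fdt, const uint8_t mac[6])
{
	const char *path;
	int node;

	/* /aliases/ethernet0 holds the full path of the ethernet node. */
	path = fdt_get_alias(fdt, "ethernet0");
	if (!path)
		return -FDT_ERR_NOTFOUND;

	node = fdt_path_offset(fdt, path);
	if (node < 0)
		return node;

	/* The kernel reads this property as the interface's MAC address. */
	return fdt_setprop(fdt, node, "local-mac-address", mac, 6);
}
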
diff --git a/queue-4.14/dm-cache-pass-cache-structure-to-mode-functions.patch b/queue-4.14/dm-cache-pass-cache-structure-to-mode-functions.patch
new file mode 100644 (file)
index 0000000..c7f1653
--- /dev/null
@@ -0,0 +1,127 @@
+From 8e3c3827776fc93728c0c8d7c7b731226dc6ee23 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Thu, 19 Oct 2017 21:01:04 -0400
+Subject: dm cache: pass cache structure to mode functions
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 8e3c3827776fc93728c0c8d7c7b731226dc6ee23 upstream.
+
+No functional changes, just a bit cleaner than passing the cache_features
+structure.
+
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c |   32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -515,19 +515,19 @@ struct dm_cache_migration {
+ /*----------------------------------------------------------------*/
+-static bool writethrough_mode(struct cache_features *f)
++static bool writethrough_mode(struct cache *cache)
+ {
+-      return f->io_mode == CM_IO_WRITETHROUGH;
++      return cache->features.io_mode == CM_IO_WRITETHROUGH;
+ }
+-static bool writeback_mode(struct cache_features *f)
++static bool writeback_mode(struct cache *cache)
+ {
+-      return f->io_mode == CM_IO_WRITEBACK;
++      return cache->features.io_mode == CM_IO_WRITEBACK;
+ }
+-static inline bool passthrough_mode(struct cache_features *f)
++static inline bool passthrough_mode(struct cache *cache)
+ {
+-      return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
++      return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
+ }
+ /*----------------------------------------------------------------*/
+@@ -544,7 +544,7 @@ static void wake_deferred_writethrough_w
+ static void wake_migration_worker(struct cache *cache)
+ {
+-      if (passthrough_mode(&cache->features))
++      if (passthrough_mode(cache))
+               return;
+       queue_work(cache->wq, &cache->migration_worker);
+@@ -626,7 +626,7 @@ static unsigned lock_level(struct bio *b
+ static size_t get_per_bio_data_size(struct cache *cache)
+ {
+-      return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
++      return writethrough_mode(cache) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+ }
+ static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
+@@ -1209,7 +1209,7 @@ static bool bio_writes_complete_block(st
+ static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
+ {
+-      return writeback_mode(&cache->features) &&
++      return writeback_mode(cache) &&
+               (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
+ }
+@@ -1862,7 +1862,7 @@ static int map_bio(struct cache *cache,
+                * Passthrough always maps to the origin, invalidating any
+                * cache blocks that are written to.
+                */
+-              if (passthrough_mode(&cache->features)) {
++              if (passthrough_mode(cache)) {
+                       if (bio_data_dir(bio) == WRITE) {
+                               bio_drop_shared_lock(cache, bio);
+                               atomic_inc(&cache->stats.demotion);
+@@ -1871,7 +1871,7 @@ static int map_bio(struct cache *cache,
+                               remap_to_origin_clear_discard(cache, bio, block);
+               } else {
+-                      if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
++                      if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
+                           !is_dirty(cache, cblock)) {
+                               remap_to_origin_then_cache(cache, bio, block, cblock);
+                               accounted_begin(cache, bio);
+@@ -2649,7 +2649,7 @@ static int cache_create(struct cache_arg
+               goto bad;
+       }
+-      if (passthrough_mode(&cache->features)) {
++      if (passthrough_mode(cache)) {
+               bool all_clean;
+               r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
+@@ -3279,13 +3279,13 @@ static void cache_status(struct dm_targe
+               else
+                       DMEMIT("1 ");
+-              if (writethrough_mode(&cache->features))
++              if (writethrough_mode(cache))
+                       DMEMIT("writethrough ");
+-              else if (passthrough_mode(&cache->features))
++              else if (passthrough_mode(cache))
+                       DMEMIT("passthrough ");
+-              else if (writeback_mode(&cache->features))
++              else if (writeback_mode(cache))
+                       DMEMIT("writeback ");
+               else {
+@@ -3451,7 +3451,7 @@ static int process_invalidate_cblocks_me
+       unsigned i;
+       struct cblock_range range;
+-      if (!passthrough_mode(&cache->features)) {
++      if (!passthrough_mode(cache)) {
+               DMERR("%s: cache has to be in passthrough mode for invalidation",
+                     cache_device_name(cache));
+               return -EPERM;
diff --git a/queue-4.14/dm-cache-remove-all-obsolete-writethrough-specific-code.patch b/queue-4.14/dm-cache-remove-all-obsolete-writethrough-specific-code.patch
new file mode 100644 (file)
index 0000000..17dc12b
--- /dev/null
@@ -0,0 +1,171 @@
+From 9958f1d9a04efb3db19134482b3f4c6897e0e7b8 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Thu, 19 Oct 2017 17:30:20 -0400
+Subject: dm cache: remove all obsolete writethrough-specific code
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 9958f1d9a04efb3db19134482b3f4c6897e0e7b8 upstream.
+
+Now that the writethrough code is much simpler there is no need to track
+so much state or cascade bio submission (as was done, via
+writethrough_endio(), to issue origin then cache IO in series).
+
+As such, the obsolete writethrough list and workqueue are also removed.
+
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c |   82 -------------------------------------------
+ 1 file changed, 1 insertion(+), 81 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -410,7 +410,6 @@ struct cache {
+       spinlock_t lock;
+       struct list_head deferred_cells;
+       struct bio_list deferred_bios;
+-      struct bio_list deferred_writethrough_bios;
+       sector_t migration_threshold;
+       wait_queue_head_t migration_wait;
+       atomic_t nr_allocated_migrations;
+@@ -446,7 +445,6 @@ struct cache {
+       struct dm_kcopyd_client *copier;
+       struct workqueue_struct *wq;
+       struct work_struct deferred_bio_worker;
+-      struct work_struct deferred_writethrough_worker;
+       struct work_struct migration_worker;
+       struct delayed_work waker;
+       struct dm_bio_prison_v2 *prison;
+@@ -491,15 +489,6 @@ struct per_bio_data {
+       struct dm_bio_prison_cell_v2 *cell;
+       struct dm_hook_info hook_info;
+       sector_t len;
+-
+-      /*
+-       * writethrough fields.  These MUST remain at the end of this
+-       * structure and the 'cache' member must be the first as it
+-       * is used to determine the offset of the writethrough fields.
+-       */
+-      struct cache *cache;
+-      dm_cblock_t cblock;
+-      struct dm_bio_details bio_details;
+ };
+ struct dm_cache_migration {
+@@ -538,11 +527,6 @@ static void wake_deferred_bio_worker(str
+       queue_work(cache->wq, &cache->deferred_bio_worker);
+ }
+-static void wake_deferred_writethrough_worker(struct cache *cache)
+-{
+-      queue_work(cache->wq, &cache->deferred_writethrough_worker);
+-}
+-
+ static void wake_migration_worker(struct cache *cache)
+ {
+       if (passthrough_mode(cache))
+@@ -619,15 +603,9 @@ static unsigned lock_level(struct bio *b
+  * Per bio data
+  *--------------------------------------------------------------*/
+-/*
+- * If using writeback, leave out struct per_bio_data's writethrough fields.
+- */
+-#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
+-#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+-
+ static size_t get_per_bio_data_size(struct cache *cache)
+ {
+-      return writethrough_mode(cache) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
++      return sizeof(struct per_bio_data);
+ }
+ static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
+@@ -945,39 +923,6 @@ static void issue_op(struct bio *bio, vo
+       accounted_request(cache, bio);
+ }
+-static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&cache->lock, flags);
+-      bio_list_add(&cache->deferred_writethrough_bios, bio);
+-      spin_unlock_irqrestore(&cache->lock, flags);
+-
+-      wake_deferred_writethrough_worker(cache);
+-}
+-
+-static void writethrough_endio(struct bio *bio)
+-{
+-      struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+-
+-      dm_unhook_bio(&pb->hook_info, bio);
+-
+-      if (bio->bi_status) {
+-              bio_endio(bio);
+-              return;
+-      }
+-
+-      dm_bio_restore(&pb->bio_details, bio);
+-      remap_to_cache(pb->cache, bio, pb->cblock);
+-
+-      /*
+-       * We can't issue this bio directly, since we're in interrupt
+-       * context.  So it gets put on a bio list for processing by the
+-       * worker thread.
+-       */
+-      defer_writethrough_bio(pb->cache, bio);
+-}
+-
+ /*
+  * When running in writethrough mode we need to send writes to clean blocks
+  * to both the cache and origin devices.  Clone the bio and send them in parallel.
+@@ -2013,28 +1958,6 @@ static void process_deferred_bios(struct
+               schedule_commit(&cache->committer);
+ }
+-static void process_deferred_writethrough_bios(struct work_struct *ws)
+-{
+-      struct cache *cache = container_of(ws, struct cache, deferred_writethrough_worker);
+-
+-      unsigned long flags;
+-      struct bio_list bios;
+-      struct bio *bio;
+-
+-      bio_list_init(&bios);
+-
+-      spin_lock_irqsave(&cache->lock, flags);
+-      bio_list_merge(&bios, &cache->deferred_writethrough_bios);
+-      bio_list_init(&cache->deferred_writethrough_bios);
+-      spin_unlock_irqrestore(&cache->lock, flags);
+-
+-      /*
+-       * These bios have already been through accounted_begin()
+-       */
+-      while ((bio = bio_list_pop(&bios)))
+-              generic_make_request(bio);
+-}
+-
+ /*----------------------------------------------------------------
+  * Main worker loop
+  *--------------------------------------------------------------*/
+@@ -2690,7 +2613,6 @@ static int cache_create(struct cache_arg
+       spin_lock_init(&cache->lock);
+       INIT_LIST_HEAD(&cache->deferred_cells);
+       bio_list_init(&cache->deferred_bios);
+-      bio_list_init(&cache->deferred_writethrough_bios);
+       atomic_set(&cache->nr_allocated_migrations, 0);
+       atomic_set(&cache->nr_io_migrations, 0);
+       init_waitqueue_head(&cache->migration_wait);
+@@ -2729,8 +2651,6 @@ static int cache_create(struct cache_arg
+               goto bad;
+       }
+       INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
+-      INIT_WORK(&cache->deferred_writethrough_worker,
+-                process_deferred_writethrough_bios);
+       INIT_WORK(&cache->migration_worker, check_migrations);
+       INIT_DELAYED_WORK(&cache->waker, do_waker);
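
A side effect visible in the hunk above: with the writethrough-only tail fields gone, get_per_bio_data_size() no longer needs the offsetof() trick that let writeback mode allocate only a prefix of struct per_bio_data. A standalone sketch of that sizing idiom, with hypothetical field names rather than the dm-cache ones:

#include <stddef.h>
#include <stdio.h>

/* Optional fields sit at the end of the struct; callers that never use
 * them reserve only the leading part.  This is the idiom the patch retires. */
struct pb_example {
	unsigned long always_used;
	/* optional tail -- must stay last for the offsetof() trick to work */
	void *optional_ptr;
	unsigned long optional_block;
};

#define PB_SIZE_SHORT	offsetof(struct pb_example, optional_ptr)
#define PB_SIZE_FULL	sizeof(struct pb_example)

int main(void)
{
	printf("short=%zu full=%zu\n", PB_SIZE_SHORT, PB_SIZE_FULL);
	return 0;
}
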
diff --git a/queue-4.14/dm-cache-submit-writethrough-writes-in-parallel-to-origin-and-cache.patch b/queue-4.14/dm-cache-submit-writethrough-writes-in-parallel-to-origin-and-cache.patch
new file mode 100644 (file)
index 0000000..b7c6911
--- /dev/null
@@ -0,0 +1,139 @@
+From 2df3bae9a6543e90042291707b8db0cbfbae9ee9 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Thu, 19 Oct 2017 17:16:54 -0400
+Subject: dm cache: submit writethrough writes in parallel to origin and cache
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 2df3bae9a6543e90042291707b8db0cbfbae9ee9 upstream.
+
+Discontinue issuing writethrough write IO in series to the origin and
+then cache.
+
+Use bio_clone_fast() to create a new origin clone bio that will be
+mapped to the origin device and then bio_chain() it to the bio that gets
+remapped to the cache device.  The origin clone bio does _not_ have a
+copy of the per_bio_data -- as such check_if_tick_bio_needed() will not
+be called.
+
+The cache bio (parent bio) will not complete until the origin bio has
+completed -- this fulfills bio_clone_fast()'s requirements as well as
+the requirement to not complete the original IO until the write IO has
+completed to both the origin and cache device.
+
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c |   54 +++++++++++++++++++++++++++++--------------
+ 1 file changed, 37 insertions(+), 17 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -450,6 +450,7 @@ struct cache {
+       struct work_struct migration_worker;
+       struct delayed_work waker;
+       struct dm_bio_prison_v2 *prison;
++      struct bio_set *bs;
+       mempool_t *migration_pool;
+@@ -868,16 +869,23 @@ static void check_if_tick_bio_needed(str
+       spin_unlock_irqrestore(&cache->lock, flags);
+ }
+-static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+-                                        dm_oblock_t oblock)
++static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
++                                          dm_oblock_t oblock, bool bio_has_pbd)
+ {
+-      // FIXME: this is called way too much.
+-      check_if_tick_bio_needed(cache, bio);
++      if (bio_has_pbd)
++              check_if_tick_bio_needed(cache, bio);
+       remap_to_origin(cache, bio);
+       if (bio_data_dir(bio) == WRITE)
+               clear_discard(cache, oblock_to_dblock(cache, oblock));
+ }
++static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
++                                        dm_oblock_t oblock)
++{
++      // FIXME: check_if_tick_bio_needed() is called way too much through this interface
++      __remap_to_origin_clear_discard(cache, bio, oblock, true);
++}
++
+ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
+                                dm_oblock_t oblock, dm_cblock_t cblock)
+ {
+@@ -971,23 +979,25 @@ static void writethrough_endio(struct bi
+ }
+ /*
+- * FIXME: send in parallel, huge latency as is.
+  * When running in writethrough mode we need to send writes to clean blocks
+- * to both the cache and origin devices.  In future we'd like to clone the
+- * bio and send them in parallel, but for now we're doing them in
+- * series as this is easier.
++ * to both the cache and origin devices.  Clone the bio and send them in parallel.
+  */
+-static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
+-                                     dm_oblock_t oblock, dm_cblock_t cblock)
++static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
++                                    dm_oblock_t oblock, dm_cblock_t cblock)
+ {
+-      struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
++      struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
++
++      BUG_ON(!origin_bio);
+-      pb->cache = cache;
+-      pb->cblock = cblock;
+-      dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
+-      dm_bio_record(&pb->bio_details, bio);
++      bio_chain(origin_bio, bio);
++      /*
++       * Passing false to __remap_to_origin_clear_discard() skips
++       * all code that might use per_bio_data (since clone doesn't have it)
++       */
++      __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
++      submit_bio(origin_bio);
+-      remap_to_origin_clear_discard(pb->cache, bio, oblock);
++      remap_to_cache(cache, bio, cblock);
+ }
+ /*----------------------------------------------------------------
+@@ -1873,7 +1883,7 @@ static int map_bio(struct cache *cache,
+               } else {
+                       if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
+                           !is_dirty(cache, cblock)) {
+-                              remap_to_origin_then_cache(cache, bio, block, cblock);
++                              remap_to_origin_and_cache(cache, bio, block, cblock);
+                               accounted_begin(cache, bio);
+                       } else
+                               remap_to_cache_dirty(cache, bio, block, cblock);
+@@ -2132,6 +2142,9 @@ static void destroy(struct cache *cache)
+               kfree(cache->ctr_args[i]);
+       kfree(cache->ctr_args);
++      if (cache->bs)
++              bioset_free(cache->bs);
++
+       kfree(cache);
+ }
+@@ -2589,6 +2602,13 @@ static int cache_create(struct cache_arg
+       cache->features = ca->features;
+       ti->per_io_data_size = get_per_bio_data_size(cache);
++      if (writethrough_mode(cache)) {
++              /* Create bioset for writethrough bios issued to origin */
++              cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
++              if (!cache->bs)
++                      goto bad;
++      }
++
+       cache->callbacks.congested_fn = cache_is_congested;
+       dm_table_add_target_callbacks(ti->table, &cache->callbacks);
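
The heart of the change is the clone-and-chain pattern: the origin write is a fast clone of the incoming bio, chained to it so the parent cannot complete until the clone has, and each bio is then remapped to its own device. The sketch below condenses the new remap_to_origin_and_cache() from the hunk above with added commentary; it assumes the pre-4.18 bio_set pointer API used by this backport.

static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
				      dm_oblock_t oblock, dm_cblock_t cblock)
{
	/* Clone allocated from the writethrough-only bio_set created in
	 * cache_create(); GFP_NOIO because we are on the IO path. */
	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);

	BUG_ON(!origin_bio);

	/* Parent (bio) only completes after origin_bio has completed. */
	bio_chain(origin_bio, bio);

	/* 'false': the clone carries no per_bio_data, so skip code paths
	 * (like check_if_tick_bio_needed()) that would dereference it. */
	__remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
	submit_bio(origin_bio);

	/* The original bio carries on to the cache device in parallel. */
	remap_to_cache(cache, bio, cblock);
}
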
diff --git a/queue-4.14/drm-radeon-fix-fb_div-check-in-ni_init_smc_spll_table.patch b/queue-4.14/drm-radeon-fix-fb_div-check-in-ni_init_smc_spll_table.patch
new file mode 100644 (file)
index 0000000..8ab4b61
--- /dev/null
@@ -0,0 +1,33 @@
+From f29aa08852e1953e461f2d47ab13c34e14bc08b3 Mon Sep 17 00:00:00 2001
+From: Denis Efremov <efremov@linux.com>
+Date: Mon, 22 Jun 2020 23:31:22 +0300
+Subject: drm/radeon: fix fb_div check in ni_init_smc_spll_table()
+
+From: Denis Efremov <efremov@linux.com>
+
+commit f29aa08852e1953e461f2d47ab13c34e14bc08b3 upstream.
+
+clk_s is checked twice in a row in ni_init_smc_spll_table().
+fb_div should be checked instead.
+
+Fixes: 69e0b57a91ad ("drm/radeon/kms: add dpm support for cayman (v5)")
+Cc: stable@vger.kernel.org
+Signed-off-by: Denis Efremov <efremov@linux.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/ni_dpm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -2123,7 +2123,7 @@ static int ni_init_smc_spll_table(struct
+               if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+                       ret = -EINVAL;
+-              if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
++              if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+                       ret = -EINVAL;
+               if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
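
The bug class here is a copy-pasted range check: each divider must fit in its bit-field before being shifted into the SPLL table entry, and the second check accidentally retested clk_s instead of fb_div. A generic sketch of the validate-then-pack idiom, using made-up masks rather than the real SMC register layout:

#include <stdint.h>
#include <errno.h>

/* Hypothetical field layout, for illustration only. */
#define EX_PDIV_MASK	0x0000007f
#define EX_PDIV_SHIFT	0
#define EX_FBDIV_MASK	0x01ffff00
#define EX_FBDIV_SHIFT	8

static int pack_spll_entry(uint32_t p_div, uint32_t fb_div, uint32_t *out)
{
	/* Reject values that would overflow their field once shifted. */
	if (p_div & ~(EX_PDIV_MASK >> EX_PDIV_SHIFT))
		return -EINVAL;
	if (fb_div & ~(EX_FBDIV_MASK >> EX_FBDIV_SHIFT))
		return -EINVAL;

	*out = (p_div << EX_PDIV_SHIFT) | (fb_div << EX_FBDIV_SHIFT);
	return 0;
}
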
diff --git a/queue-4.14/genirq-affinity-handle-affinity-setting-on-inactive-interrupts-correctly.patch b/queue-4.14/genirq-affinity-handle-affinity-setting-on-inactive-interrupts-correctly.patch
new file mode 100644 (file)
index 0000000..10b494d
--- /dev/null
@@ -0,0 +1,123 @@
+From baedb87d1b53532f81b4bd0387f83b05d4f7eb9a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 17 Jul 2020 18:00:02 +0200
+Subject: genirq/affinity: Handle affinity setting on inactive interrupts correctly
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit baedb87d1b53532f81b4bd0387f83b05d4f7eb9a upstream.
+
+Setting interrupt affinity on inactive interrupts is inconsistent when
+hierarchical irq domains are enabled. The core code should just store the
+affinity and not call into the irq chip driver for inactive interrupts
+because the chip drivers may not be in a state to handle such requests.
+
+X86 has a hacky workaround for that, but all other irq chips do not, which
+causes problems, e.g. on GIC V3 ITS.
+
+Instead of adding more ugly hacks all over the place, solve the problem in
+the core code. If the affinity is set on an inactive interrupt then:
+
+    - Store it in the irq descriptors affinity mask
+    - Update the effective affinity to reflect that so user space has
+      a consistent view
+    - Don't call into the irq chip driver
+
+This is the core equivalent of the X86 workaround and works correctly
+because the affinity setting is established in the irq chip when the
+interrupt is activated later on.
+
+Note that this is only effective when hierarchical irq domains are enabled
+by the architecture. Doing it unconditionally would break legacy irq chip
+implementations.
+
+For hierarchical irq domains this works correctly, as none of the drivers can
+have a dependency on affinity setting in inactive state by design.
+
+Remove the X86 workaround as it is no longer required.
+
+Fixes: 02edee152d6e ("x86/apic/vector: Ignore set_affinity call for inactive interrupts")
+Reported-by: Ali Saidi <alisaidi@amazon.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Ali Saidi <alisaidi@amazon.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200529015501.15771-1-alisaidi@amazon.com
+Link: https://lkml.kernel.org/r/877dv2rv25.fsf@nanos.tec.linutronix.de
+[fllinden@amazon.com - 4.14 never had the x86 workaround, so skip x86 changes]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/irq/manage.c |   37 +++++++++++++++++++++++++++++++++++--
+ 1 file changed, 35 insertions(+), 2 deletions(-)
+
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -168,9 +168,9 @@ void irq_set_thread_affinity(struct irq_
+                       set_bit(IRQTF_AFFINITY, &action->thread_flags);
+ }
++#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ static void irq_validate_effective_affinity(struct irq_data *data)
+ {
+-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+       const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
+       struct irq_chip *chip = irq_data_get_irq_chip(data);
+@@ -178,9 +178,19 @@ static void irq_validate_effective_affin
+               return;
+       pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
+                    chip->name, data->irq);
+-#endif
+ }
++static inline void irq_init_effective_affinity(struct irq_data *data,
++                                             const struct cpumask *mask)
++{
++      cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
++}
++#else
++static inline void irq_validate_effective_affinity(struct irq_data *data) { }
++static inline void irq_init_effective_affinity(struct irq_data *data,
++                                             const struct cpumask *mask) { }
++#endif
++
+ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                       bool force)
+ {
+@@ -205,6 +215,26 @@ int irq_do_set_affinity(struct irq_data
+       return ret;
+ }
++static bool irq_set_affinity_deactivated(struct irq_data *data,
++                                       const struct cpumask *mask, bool force)
++{
++      struct irq_desc *desc = irq_data_to_desc(data);
++
++      /*
++       * If the interrupt is not yet activated, just store the affinity
++       * mask and do not call the chip driver at all. On activation the
++       * driver has to make sure anyway that the interrupt is in a
++       * useable state so startup works.
++       */
++      if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data))
++              return false;
++
++      cpumask_copy(desc->irq_common_data.affinity, mask);
++      irq_init_effective_affinity(data, mask);
++      irqd_set(data, IRQD_AFFINITY_SET);
++      return true;
++}
++
+ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+                           bool force)
+ {
+@@ -215,6 +245,9 @@ int irq_set_affinity_locked(struct irq_d
+       if (!chip || !chip->irq_set_affinity)
+               return -EINVAL;
++      if (irq_set_affinity_deactivated(data, mask, force))
++              return 0;
++
+       if (irq_can_move_pcntxt(data)) {
+               ret = irq_do_set_affinity(data, mask, force);
+       } else {
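
Beyond the functional change, the first hunk also converts irq_validate_effective_affinity() from an #ifdef inside the function body into the usual kernel pattern of a real implementation under the config option plus empty static inline stubs, so callers stay unconditional. A minimal sketch of that pattern with hypothetical names:

/* Hypothetical config option and data, for illustration only. */
struct example_data {
	int state;
};

#ifdef CONFIG_EXAMPLE_FEATURE
static void example_update_state(struct example_data *d)
{
	/* real work, compiled only when the feature is configured in */
	d->state++;
}
#else
/* Empty stub keeps every call site unconditional; the compiler drops it. */
static inline void example_update_state(struct example_data *d) { }
#endif
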
diff --git a/queue-4.14/genirq-affinity-make-affinity-setting-if-activated-opt-in.patch b/queue-4.14/genirq-affinity-make-affinity-setting-if-activated-opt-in.patch
new file mode 100644 (file)
index 0000000..b540468
--- /dev/null
@@ -0,0 +1,138 @@
+From f0c7baca180046824e07fc5f1326e83a8fd150c7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 24 Jul 2020 22:44:41 +0200
+Subject: genirq/affinity: Make affinity setting if activated opt-in
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit f0c7baca180046824e07fc5f1326e83a8fd150c7 upstream.
+
+John reported that on a RK3288 system the perf per CPU interrupts are all
+affine to CPU0 and provided the analysis:
+
+ "It looks like what happens is that because the interrupts are not per-CPU
+  in the hardware, armpmu_request_irq() calls irq_force_affinity() while
+  the interrupt is deactivated and then request_irq() with IRQF_PERCPU |
+  IRQF_NOBALANCING.
+
+  Now when irq_startup() runs with IRQ_STARTUP_NORMAL, it calls
+  irq_setup_affinity() which returns early because IRQF_PERCPU and
+  IRQF_NOBALANCING are set, leaving the interrupt on its original CPU."
+
+This was broken by the recent commit which blocked interrupt affinity
+setting in hardware before activation of the interrupt. While this works in
+general, it does not work for this particular case. As contrary to the
+initial analysis not all interrupt chip drivers implement an activate
+callback, the safe cure is to make the deferred interrupt affinity setting
+at activation time opt-in.
+
+Implement the necessary core logic and make the two irqchip implementations
+for which this is required opt-in. In hindsight this would have been the
+right thing to do, but ...
+
+Fixes: baedb87d1b53 ("genirq/affinity: Handle affinity setting on inactive interrupts correctly")
+Reported-by: John Keeping <john@metanate.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Marc Zyngier <maz@kernel.org>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/87blk4tzgm.fsf@nanos.tec.linutronix.de
+[fllinden@amazon.com - backported to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/apic/vector.c    |    4 ++++
+ drivers/irqchip/irq-gic-v3-its.c |    5 ++++-
+ include/linux/irq.h              |   12 ++++++++++++
+ kernel/irq/manage.c              |    6 +++++-
+ 4 files changed, 25 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -368,6 +368,10 @@ static int x86_vector_alloc_irqs(struct
+               irq_data->chip = &lapic_controller;
+               irq_data->chip_data = data;
+               irq_data->hwirq = virq + i;
++
++              /* Don't invoke affinity setter on deactivated interrupts */
++              irqd_set_affinity_on_activate(irq_data);
++
+               err = assign_irq_vector_policy(virq + i, node, data, info,
+                                              irq_data);
+               if (err) {
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2199,6 +2199,7 @@ static int its_irq_domain_alloc(struct i
+ {
+       msi_alloc_info_t *info = args;
+       struct its_device *its_dev = info->scratchpad[0].ptr;
++      struct irq_data *irqd;
+       irq_hw_number_t hwirq;
+       int err;
+       int i;
+@@ -2214,7 +2215,9 @@ static int its_irq_domain_alloc(struct i
+               irq_domain_set_hwirq_and_chip(domain, virq + i,
+                                             hwirq + i, &its_irq_chip, its_dev);
+-              irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
++              irqd = irq_get_irq_data(virq + i);
++              irqd_set_single_target(irqd);
++              irqd_set_affinity_on_activate(irqd);
+               pr_debug("ID:%d pID:%d vID:%d\n",
+                        (int)(hwirq + i - its_dev->event_map.lpi_base),
+                        (int)(hwirq + i), virq + i);
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -212,6 +212,8 @@ struct irq_data {
+  *                              mask. Applies only to affinity managed irqs.
+  * IRQD_SINGLE_TARGET         - IRQ allows only a single affinity target
+  * IRQD_DEFAULT_TRIGGER_SET   - Expected trigger already been set
++ * IRQD_AFFINITY_ON_ACTIVATE  - Affinity is set on activation. Don't call
++ *                              irq_chip::irq_set_affinity() when deactivated.
+  */
+ enum {
+       IRQD_TRIGGER_MASK               = 0xf,
+@@ -233,6 +235,7 @@ enum {
+       IRQD_MANAGED_SHUTDOWN           = (1 << 23),
+       IRQD_SINGLE_TARGET              = (1 << 24),
+       IRQD_DEFAULT_TRIGGER_SET        = (1 << 25),
++      IRQD_AFFINITY_ON_ACTIVATE       = (1 << 29),
+ };
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -377,6 +380,15 @@ static inline bool irqd_is_managed_and_s
+       return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
+ }
++static inline void irqd_set_affinity_on_activate(struct irq_data *d)
++{
++      __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
++}
++
++static inline bool irqd_affinity_on_activate(struct irq_data *d)
++{
++      return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
++}
+ #undef __irqd_to_state
+ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -221,12 +221,16 @@ static bool irq_set_affinity_deactivated
+       struct irq_desc *desc = irq_data_to_desc(data);
+       /*
++       * Handle irq chips which can handle affinity only in activated
++       * state correctly
++       *
+        * If the interrupt is not yet activated, just store the affinity
+        * mask and do not call the chip driver at all. On activation the
+        * driver has to make sure anyway that the interrupt is in a
+        * useable state so startup works.
+        */
+-      if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data))
++      if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
++          irqd_is_activated(data) || !irqd_affinity_on_activate(data))
+               return false;
+       cpumask_copy(desc->irq_common_data.affinity, mask);
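
With the opt-in flag in place, any other irq chip that can only program affinity once the interrupt is activated would set the flag while allocating its irq_data, just as the x86 vector and GIC v3 ITS hunks above do. A hypothetical driver-side sketch; only irqd_set_affinity_on_activate() comes from the patch, everything else is made up:

/* Hypothetical irq domain ->alloc() callback for a chip that can only
 * program affinity once the interrupt is activated. */
static int example_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct example_alloc_info *info = arg;	/* hypothetical */
	struct irq_data *irqd;
	unsigned int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      info->hwirq_base + i,
					      &example_irq_chip, NULL);
		irqd = irq_get_irq_data(virq + i);
		/* Defer irq_chip::irq_set_affinity() until activation. */
		irqd_set_affinity_on_activate(irqd);
	}
	return 0;
}
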
diff --git a/queue-4.14/khugepaged-retract_page_tables-remember-to-test-exit.patch b/queue-4.14/khugepaged-retract_page_tables-remember-to-test-exit.patch
new file mode 100644 (file)
index 0000000..432d59e
--- /dev/null
@@ -0,0 +1,94 @@
+From 18e77600f7a1ed69f8ce46c9e11cad0985712dfa Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 6 Aug 2020 23:26:22 -0700
+Subject: khugepaged: retract_page_tables() remember to test exit
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 18e77600f7a1ed69f8ce46c9e11cad0985712dfa upstream.
+
+Only once have I seen this scenario (and forgot even to notice what forced
+the eventual crash): a sequence of "BUG: Bad page map" alerts from
+vm_normal_page(), from zap_pte_range() servicing exit_mmap();
+pmd:00000000, pte values corresponding to data in physical page 0.
+
+The pte mappings being zapped in this case were supposed to be from a huge
+page of ext4 text (but could as well have been shmem): my belief is that
+it was racing with collapse_file()'s retract_page_tables(), found *pmd
+pointing to a page table, locked it, but *pmd had become 0 by the time
+start_pte was decided.
+
+In most cases, that possibility is excluded by holding mmap lock; but
+exit_mmap() proceeds without mmap lock.  Most of what's run by khugepaged
+checks khugepaged_test_exit() after acquiring mmap lock:
+khugepaged_collapse_pte_mapped_thps() and hugepage_vma_revalidate() do so,
+for example.  But retract_page_tables() did not: fix that.
+
+The fix is for retract_page_tables() to check khugepaged_test_exit(),
+after acquiring mmap lock, before doing anything to the page table.
+Getting the mmap lock serializes with __mmput(), which briefly takes and
+drops it in __khugepaged_exit(); then the khugepaged_test_exit() check on
+mm_users makes sure we don't touch the page table once exit_mmap() might
+reach it, since exit_mmap() will be proceeding without mmap lock, not
+expecting anyone to be racing with it.
+
+Fixes: f3f0e1d2150b ("khugepaged: add support of collapse for tmpfs/shmem pages")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: <stable@vger.kernel.org>   [4.8+]
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008021215400.27773@eggly.anvils
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/khugepaged.c |   22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1252,6 +1252,7 @@ static void collect_mm_slot(struct mm_sl
+ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+ {
+       struct vm_area_struct *vma;
++      struct mm_struct *mm;
+       unsigned long addr;
+       pmd_t *pmd, _pmd;
+@@ -1265,7 +1266,8 @@ static void retract_page_tables(struct a
+                       continue;
+               if (vma->vm_end < addr + HPAGE_PMD_SIZE)
+                       continue;
+-              pmd = mm_find_pmd(vma->vm_mm, addr);
++              mm = vma->vm_mm;
++              pmd = mm_find_pmd(mm, addr);
+               if (!pmd)
+                       continue;
+               /*
+@@ -1274,14 +1276,16 @@ static void retract_page_tables(struct a
+                * re-fault. Not ideal, but it's more important to not disturb
+                * the system too much.
+                */
+-              if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
+-                      spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
+-                      /* assume page table is clear */
+-                      _pmd = pmdp_collapse_flush(vma, addr, pmd);
+-                      spin_unlock(ptl);
+-                      up_write(&vma->vm_mm->mmap_sem);
+-                      atomic_long_dec(&vma->vm_mm->nr_ptes);
+-                      pte_free(vma->vm_mm, pmd_pgtable(_pmd));
++              if (down_write_trylock(&mm->mmap_sem)) {
++                      if (!khugepaged_test_exit(mm)) {
++                              spinlock_t *ptl = pmd_lock(mm, pmd);
++                              /* assume page table is clear */
++                              _pmd = pmdp_collapse_flush(vma, addr, pmd);
++                              spin_unlock(ptl);
++                              atomic_long_dec(&mm->nr_ptes);
++                              pte_free(mm, pmd_pgtable(_pmd));
++                      }
++                      up_write(&mm->mmap_sem);
+               }
+       }
+       i_mmap_unlock_write(mapping);
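
The shape of the fix is the same take-the-lock-then-retest pattern the commit message credits to the other khugepaged paths: the page table is only touched if mmap_sem was obtained and the mm has not begun exiting in the meantime. Condensed from the hunk above, with the actual collapse steps folded into a hypothetical helper:

	if (down_write_trylock(&mm->mmap_sem)) {
		/* Re-test under the lock: once mm_users hits zero, exit_mmap()
		 * may be tearing these page tables down without mmap_sem. */
		if (!khugepaged_test_exit(mm))
			collapse_page_table(mm, vma, addr, pmd);  /* hypothetical helper */
		up_write(&mm->mmap_sem);
	}
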
diff --git a/queue-4.14/series b/queue-4.14/series
index 72f6d59e85e1df3ad9762a86babaa31d52d0abb3..3a6dc63ffc5a74e9b26ea0008c6e569e767c805d 100644 (file)
@@ -218,3 +218,11 @@ alsa-echoaudio-fix-potential-oops-in-snd_echo_resume.patch
 perf-bench-mem-always-memset-source-before-memcpy.patch
 tools-build-feature-quote-cc-and-cxx-for-their-argum.patch
 sh-landisk-add-missing-initialization-of-sh_io_port_.patch
+khugepaged-retract_page_tables-remember-to-test-exit.patch
+genirq-affinity-handle-affinity-setting-on-inactive-interrupts-correctly.patch
+genirq-affinity-make-affinity-setting-if-activated-opt-in.patch
+arm64-dts-marvell-espressobin-add-ethernet-alias.patch
+dm-cache-pass-cache-structure-to-mode-functions.patch
+dm-cache-submit-writethrough-writes-in-parallel-to-origin-and-cache.patch
+dm-cache-remove-all-obsolete-writethrough-specific-code.patch
+drm-radeon-fix-fb_div-check-in-ni_init_smc_spll_table.patch