git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 10 Mar 2021 12:15:56 +0000 (13:15 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 10 Mar 2021 12:15:56 +0000 (13:15 +0100)
added patches:
dm-table-fix-dax-iterate_devices-based-device-capability-checks.patch
dm-table-fix-iterate_devices-based-device-capability-checks.patch
dm-table-fix-zoned-iterate_devices-based-device-capability-checks.patch
iommu-amd-fix-sleeping-in-atomic-in-increase_address_space.patch

queue-4.14/dm-table-fix-dax-iterate_devices-based-device-capability-checks.patch [new file with mode: 0644]
queue-4.14/dm-table-fix-iterate_devices-based-device-capability-checks.patch [new file with mode: 0644]
queue-4.14/dm-table-fix-zoned-iterate_devices-based-device-capability-checks.patch [new file with mode: 0644]
queue-4.14/iommu-amd-fix-sleeping-in-atomic-in-increase_address_space.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/dm-table-fix-dax-iterate_devices-based-device-capability-checks.patch b/queue-4.14/dm-table-fix-dax-iterate_devices-based-device-capability-checks.patch
new file mode 100644 (file)
index 0000000..783d761
--- /dev/null
@@ -0,0 +1,83 @@
+From foo@baz Wed Mar 10 01:10:44 PM CET 2021
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+Date: Tue,  9 Mar 2021 11:27:44 +0800
+Subject: dm table: fix DAX iterate_devices based device capability checks
+To: snitzer@redhat.com, gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, jefflexu@linux.alibaba.com
+Message-ID: <20210309032745.106175-3-jefflexu@linux.alibaba.com>
+
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+
+commit 5b0fab508992c2e120971da658ce80027acbc405 upstream.
+
+Fix dm_table_supports_dax() and invert logic of both
+iterate_devices_callout_fn so that all devices' DAX capabilities are
+properly checked.
+
+Fixes: 545ed20e6df6 ("dm: add infrastructure for DAX support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+[jeffle: no dax synchronous]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-table.c |   25 ++++---------------------
+ 1 file changed, 4 insertions(+), 21 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -889,10 +889,10 @@ void dm_table_set_type(struct dm_table *
+ }
+ EXPORT_SYMBOL_GPL(dm_table_set_type);
+-static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
++static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+                              sector_t start, sector_t len, void *data)
+ {
+-      return bdev_dax_supported(dev->bdev, PAGE_SIZE);
++      return !bdev_dax_supported(dev->bdev, PAGE_SIZE);
+ }
+ static bool dm_table_supports_dax(struct dm_table *t)
+@@ -908,7 +908,7 @@ static bool dm_table_supports_dax(struct
+                       return false;
+               if (!ti->type->iterate_devices ||
+-                  !ti->type->iterate_devices(ti, device_supports_dax, NULL))
++                  ti->type->iterate_devices(ti, device_not_dax_capable, NULL))
+                       return false;
+       }
+@@ -1690,23 +1690,6 @@ static int device_dax_write_cache_enable
+       return false;
+ }
+-static int dm_table_supports_dax_write_cache(struct dm_table *t)
+-{
+-      struct dm_target *ti;
+-      unsigned i;
+-
+-      for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-              ti = dm_table_get_target(t, i);
+-
+-              if (ti->type->iterate_devices &&
+-                  ti->type->iterate_devices(ti,
+-                              device_dax_write_cache_enabled, NULL))
+-                      return true;
+-      }
+-
+-      return false;
+-}
+-
+ static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
+                               sector_t start, sector_t len, void *data)
+ {
+@@ -1854,7 +1837,7 @@ void dm_table_set_restrictions(struct dm
+       else
+               queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q);
+-      if (dm_table_supports_dax_write_cache(t))
++      if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled))
+               dax_write_cache(t->md->dax_dev, true);
+       /* Ensure that all underlying devices are non-rotational. */
diff --git a/queue-4.14/dm-table-fix-iterate_devices-based-device-capability-checks.patch b/queue-4.14/dm-table-fix-iterate_devices-based-device-capability-checks.patch
new file mode 100644 (file)
index 0000000..a32eb95
--- /dev/null
@@ -0,0 +1,221 @@
+From foo@baz Wed Mar 10 01:10:44 PM CET 2021
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+Date: Tue,  9 Mar 2021 11:27:43 +0800
+Subject: dm table: fix iterate_devices based device capability checks
+To: snitzer@redhat.com, gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, jefflexu@linux.alibaba.com
+Message-ID: <20210309032745.106175-2-jefflexu@linux.alibaba.com>
+
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+
+commit a4c8dd9c2d0987cf542a2a0c42684c9c6d78a04e upstream.
+
+According to the definition of dm_iterate_devices_fn:
+ * This function must iterate through each section of device used by the
+ * target until it encounters a non-zero return code, which it then returns.
+ * Returns zero if no callout returned non-zero.
+
+For some target type (e.g. dm-stripe), one call of iterate_devices() may
+iterate multiple underlying devices internally, in which case a non-zero
+return code returned by iterate_devices_callout_fn will stop the iteration
+in advance. No iterate_devices_callout_fn should return non-zero unless
+device iteration should stop.
+
+Rename dm_table_requires_stable_pages() to dm_table_any_dev_attr() and
+elevate it for reuse to stop iterating (and return non-zero) on the
+first device that causes iterate_devices_callout_fn to return non-zero.
+Use dm_table_any_dev_attr() to properly iterate through devices.
+
+Rename device_is_nonrot() to device_is_rotational() and invert logic
+accordingly to fix improper disposition.
+
+[jeffle: backport notes]
+Also convert the no_sg_merge capability check, which is introduced by
+commit 200612ec33e5 ("dm table: propagate QUEUE_FLAG_NO_SG_MERGE"), and
+removed since commit 2705c93742e9 ("block: kill QUEUE_FLAG_NO_SG_MERGE")
+in v5.1.
+
+Fixes: c3c4555edd10 ("dm table: clear add_random unless all devices have it set")
+Fixes: 4693c9668fdc ("dm table: propagate non rotational flag")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-table.c |  109 ++++++++++++++++++++++++++------------------------
+ 1 file changed, 57 insertions(+), 52 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1351,6 +1351,46 @@ struct dm_target *dm_table_find_target(s
+       return &t->targets[(KEYS_PER_NODE * n) + k];
+ }
++/*
++ * type->iterate_devices() should be called when the sanity check needs to
++ * iterate and check all underlying data devices. iterate_devices() will
++ * iterate all underlying data devices until it encounters a non-zero return
++ * code, returned by whether the input iterate_devices_callout_fn, or
++ * iterate_devices() itself internally.
++ *
++ * For some target type (e.g. dm-stripe), one call of iterate_devices() may
++ * iterate multiple underlying devices internally, in which case a non-zero
++ * return code returned by iterate_devices_callout_fn will stop the iteration
++ * in advance.
++ *
++ * Cases requiring _any_ underlying device supporting some kind of attribute,
++ * should use the iteration structure like dm_table_any_dev_attr(), or call
++ * it directly. @func should handle semantics of positive examples, e.g.
++ * capable of something.
++ *
++ * Cases requiring _all_ underlying devices supporting some kind of attribute,
++ * should use the iteration structure like dm_table_supports_nowait() or
++ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
++ * uses an @anti_func that handle semantics of counter examples, e.g. not
++ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func);
++ */
++static bool dm_table_any_dev_attr(struct dm_table *t,
++                                iterate_devices_callout_fn func)
++{
++      struct dm_target *ti;
++      unsigned int i;
++
++      for (i = 0; i < dm_table_get_num_targets(t); i++) {
++              ti = dm_table_get_target(t, i);
++
++              if (ti->type->iterate_devices &&
++                  ti->type->iterate_devices(ti, func, NULL))
++                      return true;
++        }
++
++      return false;
++}
++
+ static int count_device(struct dm_target *ti, struct dm_dev *dev,
+                       sector_t start, sector_t len, void *data)
+ {
+@@ -1667,12 +1707,12 @@ static int dm_table_supports_dax_write_c
+       return false;
+ }
+-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+-                          sector_t start, sector_t len, void *data)
++static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
++                              sector_t start, sector_t len, void *data)
+ {
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+-      return q && blk_queue_nonrot(q);
++      return q && !blk_queue_nonrot(q);
+ }
+ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+@@ -1683,29 +1723,12 @@ static int device_is_not_random(struct d
+       return q && !blk_queue_add_random(q);
+ }
+-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
+-                                 sector_t start, sector_t len, void *data)
++static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev,
++                           sector_t start, sector_t len, void *data)
+ {
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+-      return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+-}
+-
+-static bool dm_table_all_devices_attribute(struct dm_table *t,
+-                                         iterate_devices_callout_fn func)
+-{
+-      struct dm_target *ti;
+-      unsigned i;
+-
+-      for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-              ti = dm_table_get_target(t, i);
+-
+-              if (!ti->type->iterate_devices ||
+-                  !ti->type->iterate_devices(ti, func, NULL))
+-                      return false;
+-      }
+-
+-      return true;
++      return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+ }
+ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+@@ -1804,27 +1827,6 @@ static int device_requires_stable_pages(
+       return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+ }
+-/*
+- * If any underlying device requires stable pages, a table must require
+- * them as well.  Only targets that support iterate_devices are considered:
+- * don't want error, zero, etc to require stable pages.
+- */
+-static bool dm_table_requires_stable_pages(struct dm_table *t)
+-{
+-      struct dm_target *ti;
+-      unsigned i;
+-
+-      for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-              ti = dm_table_get_target(t, i);
+-
+-              if (ti->type->iterate_devices &&
+-                  ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+-                      return true;
+-      }
+-
+-      return false;
+-}
+-
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+                              struct queue_limits *limits)
+ {
+@@ -1856,28 +1858,31 @@ void dm_table_set_restrictions(struct dm
+               dax_write_cache(t->md->dax_dev, true);
+       /* Ensure that all underlying devices are non-rotational. */
+-      if (dm_table_all_devices_attribute(t, device_is_nonrot))
+-              queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+-      else
++      if (dm_table_any_dev_attr(t, device_is_rotational))
+               queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
++      else
++              queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       if (!dm_table_supports_write_same(t))
+               q->limits.max_write_same_sectors = 0;
+       if (!dm_table_supports_write_zeroes(t))
+               q->limits.max_write_zeroes_sectors = 0;
+-      if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
+-              queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+-      else
++      if (dm_table_any_dev_attr(t, queue_no_sg_merge))
+               queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
++      else
++              queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+       dm_table_verify_integrity(t);
+       /*
+        * Some devices don't use blk_integrity but still want stable pages
+        * because they do their own checksumming.
++       * If any underlying device requires stable pages, a table must require
++       * them as well.  Only targets that support iterate_devices are considered:
++       * don't want error, zero, etc to require stable pages.
+        */
+-      if (dm_table_requires_stable_pages(t))
++      if (dm_table_any_dev_attr(t, device_requires_stable_pages))
+               q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+       else
+               q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+@@ -1888,7 +1893,7 @@ void dm_table_set_restrictions(struct dm
+        * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+        * have it set.
+        */
+-      if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
++      if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random))
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+       /*
diff --git a/queue-4.14/dm-table-fix-zoned-iterate_devices-based-device-capability-checks.patch b/queue-4.14/dm-table-fix-zoned-iterate_devices-based-device-capability-checks.patch
new file mode 100644 (file)
index 0000000..d87c30b
--- /dev/null
@@ -0,0 +1,165 @@
+From foo@baz Wed Mar 10 01:10:44 PM CET 2021
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+Date: Tue,  9 Mar 2021 11:27:45 +0800
+Subject: dm table: fix zoned iterate_devices based device capability checks
+To: snitzer@redhat.com, gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, jefflexu@linux.alibaba.com
+Message-ID: <20210309032745.106175-4-jefflexu@linux.alibaba.com>
+
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+
+commit 24f6b6036c9eec21191646930ad42808e6180510 upstream.
+
+Fix dm_table_supports_zoned_model() and invert logic of both
+iterate_devices_callout_fn so that all devices' zoned capabilities are
+properly checked.
+
+Add one more parameter to dm_table_any_dev_attr(), which is actually
+used as the @data parameter of iterate_devices_callout_fn, so that
+dm_table_matches_zone_sectors() can be replaced by
+dm_table_any_dev_attr().
+
+Fixes: dd88d313bef02 ("dm table: add zoned block devices validation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+[jeffle: also convert no_sg_merge check]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-table.c |   50 +++++++++++++++++---------------------------------
+ 1 file changed, 17 insertions(+), 33 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1372,10 +1372,10 @@ struct dm_target *dm_table_find_target(s
+  * should use the iteration structure like dm_table_supports_nowait() or
+  * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
+  * uses an @anti_func that handle semantics of counter examples, e.g. not
+- * capable of something. So: return !dm_table_any_dev_attr(t, anti_func);
++ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
+  */
+ static bool dm_table_any_dev_attr(struct dm_table *t,
+-                                iterate_devices_callout_fn func)
++                                iterate_devices_callout_fn func, void *data)
+ {
+       struct dm_target *ti;
+       unsigned int i;
+@@ -1384,7 +1384,7 @@ static bool dm_table_any_dev_attr(struct
+               ti = dm_table_get_target(t, i);
+               if (ti->type->iterate_devices &&
+-                  ti->type->iterate_devices(ti, func, NULL))
++                  ti->type->iterate_devices(ti, func, data))
+                       return true;
+         }
+@@ -1427,13 +1427,13 @@ bool dm_table_has_no_data_devices(struct
+       return true;
+ }
+-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+-                               sector_t start, sector_t len, void *data)
++static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
++                                sector_t start, sector_t len, void *data)
+ {
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+       enum blk_zoned_model *zoned_model = data;
+-      return q && blk_queue_zoned_model(q) == *zoned_model;
++      return !q || blk_queue_zoned_model(q) != *zoned_model;
+ }
+ static bool dm_table_supports_zoned_model(struct dm_table *t,
+@@ -1450,37 +1450,20 @@ static bool dm_table_supports_zoned_mode
+                       return false;
+               if (!ti->type->iterate_devices ||
+-                  !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
++                  ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
+                       return false;
+       }
+       return true;
+ }
+-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+-                                     sector_t start, sector_t len, void *data)
++static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
++                                         sector_t start, sector_t len, void *data)
+ {
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+       unsigned int *zone_sectors = data;
+-      return q && blk_queue_zone_sectors(q) == *zone_sectors;
+-}
+-
+-static bool dm_table_matches_zone_sectors(struct dm_table *t,
+-                                        unsigned int zone_sectors)
+-{
+-      struct dm_target *ti;
+-      unsigned i;
+-
+-      for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-              ti = dm_table_get_target(t, i);
+-
+-              if (!ti->type->iterate_devices ||
+-                  !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
+-                      return false;
+-      }
+-
+-      return true;
++      return !q || blk_queue_zone_sectors(q) != *zone_sectors;
+ }
+ static int validate_hardware_zoned_model(struct dm_table *table,
+@@ -1500,7 +1483,7 @@ static int validate_hardware_zoned_model
+       if (!zone_sectors || !is_power_of_2(zone_sectors))
+               return -EINVAL;
+-      if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
++      if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
+               DMERR("%s: zone sectors is not consistent across all devices",
+                     dm_device_name(table->md));
+               return -EINVAL;
+@@ -1837,11 +1820,11 @@ void dm_table_set_restrictions(struct dm
+       else
+               queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q);
+-      if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled))
++      if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
+               dax_write_cache(t->md->dax_dev, true);
+       /* Ensure that all underlying devices are non-rotational. */
+-      if (dm_table_any_dev_attr(t, device_is_rotational))
++      if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
+               queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+       else
+               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+@@ -1851,7 +1834,7 @@ void dm_table_set_restrictions(struct dm
+       if (!dm_table_supports_write_zeroes(t))
+               q->limits.max_write_zeroes_sectors = 0;
+-      if (dm_table_any_dev_attr(t, queue_no_sg_merge))
++      if (dm_table_any_dev_attr(t, queue_no_sg_merge, NULL))
+               queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+       else
+               queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+@@ -1865,7 +1848,7 @@ void dm_table_set_restrictions(struct dm
+        * them as well.  Only targets that support iterate_devices are considered:
+        * don't want error, zero, etc to require stable pages.
+        */
+-      if (dm_table_any_dev_attr(t, device_requires_stable_pages))
++      if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
+               q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+       else
+               q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+@@ -1876,7 +1859,8 @@ void dm_table_set_restrictions(struct dm
+        * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+        * have it set.
+        */
+-      if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random))
++      if (blk_queue_add_random(q) &&
++          dm_table_any_dev_attr(t, device_is_not_random, NULL))
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+       /*
diff --git a/queue-4.14/iommu-amd-fix-sleeping-in-atomic-in-increase_address_space.patch b/queue-4.14/iommu-amd-fix-sleeping-in-atomic-in-increase_address_space.patch
new file mode 100644 (file)
index 0000000..d45ef16
--- /dev/null
@@ -0,0 +1,79 @@
+From 140456f994195b568ecd7fc2287a34eadffef3ca Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <arbn@yandex-team.com>
+Date: Wed, 17 Feb 2021 17:30:04 +0300
+Subject: iommu/amd: Fix sleeping in atomic in increase_address_space()
+
+From: Andrey Ryabinin <arbn@yandex-team.com>
+
+commit 140456f994195b568ecd7fc2287a34eadffef3ca upstream.
+
+increase_address_space() calls get_zeroed_page(gfp) under spin_lock with
+disabled interrupts. gfp flags passed to increase_address_space() may allow
+sleeping, so it comes to this:
+
+ BUG: sleeping function called from invalid context at mm/page_alloc.c:4342
+ in_atomic(): 1, irqs_disabled(): 1, pid: 21555, name: epdcbbf1qnhbsd8
+
+ Call Trace:
+  dump_stack+0x66/0x8b
+  ___might_sleep+0xec/0x110
+  __alloc_pages_nodemask+0x104/0x300
+  get_zeroed_page+0x15/0x40
+  iommu_map_page+0xdd/0x3e0
+  amd_iommu_map+0x50/0x70
+  iommu_map+0x106/0x220
+  vfio_iommu_type1_ioctl+0x76e/0x950 [vfio_iommu_type1]
+  do_vfs_ioctl+0xa3/0x6f0
+  ksys_ioctl+0x66/0x70
+  __x64_sys_ioctl+0x16/0x20
+  do_syscall_64+0x4e/0x100
+  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fix this by moving get_zeroed_page() out of spin_lock/unlock section.
+
+Fixes: 754265bcab ("iommu/amd: Fix race in increase_address_space()")
+Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20210217143004.19165-1-arbn@yandex-team.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/amd_iommu.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1347,24 +1347,26 @@ static void increase_address_space(struc
+       unsigned long flags;
+       u64 *pte;
++      pte = (void *)get_zeroed_page(gfp);
++      if (!pte)
++              return;
++
+       spin_lock_irqsave(&domain->lock, flags);
+       if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
+               /* address space already 64 bit large */
+               goto out;
+-      pte = (void *)get_zeroed_page(gfp);
+-      if (!pte)
+-              goto out;
+-
+       *pte             = PM_LEVEL_PDE(domain->mode,
+                                       iommu_virt_to_phys(domain->pt_root));
+       domain->pt_root  = pte;
+       domain->mode    += 1;
+       domain->updated  = true;
++      pte              = NULL;
+ out:
+       spin_unlock_irqrestore(&domain->lock, flags);
++      free_page((unsigned long)pte);
+       return;
+ }
index c569dcb523482d6142fd538ab31db532ead52a90..e6fbb275074f63373dfa835bf3b80480ab4e4861 100644 (file)
@@ -4,3 +4,7 @@ pm-runtime-update-device-status-before-letting-suppliers-suspend.patch
 usbip-tools-fix-build-error-for-multiple-definition.patch
 alsa-ctxfi-cthw20k2-fix-mask-on-conf-to-allow-4-bits.patch
 rsxx-return-efault-if-copy_to_user-fails.patch
+dm-table-fix-iterate_devices-based-device-capability-checks.patch
+dm-table-fix-dax-iterate_devices-based-device-capability-checks.patch
+dm-table-fix-zoned-iterate_devices-based-device-capability-checks.patch
+iommu-amd-fix-sleeping-in-atomic-in-increase_address_space.patch