git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Nov 2020 10:34:34 +0000 (11:34 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Nov 2020 10:34:34 +0000 (11:34 +0100)
added patches:
null_blk-fix-scheduling-in-atomic-with-zoned-mode.patch
powerpc-603-always-fault-when-_page_accessed-is-not-set.patch

queue-5.9/null_blk-fix-scheduling-in-atomic-with-zoned-mode.patch [new file with mode: 0644]
queue-5.9/powerpc-603-always-fault-when-_page_accessed-is-not-set.patch [new file with mode: 0644]
queue-5.9/series

diff --git a/queue-5.9/null_blk-fix-scheduling-in-atomic-with-zoned-mode.patch b/queue-5.9/null_blk-fix-scheduling-in-atomic-with-zoned-mode.patch
new file mode 100644 (file)
index 0000000..a3e38b6
--- /dev/null
@@ -0,0 +1,109 @@
+From e1777d099728a76a8f8090f89649aac961e7e530 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Fri, 6 Nov 2020 20:01:41 +0900
+Subject: null_blk: Fix scheduling in atomic with zoned mode
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit e1777d099728a76a8f8090f89649aac961e7e530 upstream.
+
+Commit aa1c09cb65e2 ("null_blk: Fix locking in zoned mode") changed
+zone locking to using the potentially sleeping wait_on_bit_io()
+function. This is acceptable when memory backing is enabled as the
+device queue is in that case marked as blocking, but this triggers a
+scheduling while in atomic context with memory backing disabled.
+
+Fix this by relying solely on the device zone spinlock for zone
+information protection without temporarily releasing this lock around
+null_process_cmd() execution in null_zone_write(). This is OK to do
+since when memory backing is disabled, command processing does not
+block and the memory backing lock nullb->lock is unused. This solution
+avoids the overhead of having to mark a zoned null_blk device queue as
+blocking when memory backing is unused.
+
+This patch also adds comments to the zone locking code to explain the
+unusual locking scheme.
+
+Fixes: aa1c09cb65e2 ("null_blk: Fix locking in zoned mode")
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/block/null_blk.h       |    1 +
+ drivers/block/null_blk_zoned.c |   31 +++++++++++++++++++++++++------
+ 2 files changed, 26 insertions(+), 6 deletions(-)
+
+--- a/drivers/block/null_blk.h
++++ b/drivers/block/null_blk.h
+@@ -44,6 +44,7 @@ struct nullb_device {
+       unsigned int nr_zones;
+       struct blk_zone *zones;
+       sector_t zone_size_sects;
++      spinlock_t zone_lock;
+       unsigned long *zone_locks;
+       unsigned long size; /* device size in MB */
+--- a/drivers/block/null_blk_zoned.c
++++ b/drivers/block/null_blk_zoned.c
+@@ -46,10 +46,20 @@ int null_init_zoned_dev(struct nullb_dev
+       if (!dev->zones)
+               return -ENOMEM;
+-      dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+-      if (!dev->zone_locks) {
+-              kvfree(dev->zones);
+-              return -ENOMEM;
++      /*
++       * With memory backing, the zone_lock spinlock needs to be temporarily
++       * released to avoid scheduling in atomic context. To guarantee zone
++       * information protection, use a bitmap to lock zones with
++       * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
++       * implies that the queue is marked with BLK_MQ_F_BLOCKING.
++       */
++      spin_lock_init(&dev->zone_lock);
++      if (dev->memory_backed) {
++              dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
++              if (!dev->zone_locks) {
++                      kvfree(dev->zones);
++                      return -ENOMEM;
++              }
+       }
+       if (dev->zone_nr_conv >= dev->nr_zones) {
+@@ -118,12 +128,16 @@ void null_free_zoned_dev(struct nullb_de
+ static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+ {
+-      wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
++      if (dev->memory_backed)
++              wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
++      spin_lock_irq(&dev->zone_lock);
+ }
+ static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+ {
+-      clear_and_wake_up_bit(zno, dev->zone_locks);
++      spin_unlock_irq(&dev->zone_lock);
++      if (dev->memory_backed)
++              clear_and_wake_up_bit(zno, dev->zone_locks);
+ }
+ int null_report_zones(struct gendisk *disk, sector_t sector,
+@@ -233,7 +247,12 @@ static blk_status_t null_zone_write(stru
+               if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
+                       zone->cond = BLK_ZONE_COND_IMP_OPEN;
++              if (dev->memory_backed)
++                      spin_unlock_irq(&dev->zone_lock);
+               ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
++              if (dev->memory_backed)
++                      spin_lock_irq(&dev->zone_lock);
++
+               if (ret != BLK_STS_OK)
+                       break;
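
The commit message above describes an unusual scheme: a spinlock protects the zone metadata, a per-zone sleeping lock is only taken when memory backing makes command processing blocking, and the spinlock is dropped around the command itself. Below is a minimal userspace C sketch of that pattern, assuming hypothetical stand-ins (struct dev, process_cmd(), pthread locks in place of the kernel primitives); it illustrates the idea, not the driver code.

/* Minimal userspace sketch of the locking scheme described in the commit
 * message above: a spinlock guards zone metadata, and a per-zone sleeping
 * lock (standing in for the kernel's wait_on_bit_lock_io() bit lock) is
 * only taken when the potentially blocking "memory backed" path is used.
 * All names (struct dev, process_cmd, ...) are illustrative, not the
 * kernel's. Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct zone {
	unsigned long wp;		/* write pointer (zone metadata) */
};

struct dev {
	bool memory_backed;		/* backend may sleep? */
	pthread_spinlock_t zone_lock;	/* protects zone metadata */
	pthread_mutex_t *zone_locks;	/* per-zone sleeping locks */
	struct zone *zones;
	unsigned int nr_zones;
};

static void lock_zone(struct dev *d, unsigned int zno)
{
	if (d->memory_backed)
		pthread_mutex_lock(&d->zone_locks[zno]);	/* may sleep */
	pthread_spin_lock(&d->zone_lock);
}

static void unlock_zone(struct dev *d, unsigned int zno)
{
	pthread_spin_unlock(&d->zone_lock);
	if (d->memory_backed)
		pthread_mutex_unlock(&d->zone_locks[zno]);
}

/* Stand-in for null_process_cmd(): may block when memory backed. */
static int process_cmd(struct dev *d, unsigned int zno, unsigned long sectors)
{
	(void)d; (void)zno; (void)sectors;	/* data copy would go here */
	return 0;
}

static int zone_write(struct dev *d, unsigned int zno, unsigned long sectors)
{
	int ret;

	lock_zone(d, zno);
	/* Never sleep under the spinlock: drop it around the command when
	 * the backend can block; the per-zone lock still covers the zone. */
	if (d->memory_backed)
		pthread_spin_unlock(&d->zone_lock);
	ret = process_cmd(d, zno, sectors);
	if (d->memory_backed)
		pthread_spin_lock(&d->zone_lock);
	if (ret == 0)
		d->zones[zno].wp += sectors;	/* metadata update under zone_lock */
	unlock_zone(d, zno);
	return ret;
}

int main(void)
{
	struct zone zones[4] = {{0}};
	pthread_mutex_t locks[4];
	struct dev d = { .memory_backed = true, .zones = zones,
			 .zone_locks = locks, .nr_zones = 4 };

	pthread_spin_init(&d.zone_lock, PTHREAD_PROCESS_PRIVATE);
	for (unsigned int i = 0; i < d.nr_zones; i++)
		pthread_mutex_init(&locks[i], NULL);

	zone_write(&d, 1, 8);
	printf("zone 1 wp = %lu\n", zones[1].wp);
	return 0;
}
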
diff --git a/queue-5.9/powerpc-603-always-fault-when-_page_accessed-is-not-set.patch b/queue-5.9/powerpc-603-always-fault-when-_page_accessed-is-not-set.patch
new file mode 100644 (file)
index 0000000..e011ca9
--- /dev/null
@@ -0,0 +1,64 @@
+From 11522448e641e8f1690c9db06e01985e8e19b401 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Sat, 10 Oct 2020 15:14:30 +0000
+Subject: powerpc/603: Always fault when _PAGE_ACCESSED is not set
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 11522448e641e8f1690c9db06e01985e8e19b401 upstream.
+
+The kernel expects pte_young() to work regardless of CONFIG_SWAP.
+
+Make sure a minor fault is taken to set _PAGE_ACCESSED when it
+is not already set, regardless of the selection of CONFIG_SWAP.
+
+Fixes: 84de6ab0e904 ("powerpc/603: don't handle PAGE_ACCESSED in TLB miss handlers.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/a44367744de54e2315b2f1a8cbbd7f88488072e0.1602342806.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/powerpc/kernel/head_32.S |   12 ------------
+ 1 file changed, 12 deletions(-)
+
+--- a/arch/powerpc/kernel/head_32.S
++++ b/arch/powerpc/kernel/head_32.S
+@@ -472,11 +472,7 @@ InstructionTLBMiss:
+       cmplw   0,r1,r3
+ #endif
+       mfspr   r2, SPRN_SPRG_PGDIR
+-#ifdef CONFIG_SWAP
+       li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+-#else
+-      li      r1,_PAGE_PRESENT | _PAGE_EXEC
+-#endif
+ #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
+       bgt-    112f
+       lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
+@@ -538,11 +534,7 @@ DataLoadTLBMiss:
+       lis     r1, TASK_SIZE@h         /* check if kernel address */
+       cmplw   0,r1,r3
+       mfspr   r2, SPRN_SPRG_PGDIR
+-#ifdef CONFIG_SWAP
+       li      r1, _PAGE_PRESENT | _PAGE_ACCESSED
+-#else
+-      li      r1, _PAGE_PRESENT
+-#endif
+       bgt-    112f
+       lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
+       addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
+@@ -618,11 +610,7 @@ DataStoreTLBMiss:
+       lis     r1, TASK_SIZE@h         /* check if kernel address */
+       cmplw   0,r1,r3
+       mfspr   r2, SPRN_SPRG_PGDIR
+-#ifdef CONFIG_SWAP
+       li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+-#else
+-      li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
+-#endif
+       bgt-    112f
+       lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
+       addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
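
The change above makes the 603 TLB miss handlers always require _PAGE_ACCESSED, so a page that has never been referenced takes a minor fault that sets the bit, keeping pte_young() meaningful even without CONFIG_SWAP. A small C sketch of that check-and-fault flow follows; the bit values and helper names are invented for illustration and are not the kernel's.

/* Conceptual C sketch of the check the 603 TLB miss handlers perform in
 * assembly: the required-bit mask now always includes _PAGE_ACCESSED, so
 * a PTE that has never been referenced falls through to the fault path,
 * which marks it accessed (what pte_young() later reports).
 * Bit values and helper names are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT  0x001u
#define PAGE_ACCESSED 0x100u
#define PAGE_EXEC     0x200u

typedef uint32_t pte_t;

/* Instruction TLB miss: required bits, regardless of CONFIG_SWAP. */
static const pte_t itlb_required = PAGE_PRESENT | PAGE_ACCESSED | PAGE_EXEC;

static bool tlb_miss_fastpath(pte_t *pte, pte_t required)
{
	if ((*pte & required) != required)
		return false;		/* take the (minor) fault instead */
	/* ... would load the hardware TLB entry here ... */
	return true;
}

/* Fault path: for a present page this is a minor fault that just marks
 * the PTE accessed, after which the fast path succeeds. */
static void handle_fault(pte_t *pte)
{
	if (*pte & PAGE_PRESENT)
		*pte |= PAGE_ACCESSED;
}

int main(void)
{
	pte_t pte = PAGE_PRESENT | PAGE_EXEC;	/* young bit not set yet */

	if (!tlb_miss_fastpath(&pte, itlb_required)) {
		handle_fault(&pte);		/* sets PAGE_ACCESSED */
		tlb_miss_fastpath(&pte, itlb_required);
	}
	printf("pte young: %s\n", (pte & PAGE_ACCESSED) ? "yes" : "no");
	return 0;
}
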
diff --git a/queue-5.9/series b/queue-5.9/series
index 58824a29a1482a740fbf174bdadf9bedf513ecdf..95c94bb40de472f62a0c89b6afcd775bd3d13add 100644 (file)
--- a/queue-5.9/series
+++ b/queue-5.9/series
@@ -248,3 +248,5 @@ tipc-fix-memory-leak-in-tipc_topsrv_start.patch
 devlink-avoid-overwriting-port-attributes-of-registered-port.patch
 limit.patch
 tunnels-fix-off-by-one-in-lower-mtu-bounds-for-icmp-icmpv6-replies.patch
+powerpc-603-always-fault-when-_page_accessed-is-not-set.patch
+null_blk-fix-scheduling-in-atomic-with-zoned-mode.patch