From: Sasha Levin
Date: Thu, 19 Aug 2021 18:31:43 +0000 (-0400)
Subject: Drop mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch
X-Git-Tag: v5.13.13~30
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=2c3e239a3bdcabe6fdb4b68c23bb130652cc6113;p=thirdparty%2Fkernel%2Fstable-queue.git

Drop mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch

Signed-off-by: Sasha Levin
---

diff --git a/queue-5.10/mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch b/queue-5.10/mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch
deleted file mode 100644
index f4e6c0020c2..00000000000
--- a/queue-5.10/mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch
+++ /dev/null
@@ -1,197 +0,0 @@
-From 25e9036a041c6d3c258c02e970b67fa62c10bdad Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Tue, 4 May 2021 18:34:44 -0700
-Subject: mm/cma: change cma mutex to irq safe spinlock
-
-From: Mike Kravetz
-
-[ Upstream commit 0ef7dcac998fefc4767b7f10eb3b6df150c38a4e ]
-
-Patch series "make hugetlb put_page safe for all calling contexts", v5.
-
-This effort is the result of a recent bug report [1].  Syzbot found a
-potential deadlock in the hugetlb put_page/free_huge_page path:
-
-  WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected
-
-Since the free_huge_page path already has code to 'hand off' page free
-requests to a workqueue, a suggestion was proposed to make the in_irq()
-detection accurate by always enabling PREEMPT_COUNT [2].  The outcome of
-that discussion was that the hugetlb put_page (free_huge_page) path
-should be properly fixed and safe for all calling contexts.
-
-[1] https://lore.kernel.org/linux-mm/000000000000f1c03b05bc43aadc@google.com/
-[2] http://lkml.kernel.org/r/20210311021321.127500-1-mike.kravetz@oracle.com
-
-This patch (of 8):
-
-cma_release is currently a sleepable operation because the bitmap
-manipulation is protected by the cma->lock mutex.  Hugetlb code which
-relies on cma_release for CMA backed (giga) hugetlb pages, however,
-needs to be irq safe.
-
-The lock doesn't protect any sleepable operation, so it can be changed
-to an (irq aware) spin lock.  The bitmap processing should be quite fast
-in the typical case, but if cma sizes grow to the TB range we will
-likely need to replace the lock with a more optimized bitmap
-implementation.
-
-Link: https://lkml.kernel.org/r/20210409205254.242291-1-mike.kravetz@oracle.com
-Link: https://lkml.kernel.org/r/20210409205254.242291-2-mike.kravetz@oracle.com
-Signed-off-by: Mike Kravetz
-Acked-by: Michal Hocko
-Reviewed-by: David Hildenbrand
-Acked-by: Roman Gushchin
-Cc: Shakeel Butt
-Cc: Oscar Salvador
-Cc: Muchun Song
-Cc: David Rientjes
-Cc: Miaohe Lin
-Cc: Peter Zijlstra
-Cc: Matthew Wilcox
-Cc: HORIGUCHI NAOYA
-Cc: "Aneesh Kumar K . V"
-Cc: Waiman Long
-Cc: Peter Xu
-Cc: Mina Almasry
-Cc: Hillf Danton
-Cc: Joonsoo Kim
-Cc: Barry Song
-Cc: Will Deacon
-Signed-off-by: Andrew Morton
-Signed-off-by: Linus Torvalds
-Signed-off-by: Sasha Levin
----
- mm/cma.c       | 18 +++++++++---------
- mm/cma.h       |  2 +-
- mm/cma_debug.c |  8 ++++----
- 3 files changed, 14 insertions(+), 14 deletions(-)
-
-diff --git a/mm/cma.c b/mm/cma.c
-index 7f415d7cda9f..019e3aa2cc09 100644
---- a/mm/cma.c
-+++ b/mm/cma.c
-@@ -24,7 +24,6 @@
- #include
- #include
- #include
--#include <linux/mutex.h>
- #include
- #include
- #include
-@@ -84,13 +83,14 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
-                              unsigned int count)
- {
-         unsigned long bitmap_no, bitmap_count;
-+        unsigned long flags;
-
-         bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
-         bitmap_count = cma_bitmap_pages_to_bits(cma, count);
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irqsave(&cma->lock, flags);
-         bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irqrestore(&cma->lock, flags);
- }
-
- static void __init cma_activate_area(struct cma *cma)
-@@ -124,7 +124,7 @@ static void __init cma_activate_area(struct cma *cma)
-                 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
-         } while (--i);
-
--        mutex_init(&cma->lock);
-+        spin_lock_init(&cma->lock);
-
- #ifdef CONFIG_CMA_DEBUGFS
-         INIT_HLIST_HEAD(&cma->mem_head);
-@@ -377,7 +377,7 @@ static void cma_debug_show_areas(struct cma *cma)
-         unsigned long nr_part, nr_total = 0;
-         unsigned long nbits = cma_bitmap_maxno(cma);
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irq(&cma->lock);
-         pr_info("number of available pages: ");
-         for (;;) {
-                 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
-@@ -392,7 +392,7 @@ static void cma_debug_show_areas(struct cma *cma)
-                 start = next_zero_bit + nr_zero;
-         }
-         pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irq(&cma->lock);
- }
- #else
- static inline void cma_debug_show_areas(struct cma *cma) { }
-@@ -437,12 +437,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-                 return NULL;
-
-         for (;;) {
--                mutex_lock(&cma->lock);
-+                spin_lock_irq(&cma->lock);
-                 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
-                                 bitmap_maxno, start, bitmap_count, mask,
-                                 offset);
-                 if (bitmap_no >= bitmap_maxno) {
--                        mutex_unlock(&cma->lock);
-+                        spin_unlock_irq(&cma->lock);
-                         break;
-                 }
-                 bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
-@@ -451,7 +451,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-                  * our exclusive use. If the migration fails we will take the
-                  * lock again and unmark it.
-                  */
--                mutex_unlock(&cma->lock);
-+                spin_unlock_irq(&cma->lock);
-
-                 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-                 mutex_lock(&cma_mutex);
-diff --git a/mm/cma.h b/mm/cma.h
-index 42ae082cb067..c46c5050aaa9 100644
---- a/mm/cma.h
-+++ b/mm/cma.h
-@@ -9,7 +9,7 @@ struct cma {
-         unsigned long count;
-         unsigned long *bitmap;
-         unsigned int order_per_bit; /* Order of pages represented by one bit */
--        struct mutex lock;
-+        spinlock_t lock;
- #ifdef CONFIG_CMA_DEBUGFS
-         struct hlist_head mem_head;
-         spinlock_t mem_head_lock;
-diff --git a/mm/cma_debug.c b/mm/cma_debug.c
-index d5bf8aa34fdc..2e7704955f4f 100644
---- a/mm/cma_debug.c
-+++ b/mm/cma_debug.c
-@@ -36,10 +36,10 @@ static int cma_used_get(void *data, u64 *val)
-         struct cma *cma = data;
-         unsigned long used;
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irq(&cma->lock);
-         /* pages counter is smaller than sizeof(int) */
-         used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irq(&cma->lock);
-         *val = (u64)used << cma->order_per_bit;
-
-         return 0;
-@@ -53,7 +53,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
-         unsigned long start, end = 0;
-         unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irq(&cma->lock);
-         for (;;) {
-                 start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
-                 if (start >= bitmap_maxno)
-@@ -61,7 +61,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
-                 end = find_next_bit(cma->bitmap, bitmap_maxno, start);
-                 maxchunk = max(end - start, maxchunk);
-         }
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irq(&cma->lock);
-         *val = (u64)maxchunk << cma->order_per_bit;
-
-         return 0;
---
-2.30.2
-
diff --git a/queue-5.10/series b/queue-5.10/series
index 311817d36bc..9108d59d179 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -9,7 +9,6 @@ media-zr364xx-fix-memory-leaks-in-probe.patch
 vdpa-extend-routine-to-accept-vdpa-device-name.patch
 vdpa-define-vdpa-mgmt-device-ops-and-a-netlink-inter.patch
 media-drivers-media-usb-fix-memory-leak-in-zr364xx_p.patch
-mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch
 kvm-x86-factor-out-x86-instruction-emulation-with-de.patch
 kvm-x86-fix-warning-caused-by-stale-emulation-contex.patch
 usb-core-avoid-warnings-for-0-length-descriptor-requ.patch
diff --git a/queue-5.4/mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch b/queue-5.4/mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch
deleted file mode 100644
index c7df4d1ea52..00000000000
--- a/queue-5.4/mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch
+++ /dev/null
@@ -1,197 +0,0 @@
-From 6f985bb70e108d80eab25dcd0146721bab8f36fb Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Tue, 4 May 2021 18:34:44 -0700
-Subject: mm/cma: change cma mutex to irq safe spinlock
-
-From: Mike Kravetz
-
-[ Upstream commit 0ef7dcac998fefc4767b7f10eb3b6df150c38a4e ]
-
-Patch series "make hugetlb put_page safe for all calling contexts", v5.
-
-This effort is the result of a recent bug report [1].  Syzbot found a
-potential deadlock in the hugetlb put_page/free_huge_page path:
-
-  WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected
-
-Since the free_huge_page path already has code to 'hand off' page free
-requests to a workqueue, a suggestion was proposed to make the in_irq()
-detection accurate by always enabling PREEMPT_COUNT [2].  The outcome of
-that discussion was that the hugetlb put_page (free_huge_page) path
-should be properly fixed and safe for all calling contexts.
-
-[1] https://lore.kernel.org/linux-mm/000000000000f1c03b05bc43aadc@google.com/
-[2] http://lkml.kernel.org/r/20210311021321.127500-1-mike.kravetz@oracle.com
-
-This patch (of 8):
-
-cma_release is currently a sleepable operation because the bitmap
-manipulation is protected by the cma->lock mutex.  Hugetlb code which
-relies on cma_release for CMA backed (giga) hugetlb pages, however,
-needs to be irq safe.
-
-The lock doesn't protect any sleepable operation, so it can be changed
-to an (irq aware) spin lock.  The bitmap processing should be quite fast
-in the typical case, but if cma sizes grow to the TB range we will
-likely need to replace the lock with a more optimized bitmap
-implementation.
-
-Link: https://lkml.kernel.org/r/20210409205254.242291-1-mike.kravetz@oracle.com
-Link: https://lkml.kernel.org/r/20210409205254.242291-2-mike.kravetz@oracle.com
-Signed-off-by: Mike Kravetz
-Acked-by: Michal Hocko
-Reviewed-by: David Hildenbrand
-Acked-by: Roman Gushchin
-Cc: Shakeel Butt
-Cc: Oscar Salvador
-Cc: Muchun Song
-Cc: David Rientjes
-Cc: Miaohe Lin
-Cc: Peter Zijlstra
-Cc: Matthew Wilcox
-Cc: HORIGUCHI NAOYA
-Cc: "Aneesh Kumar K . V"
-Cc: Waiman Long
-Cc: Peter Xu
-Cc: Mina Almasry
-Cc: Hillf Danton
-Cc: Joonsoo Kim
-Cc: Barry Song
-Cc: Will Deacon
-Signed-off-by: Andrew Morton
-Signed-off-by: Linus Torvalds
-Signed-off-by: Sasha Levin
----
- mm/cma.c       | 18 +++++++++---------
- mm/cma.h       |  2 +-
- mm/cma_debug.c |  8 ++++----
- 3 files changed, 14 insertions(+), 14 deletions(-)
-
-diff --git a/mm/cma.c b/mm/cma.c
-index 7de520c0a1db..b895b2ca8800 100644
---- a/mm/cma.c
-+++ b/mm/cma.c
-@@ -24,7 +24,6 @@
- #include
- #include
- #include
--#include <linux/mutex.h>
- #include
- #include
- #include
-@@ -84,13 +83,14 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
-                              unsigned int count)
- {
-         unsigned long bitmap_no, bitmap_count;
-+        unsigned long flags;
-
-         bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
-         bitmap_count = cma_bitmap_pages_to_bits(cma, count);
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irqsave(&cma->lock, flags);
-         bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irqrestore(&cma->lock, flags);
- }
-
- static void __init cma_activate_area(struct cma *cma)
-@@ -124,7 +124,7 @@ static void __init cma_activate_area(struct cma *cma)
-                 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
-         } while (--i);
-
--        mutex_init(&cma->lock);
-+        spin_lock_init(&cma->lock);
-
- #ifdef CONFIG_CMA_DEBUGFS
-         INIT_HLIST_HEAD(&cma->mem_head);
-@@ -376,7 +376,7 @@ static void cma_debug_show_areas(struct cma *cma)
-         unsigned long nr_part, nr_total = 0;
-         unsigned long nbits = cma_bitmap_maxno(cma);
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irq(&cma->lock);
-         pr_info("number of available pages: ");
-         for (;;) {
-                 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
-@@ -391,7 +391,7 @@ static void cma_debug_show_areas(struct cma *cma)
-                 start = next_zero_bit + nr_zero;
-         }
-         pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irq(&cma->lock);
- }
- #else
- static inline void cma_debug_show_areas(struct cma *cma) { }
-@@ -436,12 +436,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-                 return NULL;
-
-         for (;;) {
--                mutex_lock(&cma->lock);
-+                spin_lock_irq(&cma->lock);
-                 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
-                                 bitmap_maxno, start, bitmap_count, mask,
-                                 offset);
-                 if (bitmap_no >= bitmap_maxno) {
--                        mutex_unlock(&cma->lock);
-+                        spin_unlock_irq(&cma->lock);
-                         break;
-                 }
-                 bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
-@@ -450,7 +450,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-                  * our exclusive use. If the migration fails we will take the
-                  * lock again and unmark it.
-                  */
--                mutex_unlock(&cma->lock);
-+                spin_unlock_irq(&cma->lock);
-
-                 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-                 mutex_lock(&cma_mutex);
-diff --git a/mm/cma.h b/mm/cma.h
-index 33c0b517733c..043bfbff9225 100644
---- a/mm/cma.h
-+++ b/mm/cma.h
-@@ -7,7 +7,7 @@ struct cma {
-         unsigned long count;
-         unsigned long *bitmap;
-         unsigned int order_per_bit; /* Order of pages represented by one bit */
--        struct mutex lock;
-+        spinlock_t lock;
- #ifdef CONFIG_CMA_DEBUGFS
-         struct hlist_head mem_head;
-         spinlock_t mem_head_lock;
-diff --git a/mm/cma_debug.c b/mm/cma_debug.c
-index a7dd9e8e10d5..d27881adce19 100644
---- a/mm/cma_debug.c
-+++ b/mm/cma_debug.c
-@@ -36,10 +36,10 @@ static int cma_used_get(void *data, u64 *val)
-         struct cma *cma = data;
-         unsigned long used;
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irq(&cma->lock);
-         /* pages counter is smaller than sizeof(int) */
-         used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irq(&cma->lock);
-         *val = (u64)used << cma->order_per_bit;
-
-         return 0;
-@@ -53,7 +53,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
-         unsigned long start, end = 0;
-         unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
-
--        mutex_lock(&cma->lock);
-+        spin_lock_irq(&cma->lock);
-         for (;;) {
-                 start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
-                 if (start >= bitmap_maxno)
-@@ -61,7 +61,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
-                 end = find_next_bit(cma->bitmap, bitmap_maxno, start);
-                 maxchunk = max(end - start, maxchunk);
-         }
--        mutex_unlock(&cma->lock);
-+        spin_unlock_irq(&cma->lock);
-         *val = (u64)maxchunk << cma->order_per_bit;
-
-         return 0;
---
-2.30.2
-
diff --git a/queue-5.4/series b/queue-5.4/series
index 32433270755..7959381b0bd 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -9,7 +9,6 @@ mtd-cfi_cmdset_0002-fix-crash-when-erasing-writing-amd-cards.patch
 media-zr364xx-propagate-errors-from-zr364xx_start_re.patch
 media-zr364xx-fix-memory-leaks-in-probe.patch
 media-drivers-media-usb-fix-memory-leak-in-zr364xx_p.patch
-mm-cma-change-cma-mutex-to-irq-safe-spinlock.patch
 usb-core-avoid-warnings-for-0-length-descriptor-requ.patch
 dmaengine-xilinx_dma-fix-read-after-free-bug-when-te.patch
 dmaengine-usb-dmac-fix-pm-reference-leak-in-usb_dmac.patch
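
For reference, the conversion at the heart of the dropped backport is the
cma_clear_bitmap() change. After the upstream commit 0ef7dcac998f is applied,
the function in mm/cma.c reads roughly as follows; this is reconstructed from
the hunks quoted above and is kernel-internal code, shown for context only,
not a standalone buildable example:

    static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                                 unsigned int count)
    {
            unsigned long bitmap_no, bitmap_count;
            unsigned long flags;    /* needed for spin_lock_irqsave() */

            bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
            bitmap_count = cma_bitmap_pages_to_bits(cma, count);

            /*
             * irqsave variant of the new spinlock, so cma_release() can be
             * called from any context, including irq/softirq.
             */
            spin_lock_irqsave(&cma->lock, flags);
            bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
            spin_unlock_irqrestore(&cma->lock, flags);
    }

The cma_alloc() and debugfs paths in the same hunks take the lock with plain
spin_lock_irq()/spin_unlock_irq(), presumably because those paths never run
in irq context themselves.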