From: Greg Kroah-Hartman
Date: Mon, 2 Aug 2021 11:49:09 +0000 (+0200)
Subject: 5.13-stable patches
X-Git-Tag: v4.4.278~23
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=b13414115f037ed09d55f75e8e494a0344f4490b;p=thirdparty%2Fkernel%2Fstable-queue.git

5.13-stable patches

added patches:
        io_uring-fix-race-in-unified-task_work-running.patch
        powerpc-pseries-fix-regression-while-building-external-modules.patch
        powerpc-vdso-don-t-use-r30-to-avoid-breaking-go-lang.patch
        revert-perf-map-fix-dso-nsinfo-refcounting.patch
        smb3-fix-readpage-for-large-swap-cache.patch
---
diff --git a/queue-5.13/io_uring-fix-race-in-unified-task_work-running.patch b/queue-5.13/io_uring-fix-race-in-unified-task_work-running.patch
new file mode 100644
index 00000000000..7bc799caddd
--- /dev/null
+++ b/queue-5.13/io_uring-fix-race-in-unified-task_work-running.patch
@@ -0,0 +1,52 @@
+From 110aa25c3ce417a44e35990cf8ed22383277933a Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Mon, 26 Jul 2021 10:42:56 -0600
+Subject: io_uring: fix race in unified task_work running
+
+From: Jens Axboe
+
+commit 110aa25c3ce417a44e35990cf8ed22383277933a upstream.
+
+We use a bit to manage if we need to add the shared task_work, but
+a list + lock for the pending work. Before aborting a current run
+of the task_work we check if the list is empty, but we do so without
+grabbing the lock that protects it. This can lead to races where
+we think we have nothing left to run, where in practice we could be
+racing with a task adding new work to the list. If we do hit that
+race condition, we could be left with work items that need processing,
+but the shared task_work is not active.
+
+Ensure that we grab the lock before checking if the list is empty,
+so we know if it's safe to exit the run or not.
+
+Link: https://lore.kernel.org/io-uring/c6bd5987-e9ae-cd02-49d0-1b3ac1ef65b1@tnonline.net/
+Cc: stable@vger.kernel.org # 5.11+
+Reported-by: Forza
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/io_uring.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1899,7 +1899,7 @@ static void tctx_task_work(struct callba
+
+         clear_bit(0, &tctx->task_state);
+
+-        while (!wq_list_empty(&tctx->task_list)) {
++        while (true) {
+                 struct io_ring_ctx *ctx = NULL;
+                 struct io_wq_work_list list;
+                 struct io_wq_work_node *node;
+@@ -1909,6 +1909,9 @@ static void tctx_task_work(struct callba
+                 INIT_WQ_LIST(&tctx->task_list);
+                 spin_unlock_irq(&tctx->task_lock);
+
++                if (wq_list_empty(&list))
++                        break;
++
+                 node = list.first;
+                 while (node) {
+                         struct io_wq_work_node *next = node->next;
diff --git a/queue-5.13/powerpc-pseries-fix-regression-while-building-external-modules.patch b/queue-5.13/powerpc-pseries-fix-regression-while-building-external-modules.patch
new file mode 100644
index 00000000000..c628dafdf25
--- /dev/null
+++ b/queue-5.13/powerpc-pseries-fix-regression-while-building-external-modules.patch
@@ -0,0 +1,77 @@
+From 333cf507465fbebb3727f5b53e77538467df312a Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju
+Date: Thu, 29 Jul 2021 11:34:49 +0530
+Subject: powerpc/pseries: Fix regression while building external modules
+
+From: Srikar Dronamraju
+
+commit 333cf507465fbebb3727f5b53e77538467df312a upstream.
+
+With commit c9f3401313a5 ("powerpc: Always enable queued spinlocks for
+64s, disable for others") CONFIG_PPC_QUEUED_SPINLOCKS is always
+enabled on ppc64le, external modules that use spinlock APIs are
+failing.
+
+  ERROR: modpost: GPL-incompatible module XXX.ko uses GPL-only symbol 'shared_processor'
+
+Before the above commit, modules were able to build without any
+issues. Also this problem is not seen on other architectures. This
+problem can be workaround if CONFIG_UNINLINE_SPIN_UNLOCK is enabled in
+the config. However CONFIG_UNINLINE_SPIN_UNLOCK is not enabled by
+default and only enabled in certain conditions like
+CONFIG_DEBUG_SPINLOCKS is set in the kernel config.
+
+  #include
+  spinlock_t spLock;
+
+  static int __init spinlock_test_init(void)
+  {
+          spin_lock_init(&spLock);
+          spin_lock(&spLock);
+          spin_unlock(&spLock);
+          return 0;
+  }
+
+  static void __exit spinlock_test_exit(void)
+  {
+          printk("spinlock_test unloaded\n");
+  }
+  module_init(spinlock_test_init);
+  module_exit(spinlock_test_exit);
+
+  MODULE_DESCRIPTION ("spinlock_test");
+  MODULE_LICENSE ("non-GPL");
+  MODULE_AUTHOR ("Srikar Dronamraju");
+
+Given that spin locks are one of the basic facilities for module code,
+this effectively makes it impossible to build/load almost any non GPL
+modules on ppc64le.
+
+This was first reported at https://github.com/openzfs/zfs/issues/11172
+
+Currently shared_processor is exported as GPL only symbol.
+Fix this for parity with other architectures by exposing
+shared_processor to non-GPL modules too.
+
+Fixes: 14c73bd344da ("powerpc/vcpu: Assume dedicated processors as non-preempt")
+Cc: stable@vger.kernel.org # v5.5+
+Reported-by: marc.c.dionne@gmail.com
+Signed-off-by: Srikar Dronamraju
+Signed-off-by: Michael Ellerman
+Link: https://lore.kernel.org/r/20210729060449.292780-1-srikar@linux.vnet.ibm.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/powerpc/platforms/pseries/setup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -77,7 +77,7 @@
+ #include "../../../../drivers/pci/pci.h"
+
+ DEFINE_STATIC_KEY_FALSE(shared_processor);
+-EXPORT_SYMBOL_GPL(shared_processor);
++EXPORT_SYMBOL(shared_processor);
+
+ int CMO_PrPSP = -1;
+ int CMO_SecPSP = -1;
diff --git a/queue-5.13/powerpc-vdso-don-t-use-r30-to-avoid-breaking-go-lang.patch b/queue-5.13/powerpc-vdso-don-t-use-r30-to-avoid-breaking-go-lang.patch
new file mode 100644
index 00000000000..deeaa06322d
--- /dev/null
+++ b/queue-5.13/powerpc-vdso-don-t-use-r30-to-avoid-breaking-go-lang.patch
@@ -0,0 +1,62 @@
+From a88603f4b92ecef9e2359e40bcb99ad399d85dd7 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman
+Date: Thu, 29 Jul 2021 22:56:36 +1000
+Subject: powerpc/vdso: Don't use r30 to avoid breaking Go lang
+
+From: Michael Ellerman
+
+commit a88603f4b92ecef9e2359e40bcb99ad399d85dd7 upstream.
+
+The Go runtime uses r30 for some special value called 'g'. It assumes
+that value will remain unchanged even when calling VDSO functions.
+Although r30 is non-volatile across function calls, the callee is free
+to use it, as long as the callee saves the value and restores it before
+returning.
+
+It used to be true by accident that the VDSO didn't use r30, because the
+VDSO was hand-written asm. When we switched to building the VDSO from C
+the compiler started using r30, at least in some builds, leading to
+crashes in Go. eg:
+
+  ~/go/src$ ./all.bash
+  Building Go cmd/dist using /usr/lib/go-1.16. (go1.16.2 linux/ppc64le)
+  Building Go toolchain1 using /usr/lib/go-1.16.
+  go build os/exec: /usr/lib/go-1.16/pkg/tool/linux_ppc64le/compile: signal: segmentation fault
+  go build reflect: /usr/lib/go-1.16/pkg/tool/linux_ppc64le/compile: signal: segmentation fault
+  go tool dist: FAILED: /usr/lib/go-1.16/bin/go install -gcflags=-l -tags=math_big_pure_go compiler_bootstrap bootstrap/cmd/...: exit status 1
+
+There are patches in flight to fix Go[1], but until they are released
+and widely deployed we can workaround it in the VDSO by avoiding use of
+r30.
+
+Note this only works with GCC, clang does not support -ffixed-rN.
+
+1: https://go-review.googlesource.com/c/go/+/328110
+
+Fixes: ab037dd87a2f ("powerpc/vdso: Switch VDSO to generic C implementation.")
+Cc: stable@vger.kernel.org # v5.11+
+Reported-by: Paul Menzel
+Tested-by: Paul Menzel
+Signed-off-by: Michael Ellerman
+Link: https://lore.kernel.org/r/20210729131244.2595519-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/powerpc/kernel/vdso64/Makefile | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/powerpc/kernel/vdso64/Makefile
++++ b/arch/powerpc/kernel/vdso64/Makefile
+@@ -27,6 +27,13 @@ KASAN_SANITIZE := n
+
+ ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
+         -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
++
++# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
++# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
++# compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code
++# generation is minimal, it will just use r29 instead.
++ccflags-y += $(call cc-option, -ffixed-r30)
++
+ asflags-y := -D__VDSO64__ -s
+
+ targets += vdso64.lds
diff --git a/queue-5.13/revert-perf-map-fix-dso-nsinfo-refcounting.patch b/queue-5.13/revert-perf-map-fix-dso-nsinfo-refcounting.patch
new file mode 100644
index 00000000000..bf10cce89f3
--- /dev/null
+++ b/queue-5.13/revert-perf-map-fix-dso-nsinfo-refcounting.patch
@@ -0,0 +1,39 @@
+From 9bac1bd6e6d36459087a728a968e79e37ebcea1a Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo
+Date: Fri, 30 Jul 2021 18:26:22 -0300
+Subject: Revert "perf map: Fix dso->nsinfo refcounting"
+
+From: Arnaldo Carvalho de Melo
+
+commit 9bac1bd6e6d36459087a728a968e79e37ebcea1a upstream.
+
+This makes 'perf top' abort in some cases, and the right fix will
+involve surgery that is too much to do at this stage, so revert for now
+and fix it in the next merge window.
+
+This reverts commit 2d6b74baa7147251c30a46c4996e8cc224aa2dc5.
+
+Cc: Riccardo Mancini
+Cc: Ian Rogers
+Cc: Jiri Olsa
+Cc: Krister Johansen
+Cc: Mark Rutland
+Cc: Namhyung Kim
+Cc: Peter Zijlstra
+Signed-off-by: Arnaldo Carvalho de Melo
+Signed-off-by: Greg Kroah-Hartman
+---
+ tools/perf/util/map.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -192,8 +192,6 @@ struct map *map__new(struct machine *mac
+                 if (!(prot & PROT_EXEC))
+                         dso__set_loaded(dso);
+         }
+-
+-        nsinfo__put(dso->nsinfo);
+         dso->nsinfo = nsi;
+
+         if (build_id__is_defined(bid))
diff --git a/queue-5.13/series b/queue-5.13/series
index 5c937f89cc7..e27458127b7 100644
--- a/queue-5.13/series
+++ b/queue-5.13/series
@@ -93,3 +93,8 @@ bpf-fix-leakage-due-to-insufficient-speculative-stor.patch
 bpf-remove-superfluous-aux-sanitation-on-subprog-rejection.patch
 bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch
 bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch
+smb3-fix-readpage-for-large-swap-cache.patch
+powerpc-vdso-don-t-use-r30-to-avoid-breaking-go-lang.patch
+powerpc-pseries-fix-regression-while-building-external-modules.patch
+revert-perf-map-fix-dso-nsinfo-refcounting.patch
+io_uring-fix-race-in-unified-task_work-running.patch
diff --git a/queue-5.13/smb3-fix-readpage-for-large-swap-cache.patch b/queue-5.13/smb3-fix-readpage-for-large-swap-cache.patch
new file mode 100644
index 00000000000..b1665b8a0dd
--- /dev/null
+++ b/queue-5.13/smb3-fix-readpage-for-large-swap-cache.patch
@@ -0,0 +1,41 @@
+From f2a26a3cff27dfa456fef386fe5df56dcb4b47b6 Mon Sep 17 00:00:00 2001
+From: Steve French
+Date: Fri, 23 Jul 2021 18:35:15 -0500
+Subject: SMB3: fix readpage for large swap cache
+
+From: Steve French
+
+commit f2a26a3cff27dfa456fef386fe5df56dcb4b47b6 upstream.
+
+readpage was calculating the offset of the page incorrectly
+for the case of large swapcaches.
+
+    loff_t offset = (loff_t)page->index << PAGE_SHIFT;
+
+As pointed out by Matthew Wilcox, this needs to use
+page_file_offset() to calculate the offset instead.
+Pages coming from the swap cache have page->index set
+to their index within the swapcache, not within the backing
+file. For a sufficiently large swapcache, we could have
+overlapping values of page->index within the same backing file.
+
+Suggested by: Matthew Wilcox (Oracle)
+Cc: # v5.7+
+Reviewed-by: Ronnie Sahlberg
+Signed-off-by: Steve French
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/cifs/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -4631,7 +4631,7 @@ read_complete:
+
+ static int cifs_readpage(struct file *file, struct page *page)
+ {
+-        loff_t offset = (loff_t)page->index << PAGE_SHIFT;
++        loff_t offset = page_file_offset(page);
+         int rc = -EACCES;
+         unsigned int xid;
+