--- /dev/null
+From 5e6b8a50a7cec5686ee2c4bda1d49899c79a7eae Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Wed, 26 May 2021 22:38:05 +0800
+Subject: cred: add missing return error code when set_cred_ucounts() failed
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit 5e6b8a50a7cec5686ee2c4bda1d49899c79a7eae upstream.
+
+If set_cred_ucounts() fails, we need to return the error code.
+
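+With the fix applied, both failure paths in the CLONE_NEWUSER branch of
+copy_creds() propagate their error through ret before jumping to
+error_put; roughly (the exact context is in the diff below):
+
+	ret = create_user_ns(new);
+	if (ret < 0)
+		goto error_put;
+	ret = set_cred_ucounts(new);
+	if (ret < 0)
+		goto error_put;
+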
+Fixes: 905ae01c4ae2 ("Add a reference to ucounts for each cred")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lkml.kernel.org/r/20210526143805.2549649-1-yangyingliang@huawei.com
+Reviewed-by: Alexey Gladkov <legion@kernel.org>
+Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cred.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -372,7 +372,8 @@ int copy_creds(struct task_struct *p, un
+ ret = create_user_ns(new);
+ if (ret < 0)
+ goto error_put;
+- if (set_cred_ucounts(new) < 0)
++ ret = set_cred_ucounts(new);
++ if (ret < 0)
+ goto error_put;
+ }
+
--- /dev/null
+From a8bc4f5e7a72e4067f5afd7e98b61624231713ca Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Wed, 2 Jun 2021 11:36:45 +0000
+Subject: crypto: qce - fix error return code in qce_skcipher_async_req_handle()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit a8bc4f5e7a72e4067f5afd7e98b61624231713ca upstream.
+
+Return a negative error code from the error handling path instead
+of 0, as is done elsewhere in this function.
+
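+With the fix, a mapping failure stores the negative value returned by
+dma_map_sg() in ret before jumping to the cleanup label, so the handler
+no longer falls through with ret == 0; roughly (see the diff below):
+
+	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+	if (dst_nents < 0) {
+		ret = dst_nents;
+		goto error_free;
+	}
+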
+Fixes: 1339a7c3ba05 ("crypto: qce: skcipher: Fix incorrect sg count for dma transfers")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Reviewed-by: Thara Gopinath <thara.gopinath@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/qce/skcipher.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/qce/skcipher.c
++++ b/drivers/crypto/qce/skcipher.c
+@@ -124,13 +124,17 @@ qce_skcipher_async_req_handle(struct cry
+ rctx->dst_sg = rctx->dst_tbl.sgl;
+
+ dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+- if (dst_nents < 0)
++ if (dst_nents < 0) {
++ ret = dst_nents;
+ goto error_free;
++ }
+
+ if (diff_dst) {
+ src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+- if (src_nents < 0)
++ if (src_nents < 0) {
++ ret = src_nents;
+ goto error_unmap_dst;
++ }
+ rctx->src_sg = req->src;
+ } else {
+ rctx->src_sg = rctx->dst_sg;
--- /dev/null
+From 7154cbd31c2069726cf730b0ed94e2e79a221602 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Mon, 7 Jun 2021 14:49:05 +0200
+Subject: iommu/dma: Fix compile warning in 32-bit builds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 7154cbd31c2069726cf730b0ed94e2e79a221602 upstream.
+
+Compiling the recent dma-iommu changes under 32-bit x86 triggers this
+compile warning:
+
+drivers/iommu/dma-iommu.c:249:5: warning: format ‘%llx’ expects argument of type ‘long long unsigned int’, but argument 3 has type ‘phys_addr_t’ {aka ‘unsigned int’} [-Wformat=]
+
+The reason is that %llx is used to print a variable of type
+phys_addr_t. Fix it by using the correct %pa format specifier for
+phys_addr_t.
+
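+For reference, %pa takes a pointer to the phys_addr_t rather than the
+value itself, so the call becomes roughly:
+
+	dev_err(&dev->dev, "Failed to reserve IOVA [%pa-%pa]\n",
+		&start, &end);
+
+which prints the addresses correctly on both 32-bit and 64-bit builds.
+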
+Cc: Srinath Mannam <srinath.mannam@broadcom.com>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: Oza Pawandeep <poza@codeaurora.org>
+Fixes: 571f316074a20 ("iommu/dma: Fix IOVA reserve dma ranges")
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Link: https://lore.kernel.org/r/20210607124905.27525-1-joro@8bytes.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/dma-iommu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -246,8 +246,8 @@ resv_iova:
+ } else if (end < start) {
+ /* dma_ranges list should be sorted */
+ dev_err(&dev->dev,
+- "Failed to reserve IOVA [%#010llx-%#010llx]\n",
+- start, end);
++ "Failed to reserve IOVA [%pa-%pa]\n",
++ &start, &end);
+ return -EINVAL;
+ }
+
--- /dev/null
+From 2c669ef6979c370f98d4b876e54f19613c81e075 Mon Sep 17 00:00:00 2001
+From: Valentin Schneider <valentin.schneider@arm.com>
+Date: Wed, 7 Jul 2021 19:38:31 +0100
+Subject: powerpc/preempt: Don't touch the idle task's preempt_count during hotplug
+
+From: Valentin Schneider <valentin.schneider@arm.com>
+
+commit 2c669ef6979c370f98d4b876e54f19613c81e075 upstream.
+
+Powerpc currently resets a CPU's idle task preempt_count to 0 before
+said task starts executing the secondary startup routine (and becomes an
+idle task proper).
+
+This conflicts with commit f1a0a376ca0c ("sched/core: Initialize the
+idle task with preemption disabled") which initializes all of the
+idle tasks' preempt_count to PREEMPT_DISABLED during smp_init().
+Note that this was superfluous
+before said commit, as back then the hotplug machinery would invoke
+init_idle() via idle_thread_get(), which would have already reset the
+CPU's idle task's preempt_count to PREEMPT_ENABLED.
+
+Get rid of this preempt_count write.
+
+Fixes: f1a0a376ca0c ("sched/core: Initialize the idle task with preemption disabled")
+Reported-by: Bharata B Rao <bharata@linux.ibm.com>
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Bharata B Rao <bharata@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210707183831.2106509-1-valentin.schneider@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/cell/smp.c | 3 ---
+ arch/powerpc/platforms/pseries/smp.c | 3 ---
+ 2 files changed, 6 deletions(-)
+
+--- a/arch/powerpc/platforms/cell/smp.c
++++ b/arch/powerpc/platforms/cell/smp.c
+@@ -78,9 +78,6 @@ static inline int smp_startup_cpu(unsign
+
+ pcpu = get_hard_smp_processor_id(lcpu);
+
+- /* Fixup atomic count: it exited inside IRQ handler. */
+- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
+-
+ /*
+ * If the RTAS start-cpu token does not exist then presume the
+ * cpu is already spinning.
+--- a/arch/powerpc/platforms/pseries/smp.c
++++ b/arch/powerpc/platforms/pseries/smp.c
+@@ -105,9 +105,6 @@ static inline int smp_startup_cpu(unsign
+ return 1;
+ }
+
+- /* Fixup atomic count: it exited inside IRQ handler. */
+- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
+-
+ /*
+ * If the RTAS start-cpu token does not exist then presume the
+ * cpu is already spinning.
--- /dev/null
+From 6a942f5780545ebd11aca8b3ac4b163397962322 Mon Sep 17 00:00:00 2001
+From: Valentin Schneider <valentin.schneider@arm.com>
+Date: Wed, 7 Jul 2021 17:33:38 +0100
+Subject: s390: preempt: Fix preempt_count initialization
+
+From: Valentin Schneider <valentin.schneider@arm.com>
+
+commit 6a942f5780545ebd11aca8b3ac4b163397962322 upstream.
+
+S390's init_idle_preempt_count(p, cpu) doesn't actually let us initialize the
+preempt_count of the requested CPU's idle task: it unconditionally writes
+to the current CPU's. This clearly conflicts with idle_threads_init(),
+which intends to initialize *all* the idle tasks, including their
+preempt_count (or their CPU's, if the arch uses a per-CPU preempt_count).
+
+Unfortunately, it seems the way s390 does things doesn't let us initialize
+every possible CPU's preempt_count early on, as the pages where this
+resides are only allocated when a CPU is brought up and are freed when it
+is brought down.
+
+Let the arch-specific code set a CPU's preempt_count when its lowcore is
+allocated, and turn init_idle_preempt_count() into an empty stub.
+
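+After this change init_idle_preempt_count() is an empty stub on s390 and
+the per-CPU value is seeded where the lowcore is set up instead,
+roughly (see the diff below):
+
+	/* pcpu_alloc_lowcore() / setup_lowcore_dat_off() */
+	lc->preempt_count = PREEMPT_DISABLED;
+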
+Fixes: f1a0a376ca0c ("sched/core: Initialize the idle task with preemption disabled")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Link: https://lore.kernel.org/r/20210707163338.1623014-1-valentin.schneider@arm.com
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/preempt.h | 16 ++++------------
+ arch/s390/kernel/setup.c | 1 +
+ arch/s390/kernel/smp.c | 1 +
+ 3 files changed, 6 insertions(+), 12 deletions(-)
+
+--- a/arch/s390/include/asm/preempt.h
++++ b/arch/s390/include/asm/preempt.h
+@@ -29,12 +29,6 @@ static inline void preempt_count_set(int
+ old, new) != old);
+ }
+
+-#define init_task_preempt_count(p) do { } while (0)
+-
+-#define init_idle_preempt_count(p, cpu) do { \
+- S390_lowcore.preempt_count = PREEMPT_DISABLED; \
+-} while (0)
+-
+ static inline void set_preempt_need_resched(void)
+ {
+ __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+@@ -88,12 +82,6 @@ static inline void preempt_count_set(int
+ S390_lowcore.preempt_count = pc;
+ }
+
+-#define init_task_preempt_count(p) do { } while (0)
+-
+-#define init_idle_preempt_count(p, cpu) do { \
+- S390_lowcore.preempt_count = PREEMPT_DISABLED; \
+-} while (0)
+-
+ static inline void set_preempt_need_resched(void)
+ {
+ }
+@@ -130,6 +118,10 @@ static inline bool should_resched(int pr
+
+ #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
++#define init_task_preempt_count(p) do { } while (0)
++/* Deferred to CPU bringup time */
++#define init_idle_preempt_count(p, cpu) do { } while (0)
++
+ #ifdef CONFIG_PREEMPTION
+ extern void preempt_schedule(void);
+ #define __preempt_schedule() preempt_schedule()
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -466,6 +466,7 @@ static void __init setup_lowcore_dat_off
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
++ lc->preempt_count = PREEMPT_DISABLED;
+
+ set_prefix((u32)(unsigned long) lc);
+ lowcore_ptr[0] = lc;
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -219,6 +219,7 @@ static int pcpu_alloc_lowcore(struct pcp
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
++ lc->preempt_count = PREEMPT_DISABLED;
+ if (nmi_alloc_per_cpu(lc))
+ goto out_stack;
+ lowcore_ptr[cpu] = lc;
--- /dev/null
+From a8ea6fc9b089156d9230bfeef964dd9be101a4a9 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Wed, 26 May 2021 01:58:49 +0200
+Subject: sched: Stop PF_NO_SETAFFINITY from being inherited by various init system threads
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit a8ea6fc9b089156d9230bfeef964dd9be101a4a9 upstream.
+
+Commit:
+
+ 00b89fe0197f ("sched: Make the idle task quack like a per-CPU kthread")
+
+... added PF_KTHREAD | PF_NO_SETAFFINITY to the idle kernel threads.
+
+Unfortunately these properties are inherited by the init/0 children
+through kernel_thread() calls: init/1 and kthreadd. There are several
+side effects to that:
+
+1) kthreadd affinity can not be reset anymore from userspace. Also
+ PF_NO_SETAFFINITY propagates to all kthreadd children, including
+   the unbound kthreads. Therefore it's not possible anymore to overwrite
+   the affinity of any of them. Here is an example of a warning reported
+ by rcutorture:
+
+ WARNING: CPU: 0 PID: 116 at kernel/rcu/tree_nocb.h:1306 rcu_bind_current_to_nocb+0x31/0x40
+ Call Trace:
+ rcu_torture_fwd_prog+0x62/0x730
+ kthread+0x122/0x140
+ ret_from_fork+0x22/0x30
+
+2) init/1 does an exec() in the end which clears both
+ PF_KTHREAD and PF_NO_SETAFFINITY so we are fine once kernel_init()
+ escapes to userspace. But until then, no initcall or init code can
+ successfully call sched_setaffinity() to init/1.
+
+ Also PF_KTHREAD looks legit on init/1 before it calls exec() but
+ we better be careful with unknown introduced side effects.
+
+One way to solve the PF_NO_SETAFFINITY issue is to not inherit this flag
+on copy_process() at all. The cases where it matters are:
+
+* fork_idle(): explicitly set the flag already.
+* fork() syscalls: userspace tasks that shouldn't be concerned by that.
+* create_io_thread(): the callers explicitly attribute the flag to the
+ newly created tasks.
+* kernel_thread():
+ - Fix the issues on init/1 and kthreadd
+ - Fix the issues on kthreadd children.
+ - Usermode helper created by an unbound workqueue. This shouldn't
+ matter. In the worst case it gives more control to userspace
+   on setting affinity to these short-lived tasks, although this can
+ be tuned with inherited unbound workqueues affinity already.
+
+Fixes: 00b89fe0197f ("sched: Make the idle task quack like a per-CPU kthread")
+Reported-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Paul E. McKenney <paulmck@kernel.org>
+Link: https://lore.kernel.org/r/20210525235849.441842-1-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/fork.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1999,7 +1999,7 @@ static __latent_entropy struct task_stru
+ goto bad_fork_cleanup_count;
+
+ delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
+- p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
++ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
+ p->flags |= PF_FORKNOEXEC;
+ INIT_LIST_HEAD(&p->children);
+ INIT_LIST_HEAD(&p->sibling);
mmc-vub3000-fix-control-request-direction.patch
media-exynos4-is-remove-a-now-unused-integer.patch
scsi-core-retry-i-o-for-notify-enable-spinup-required-error.patch
+crypto-qce-fix-error-return-code-in-qce_skcipher_async_req_handle.patch
+s390-preempt-fix-preempt_count-initialization.patch
+sched-stop-pf_no_setaffinity-from-being-inherited-by-various-init-system-threads.patch
+cred-add-missing-return-error-code-when-set_cred_ucounts-failed.patch
+iommu-dma-fix-compile-warning-in-32-bit-builds.patch
+powerpc-preempt-don-t-touch-the-idle-task-s-preempt_count-during-hotplug.patch