--- /dev/null
+From 85e42e660d87366911383895423becb11b188305 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Dec 2019 19:09:06 +0000
+Subject: dmaengine: k3dma: Avoid null pointer traversal
+
+From: John Stultz <john.stultz@linaro.org>
+
+[ Upstream commit 2f42e05b942fe2fbfb9bbc6e34e1dd8c3ce4f3a4 ]
+
+In some cases we seem to submit two transactions in a row, which
+causes us to lose track of the first. If we then cancel the
+request, we may still get an interrupt, which dereferences a NULL
+ds_run value.
+
+So avoid starting a new transaction while ds_run is still set.
+
+While this patch avoids the null pointer crash, I've had some
+reports of the k3dma driver still getting confused, which
+suggests the ds_run/ds_done value handling still isn't quite
+right. However, I've not run into an issue recently with it
+so I think this patch is worth pushing upstream to avoid the
+crash.
+
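+As a rough illustration of the two guards being added (a userspace
+pthread sketch, not the driver code; the struct and function names
+below are made up), the idea is: refuse to start a new descriptor
+while one is still in flight, and complete a descriptor in the
+interrupt path only if one is actually running:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  /* Hypothetical stand-in for the per-channel state (ds_run/ds_done). */
+  struct chan {
+      pthread_mutex_t lock;
+      void *ds_run;   /* descriptor currently in flight, or NULL */
+      void *ds_done;  /* last completed descriptor */
+  };
+
+  /* Start path: bail out if a descriptor is already in flight. */
+  static int start_txd(struct chan *c, void *desc)
+  {
+      int ret = -1;
+
+      pthread_mutex_lock(&c->lock);
+      if (!c->ds_run) {
+          c->ds_run = desc;
+          ret = 0;
+      }
+      pthread_mutex_unlock(&c->lock);
+      return ret;
+  }
+
+  /* Completion path: only complete if something is actually running. */
+  static void complete_txd(struct chan *c)
+  {
+      pthread_mutex_lock(&c->lock);
+      if (c->ds_run) {
+          c->ds_done = c->ds_run;
+          c->ds_run = NULL;
+      }
+      pthread_mutex_unlock(&c->lock);
+  }
+
+  int main(void)
+  {
+      struct chan c = { .lock = PTHREAD_MUTEX_INITIALIZER };
+      int d1 = 1, d2 = 2;
+
+      printf("start d1: %d\n", start_txd(&c, &d1)); /* 0: accepted */
+      printf("start d2: %d\n", start_txd(&c, &d2)); /* -1: already busy */
+      complete_txd(&c);  /* completes d1 */
+      complete_txd(&c);  /* no descriptor in flight: harmless no-op */
+      return 0;
+  }
+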
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+[add ss tag]
+Link: https://lore.kernel.org/r/20191218190906.6641-1-john.stultz@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/k3dma.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
+index 4b36c8810517..d05471653224 100644
+--- a/drivers/dma/k3dma.c
++++ b/drivers/dma/k3dma.c
+@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
+ c = p->vchan;
+ if (c && (tc1 & BIT(i))) {
+ spin_lock_irqsave(&c->vc.lock, flags);
+- vchan_cookie_complete(&p->ds_run->vd);
+- p->ds_done = p->ds_run;
+- p->ds_run = NULL;
++ if (p->ds_run != NULL) {
++ vchan_cookie_complete(&p->ds_run->vd);
++ p->ds_done = p->ds_run;
++ p->ds_run = NULL;
++ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ if (c && (tc2 & BIT(i))) {
+@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
+ if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
+ return -EAGAIN;
+
++ /* Avoid losing track of ds_run if a transaction is in flight */
++ if (c->phy->ds_run)
++ return -EAGAIN;
++
+ if (vd) {
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+--
+2.20.1
+
--- /dev/null
+From 80a2b0136c73289bf84133ee0a179b792d3f6661 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2019 13:46:06 +0800
+Subject: drm/amdgpu: enable gfxoff for raven1 refresh
+
+From: changzhu <Changfeng.Zhu@amd.com>
+
+[ Upstream commit e0c63812352298efbce2a71483c1dab627d0c288 ]
+
+When the SMU firmware version is newer than 0x41e2b, the driver loads
+raven_kicker_rlc.bin. To enable gfxoff with raven_kicker_rlc.bin, the
+driver must avoid clearing PP_GFXOFF_MASK from adev->pm.pp_feature when
+that RLC firmware is loaded.
+
+Signed-off-by: changzhu <Changfeng.Zhu@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index c9ba2ec6d038..ab4a0d8545dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1038,17 +1038,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+ case CHIP_VEGA20:
+ break;
+ case CHIP_RAVEN:
+- /* Disable GFXOFF on original raven. There are combinations
+- * of sbios and platforms that are not stable.
+- */
+- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+- else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+- &&((adev->gfx.rlc_fw_version != 106 &&
+- adev->gfx.rlc_fw_version < 531) ||
+- (adev->gfx.rlc_fw_version == 53815) ||
+- (adev->gfx.rlc_feature_version < 1) ||
+- !adev->gfx.rlc.is_rlc_v2_1))
++ if (!(adev->rev_id >= 0x8 ||
++ adev->pdev->device == 0x15d8) &&
++ (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
++ !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+--
+2.20.1
+
--- /dev/null
+From de4d970b88c5af8af2a6a8c92c5b435971ebabfd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Dec 2019 11:53:09 +0000
+Subject: drm/arm/mali: make malidp_mw_connector_helper_funcs static
+
+From: Ben Dooks (Codethink) <ben.dooks@codethink.co.uk>
+
+[ Upstream commit ac2917b01992c098b8d4e6837115e3ca347fdd90 ]
+
+The malidp_mw_connector_helper_funcs is not referenced by name
+outside of the file it is in, so make it static to avoid the
+following warning:
+
+drivers/gpu/drm/arm/malidp_mw.c:59:41: warning: symbol 'malidp_mw_connector_helper_funcs' was not declared. Should it be static?
+
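+This is the standard C idiom of giving internal linkage to a table that
+is only wired up inside its own translation unit. A toy standalone
+example of the same pattern (all names here are invented):
+
+  #include <stdio.h>
+
+  struct helper_funcs {
+      int (*get_modes)(void);
+  };
+
+  static int my_get_modes(void) { return 3; }
+
+  /* static: visible only in this file, so no external declaration is needed */
+  static const struct helper_funcs my_helper_funcs = {
+      .get_modes = my_get_modes,
+  };
+
+  int main(void)
+  {
+      printf("modes: %d\n", my_helper_funcs.get_modes());
+      return 0;
+  }
+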
+Signed-off-by: Ben Dooks (Codethink) <ben.dooks@codethink.co.uk>
+Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191217115309.2133503-1-ben.dooks@codethink.co.uk
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/arm/malidp_mw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
+index 875a3a9eabfa..7d0e7b031e44 100644
+--- a/drivers/gpu/drm/arm/malidp_mw.c
++++ b/drivers/gpu/drm/arm/malidp_mw.c
+@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
+ return MODE_OK;
+ }
+
+-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
++static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+ .get_modes = malidp_mw_connector_get_modes,
+ .mode_valid = malidp_mw_connector_mode_valid,
+ };
+--
+2.20.1
+
--- /dev/null
+From af6dfc2921ca31539d8ad9c22bc1091296c03793 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2019 06:51:11 +0000
+Subject: gpio: mpc8xxx: Add platform device to gpiochip->parent
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Johnson CH Chen (陳昭勳) <JohnsonCH.Chen@moxa.com>
+
+[ Upstream commit 322f6a3182d42df18059a89c53b09d33919f755e ]
+
+In older kernels, some APIs still try to use parent->of_node from
+struct gpio_chip, and this can result in a kernel panic because parent
+is NULL. Adding the platform device as gpiochip->parent fixes this
+problem.
+
+Signed-off-by: Johnson Chen <johnsonch.chen@moxa.com>
+Link: https://patchwork.kernel.org/patch/11234609
+Link: https://lore.kernel.org/r/HK0PR01MB3521489269F76467DFD7843FFA450@HK0PR01MB3521.apcprd01.prod.exchangelabs.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-mpc8xxx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index a031cbcdf6ef..d72a3a5507b0 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ gc = &mpc8xxx_gc->gc;
++ gc->parent = &pdev->dev;
+
+ if (of_property_read_bool(np, "little-endian")) {
+ ret = bgpio_init(gc, &pdev->dev, 4,
+--
+2.20.1
+
--- /dev/null
+From 2a24837a5d5c1496f1529259e7b4e914922bb7e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 Jan 2020 12:59:59 -0800
+Subject: hexagon: parenthesize registers in asm predicates
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+[ Upstream commit 780a0cfda9006a9a22d6473c2d4c527f5c68eb2e ]
+
+Hexagon requires that register predicates in assembly be parenthesized.
+
+Link: https://github.com/ClangBuiltLinux/linux/issues/754
+Link: http://lkml.kernel.org/r/20191209222956.239798-3-ndesaulniers@google.com
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Suggested-by: Sid Manning <sidneym@codeaurora.org>
+Acked-by: Brian Cain <bcain@codeaurora.org>
+Cc: Lee Jones <lee.jones@linaro.org>
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Tuowen Zhao <ztuowen@gmail.com>
+Cc: Mika Westerberg <mika.westerberg@linux.intel.com>
+Cc: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Alexios Zavras <alexios.zavras@intel.com>
+Cc: Allison Randal <allison@lohutok.net>
+Cc: Will Deacon <will@kernel.org>
+Cc: Richard Fontana <rfontana@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/hexagon/include/asm/atomic.h | 8 ++++----
+ arch/hexagon/include/asm/bitops.h | 8 ++++----
+ arch/hexagon/include/asm/cmpxchg.h | 2 +-
+ arch/hexagon/include/asm/futex.h | 6 +++---
+ arch/hexagon/include/asm/spinlock.h | 20 ++++++++++----------
+ arch/hexagon/kernel/vm_entry.S | 2 +-
+ 6 files changed, 23 insertions(+), 23 deletions(-)
+
+diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
+index 12cd9231c4b8..0231d69c8bf2 100644
+--- a/arch/hexagon/include/asm/atomic.h
++++ b/arch/hexagon/include/asm/atomic.h
+@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+- " if !P3 jump 1b;\n" \
++ " if (!P3) jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+- " if !P3 jump 1b;\n" \
++ " if (!P3) jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
+ "1: %0 = memw_locked(%2);\n" \
+ " %1 = "#op "(%0,%3);\n" \
+ " memw_locked(%2,P3)=%1;\n" \
+- " if !P3 jump 1b;\n" \
++ " if (!P3) jump 1b;\n" \
+ : "=&r" (output), "=&r" (val) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+ " }"
+ " memw_locked(%2, p3) = %1;"
+ " {"
+- " if !p3 jump 1b;"
++ " if (!p3) jump 1b;"
+ " }"
+ "2:"
+ : "=&r" (__oldval), "=&r" (tmp)
+diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
+index 47384b094b94..71429f756af0 100644
+--- a/arch/hexagon/include/asm/bitops.h
++++ b/arch/hexagon/include/asm/bitops.h
+@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
++ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
++ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
++ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+@@ -223,7 +223,7 @@ static inline int ffs(int x)
+ int r;
+
+ asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
+- "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
++ "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
+ : "=&r" (r)
+ : "r" (x)
+ : "p0");
+diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
+index 6091322c3af9..92b8a02e588a 100644
+--- a/arch/hexagon/include/asm/cmpxchg.h
++++ b/arch/hexagon/include/asm/cmpxchg.h
+@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ __asm__ __volatile__ (
+ "1: %0 = memw_locked(%1);\n" /* load into retval */
+ " memw_locked(%1,P0) = %2;\n" /* store into memory */
+- " if !P0 jump 1b;\n"
++ " if (!P0) jump 1b;\n"
+ : "=&r" (retval)
+ : "r" (ptr), "r" (x)
+ : "memory", "p0"
+diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
+index cb635216a732..0191f7c7193e 100644
+--- a/arch/hexagon/include/asm/futex.h
++++ b/arch/hexagon/include/asm/futex.h
+@@ -16,7 +16,7 @@
+ /* For example: %1 = %4 */ \
+ insn \
+ "2: memw_locked(%3,p2) = %1;\n" \
+- " if !p2 jump 1b;\n" \
++ " if (!p2) jump 1b;\n" \
+ " %1 = #0;\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ "1: %1 = memw_locked(%3)\n"
+ " {\n"
+ " p2 = cmp.eq(%1,%4)\n"
+- " if !p2.new jump:NT 3f\n"
++ " if (!p2.new) jump:NT 3f\n"
+ " }\n"
+ "2: memw_locked(%3,p2) = %5\n"
+- " if !p2 jump 1b\n"
++ " if (!p2) jump 1b\n"
+ "3:\n"
+ ".section .fixup,\"ax\"\n"
+ "4: %0 = #%6\n"
+diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
+index bfe07d842ff3..ef103b73bec8 100644
+--- a/arch/hexagon/include/asm/spinlock.h
++++ b/arch/hexagon/include/asm/spinlock.h
+@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0);\n"
+ " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
+ "1: R6 = memw_locked(%0);\n"
+ " R6 = add(R6,#-1);\n"
+ " memw_locked(%0,P3) = R6\n"
+- " if !P3 jump 1b;\n"
++ " if (!P3) jump 1b;\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1);\n"
+ " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
+- " { if !P3 jump 1f; }\n"
++ " { if (!P3) jump 1f; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " { %0 = P3 }\n"
+ "1:\n"
+@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0)\n"
+ " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1)\n"
+ " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
+- " { if !P3 jump 1f; }\n"
++ " { if (!P3) jump 1f; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " %0 = P3;\n"
+ "1:\n"
+@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0);\n"
+ " P3 = cmp.eq(R6,#0);\n"
+- " { if !P3 jump 1b; R6 = #1; }\n"
++ " { if (!P3) jump 1b; R6 = #1; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1);\n"
+ " P3 = cmp.eq(R6,#0);\n"
+- " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
++ " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " %0 = P3;\n"
+ "1:\n"
+diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
+index 12242c27e2df..4023fdbea490 100644
+--- a/arch/hexagon/kernel/vm_entry.S
++++ b/arch/hexagon/kernel/vm_entry.S
+@@ -369,7 +369,7 @@ ret_from_fork:
+ R26.L = #LO(do_work_pending);
+ R0 = #VM_INT_DISABLE;
+ }
+- if P0 jump check_work_pending
++ if (P0) jump check_work_pending
+ {
+ R0 = R25;
+ callr R24
+--
+2.20.1
+
--- /dev/null
+From a14d37dede08a65d5b5e4f70adabce7f4f1bf0e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 Jan 2020 13:00:02 -0800
+Subject: hexagon: work around compiler crash
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+[ Upstream commit 63e80314ab7cf4783526d2e44ee57a90514911c9 ]
+
+Clang cannot translate the string "r30" into a valid register yet.
+
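+The fix below swaps the global register variable for the compiler
+builtin. __builtin_frame_address(0) is the portable GCC/Clang way to
+read the current frame pointer; a minimal standalone example (not part
+of the kernel unwinder itself):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      /* Frame pointer of the current function, without naming "r30". */
+      unsigned long fp = (unsigned long)__builtin_frame_address(0);
+
+      printf("current frame pointer: %#lx\n", fp);
+      return 0;
+  }
+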
+Link: https://github.com/ClangBuiltLinux/linux/issues/755
+Link: http://lkml.kernel.org/r/20191028155722.23419-1-ndesaulniers@google.com
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Suggested-by: Sid Manning <sidneym@quicinc.com>
+Reviewed-by: Brian Cain <bcain@codeaurora.org>
+Cc: Allison Randal <allison@lohutok.net>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Richard Fontana <rfontana@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/hexagon/kernel/stacktrace.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c
+index 35f29423fda8..5ed02f699479 100644
+--- a/arch/hexagon/kernel/stacktrace.c
++++ b/arch/hexagon/kernel/stacktrace.c
+@@ -11,8 +11,6 @@
+ #include <linux/thread_info.h>
+ #include <linux/module.h>
+
+-register unsigned long current_frame_pointer asm("r30");
+-
+ struct stackframe {
+ unsigned long fp;
+ unsigned long rets;
+@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)
+
+ low = (unsigned long)task_stack_page(current);
+ high = low + THREAD_SIZE;
+- fp = current_frame_pointer;
++ fp = (unsigned long)__builtin_frame_address(0);
+
+ while (fp >= low && fp <= (high - sizeof(*frame))) {
+ frame = (struct stackframe *)fp;
+--
+2.20.1
+
--- /dev/null
+From 58313617726706d16ec89f6548a068d0e0043ad5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Dec 2019 17:55:30 +0000
+Subject: ioat: ioat_alloc_ring() failure handling.
+
+From: Alexander.Barabash@dell.com <Alexander.Barabash@dell.com>
+
+[ Upstream commit b0b5ce1010ffc50015eaec72b0028aaae3f526bb ]
+
+If dma_alloc_coherent() returns NULL in ioat_alloc_ring(), ring
+allocation must not proceed.
+
+Until now, if the first call to dma_alloc_coherent() in
+ioat_alloc_ring() returned NULL, the processing could proceed, failing
+with NULL-pointer dereferencing further down the line.
+
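+The cleanup pattern the fix restores can be sketched with plain
+malloc()/free() (the real code uses dma_alloc_coherent() and the ioat
+descriptor structures, which are not modelled here): on the first
+failed allocation, walk back over the entries that did succeed and
+free each one, instead of carrying on with a NULL buffer.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define NCHUNKS 8
+  #define CHUNK_SZ (2 * 1024 * 1024)
+
+  static void *chunks[NCHUNKS];
+
+  static int alloc_ring(void)
+  {
+      int i;
+
+      for (i = 0; i < NCHUNKS; i++) {
+          chunks[i] = malloc(CHUNK_SZ);
+          if (!chunks[i]) {
+              int idx;
+
+              /* Roll back every chunk allocated so far, then fail. */
+              for (idx = 0; idx < i; idx++) {
+                  free(chunks[idx]);
+                  chunks[idx] = NULL;
+              }
+              return -1;
+          }
+      }
+      return 0;
+  }
+
+  int main(void)
+  {
+      printf("alloc_ring: %d\n", alloc_ring());
+      return 0;
+  }
+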
+Signed-off-by: Alexander Barabash <alexander.barabash@dell.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/75e9c0e84c3345d693c606c64f8b9ab5@x13pwhopdag1307.AMER.DELL.COM
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ioat/dma.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index 1a422a8b43cf..18c011e57592 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+
+ descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
+ SZ_2M, &descs->hw, flags);
+- if (!descs->virt && (i > 0)) {
++ if (!descs->virt) {
+ int idx;
+
+ for (idx = 0; idx < i; idx++) {
++ descs = &ioat_chan->descs[idx];
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ descs->virt, descs->hw);
+ descs->virt = NULL;
+--
+2.20.1
+
--- /dev/null
+From a212e336f265b92c221592b6763c7e190fc01dbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Dec 2019 15:07:47 +0100
+Subject: kbuild/deb-pkg: annotate libelf-dev dependency as :native
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Upstream commit 8ffdc54b6f4cd718a45802e645bb853e3a46a078 ]
+
+Cross compiling the x86 kernel on a non-x86 build machine produces
+the following error when CONFIG_UNWINDER_ORC is enabled, regardless
+of whether libelf-dev is installed or not.
+
+ dpkg-checkbuilddeps: error: Unmet build dependencies: libelf-dev
+ dpkg-buildpackage: warning: build dependencies/conflicts unsatisfied; aborting
+ dpkg-buildpackage: warning: (Use -d flag to override.)
+
+Since this is a build-time dependency of a build tool, we need to
+depend on the native version of libelf-dev, so add the appropriate
+annotation.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/package/mkdebian | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
+index 7c230016b08d..357dc56bcf30 100755
+--- a/scripts/package/mkdebian
++++ b/scripts/package/mkdebian
+@@ -136,7 +136,7 @@ mkdir -p debian/source/
+ echo "1.0" > debian/source/format
+
+ echo $debarch > debian/arch
+-extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)"
++extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
+ extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
+
+ # Generate a simple changelog template
+--
+2.20.1
+
--- /dev/null
+From b6c100695d523e52b55681c59abdaca4b13e6a43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2019 12:57:07 +0100
+Subject: media: intel-ipu3: Align struct ipu3_uapi_awb_fr_config_s to 32 bytes
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit ce644cf3fa06504c2c71ab1b794160d54aaccbc0 ]
+
+A struct that needs to be aligned to 32 bytes has a size of 28. Increase
+the size to 32.
+
+This also keeps elements of arrays of this struct 32-byte aligned, as
+well as other structs that mix ipu3_uapi_awb_fr_config_s with other
+32-byte-aligned members.
+
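+The effect can be checked with a small standalone snippet; the field
+layout below only loosely mirrors ipu3_uapi_awb_fr_config_s and is
+illustrative, not the real definition:
+
+  #include <stdio.h>
+
+  /* 25 bytes of payload plus 7 reserved bytes, so the packed struct is
+   * exactly 32 bytes and matches its 32-byte alignment. */
+  struct awb_fr_cfg {
+      unsigned char  coeffs[18];
+      unsigned short reserved1;
+      unsigned int   bayer_sign;
+      unsigned char  bayer_nf;
+      unsigned char  reserved2[7];
+  } __attribute__((aligned(32), packed));
+
+  _Static_assert(sizeof(struct awb_fr_cfg) == 32,
+                 "element size must equal the alignment");
+
+  int main(void)
+  {
+      struct awb_fr_cfg arr[2];
+
+      /* With sizeof == 32, every array element stays 32-byte aligned. */
+      printf("sizeof=%zu, &arr[1] %% 32 = %lu\n",
+             sizeof(struct awb_fr_cfg), (unsigned long)&arr[1] % 32);
+      return 0;
+  }
+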
+Fixes: commit dca5ef2aa1e6 ("media: staging/intel-ipu3: remove the unnecessary compiler flags")
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Tested-by: Bingbu Cao <bingbu.cao@intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/ipu3/include/intel-ipu3.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
+index c7cd27efac8a..0b1cb9f9cbd1 100644
+--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
++++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
+@@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s {
+ __u16 reserved1;
+ __u32 bayer_sign;
+ __u8 bayer_nf;
+- __u8 reserved2[3];
++ __u8 reserved2[7];
+ } __attribute__((aligned(32))) __packed;
+
+ /**
+--
+2.20.1
+
--- /dev/null
+From 151acad0a766d2642b4d48e127b827edf8d8f9b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Nov 2019 16:07:31 +0200
+Subject: mips: cacheinfo: report shared CPU map
+
+From: Vladimir Kondratiev <vladimir.kondratiev@intel.com>
+
+[ Upstream commit 3b1313eb32c499d46dc4c3e896d19d9564c879c4 ]
+
+Report L1 caches as shared per core; L2 - per cluster.
+
+This fixes "perf", which went haywire when the shared_cpu_map attribute
+was not reported in sysfs, in the form of
+
+/sys/devices/system/cpu/cpu*/cache/index*/shared_cpu_list
+/sys/devices/system/cpu/cpu*/cache/index*/shared_cpu_map
+
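+Tools such as perf consume these attributes directly. A minimal
+userspace check (the cpu0/index0 path is just an example; the cpu and
+index numbers vary by system):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      const char *path =
+          "/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map";
+      char buf[256];
+      FILE *f = fopen(path, "r");
+
+      if (!f) {
+          perror(path);
+          return 1;
+      }
+      /* Before this fix, MIPS left this attribute empty. */
+      if (fgets(buf, sizeof(buf), f))
+          printf("cpu0 index0 shared_cpu_map: %s", buf);
+      fclose(f);
+      return 0;
+  }
+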
+Signed-off-by: Vladimir Kondratiev <vladimir.kondratiev@intel.com>
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/kernel/cacheinfo.c | 27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
+index f777e44653d5..47312c529410 100644
+--- a/arch/mips/kernel/cacheinfo.c
++++ b/arch/mips/kernel/cacheinfo.c
+@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
+ return 0;
+ }
+
++static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
++{
++ int cpu1;
++
++ for_each_possible_cpu(cpu1)
++ if (cpus_are_siblings(cpu, cpu1))
++ cpumask_set_cpu(cpu1, cpu_map);
++}
++
++static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
++{
++ int cpu1;
++ int cluster = cpu_cluster(&cpu_data[cpu]);
++
++ for_each_possible_cpu(cpu1)
++ if (cpu_cluster(&cpu_data[cpu1]) == cluster)
++ cpumask_set_cpu(cpu1, cpu_map);
++}
++
+ static int __populate_cache_leaves(unsigned int cpu)
+ {
+ struct cpuinfo_mips *c = ¤t_cpu_data;
+@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ if (c->icache.waysize) {
++ /* L1 caches are per core */
++ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
++ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
+ } else {
+ populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
+ }
+
+- if (c->scache.waysize)
++ if (c->scache.waysize) {
++ /* L2 cache is per cluster */
++ fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
++ }
+
+ if (c->tcache.waysize)
+ populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+--
+2.20.1
+
--- /dev/null
+From 5f06b2a51ea7447dbe0641e0f3e2188293368394 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Nov 2019 14:36:58 +0000
+Subject: mips: Fix gettimeofday() in the vdso library
+
+From: Vincenzo Frascino <vincenzo.frascino@arm.com>
+
+[ Upstream commit 7d2aa4bb90f5f6f1b8de8848c26042403f2d7bf9 ]
+
+The libc provides a discovery mechanism for the vDSO library and its
+symbols. When a symbol is not exposed by the vDSO, the libc falls back
+on the corresponding system call.
+
+With the introduction of the unified vDSO library on mips this behavior
+is not honored anymore by the kernel in the case of gettimeofday().
+
+The issue has been noticed and reported due to a dhclient failure on the
+CI20 board:
+
+root@letux:~# dhclient
+../../../../lib/isc/unix/time.c:200: Operation not permitted
+root@letux:~#
+
+Restore the original behavior fixing gettimeofday() in the vDSO library.
+
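+The userspace contract being restored is simply that gettimeofday()
+keeps working whether libc binds it to the vDSO symbol or falls back to
+the system call. A trivial test program (not part of the patch):
+
+  #include <stdio.h>
+  #include <sys/time.h>
+  #include <time.h>
+
+  int main(void)
+  {
+      struct timeval tv;
+      struct timespec ts;
+
+      /* Served by __vdso_gettimeofday when libc finds the symbol,
+       * otherwise by the regular syscall fallback. */
+      if (gettimeofday(&tv, NULL) != 0) {
+          perror("gettimeofday");  /* what dhclient tripped over */
+          return 1;
+      }
+      clock_gettime(CLOCK_REALTIME, &ts);
+
+      printf("gettimeofday:  %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
+      printf("clock_gettime: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
+      return 0;
+  }
+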
+Reported-by: H. Nikolaus Schaller <hns@goldelico.com>
+Tested-by: H. Nikolaus Schaller <hns@goldelico.com> # CI20 with JZ4780
+Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Cc: mips-creator-ci20-dev@googlegroups.com
+Cc: letux-kernel@openphoenux.org
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/vdso/gettimeofday.h | 13 -------------
+ arch/mips/vdso/vgettimeofday.c | 20 ++++++++++++++++++++
+ 2 files changed, 20 insertions(+), 13 deletions(-)
+
+diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
+index b08825531e9f..0ae9b4cbc153 100644
+--- a/arch/mips/include/asm/vdso/gettimeofday.h
++++ b/arch/mips/include/asm/vdso/gettimeofday.h
+@@ -26,8 +26,6 @@
+
+ #define __VDSO_USE_SYSCALL ULLONG_MAX
+
+-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+-
+ static __always_inline long gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+@@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
+ return error ? -ret : ret;
+ }
+
+-#else
+-
+-static __always_inline long gettimeofday_fallback(
+- struct __kernel_old_timeval *_tv,
+- struct timezone *_tz)
+-{
+- return -1;
+-}
+-
+-#endif
+-
+ static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c
+index 6ebdc37c89fc..6b83b6376a4b 100644
+--- a/arch/mips/vdso/vgettimeofday.c
++++ b/arch/mips/vdso/vgettimeofday.c
+@@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
+ return __cvdso_clock_gettime32(clock, ts);
+ }
+
++#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
++
++/*
++ * This is behind the ifdef so that we don't provide the symbol when there's no
++ * possibility of there being a usable clocksource, because there's nothing we
++ * can do without it. When libc fails the symbol lookup it should fall back on
++ * the standard syscall path.
++ */
+ int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+ {
+ return __cvdso_gettimeofday(tv, tz);
+ }
+
++#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
++
+ int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+ {
+@@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
+ return __cvdso_clock_gettime(clock, ts);
+ }
+
++#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
++
++/*
++ * This is behind the ifdef so that we don't provide the symbol when there's no
++ * possibility of there being a usable clocksource, because there's nothing we
++ * can do without it. When libc fails the symbol lookup it should fall back on
++ * the standard syscall path.
++ */
+ int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+ {
+ return __cvdso_gettimeofday(tv, tz);
+ }
+
++#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
++
+ int __vdso_clock_getres(clockid_t clock_id,
+ struct __kernel_timespec *res)
+ {
+--
+2.20.1
+
--- /dev/null
+From d7c843adb6f7d8f548df25b2518b691ea19d95cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Dec 2019 14:37:07 +0200
+Subject: MIPS: Prevent link failure with kcov instrumentation
+
+From: Jouni Hogander <jouni.hogander@unikie.com>
+
+[ Upstream commit a4a3893114a41e365274d5fab5d9ff5acc235ff0 ]
+
+__sanitizer_cov_trace_pc() is not linked in, which causes a link
+failure if KCOV_INSTRUMENT is enabled. Fix this by disabling
+instrumentation for the compressed image.
+
+Signed-off-by: Jouni Hogander <jouni.hogander@unikie.com>
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+Cc: linux-mips@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/boot/compressed/Makefile | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
+index 172801ed35b8..d859f079b771 100644
+--- a/arch/mips/boot/compressed/Makefile
++++ b/arch/mips/boot/compressed/Makefile
+@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
+ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
+
++# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
++KCOV_INSTRUMENT := n
++
+ # decompressor objects (linked with vmlinuz)
+ vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
+
+--
+2.20.1
+
--- /dev/null
+From a1a32489ab351d41753c885099556ae49d210e21 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 Jan 2020 13:00:18 -0800
+Subject: ocfs2: call journal flush to mark journal as empty after journal
+ recovery when mount
+
+From: Kai Li <li.kai4@h3c.com>
+
+[ Upstream commit 397eac17f86f404f5ba31d8c3e39ec3124b39fd3 ]
+
+If the journal is dirty at mount time, it will be replayed, but the
+jbd2 superblock's log tail cannot be updated to mark a new start
+because journal->j_flag has already been set with JBD2_ABORT first in
+journal_init_common.
+
+When a new transaction is committed, it will be recorded in block 1
+first (journal->j_tail is set to 1 in journal_reset). If an emergency
+restart unfortunately happens again before the journal super block is
+updated, the newly recorded transaction will not be replayed on the
+next mount.
+
+The following steps describe this procedure in detail.
+1. mount and touch some files
+2. these transactions are committed to journal area but not checkpointed
+3. emergency restart
+4. mount again and its journals are replayed
+5. journal super block's first s_start is 1, but its s_seq is not updated
+6. touch a new file and its trans is committed but not checkpointed
+7. emergency restart again
+8. mount and journal is dirty, but trans committed in 6 will not be
+replayed.
+
+This problem is easy to hit when the LUN is used by only one node. If
+it is used by multiple nodes, another node will replay the journal (via
+ocfs2_recover_node->ocfs2_replay_journal) and the journal super block
+will be updated after recovery, just as this patch does.
+
+The following jbd2 journal can be generated by touching a new file after
+journal is replayed, and seq 15 is the first valid commit, but first seq
+is 13 in journal super block.
+
+logdump:
+ Block 0: Journal Superblock
+ Seq: 0 Type: 4 (JBD2_SUPERBLOCK_V2)
+ Blocksize: 4096 Total Blocks: 32768 First Block: 1
+ First Commit ID: 13 Start Log Blknum: 1
+ Error: 0
+ Feature Compat: 0
+ Feature Incompat: 2 block64
+ Feature RO compat: 0
+ Journal UUID: 4ED3822C54294467A4F8E87D2BA4BC36
+ FS Share Cnt: 1 Dynamic Superblk Blknum: 0
+ Per Txn Block Limit Journal: 0 Data: 0
+
+ Block 1: Journal Commit Block
+ Seq: 14 Type: 2 (JBD2_COMMIT_BLOCK)
+
+ Block 2: Journal Descriptor
+ Seq: 15 Type: 1 (JBD2_DESCRIPTOR_BLOCK)
+ No. Blocknum Flags
+ 0. 587 none
+ UUID: 00000000000000000000000000000000
+ 1. 8257792 JBD2_FLAG_SAME_UUID
+ 2. 619 JBD2_FLAG_SAME_UUID
+ 3. 24772864 JBD2_FLAG_SAME_UUID
+ 4. 8257802 JBD2_FLAG_SAME_UUID
+ 5. 513 JBD2_FLAG_SAME_UUID JBD2_FLAG_LAST_TAG
+ ...
+ Block 7: Inode
+ Inode: 8257802 Mode: 0640 Generation: 57157641 (0x3682809)
+ FS Generation: 2839773110 (0xa9437fb6)
+ CRC32: 00000000 ECC: 0000
+ Type: Regular Attr: 0x0 Flags: Valid
+ Dynamic Features: (0x1) InlineData
+ User: 0 (root) Group: 0 (root) Size: 7
+ Links: 1 Clusters: 0
+ ctime: 0x5de5d870 0x11104c61 -- Tue Dec 3 11:37:20.286280801 2019
+ atime: 0x5de5d870 0x113181a1 -- Tue Dec 3 11:37:20.288457121 2019
+ mtime: 0x5de5d870 0x11104c61 -- Tue Dec 3 11:37:20.286280801 2019
+ dtime: 0x0 -- Thu Jan 1 08:00:00 1970
+ ...
+ Block 9: Journal Commit Block
+ Seq: 15 Type: 2 (JBD2_COMMIT_BLOCK)
+
+The following is journal recovery log when recovering the upper jbd2
+journal when mount again.
+
+syslog:
+ ocfs2: File system on device (252,1) was not unmounted cleanly, recovering it.
+ fs/jbd2/recovery.c:(do_one_pass, 449): Starting recovery pass 0
+ fs/jbd2/recovery.c:(do_one_pass, 449): Starting recovery pass 1
+ fs/jbd2/recovery.c:(do_one_pass, 449): Starting recovery pass 2
+ fs/jbd2/recovery.c:(jbd2_journal_recover, 278): JBD2: recovery, exit status 0, recovered transactions 13 to 13
+
+Because the first commit seq 13 recorded in the journal super block is
+not consistent with the value recorded in block 1 (seq 14), journal
+recovery is terminated before seq 15 even though that is an unbroken
+commit; inode 8257802 is a new file and it will be lost.
+
+Link: http://lkml.kernel.org/r/20191217020140.2197-1-li.kai4@h3c.com
+Signed-off-by: Kai Li <li.kai4@h3c.com>
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Reviewed-by: Changwei Ge <gechangwei@live.cn>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ocfs2/journal.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 699a560efbb0..900e4ef686bf 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
+
+ ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
+
++ if (replayed) {
++ jbd2_journal_lock_updates(journal->j_journal);
++ status = jbd2_journal_flush(journal->j_journal);
++ jbd2_journal_unlock_updates(journal->j_journal);
++ if (status < 0)
++ mlog_errno(status);
++ }
++
+ status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
+ if (status < 0) {
+ mlog_errno(status);
+--
+2.20.1
+
--- /dev/null
+From f5017eb0ad47b394708d3b2069ea3fe131b77c94 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Dec 2019 20:07:04 -0800
+Subject: riscv: export flush_icache_all to modules
+
+From: Olof Johansson <olof@lixom.net>
+
+[ Upstream commit 1833e327a5ea1d1f356fbf6ded0760c9ff4b0594 ]
+
+This is needed by LKDTM (the crash dump test module), which calls
+flush_icache_range(); on RISC-V this turns into flush_icache_all(). On
+other architectures, the actual implementation is exported, so follow
+that precedence and export it here too.
+
+Fixes build of CONFIG_LKDTM that fails with:
+ERROR: "flush_icache_all" [drivers/misc/lkdtm/lkdtm.ko] undefined!
+
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/mm/cacheflush.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
+index 3f15938dec89..c54bd3c79955 100644
+--- a/arch/riscv/mm/cacheflush.c
++++ b/arch/riscv/mm/cacheflush.c
+@@ -14,6 +14,7 @@ void flush_icache_all(void)
+ {
+ sbi_remote_fence_i(NULL);
+ }
++EXPORT_SYMBOL(flush_icache_all);
+
+ /*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+--
+2.20.1
+
--- /dev/null
+From d9ca24b0cb7f99e20382c123b28972a2c2adce01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2019 11:28:57 -0500
+Subject: rseq/selftests: Turn off timeout setting
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+[ Upstream commit af9cb29c5488381083b0b5ccdfb3cd931063384a ]
+
+As the rseq selftests can run for a long period of time, disable the
+timeout that the general selftests have.
+
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Shuah Khan <skhan@linuxfoundation.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: "Paul E. McKenney" <paulmck@linux.ibm.com>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: "H . Peter Anvin" <hpa@zytor.com>
+Cc: Paul Turner <pjt@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/rseq/settings | 1 +
+ 1 file changed, 1 insertion(+)
+ create mode 100644 tools/testing/selftests/rseq/settings
+
+diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings
+new file mode 100644
+index 000000000000..e7b9417537fb
+--- /dev/null
++++ b/tools/testing/selftests/rseq/settings
+@@ -0,0 +1 @@
++timeout=0
+--
+2.20.1
+
--- /dev/null
+From f91eaff6d33ecf2acffece53019b5173dca175de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2019 16:20:56 +0000
+Subject: rxrpc: Don't take call->user_mutex in rxrpc_new_incoming_call()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 13b7955a0252e15265386b229b814152f109b234 ]
+
+Standard kernel mutexes cannot be used in any way from interrupt or softirq
+context, so the user_mutex which manages access to a call cannot be a mutex
+since on a new call the mutex must start off locked and be unlocked within
+the softirq handler to prevent userspace interfering with a call we're
+setting up.
+
+Commit a0855d24fc22d49cdc25664fb224caee16998683 ("locking/mutex: Complain
+upon mutex API misuse in IRQ contexts") causes big warnings to be splashed
+in dmesg for each new call that comes in from the server. Whilst it
+*seems* like it should be okay, since the accept path uses trylock, there
+are issues with PI boosting and marking the wrong task as the owner.
+
+Fix this by not taking the mutex in the softirq path at all. It's not
+obvious that there should be any need for it as the state is set before the
+first notification is generated for the new call.
+
+There's also no particular reason why the link-assessing ping should be
+triggered inside the mutex. It's not actually transmitted there anyway,
+but rather it has to be deferred to a workqueue.
+
+Further, I don't think that there's any particular reason that the socket
+notification needs to be done from within rx->incoming_lock, so the amount
+of time that lock is held can be shortened too and the ping prepared before
+the new call notification is sent.
+
+Fixes: 540b1c48c37a ("rxrpc: Fix deadlock between call creation and sendmsg/recvmsg")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Peter Zijlstra (Intel) <peterz@infradead.org>
+cc: Ingo Molnar <mingo@redhat.com>
+cc: Will Deacon <will@kernel.org>
+cc: Davidlohr Bueso <dave@stgolabs.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/call_accept.c | 20 +++-----------------
+ 1 file changed, 3 insertions(+), 17 deletions(-)
+
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 3685b1732f65..44fa22b020ef 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -381,18 +381,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ trace_rxrpc_receive(call, rxrpc_receive_incoming,
+ sp->hdr.serial, sp->hdr.seq);
+
+- /* Lock the call to prevent rxrpc_kernel_send/recv_data() and
+- * sendmsg()/recvmsg() inconveniently stealing the mutex once the
+- * notification is generated.
+- *
+- * The BUG should never happen because the kernel should be well
+- * behaved enough not to access the call before the first notification
+- * event and userspace is prevented from doing so until the state is
+- * appropriate.
+- */
+- if (!mutex_trylock(&call->user_mutex))
+- BUG();
+-
+ /* Make the call live. */
+ rxrpc_incoming_call(rx, call, skb);
+ conn = call->conn;
+@@ -433,6 +421,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ BUG();
+ }
+ spin_unlock(&conn->state_lock);
++ spin_unlock(&rx->incoming_lock);
++
++ rxrpc_send_ping(call, skb);
+
+ if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
+ rxrpc_notify_socket(call);
+@@ -444,11 +435,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ */
+ rxrpc_put_call(call, rxrpc_call_put);
+
+- spin_unlock(&rx->incoming_lock);
+-
+- rxrpc_send_ping(call, skb);
+- mutex_unlock(&call->user_mutex);
+-
+ _leave(" = %p{%d}", call, call->debug_id);
+ return call;
+
+--
+2.20.1
+
--- /dev/null
+From 8497bec9c49f6756c7a85ba3cf8e41df03908a8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2019 16:17:16 +0000
+Subject: rxrpc: Fix missing security check on incoming calls
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 063c60d39180cec7c9317f5acfc3071f8fecd705 ]
+
+Fix rxrpc_new_incoming_call() to check that we have a suitable service key
+available for the combination of service ID and security class of a new
+incoming call - and to reject calls for which we don't.
+
+This causes an assertion like the following to appear:
+
+ rxrpc: Assertion failed - 6(0x6) == 12(0xc) is false
+ kernel BUG at net/rxrpc/call_object.c:456!
+
+Where call->state is RXRPC_CALL_SERVER_SECURING (6) rather than
+RXRPC_CALL_COMPLETE (12).
+
+Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code")
+Reported-by: Marc Dionne <marc.dionne@auristor.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/ar-internal.h | 10 ++++--
+ net/rxrpc/call_accept.c | 14 ++++++--
+ net/rxrpc/conn_event.c | 16 +--------
+ net/rxrpc/conn_service.c | 4 +++
+ net/rxrpc/rxkad.c | 5 +--
+ net/rxrpc/security.c | 70 +++++++++++++++++++---------------------
+ 6 files changed, 59 insertions(+), 60 deletions(-)
+
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index 7c7d10f2e0c1..5e99df80e80a 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -209,6 +209,7 @@ struct rxrpc_skb_priv {
+ struct rxrpc_security {
+ const char *name; /* name of this service */
+ u8 security_index; /* security type provided */
++ u32 no_key_abort; /* Abort code indicating no key */
+
+ /* Initialise a security service */
+ int (*init)(void);
+@@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
+ struct sk_buff *);
+ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
+-void rxrpc_new_incoming_connection(struct rxrpc_sock *,
+- struct rxrpc_connection *, struct sk_buff *);
++void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
++ const struct rxrpc_security *, struct key *,
++ struct sk_buff *);
+ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
+
+ /*
+@@ -1103,7 +1105,9 @@ extern const struct rxrpc_security rxkad;
+ int __init rxrpc_init_security(void);
+ void rxrpc_exit_security(void);
+ int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+-int rxrpc_init_server_conn_security(struct rxrpc_connection *);
++bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
++ const struct rxrpc_security **, struct key **,
++ struct sk_buff *);
+
+ /*
+ * sendmsg.c
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 44fa22b020ef..70e44abf106c 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -263,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
+ struct rxrpc_peer *peer,
+ struct rxrpc_connection *conn,
++ const struct rxrpc_security *sec,
++ struct key *key,
+ struct sk_buff *skb)
+ {
+ struct rxrpc_backlog *b = rx->backlog;
+@@ -310,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
+ conn->params.local = rxrpc_get_local(local);
+ conn->params.peer = peer;
+ rxrpc_see_connection(conn);
+- rxrpc_new_incoming_connection(rx, conn, skb);
++ rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
+ } else {
+ rxrpc_get_connection(conn);
+ }
+@@ -349,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ struct sk_buff *skb)
+ {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ const struct rxrpc_security *sec = NULL;
+ struct rxrpc_connection *conn;
+ struct rxrpc_peer *peer = NULL;
+- struct rxrpc_call *call;
++ struct rxrpc_call *call = NULL;
++ struct key *key = NULL;
+
+ _enter("");
+
+@@ -372,7 +376,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ */
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
+
+- call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
++ if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
++ goto no_call;
++
++ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
++ key_put(key);
+ if (!call) {
+ skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
+ goto no_call;
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index a1ceef4f5cd0..808a4723f868 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
+ _enter("{%d}", conn->debug_id);
+
+ ASSERT(conn->security_ix != 0);
+-
+- if (!conn->params.key) {
+- _debug("set up security");
+- ret = rxrpc_init_server_conn_security(conn);
+- switch (ret) {
+- case 0:
+- break;
+- case -ENOENT:
+- abort_code = RX_CALL_DEAD;
+- goto abort;
+- default:
+- abort_code = RXKADNOAUTH;
+- goto abort;
+- }
+- }
++ ASSERT(conn->server_key);
+
+ if (conn->security->issue_challenge(conn) < 0) {
+ abort_code = RX_CALL_DEAD;
+diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
+index 123d6ceab15c..21da48e3d2e5 100644
+--- a/net/rxrpc/conn_service.c
++++ b/net/rxrpc/conn_service.c
+@@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
+ */
+ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
+ struct rxrpc_connection *conn,
++ const struct rxrpc_security *sec,
++ struct key *key,
+ struct sk_buff *skb)
+ {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+@@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
+ conn->service_id = sp->hdr.serviceId;
+ conn->security_ix = sp->hdr.securityIndex;
+ conn->out_clientflag = 0;
++ conn->security = sec;
++ conn->server_key = key_get(key);
+ if (conn->security_ix)
+ conn->state = RXRPC_CONN_SERVICE_UNSECURED;
+ else
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index 8d8aa3c230b5..098f1f9ec53b 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
+ u32 serial;
+ int ret;
+
+- _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
++ _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
+
+- ret = key_validate(conn->params.key);
++ ret = key_validate(conn->server_key);
+ if (ret < 0)
+ return ret;
+
+@@ -1293,6 +1293,7 @@ static void rxkad_exit(void)
+ const struct rxrpc_security rxkad = {
+ .name = "rxkad",
+ .security_index = RXRPC_SECURITY_RXKAD,
++ .no_key_abort = RXKADUNKNOWNKEY,
+ .init = rxkad_init,
+ .exit = rxkad_exit,
+ .init_connection_security = rxkad_init_connection_security,
+diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
+index a4c47d2b7054..9b1fb9ed0717 100644
+--- a/net/rxrpc/security.c
++++ b/net/rxrpc/security.c
+@@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
+ }
+
+ /*
+- * initialise the security on a server connection
++ * Find the security key for a server connection.
+ */
+-int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
++bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx,
++ const struct rxrpc_security **_sec,
++ struct key **_key,
++ struct sk_buff *skb)
+ {
+ const struct rxrpc_security *sec;
+- struct rxrpc_local *local = conn->params.local;
+- struct rxrpc_sock *rx;
+- struct key *key;
+- key_ref_t kref;
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ key_ref_t kref = NULL;
+ char kdesc[5 + 1 + 3 + 1];
+
+ _enter("");
+
+- sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);
++ sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex);
+
+- sec = rxrpc_security_lookup(conn->security_ix);
++ sec = rxrpc_security_lookup(sp->hdr.securityIndex);
+ if (!sec) {
+- _leave(" = -ENOKEY [lookup]");
+- return -ENOKEY;
++ trace_rxrpc_abort(0, "SVS",
++ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
++ RX_INVALID_OPERATION, EKEYREJECTED);
++ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
++ skb->priority = RX_INVALID_OPERATION;
++ return false;
+ }
+
+- /* find the service */
+- read_lock(&local->services_lock);
+- rx = rcu_dereference_protected(local->service,
+- lockdep_is_held(&local->services_lock));
+- if (rx && (rx->srx.srx_service == conn->service_id ||
+- rx->second_service == conn->service_id))
+- goto found_service;
++ if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE)
++ goto out;
+
+- /* the service appears to have died */
+- read_unlock(&local->services_lock);
+- _leave(" = -ENOENT");
+- return -ENOENT;
+-
+-found_service:
+ if (!rx->securities) {
+- read_unlock(&local->services_lock);
+- _leave(" = -ENOKEY");
+- return -ENOKEY;
++ trace_rxrpc_abort(0, "SVR",
++ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
++ RX_INVALID_OPERATION, EKEYREJECTED);
++ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
++ skb->priority = RX_INVALID_OPERATION;
++ return false;
+ }
+
+ /* look through the service's keyring */
+ kref = keyring_search(make_key_ref(rx->securities, 1UL),
+ &key_type_rxrpc_s, kdesc, true);
+ if (IS_ERR(kref)) {
+- read_unlock(&local->services_lock);
+- _leave(" = %ld [search]", PTR_ERR(kref));
+- return PTR_ERR(kref);
++ trace_rxrpc_abort(0, "SVK",
++ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
++ sec->no_key_abort, EKEYREJECTED);
++ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
++ skb->priority = sec->no_key_abort;
++ return false;
+ }
+
+- key = key_ref_to_ptr(kref);
+- read_unlock(&local->services_lock);
+-
+- conn->server_key = key;
+- conn->security = sec;
+-
+- _leave(" = 0");
+- return 0;
++out:
++ *_sec = sec;
++ *_key = key_ref_to_ptr(kref);
++ return true;
+ }
+--
+2.20.1
+
--- /dev/null
+From 677d64221a8a3e9297e338386e2c61518e071fa9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Dec 2019 16:38:49 +0000
+Subject: rxrpc: Unlock new call in rxrpc_new_incoming_call() rather than the
+ caller
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit f33121cbe91973a08e68e4bde8c3f7e6e4e351c1 ]
+
+Move the unlock and the ping transmission for a new incoming call into
+rxrpc_new_incoming_call() rather than doing it in the caller. This makes
+it clearer to see what's going on.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+cc: Ingo Molnar <mingo@redhat.com>
+cc: Will Deacon <will@kernel.org>
+cc: Davidlohr Bueso <dave@stgolabs.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/call_accept.c | 36 ++++++++++++++++++++++++++++--------
+ net/rxrpc/input.c | 18 ------------------
+ 2 files changed, 28 insertions(+), 26 deletions(-)
+
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 135bf5cd8dd5..3685b1732f65 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
+ kfree(b);
+ }
+
++/*
++ * Ping the other end to fill our RTT cache and to retrieve the rwind
++ * and MTU parameters.
++ */
++static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
++{
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ ktime_t now = skb->tstamp;
++
++ if (call->peer->rtt_usage < 3 ||
++ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
++ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
++ true, true,
++ rxrpc_propose_ack_ping_for_params);
++}
++
+ /*
+ * Allocate a new incoming call from the prealloc pool, along with a connection
+ * and a peer as necessary.
+@@ -346,9 +362,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+- _leave(" = NULL [close]");
+- call = NULL;
+- goto out;
++ goto no_call;
+ }
+
+ /* The peer, connection and call may all have sprung into existence due
+@@ -361,9 +375,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
+ if (!call) {
+ skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
+- _leave(" = NULL [busy]");
+- call = NULL;
+- goto out;
++ goto no_call;
+ }
+
+ trace_rxrpc_receive(call, rxrpc_receive_incoming,
+@@ -432,10 +444,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ */
+ rxrpc_put_call(call, rxrpc_call_put);
+
+- _leave(" = %p{%d}", call, call->debug_id);
+-out:
+ spin_unlock(&rx->incoming_lock);
++
++ rxrpc_send_ping(call, skb);
++ mutex_unlock(&call->user_mutex);
++
++ _leave(" = %p{%d}", call, call->debug_id);
+ return call;
++
++no_call:
++ spin_unlock(&rx->incoming_lock);
++ _leave(" = NULL [%u]", skb->mark);
++ return NULL;
+ }
+
+ /*
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 157be1ff8697..86bd133b4fa0 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -192,22 +192,6 @@ send_extra_data:
+ goto out_no_clear_ca;
+ }
+
+-/*
+- * Ping the other end to fill our RTT cache and to retrieve the rwind
+- * and MTU parameters.
+- */
+-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+-{
+- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+- ktime_t now = skb->tstamp;
+-
+- if (call->peer->rtt_usage < 3 ||
+- ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+- true, true,
+- rxrpc_propose_ack_ping_for_params);
+-}
+-
+ /*
+ * Apply a hard ACK by advancing the Tx window.
+ */
+@@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
+ call = rxrpc_new_incoming_call(local, rx, skb);
+ if (!call)
+ goto reject_packet;
+- rxrpc_send_ping(call, skb);
+- mutex_unlock(&call->user_mutex);
+ }
+
+ /* Process a call packet; this either discards or passes on the ref
+--
+2.20.1
+
--- /dev/null
+From 9dc03909a3217a78a2f6c4247d33c02dde72c0c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Dec 2019 15:03:22 +0100
+Subject: s390/qeth: lock the card while changing its hsuid
+
+From: Julian Wiedmann <jwi@linux.ibm.com>
+
+[ Upstream commit 5b6c7b55cfe26224b0f41b1c226d3534c542787f ]
+
+qeth_l3_dev_hsuid_store() initially checks the card state, but doesn't
+take the conf_mutex to ensure that the card stays in this state while
+being reconfigured.
+
+Rework the code to take this lock, and drop a redundant state check in a
+helper function.
+
+Fixes: b333293058aa ("qeth: add support for af_iucv HiperSockets transport")
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/net/qeth_core_main.c | 5 ----
+ drivers/s390/net/qeth_l3_sys.c | 40 +++++++++++++++++++++----------
+ 2 files changed, 28 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 94e5b6e15ef9..5be4d800e4ba 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -3378,11 +3378,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+ goto out;
+ }
+
+- if (card->state != CARD_STATE_DOWN) {
+- rc = -1;
+- goto out;
+- }
+-
+ qeth_free_qdio_queues(card);
+ card->options.cq = cq;
+ rc = 0;
+diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
+index 2f73b33c9347..333fd4619dc6 100644
+--- a/drivers/s390/net/qeth_l3_sys.c
++++ b/drivers/s390/net/qeth_l3_sys.c
+@@ -270,24 +270,36 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+ {
+ struct qeth_card *card = dev_get_drvdata(dev);
++ int rc = 0;
+ char *tmp;
+- int rc;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!IS_IQD(card))
+ return -EPERM;
+- if (card->state != CARD_STATE_DOWN)
+- return -EPERM;
+- if (card->options.sniffer)
+- return -EPERM;
+- if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+- return -EPERM;
++
++ mutex_lock(&card->conf_mutex);
++ if (card->state != CARD_STATE_DOWN) {
++ rc = -EPERM;
++ goto out;
++ }
++
++ if (card->options.sniffer) {
++ rc = -EPERM;
++ goto out;
++ }
++
++ if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
++ rc = -EPERM;
++ goto out;
++ }
+
+ tmp = strsep((char **)&buf, "\n");
+- if (strlen(tmp) > 8)
+- return -EINVAL;
++ if (strlen(tmp) > 8) {
++ rc = -EINVAL;
++ goto out;
++ }
+
+ if (card->options.hsuid[0])
+ /* delete old ip address */
+@@ -298,11 +310,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+ card->options.hsuid[0] = '\0';
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+ qeth_configure_cq(card, QETH_CQ_DISABLED);
+- return count;
++ goto out;
+ }
+
+- if (qeth_configure_cq(card, QETH_CQ_ENABLED))
+- return -EPERM;
++ if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
++ rc = -EPERM;
++ goto out;
++ }
+
+ snprintf(card->options.hsuid, sizeof(card->options.hsuid),
+ "%-8s", tmp);
+@@ -311,6 +325,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+
+ rc = qeth_l3_modify_hsuid(card, true);
+
++out:
++ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
+ }
+
+--
+2.20.1
+
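As a side note on the pattern the qeth change adopts: a sysfs store handler that must hold a configuration mutex across the whole update usually takes the shape sketched below. This is only an illustration under assumed names (struct my_card, my_apply(), MY_STATE_DOWN), not the real qeth structures.

    enum { MY_STATE_DOWN, MY_STATE_UP };

    struct my_card {
            struct mutex conf_mutex;
            int state;
    };

    static int my_apply(struct my_card *card, const char *buf)
    {
            return 0;       /* hypothetical reconfiguration step */
    }

    static ssize_t my_attr_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            struct my_card *card = dev_get_drvdata(dev);
            int rc = 0;

            mutex_lock(&card->conf_mutex);
            if (card->state != MY_STATE_DOWN) {
                    rc = -EPERM;            /* no changes while the card is active */
                    goto out;
            }
            rc = my_apply(card, buf);       /* reconfigure under the same lock */
    out:
            mutex_unlock(&card->conf_mutex);
            return rc ? rc : count;         /* sysfs convention: count on success */
    }

Holding the mutex from the state check through the reconfiguration is what closes the race described in the commit message above.
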
--- /dev/null
+From d1f969cb4a7d97769f14a285cbde3d8c74907c4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Dec 2019 19:15:31 +0530
+Subject: scsi: libcxgbi: fix NULL pointer dereference in
+ cxgbi_device_destroy()
+
+From: Varun Prakash <varun@chelsio.com>
+
+[ Upstream commit 71482fde704efdd8c3abe0faf34d922c61e8d76b ]
+
+If cxgb4i_ddp_init() fails then cdev->cdev2ppm will be NULL, so add a NULL
+pointer check before dereferencing it.
+
+Link: https://lore.kernel.org/r/1576676731-3068-1-git-send-email-varun@chelsio.com
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/cxgbi/libcxgbi.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
+index 3e17af8aedeb..2cd2761bd249 100644
+--- a/drivers/scsi/cxgbi/libcxgbi.c
++++ b/drivers/scsi/cxgbi/libcxgbi.c
+@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
+ "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
+ cxgbi_hbas_remove(cdev);
+ cxgbi_device_portmap_cleanup(cdev);
+- cxgbi_ppm_release(cdev->cdev2ppm(cdev));
++ if (cdev->cdev2ppm)
++ cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+ if (cdev->pmap.max_connect)
+ cxgbi_free_big_mem(cdev->pmap.port_csk);
+ kfree(cdev);
+--
+2.20.1
+
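The fix is the usual guard for an optional callback: only call through the function pointer when the init path that assigns it actually succeeded. A minimal sketch of that pattern, using hypothetical names (struct my_cdev, my_release_ppm()) rather than the libcxgbi types:

    struct my_cdev {
            void *(*cdev2ppm)(struct my_cdev *);    /* left NULL if DDP init failed */
    };

    static void my_release_ppm(void *ppm)
    {
            /* hypothetical cleanup of the page-pod manager */
    }

    static void my_device_destroy(struct my_cdev *cdev)
    {
            if (cdev->cdev2ppm)                     /* dereference only when assigned */
                    my_release_ppm(cdev->cdev2ppm(cdev));
            kfree(cdev);
    }
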
--- /dev/null
+From e7ac3add0a5807881cf5c4848cb234dd07040047 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2019 17:36:02 +0200
+Subject: scsi: target/iblock: Fix protection error with blocks greater than
+ 512B
+
+From: Israel Rukshin <israelr@mellanox.com>
+
+[ Upstream commit e4dc9a4c31fe10d1751c542702afc85be8a5c56a ]
+
+The block layer sector size is 512 bytes, but the integrity interval size
+may differ (for example when the media uses a 4K block size). On the
+initiator side, the virtual start sector used for the Reftag is the one
+originally submitted by the block layer, in 512-byte units. The initiator
+converts the Reftag to integrity interval units and sends it to the
+target, so the target's virtual start sector must also be expressed in
+integrity interval units. prepare_fn() and complete_fn() do not remap the
+Reftag correctly when the virtual start sector is in the wrong units,
+which leads to the following protection error at the device:
+
+"blk_update_request: protection error, dev sdb, sector 2048 op 0x0:(READ)
+flags 0x10000 phys_seg 1 prio class 0"
+
+To fix that, set the seed in integrity interval units.
+
+Link: https://lore.kernel.org/r/1576078562-15240-1-git-send-email-israelr@mellanox.com
+Signed-off-by: Israel Rukshin <israelr@mellanox.com>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/target/target_core_iblock.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
+index 6949ea8bc387..51ffd5c002de 100644
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+ }
+
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+- bip_set_seed(bip, bio->bi_iter.bi_sector);
++ /* virtual start sector must be in integrity interval units */
++ bip_set_seed(bip, bio->bi_iter.bi_sector >>
++ (bi->interval_exp - SECTOR_SHIFT));
+
+ pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+ (unsigned long long)bip->bip_iter.bi_sector);
+--
+2.20.1
+
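A short worked example of the conversion may help; the numbers are assumptions for illustration, not taken from the driver. With a 4096-byte integrity interval, bi->interval_exp is 12 and the block layer sector shift is 9, so the seed is the 512-byte sector shifted right by 3: sector 2048 maps to seed 256, the index of the interval the I/O starts in.

    /*
     * Illustrative only: convert a 512-byte virtual start sector into
     * integrity-interval units, as the patched iblock_alloc_bip() does.
     */
    static unsigned long long seed_in_intervals(unsigned long long sector,
                                                unsigned int interval_exp)
    {
            const unsigned int sector_shift = 9;    /* block layer sector = 512 bytes */

            /* e.g. interval_exp == 12 (4K interval): 2048 >> (12 - 9) == 256 */
            return sector >> (interval_exp - sector_shift);
    }
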
--- /dev/null
+From 03c2b3ffb7d2af25c7e2cd5436dc719c5d4e7217 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2019 18:56:06 -0700
+Subject: selftests: firmware: Fix it to do root uid check and skip
+
+From: Shuah Khan <skhan@linuxfoundation.org>
+
+[ Upstream commit c65e41538b04e0d64a673828745a00cb68a24371 ]
+
+firmware attempts to load test modules that require root access
+and fail. Fix it to check for root uid and exit with skip code
+instead.
+
+Before this fix:
+
+selftests: firmware: fw_run_tests.sh
+modprobe: ERROR: could not insert 'test_firmware': Operation not permitted
+You must have the following enabled in your kernel:
+CONFIG_TEST_FIRMWARE=y
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+not ok 1 selftests: firmware: fw_run_tests.sh # SKIP
+
+With this fix:
+
+selftests: firmware: fw_run_tests.sh
+skip all tests: must be run as root
+not ok 1 selftests: firmware: fw_run_tests.sh # SKIP
+
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/firmware/fw_lib.sh | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
+index b879305a766d..5b8c0fedee76 100755
+--- a/tools/testing/selftests/firmware/fw_lib.sh
++++ b/tools/testing/selftests/firmware/fw_lib.sh
+@@ -34,6 +34,12 @@ test_modprobe()
+
+ check_mods()
+ {
++ local uid=$(id -u)
++ if [ $uid -ne 0 ]; then
++ echo "skip all tests: must be run as root" >&2
++ exit $ksft_skip
++ fi
++
+ trap "test_modprobe" EXIT
+ if [ ! -d $DIR ]; then
+ modprobe test_firmware
+--
+2.20.1
+
rtc-msm6242-fix-reading-of-10-hour-digit.patch
rtc-brcmstb-waketimer-add-missed-clk_disable_unprepare.patch
rtc-bd70528-add-module-alias-to-autoload-module.patch
+gpio-mpc8xxx-add-platform-device-to-gpiochip-parent.patch
+scsi-libcxgbi-fix-null-pointer-dereference-in-cxgbi_.patch
+scsi-target-iblock-fix-protection-error-with-blocks-.patch
+selftests-firmware-fix-it-to-do-root-uid-check-and-s.patch
+rseq-selftests-turn-off-timeout-setting.patch
+riscv-export-flush_icache_all-to-modules.patch
+mips-cacheinfo-report-shared-cpu-map.patch
+mips-fix-gettimeofday-in-the-vdso-library.patch
+tomoyo-suppress-rcu-warning-at-list_for_each_entry_r.patch
+mips-prevent-link-failure-with-kcov-instrumentation.patch
+drm-arm-mali-make-malidp_mw_connector_helper_funcs-s.patch
+rxrpc-unlock-new-call-in-rxrpc_new_incoming_call-rat.patch
+rxrpc-don-t-take-call-user_mutex-in-rxrpc_new_incomi.patch
+rxrpc-fix-missing-security-check-on-incoming-calls.patch
+dmaengine-k3dma-avoid-null-pointer-traversal.patch
+s390-qeth-lock-the-card-while-changing-its-hsuid.patch
+ioat-ioat_alloc_ring-failure-handling.patch
+drm-amdgpu-enable-gfxoff-for-raven1-refresh.patch
+media-intel-ipu3-align-struct-ipu3_uapi_awb_fr_confi.patch
+kbuild-deb-pkg-annotate-libelf-dev-dependency-as-nat.patch
+hexagon-parenthesize-registers-in-asm-predicates.patch
+hexagon-work-around-compiler-crash.patch
+ocfs2-call-journal-flush-to-mark-journal-as-empty-af.patch
--- /dev/null
+From b8370b5c2a5c826b550b7d52be3caa2b584e3374 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Dec 2019 19:16:48 +0900
+Subject: tomoyo: Suppress RCU warning at list_for_each_entry_rcu().
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 6bd5ce6089b561f5392460bfb654dea89356ab1b ]
+
+John Garry has reported that an allmodconfig kernel on arm64 causes a
+flood of "RCU-list traversed in non-reader section!!" warnings. I don't
+know what change caused this warning, but it is harmless because TOMOYO
+uses an SRCU lock instead. Let's suppress the warning by explicitly
+telling the list traversal helpers that the caller holds the SRCU lock.
+
+Reported-and-tested-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/tomoyo/common.c | 9 ++++++---
+ security/tomoyo/domain.c | 15 ++++++++++-----
+ security/tomoyo/group.c | 9 ++++++---
+ security/tomoyo/util.c | 6 ++++--
+ 4 files changed, 26 insertions(+), 13 deletions(-)
+
+diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
+index dd3d5942e669..c36bafbcd77e 100644
+--- a/security/tomoyo/common.c
++++ b/security/tomoyo/common.c
+@@ -951,7 +951,8 @@ static bool tomoyo_manager(void)
+ exe = tomoyo_get_exe();
+ if (!exe)
+ return false;
+- list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) {
++ list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (!ptr->head.is_deleted &&
+ (!tomoyo_pathcmp(domainname, ptr->manager) ||
+ !strcmp(exe, ptr->manager->name))) {
+@@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname)
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -EINTR;
+ /* Is there an active domain? */
+- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
++ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ /* Never delete tomoyo_kernel_domain */
+ if (domain == &tomoyo_kernel_domain)
+ continue;
+@@ -2778,7 +2780,8 @@ void tomoyo_check_profile(void)
+
+ tomoyo_policy_loaded = true;
+ pr_info("TOMOYO: 2.6.0\n");
+- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
++ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ const u8 profile = domain->profile;
+ struct tomoyo_policy_namespace *ns = domain->ns;
+
+diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
+index 8526a0a74023..7869d6a9980b 100644
+--- a/security/tomoyo/domain.c
++++ b/security/tomoyo/domain.c
+@@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -ENOMEM;
+- list_for_each_entry_rcu(entry, list, list) {
++ list_for_each_entry_rcu(entry, list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!check_duplicate(entry, new_entry))
+@@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+ }
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+- list_for_each_entry_rcu(entry, list, list) {
++ list_for_each_entry_rcu(entry, list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!tomoyo_same_acl_head(entry, new_entry) ||
+@@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
+ u16 i = 0;
+
+ retry:
+- list_for_each_entry_rcu(ptr, list, list) {
++ list_for_each_entry_rcu(ptr, list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->is_deleted || ptr->type != r->param_type)
+ continue;
+ if (!check_entry(r, ptr))
+@@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition
+ {
+ const struct tomoyo_transition_control *ptr;
+
+- list_for_each_entry_rcu(ptr, list, head.list) {
++ list_for_each_entry_rcu(ptr, list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->head.is_deleted || ptr->type != type)
+ continue;
+ if (ptr->domainname) {
+@@ -735,7 +739,8 @@ retry:
+
+ /* Check 'aggregator' directive. */
+ candidate = &exename;
+- list_for_each_entry_rcu(ptr, list, head.list) {
++ list_for_each_entry_rcu(ptr, list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->head.is_deleted ||
+ !tomoyo_path_matches_pattern(&exename,
+ ptr->original_name))
+diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
+index a37c7dc66e44..1cecdd797597 100644
+--- a/security/tomoyo/group.c
++++ b/security/tomoyo/group.c
+@@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
+ {
+ struct tomoyo_path_group *member;
+
+- list_for_each_entry_rcu(member, &group->member_list, head.list) {
++ list_for_each_entry_rcu(member, &group->member_list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (!tomoyo_path_matches_pattern(pathname, member->member_name))
+@@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min,
+ struct tomoyo_number_group *member;
+ bool matched = false;
+
+- list_for_each_entry_rcu(member, &group->member_list, head.list) {
++ list_for_each_entry_rcu(member, &group->member_list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (min > member->number.values[1] ||
+@@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ bool matched = false;
+ const u8 size = is_ipv6 ? 16 : 4;
+
+- list_for_each_entry_rcu(member, &group->member_list, head.list) {
++ list_for_each_entry_rcu(member, &group->member_list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (member->address.is_ipv6 != is_ipv6)
+diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
+index 52752e1a84ed..eba0b3395851 100644
+--- a/security/tomoyo/util.c
++++ b/security/tomoyo/util.c
+@@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
+
+ name.name = domainname;
+ tomoyo_fill_path_info(&name);
+- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
++ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (!domain->is_deleted &&
+ !tomoyo_pathcmp(&name, domain->domainname))
+ return domain;
+@@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
+ return false;
+ if (!domain)
+ return true;
+- list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
++ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ u16 perm;
+ u8 i;
+
+--
+2.20.1
+
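For reference, the pattern applied throughout this patch is the four-argument form of list_for_each_entry_rcu(), whose optional last argument is a lockdep condition asserting why the traversal is safe. A minimal sketch under assumed names (my_srcu, my_list, struct my_entry), not the TOMOYO data structures:

    /*
     * Illustrative only: walk an RCU-protected list from a section
     * guarded by SRCU instead of rcu_read_lock().  Passing
     * srcu_read_lock_held() as the extra condition tells lockdep the
     * traversal is legitimate and silences the false-positive warning.
     */
    DEFINE_STATIC_SRCU(my_srcu);
    static LIST_HEAD(my_list);

    struct my_entry {
            struct list_head list;
            int value;
    };

    static void my_walk(void)
    {
            struct my_entry *e;
            int idx = srcu_read_lock(&my_srcu);

            list_for_each_entry_rcu(e, &my_list, list,
                                    srcu_read_lock_held(&my_srcu))
                    pr_info("value=%d\n", e->value);

            srcu_read_unlock(&my_srcu, idx);
    }
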