Fixes for 5.10
author    Sasha Levin <sashal@kernel.org>  Mon, 24 May 2021 00:40:12 +0000 (20:40 -0400)
committer Sasha Levin <sashal@kernel.org>  Mon, 24 May 2021 00:40:12 +0000 (20:40 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.10/locking-lockdep-correct-calling-tracepoints.patch [new file with mode: 0644]
queue-5.10/locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch [new file with mode: 0644]
queue-5.10/perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch [new file with mode: 0644]
queue-5.10/powerpc-fix-early-setup-to-make-early_ioremap-work.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/locking-lockdep-correct-calling-tracepoints.patch b/queue-5.10/locking-lockdep-correct-calling-tracepoints.patch
new file mode 100644
index 0000000..db72c8e
--- /dev/null
@@ -0,0 +1,56 @@
+From 59b5b3613e92e4e23f35075d20deafa69b864ffb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 May 2021 20:09:37 +0800
+Subject: locking/lockdep: Correct calling tracepoints
+
+From: Leo Yan <leo.yan@linaro.org>
+
+[ Upstream commit 89e70d5c583c55088faa2201d397ee30a15704aa ]
+
+Commit eb1f00237aca ("lockdep,trace: Expose tracepoints") reversed the
+tracepoints for lock_contended() and lock_acquired(), so the ftrace log
+shows the wrong locking sequence, with the "acquired" event appearing
+before the "contended" event:
+
+  <idle>-0       [001] d.s3 20803.501685: lock_acquire: 0000000008b91ab4 &sg_policy->update_lock
+  <idle>-0       [001] d.s3 20803.501686: lock_acquired: 0000000008b91ab4 &sg_policy->update_lock
+  <idle>-0       [001] d.s3 20803.501689: lock_contended: 0000000008b91ab4 &sg_policy->update_lock
+  <idle>-0       [001] d.s3 20803.501690: lock_release: 0000000008b91ab4 &sg_policy->update_lock
+
+Fix this by calling the correct tracepoint in lock_contended() and
+lock_acquired().
+
+Fixes: eb1f00237aca ("lockdep,trace: Expose tracepoints")
+Signed-off-by: Leo Yan <leo.yan@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20210512120937.90211-1-leo.yan@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/lockdep.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 38d7c03e694c..858b96b438ce 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -5664,7 +5664,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
+ {
+       unsigned long flags;
+-      trace_lock_acquired(lock, ip);
++      trace_lock_contended(lock, ip);
+       if (unlikely(!lock_stat || !lockdep_enabled()))
+               return;
+@@ -5682,7 +5682,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
+ {
+       unsigned long flags;
+-      trace_lock_contended(lock, ip);
++      trace_lock_acquired(lock, ip);
+       if (unlikely(!lock_stat || !lockdep_enabled()))
+               return;
+-- 
+2.30.2
+
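With the patch applied, each lock-stat function fires its matching
tracepoint on entry. A condensed view of the fixed functions, taken
from the hunks above with the lock-stat bookkeeping elided:

    void lock_contended(struct lockdep_map *lock, unsigned long ip)
    {
            unsigned long flags;

            /* Fires when contention is detected, before waiting. */
            trace_lock_contended(lock, ip);

            if (unlikely(!lock_stat || !lockdep_enabled()))
                    return;
            /* ... contention bookkeeping elided ... */
    }

    void lock_acquired(struct lockdep_map *lock, unsigned long ip)
    {
            unsigned long flags;

            /* Fires once the contended lock has actually been taken. */
            trace_lock_acquired(lock, ip);

            if (unlikely(!lock_stat || !lockdep_enabled()))
                    return;
            /* ... wait-time bookkeeping elided ... */
    }

An ftrace capture now shows lock_contended before lock_acquired for a
contended lock, matching the real sequence of events.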
diff --git a/queue-5.10/locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch b/queue-5.10/locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch
new file mode 100644
index 0000000..bf9f667
--- /dev/null
@@ -0,0 +1,136 @@
+From 65fbf89e28a98cfd6cedfe7ac8aeb8b84c8ef66a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 11:40:05 +0800
+Subject: locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal
+
+From: Zqiang <qiang.zhang@windriver.com>
+
+[ Upstream commit 3a010c493271f04578b133de977e0e5dd2848cea ]
+
+When an interruptible mutex locker is interrupted by a signal, it is
+removed from the wait queue without having acquired the lock. If the
+mutex is not contended enough for another waiter to be put on the
+wait queue, the leftover WAITER bit forces every subsequent locker
+into the slowpath to acquire the lock. So when the wait queue becomes
+empty, the WAITER bit needs to be cleared.
+
+Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Zqiang <qiang.zhang@windriver.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20210517034005.30828-1-qiang.zhang@windriver.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/mutex-debug.c |  4 ++--
+ kernel/locking/mutex-debug.h |  2 +-
+ kernel/locking/mutex.c       | 18 +++++++++++++-----
+ kernel/locking/mutex.h       |  4 +---
+ 4 files changed, 17 insertions(+), 11 deletions(-)
+
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index a7276aaf2abc..db9301591e3f 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+       task->blocked_on = waiter;
+ }
+-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
++void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+                        struct task_struct *task)
+ {
+       DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
+@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+       DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+       task->blocked_on = NULL;
+-      list_del_init(&waiter->list);
++      INIT_LIST_HEAD(&waiter->list);
+       waiter->task = NULL;
+ }
+diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
+index 1edd3f45a4ec..53e631e1d76d 100644
+--- a/kernel/locking/mutex-debug.h
++++ b/kernel/locking/mutex-debug.h
+@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+ extern void debug_mutex_add_waiter(struct mutex *lock,
+                                  struct mutex_waiter *waiter,
+                                  struct task_struct *task);
+-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
++extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+                               struct task_struct *task);
+ extern void debug_mutex_unlock(struct mutex *lock);
+ extern void debug_mutex_init(struct mutex *lock, const char *name,
+diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
+index 2c25b830203c..15ac7c4bb111 100644
+--- a/kernel/locking/mutex.c
++++ b/kernel/locking/mutex.c
+@@ -204,7 +204,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
+  * Add @waiter to a given location in the lock wait_list and set the
+  * FLAG_WAITERS flag if it's the first waiter.
+  */
+-static void __sched
++static void
+ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+                  struct list_head *list)
+ {
+@@ -215,6 +215,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+               __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
+ }
++static void
++__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
++{
++      list_del(&waiter->list);
++      if (likely(list_empty(&lock->wait_list)))
++              __mutex_clear_flag(lock, MUTEX_FLAGS);
++
++      debug_mutex_remove_waiter(lock, waiter, current);
++}
++
+ /*
+  * Give up ownership to a specific task, when @task = NULL, this is equivalent
+  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
+@@ -1071,9 +1081,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+                       __ww_mutex_check_waiters(lock, ww_ctx);
+       }
+-      mutex_remove_waiter(lock, &waiter, current);
+-      if (likely(list_empty(&lock->wait_list)))
+-              __mutex_clear_flag(lock, MUTEX_FLAGS);
++      __mutex_remove_waiter(lock, &waiter);
+       debug_mutex_free_waiter(&waiter);
+@@ -1090,7 +1098,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ err:
+       __set_current_state(TASK_RUNNING);
+-      mutex_remove_waiter(lock, &waiter, current);
++      __mutex_remove_waiter(lock, &waiter);
+ err_early_kill:
+       spin_unlock(&lock->wait_lock);
+       debug_mutex_free_waiter(&waiter);
+diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
+index 1c2287d3fa71..f0c710b1d192 100644
+--- a/kernel/locking/mutex.h
++++ b/kernel/locking/mutex.h
+@@ -10,12 +10,10 @@
+  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
+  */
+-#define mutex_remove_waiter(lock, waiter, task) \
+-              __list_del((waiter)->list.prev, (waiter)->list.next)
+-
+ #define debug_mutex_wake_waiter(lock, waiter)         do { } while (0)
+ #define debug_mutex_free_waiter(waiter)                       do { } while (0)
+ #define debug_mutex_add_waiter(lock, waiter, ti)      do { } while (0)
++#define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
+ #define debug_mutex_unlock(lock)                      do { } while (0)
+ #define debug_mutex_init(lock, name, key)             do { } while (0)
+-- 
+2.30.2
+
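The core of the fix is the new __mutex_remove_waiter() helper: every
path that removes a waiter, including the signal-interrupted error
path, now clears the mutex flags once the wait queue empties.
Condensed from the hunks above:

    static void
    __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
    {
            list_del(&waiter->list);

            /*
             * If this was the last waiter, clear all MUTEX_FLAGS so a
             * stale WAITERS bit cannot force future lockers into the
             * slowpath forever.
             */
            if (likely(list_empty(&lock->wait_list)))
                    __mutex_clear_flag(lock, MUTEX_FLAGS);

            debug_mutex_remove_waiter(lock, waiter, current);
    }

Previously only the success path in __mutex_lock_common() cleared the
flags; the err: path removed the waiter but left MUTEX_FLAG_WAITERS
set even when the queue became empty.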
diff --git a/queue-5.10/perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch b/queue-5.10/perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch
new file mode 100644
index 0000000..e74d0e6
--- /dev/null
@@ -0,0 +1,44 @@
+From 94285c357fc8550a125c6fe1b6e662e914861ee9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Apr 2021 13:22:46 +0800
+Subject: perf/x86: Avoid touching LBR_TOS MSR for Arch LBR
+
+From: Like Xu <like.xu@linux.intel.com>
+
+[ Upstream commit 3317c26a4b413b41364f2c4b83c778c6aba1576d ]
+
+Architectural LBR does not have MSR_LBR_TOS (0x000001c9). In a guest
+that should support Architectural LBR, check_msr() therefore ends up
+probing the unrelated MSR 0x0 (IA32_P5_MC_ADDR), which is also not
+supported by KVM.
+
+The resulting failure sets x86_pmu.lbr_nr = 0, preventing the guest
+Arch LBR from being initialized. Fix it by skipping this extraneous
+check in intel_pmu_init() for Arch LBR.
+
+Fixes: 47125db27e47 ("perf/x86/intel/lbr: Support Architectural LBR")
+Signed-off-by: Like Xu <like.xu@linux.intel.com>
+[peterz: simpler still]
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20210430052247.3079672-1-like.xu@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 0b9975200ae3..ee659b5faf71 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -5563,7 +5563,7 @@ __init int intel_pmu_init(void)
+        * Check all LBT MSR here.
+        * Disable LBR access if any LBR MSRs can not be accessed.
+        */
+-      if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
++      if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+               x86_pmu.lbr_nr = 0;
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+-- 
+2.30.2
+
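The guard change works because only legacy LBR has a top-of-stack MSR:
there x86_pmu.lbr_tos holds a real MSR number (MSR_LBR_TOS, 0x1c9),
while on Arch LBR it stays 0, so the probe is skipped and x86_pmu.lbr_nr
is preserved. An annotated view of the fixed check, with the field
values as described in the changelog:

    /*
     * Legacy LBR:  lbr_tos == MSR_LBR_TOS (0x1c9) -> probe the MSR and
     *              disable LBR if it is inaccessible.
     * Arch LBR:    lbr_tos == 0 (no TOS MSR)      -> skip the probe, so
     *              check_msr() never touches MSR 0x0 and lbr_nr survives.
     */
    if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
            x86_pmu.lbr_nr = 0;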
diff --git a/queue-5.10/powerpc-fix-early-setup-to-make-early_ioremap-work.patch b/queue-5.10/powerpc-fix-early-setup-to-make-early_ioremap-work.patch
new file mode 100644
index 0000000..f9a0096
--- /dev/null
@@ -0,0 +1,54 @@
+From f0364b818102df582ba6cecde0af1e8637c3c4af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 May 2021 13:29:19 +1000
+Subject: powerpc: Fix early setup to make early_ioremap() work
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+[ Upstream commit e2f5efd0f0e229bd110eab513e7c0331d61a4649 ]
+
+The immediate problem is that after commit
+0bd3f9e953bd ("powerpc/legacy_serial: Use early_ioremap()") the kernel
+silently reboots on some systems.
+
+The reason is that early_ioremap() returns broken addresses: it uses
+the slot_virt[] array, which is initialized with offsets from
+FIXADDR_TOP == IOREMAP_END + FIXADDR_SIZE ==
+KERN_IO_END - FIXADDR_SIZE + FIXADDR_SIZE == __kernel_io_end,
+which is 0 when early_ioremap_setup() is called. __kernel_io_end is
+only initialized a little later, in early_init_mmu().
+
+Fix the initialization order by swapping early_ioremap_setup() and
+early_init_mmu().
+
+Fixes: 265c3491c4bc ("powerpc: Add support for GENERIC_EARLY_IOREMAP")
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+[mpe: Drop unrelated cleanup & cleanup change log]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210520032919.358935-1-aik@ozlabs.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/setup_64.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 3b871ecb3a92..3f8426bccd16 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -368,11 +368,11 @@ void __init early_setup(unsigned long dt_ptr)
+       apply_feature_fixups();
+       setup_feature_keys();
+-      early_ioremap_setup();
+-
+       /* Initialize the hash table or TLB handling */
+       early_init_mmu();
++      early_ioremap_setup();
++
+       /*
+        * After firmware and early platform setup code has set things up,
+        * we note the SPR values for configurable control/performance
+-- 
+2.30.2
+
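The address arithmetic in the changelog reduces as follows; slot_virt[]
entries are fixed offsets below FIXADDR_TOP, which on powerpc64
collapses to __kernel_io_end:

    FIXADDR_TOP = IOREMAP_END + FIXADDR_SIZE
                = (KERN_IO_END - FIXADDR_SIZE) + FIXADDR_SIZE
                = KERN_IO_END
                = __kernel_io_end

Since __kernel_io_end is only assigned in early_init_mmu(), calling
early_ioremap_setup() first computes every slot_virt[] entry as an
offset from zero, and early_ioremap() hands out bogus low addresses.
Running early_init_mmu() first makes the slots valid.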
diff --git a/queue-5.10/series b/queue-5.10/series
index 87db834abde460c8e33a4bd881ce1b319d077e59..0bd2ae9e457a8d9e9e62e17438bbaed0060e3a26 100644
@@ -24,3 +24,7 @@ tools-testing-selftests-exec-fix-link-error.patch
 powerpc-pseries-fix-hcall-tracing-recursion-in-pv-qu.patch
 ptrace-make-ptrace-fail-if-the-tracee-changed-its-pi.patch
 nvmet-seset-ns-file-when-open-fails.patch
+perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch
+locking-lockdep-correct-calling-tracepoints.patch
+locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch
+powerpc-fix-early-setup-to-make-early_ioremap-work.patch