From: Sasha Levin
Date: Mon, 24 May 2021 00:40:12 +0000 (-0400)
Subject: Fixes for 5.12
X-Git-Tag: v4.4.270~69
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=1644afc3e9583cec2801fb8d2dfe4c559280ff79;p=thirdparty%2Fkernel%2Fstable-queue.git

Fixes for 5.12

Signed-off-by: Sasha Levin
---

diff --git a/queue-5.12/locking-lockdep-correct-calling-tracepoints.patch b/queue-5.12/locking-lockdep-correct-calling-tracepoints.patch
new file mode 100644
index 00000000000..1458448bf96
--- /dev/null
+++ b/queue-5.12/locking-lockdep-correct-calling-tracepoints.patch
@@ -0,0 +1,56 @@
+From e5bd1a01972ed0217eca115a60aa81e593fe1a5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 12 May 2021 20:09:37 +0800
+Subject: locking/lockdep: Correct calling tracepoints
+
+From: Leo Yan
+
+[ Upstream commit 89e70d5c583c55088faa2201d397ee30a15704aa ]
+
+Commit eb1f00237aca ("lockdep,trace: Expose tracepoints") swapped the
+tracepoints for lock_contended() and lock_acquired(), so the ftrace
+log shows the wrong locking sequence: the "acquired" event fires
+before the "contended" event:
+
+  <idle>-0  [001] d.s3 20803.501685: lock_acquire: 0000000008b91ab4 &sg_policy->update_lock
+  <idle>-0  [001] d.s3 20803.501686: lock_acquired: 0000000008b91ab4 &sg_policy->update_lock
+  <idle>-0  [001] d.s3 20803.501689: lock_contended: 0000000008b91ab4 &sg_policy->update_lock
+  <idle>-0  [001] d.s3 20803.501690: lock_release: 0000000008b91ab4 &sg_policy->update_lock
+
+This patch corrects the tracepoints called from lock_contended() and
+lock_acquired().
+
+Fixes: eb1f00237aca ("lockdep,trace: Expose tracepoints")
+Signed-off-by: Leo Yan
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20210512120937.90211-1-leo.yan@linaro.org
+Signed-off-by: Sasha Levin
+---
+ kernel/locking/lockdep.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index f160f1c97ca1..f39c383c7180 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -5731,7 +5731,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
+ {
+ 	unsigned long flags;
+ 
+-	trace_lock_acquired(lock, ip);
++	trace_lock_contended(lock, ip);
+ 
+ 	if (unlikely(!lock_stat || !lockdep_enabled()))
+ 		return;
+@@ -5749,7 +5749,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
+ {
+ 	unsigned long flags;
+ 
+-	trace_lock_contended(lock, ip);
++	trace_lock_acquired(lock, ip);
+ 
+ 	if (unlikely(!lock_stat || !lockdep_enabled()))
+ 		return;
+-- 
+2.30.2
+
diff --git a/queue-5.12/locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch b/queue-5.12/locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch
new file mode 100644
index 00000000000..891b9be336f
--- /dev/null
+++ b/queue-5.12/locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch
@@ -0,0 +1,136 @@
+From cae3e5ca6b676880eef932e2eb42bc320ec8d1b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 17 May 2021 11:40:05 +0800
+Subject: locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal
+
+From: Zqiang
+
+[ Upstream commit 3a010c493271f04578b133de977e0e5dd2848cea ]
+
+When an interruptible mutex locker is interrupted by a signal
+without acquiring the lock, it is removed from the wait queue.
+If the mutex then isn't contended enough to put a waiter
+back into the wait queue, the stale WAITER bit forces the
+mutex locker into the slowpath to acquire the lock every
+time, so if the wait queue is empty, the WAITER bit needs
+to be cleared.
+
+Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
+Suggested-by: Peter Zijlstra
+Signed-off-by: Zqiang
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20210517034005.30828-1-qiang.zhang@windriver.com
+Signed-off-by: Sasha Levin
+---
+ kernel/locking/mutex-debug.c |  4 ++--
+ kernel/locking/mutex-debug.h |  2 +-
+ kernel/locking/mutex.c       | 18 +++++++++++++-----
+ kernel/locking/mutex.h       |  4 +---
+ 4 files changed, 17 insertions(+), 11 deletions(-)
+
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index a7276aaf2abc..db9301591e3f 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 	task->blocked_on = waiter;
+ }
+ 
+-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
++void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 			 struct task_struct *task)
+ {
+ 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
+@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+ 	task->blocked_on = NULL;
+ 
+-	list_del_init(&waiter->list);
++	INIT_LIST_HEAD(&waiter->list);
+ 	waiter->task = NULL;
+ }
+ 
+diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
+index 1edd3f45a4ec..53e631e1d76d 100644
+--- a/kernel/locking/mutex-debug.h
++++ b/kernel/locking/mutex-debug.h
+@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+ extern void debug_mutex_add_waiter(struct mutex *lock,
+ 				   struct mutex_waiter *waiter,
+ 				   struct task_struct *task);
+-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
++extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 				struct task_struct *task);
+ extern void debug_mutex_unlock(struct mutex *lock);
+ extern void debug_mutex_init(struct mutex *lock, const char *name,
+diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
+index 622ebdfcd083..3899157c13b1 100644
+--- a/kernel/locking/mutex.c
++++ b/kernel/locking/mutex.c
+@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
+  * Add @waiter to a given location in the lock wait_list and set the
+  * FLAG_WAITERS flag if it's the first waiter.
+  */
+-static void __sched
++static void
+ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 		   struct list_head *list)
+ {
+@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
+ }
+ 
++static void
++__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
++{
++	list_del(&waiter->list);
++	if (likely(list_empty(&lock->wait_list)))
++		__mutex_clear_flag(lock, MUTEX_FLAGS);
++
++	debug_mutex_remove_waiter(lock, waiter, current);
++}
++
+ /*
+  * Give up ownership to a specific task, when @task = NULL, this is equivalent
+  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
+@@ -1061,9 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 			__ww_mutex_check_waiters(lock, ww_ctx);
+ 	}
+ 
+-	mutex_remove_waiter(lock, &waiter, current);
+-	if (likely(list_empty(&lock->wait_list)))
+-		__mutex_clear_flag(lock, MUTEX_FLAGS);
++	__mutex_remove_waiter(lock, &waiter);
+ 
+ 	debug_mutex_free_waiter(&waiter);
+ 
+@@ -1080,7 +1088,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 
+ err:
+ 	__set_current_state(TASK_RUNNING);
+-	mutex_remove_waiter(lock, &waiter, current);
++	__mutex_remove_waiter(lock, &waiter);
+ err_early_kill:
+ 	spin_unlock(&lock->wait_lock);
+ 	debug_mutex_free_waiter(&waiter);
+diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
+index 1c2287d3fa71..f0c710b1d192 100644
+--- a/kernel/locking/mutex.h
++++ b/kernel/locking/mutex.h
+@@ -10,12 +10,10 @@
+  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
+  */
+ 
+-#define mutex_remove_waiter(lock, waiter, task) \
+-	__list_del((waiter)->list.prev, (waiter)->list.next)
+-
+ #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
+ #define debug_mutex_free_waiter(waiter)			do { } while (0)
+ #define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
++#define debug_mutex_remove_waiter(lock, waiter, ti)	do { } while (0)
+ #define debug_mutex_unlock(lock)			do { } while (0)
+ #define debug_mutex_init(lock, name, key)		do { } while (0)
+ 
+-- 
+2.30.2
+
diff --git a/queue-5.12/perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch b/queue-5.12/perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch
new file mode 100644
index 00000000000..4825589cb43
--- /dev/null
+++ b/queue-5.12/perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch
@@ -0,0 +1,44 @@
+From 67eeb42e26f6cfb9f2da92a9ad94cfb2ee99c2f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 30 Apr 2021 13:22:46 +0800
+Subject: perf/x86: Avoid touching LBR_TOS MSR for Arch LBR
+
+From: Like Xu
+
+[ Upstream commit 3317c26a4b413b41364f2c4b83c778c6aba1576d ]
+
+Architectural LBR does not have MSR_LBR_TOS (0x000001c9).
+In a guest that should support Architectural LBR, check_msr()
+ends up performing an unrelated check on MSR 0x0
+(IA32_P5_MC_ADDR), which is also not supported by KVM.
+
+The failure causes x86_pmu.lbr_nr = 0, thereby preventing
+initialization of the guest Arch LBR. Fix it by avoiding
+this extraneous check in intel_pmu_init() for Arch LBR.
+
+Fixes: 47125db27e47 ("perf/x86/intel/lbr: Support Architectural LBR")
+Signed-off-by: Like Xu
+[peterz: simpler still]
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20210430052247.3079672-1-like.xu@linux.intel.com
+Signed-off-by: Sasha Levin
+---
+ arch/x86/events/intel/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index c57ec8e27907..4c18e7fb58f5 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -5741,7 +5741,7 @@ __init int intel_pmu_init(void)
+ 	 * Check all LBT MSR here.
+ 	 * Disable LBR access if any LBR MSRs can not be accessed.
+ 	 */
+-	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
++	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ 		x86_pmu.lbr_nr = 0;
+ 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ 		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+-- 
+2.30.2
+
diff --git a/queue-5.12/powerpc-fix-early-setup-to-make-early_ioremap-work.patch b/queue-5.12/powerpc-fix-early-setup-to-make-early_ioremap-work.patch
new file mode 100644
index 00000000000..4b051702db0
--- /dev/null
+++ b/queue-5.12/powerpc-fix-early-setup-to-make-early_ioremap-work.patch
@@ -0,0 +1,54 @@
+From 0192416ebc73ce9757b60f221c8cde2dd4944586 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 20 May 2021 13:29:19 +1000
+Subject: powerpc: Fix early setup to make early_ioremap() work
+
+From: Alexey Kardashevskiy
+
+[ Upstream commit e2f5efd0f0e229bd110eab513e7c0331d61a4649 ]
+
+The immediate problem is that after commit
+0bd3f9e953bd ("powerpc/legacy_serial: Use early_ioremap()") the kernel
+silently reboots on some systems.
+
+The reason is that early_ioremap() returns broken addresses: it uses the
+slot_virt[] array, which is initialized with offsets from FIXADDR_TOP ==
+IOREMAP_END + FIXADDR_SIZE == KERN_IO_END - FIXADDR_SIZE + FIXADDR_SIZE ==
+__kernel_io_end, which is 0 when early_ioremap_setup() is called.
+__kernel_io_end is initialized a little later, in early_init_mmu().
+
+This fixes the initialization by swapping early_ioremap_setup() and
+early_init_mmu().
+
+Fixes: 265c3491c4bc ("powerpc: Add support for GENERIC_EARLY_IOREMAP")
+Signed-off-by: Alexey Kardashevskiy
+Reviewed-by: Christophe Leroy
+[mpe: Drop unrelated cleanup & cleanup change log]
+Signed-off-by: Michael Ellerman
+Link: https://lore.kernel.org/r/20210520032919.358935-1-aik@ozlabs.ru
+Signed-off-by: Sasha Levin
+---
+ arch/powerpc/kernel/setup_64.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 830fee91b2d9..c914fe8a2c67 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
+ 	apply_feature_fixups();
+ 	setup_feature_keys();
+ 
+-	early_ioremap_setup();
+-
+ 	/* Initialize the hash table or TLB handling */
+ 	early_init_mmu();
+ 
++	early_ioremap_setup();
++
+ 	/*
+ 	 * After firmware and early platform setup code has set things up,
+ 	 * we note the SPR values for configurable control/performance
+-- 
+2.30.2
+
diff --git a/queue-5.12/series b/queue-5.12/series
index f3a744fe6d4..837aac204da 100644
--- a/queue-5.12/series
+++ b/queue-5.12/series
@@ -28,3 +28,7 @@ drm-ttm-do-not-add-non-system-domain-bo-into-swap-li.patch
 powerpc-pseries-fix-hcall-tracing-recursion-in-pv-qu.patch
 ptrace-make-ptrace-fail-if-the-tracee-changed-its-pi.patch
 nvmet-seset-ns-file-when-open-fails.patch
+perf-x86-avoid-touching-lbr_tos-msr-for-arch-lbr.patch
+locking-lockdep-correct-calling-tracepoints.patch
+locking-mutex-clear-mutex_flags-if-wait_list-is-empt.patch
+powerpc-fix-early-setup-to-make-early_ioremap-work.patch