sched/membarrier: reduce the ability to hammer on sys_membarrier
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 4 Feb 2024 15:25:12 +0000 (15:25 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 23 Feb 2024 07:42:32 +0000 (08:42 +0100)
commit 944d5fe50f3f03daacfea16300e656a1691c4a23 upstream.

On some systems, sys_membarrier can be very expensive, causing overall
slowdowns for everything.  So put a lock on the path in order to
serialize the accesses and prevent it from being called at too high a
frequency and saturating the machine.

Reviewed-and-tested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Borislav Petkov <bp@alien8.de>
Fixes: 22e4ebb97582 ("membarrier: Provide expedited private command")
Fixes: c5f58bd58f43 ("membarrier: Provide GLOBAL_EXPEDITED command")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[ converted to explicit mutex_*() calls - cleanup.h is not in this stable
  branch - gregkh ]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
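
The backport note above refers to cleanup.h scope guards: upstream takes
the mutex via guard(mutex), which drops the lock automatically on every
return path, while this stable branch spells out the lock/unlock pairs
seen in the hunks below.  A minimal sketch of the two forms (the function
names here are illustrative, not taken from the patch):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(membarrier_ipi_mutex);

	/* Upstream form: guard(mutex) locks here and unlocks automatically
	 * when the enclosing scope is left, on every return path. */
	static int ipi_path_guarded(void)
	{
		guard(mutex)(&membarrier_ipi_mutex);
		/* ... send IPIs under the lock ... */
		return 0;
	}

	/* Stable-branch form: explicit calls, as in the hunks below. */
	static int ipi_path_explicit(void)
	{
		mutex_lock(&membarrier_ipi_mutex);
		/* ... send IPIs under the lock ... */
		mutex_unlock(&membarrier_ipi_mutex);
		return 0;
	}
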
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index cc7cd512e4e33833d781b5b6686fd6085fbf879f..1b7c3bdba8f752d37178bbd2f8648cc60fe1231b 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -34,6 +34,8 @@
        | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK                \
        | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
 
+static DEFINE_MUTEX(membarrier_ipi_mutex);
+
 static void ipi_mb(void *info)
 {
        smp_mb();       /* IPIs should be serializing but paranoid. */
@@ -119,6 +121,7 @@ static int membarrier_global_expedited(void)
        if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;
 
+       mutex_lock(&membarrier_ipi_mutex);
        cpus_read_lock();
        rcu_read_lock();
        for_each_online_cpu(cpu) {
@@ -165,6 +168,8 @@ static int membarrier_global_expedited(void)
         * rq->curr modification in scheduler.
         */
        smp_mb();       /* exit from system call is not a mb */
+       mutex_unlock(&membarrier_ipi_mutex);
+
        return 0;
 }
 
@@ -208,6 +213,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
        if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;
 
+       mutex_lock(&membarrier_ipi_mutex);
        cpus_read_lock();
 
        if (cpu_id >= 0) {
@@ -280,6 +286,7 @@ out:
         * rq->curr modification in scheduler.
         */
        smp_mb();       /* exit from system call is not a mb */
+       mutex_unlock(&membarrier_ipi_mutex);
 
        return 0;
 }
@@ -321,6 +328,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
         * between threads which are users of @mm has its membarrier state
         * updated.
         */
+       mutex_lock(&membarrier_ipi_mutex);
        cpus_read_lock();
        rcu_read_lock();
        for_each_online_cpu(cpu) {
@@ -337,6 +345,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 
        free_cpumask_var(tmpmask);
        cpus_read_unlock();
+       mutex_unlock(&membarrier_ipi_mutex);
 
        return 0;
 }
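
For context on the hammering this patch guards against, the expedited
commands can be driven from userspace in a tight loop.  A minimal sketch
of such a reproducer (the loop count is illustrative; build with e.g.
cc -O2 hammer.c -o hammer):

	#include <linux/membarrier.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	static int membarrier(int cmd, unsigned int flags, int cpu_id)
	{
		return syscall(__NR_membarrier, cmd, flags, cpu_id);
	}

	int main(void)
	{
		/* The private expedited command must be registered once
		 * per process before it can be used. */
		if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0)) {
			perror("MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED");
			return 1;
		}
		/* Each call IPIs the CPUs running this mm; many processes
		 * doing this concurrently is the load the patch serializes. */
		for (long i = 0; i < 10 * 1000 * 1000; i++)
			membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
		return 0;
	}

With the patch applied, concurrent callers queue on membarrier_ipi_mutex
rather than saturating the machine with overlapping IPI broadcasts.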