lib/percpu_counter: percpu_counter_add_batch() overflow/underflow
author    Manfred Spraul <manfred@colorfullife.com>
          Fri, 16 Dec 2022 15:04:39 +0000 (16:04 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 3 Feb 2023 06:50:01 +0000 (22:50 -0800)
Patch series "various irq handling fixes/docu updates".

If an interrupt happens between __this_cpu_read(*fbc->counters) and
this_cpu_add(*fbc->counters, amount), and that interrupt modifies the
per-CPU counter, then the fast-path this_cpu_add() executed after the
interrupt returns was chosen based on a stale read: it can push the
per-CPU counter past the batch limit, or even under/overflow it.
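
To make the window concrete, here is a minimal userspace sketch of the
race (hypothetical names, not kernel code; a deterministic "interrupt"
callback stands in for a real IRQ). One call fires the fake interrupt
between the read of the per-CPU delta and the write-back; both calls
then take the fast path, and the delta ends up at twice the batch
without ever being folded into the global count:

    #include <stdio.h>
    #include <stdlib.h>

    static long long global_count;  /* analogue of fbc->count     */
    static long long percpu_delta;  /* analogue of *fbc->counters */
    static const long long batch = 32;

    static void add_batch_buggy(long long amount, void (*irq)(void))
    {
        /* __this_cpu_read(*fbc->counters) + amount */
        long long count = percpu_delta + amount;

        if (irq)
            irq();  /* interrupt hits inside the race window */

        if (llabs(count) >= batch) {
            /* slow path: fold the (now stale) count into the global */
            global_count += count;
            percpu_delta -= count - amount;
        } else {
            /* fast path: the decision was based on a stale read */
            percpu_delta += amount;
        }
    }

    static void irq_handler(void)
    {
        /* the interrupt updates the same per-CPU counter */
        add_batch_buggy(31, NULL);
    }

    int main(void)
    {
        add_batch_buggy(31, irq_handler);
        /* prints percpu_delta=62 (batch=32): past the limit */
        printf("percpu_delta=%lld (batch=%lld)\n", percpu_delta, batch);
        return 0;
    }

With the fix below, the read, the batch decision, and the update all
happen with interrupts masked, so this interleaving cannot occur.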

Link: https://lkml.kernel.org/r/20221216150155.200389-1-manfred@colorfullife.com
Link: https://lkml.kernel.org/r/20221216150441.200533-1-manfred@colorfullife.com
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: "Sun, Jiebin" <jiebin.sun@intel.com>
Cc: <1vier1@web.de>
Cc: Alexander Sverdlin <alexander.sverdlin@siemens.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 42f729c8e56c4033c12239e1e559afe2b4b1d19f..dba56c5c1837912132b5e01bd66eba8a8e3a01fa 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -73,28 +73,33 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 EXPORT_SYMBOL(percpu_counter_set);
 
 /*
- * This function is both preempt and irq safe. The former is due to explicit
- * preemption disable. The latter is guaranteed by the fact that the slow path
- * is explicitly protected by an irq-safe spinlock whereas the fast patch uses
- * this_cpu_add which is irq-safe by definition. Hence there is no need muck
- * with irq state before calling this one
+ * local_irq_save() is needed to make the function irq safe:
+ * - The slow path would be ok as protected by an irq-safe spinlock.
+ * - this_cpu_add would be ok as it is irq-safe by definition.
+ * But:
+ * The decision slow path/fast path and the actual update must be atomic, too.
+ * Otherwise a call in process context could check the current values and
+ * decide that the fast path can be used. If now an interrupt occurs before
+ * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
+ * then the this_cpu_add() that is executed after the interrupt has completed
+ * can produce values larger than "batch" or even overflows.
  */
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
        s64 count;
+       unsigned long flags;
 
-       preempt_disable();
+       local_irq_save(flags);
        count = __this_cpu_read(*fbc->counters) + amount;
        if (abs(count) >= batch) {
-               unsigned long flags;
-               raw_spin_lock_irqsave(&fbc->lock, flags);
+               raw_spin_lock(&fbc->lock);
                fbc->count += count;
                __this_cpu_sub(*fbc->counters, count - amount);
-               raw_spin_unlock_irqrestore(&fbc->lock, flags);
+               raw_spin_unlock(&fbc->lock);
        } else {
                this_cpu_add(*fbc->counters, amount);
        }
-       preempt_enable();
+       local_irq_restore(flags);
 }
 EXPORT_SYMBOL(percpu_counter_add_batch);
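
A hedged usage sketch (caller code assumed, not part of the patch):
with this change, a single counter can be updated with a custom batch
from both process and interrupt context without the per-CPU delta
escaping the batch bound:

    struct percpu_counter events;

    /* setup, e.g. at module init */
    if (percpu_counter_init(&events, 0, GFP_KERNEL))
        return -ENOMEM;

    /* process context */
    percpu_counter_add_batch(&events, 1, 64);

    /* interrupt context, possibly on the same CPU */
    percpu_counter_add_batch(&events, -1, 64);

    /* fast approximate read vs. exact sum */
    pr_info("approx=%lld exact=%lld\n",
            percpu_counter_read(&events),
            percpu_counter_sum(&events));

    /* teardown */
    percpu_counter_destroy(&events);

Disabling interrupts on the fast path costs slightly more than the old
preempt_disable(), but it makes the batch check and the counter update
atomic with respect to interrupts, which is the property the old code
lacked.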