percpu_counter: extend _limited_add() to negative amounts
author     Hugh Dickins <hughd@google.com>
           Thu, 12 Oct 2023 04:40:09 +0000 (21:40 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 18 Oct 2023 21:34:14 +0000 (14:34 -0700)
Though tmpfs does not need it, percpu_counter_limited_add() can be twice
as useful if it works sensibly with negative amounts (subs) - typically
decrements towards a limit of 0 or nearby: as suggested by Dave Chinner.

And in the course of that reworking, skip the percpu counter sum if it is
already obvious that the limit would be passed: as suggested by Tim Chen.

Extend the comment above __percpu_counter_limited_add(), defining the
behaviour with positive and negative amounts, allowing negative limits,
but not bothering about overflow beyond S64_MAX.
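
For illustration only, a minimal sketch of both directions of use, with a
hypothetical counter and limit (not taken from this patch): a positive amount
is refused once it would lift the total above the limit, and a negative amount
is refused once it would drop the total below it.

    /* Hypothetical caller: charge one unit, but never exceed "max" */
    if (!percpu_counter_limited_add(&counter, max, +1))
            return -ENOSPC;
    /* Hypothetical caller: uncharge one unit, but never go below 0 */
    if (!percpu_counter_limited_add(&counter, 0, -1))
            WARN_ON_ONCE(1);        /* accounting underflow would be a bug */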

Link: https://lkml.kernel.org/r/8f86083b-c452-95d4-365b-f16a2e4ebcd4@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Carlos Maiolino <cem@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Tim Chen <tim.c.chen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/percpu_counter.h
lib/percpu_counter.c

diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 8cb7c071bd5c428131db7869d402f7720b3b87f8..3a44dd1e33d241589fce9fb8b629cd6fa32c6a12 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -198,14 +198,21 @@ static inline bool
 percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
 {
        unsigned long flags;
+       bool good = false;
        s64 count;
 
+       if (amount == 0)
+               return true;
+
        local_irq_save(flags);
        count = fbc->count + amount;
-       if (count <= limit)
+       if ((amount > 0 && count <= limit) ||
+           (amount < 0 && count >= limit)) {
                fbc->count = count;
+               good = true;
+       }
        local_irq_restore(flags);
-       return count <= limit;
+       return good;
 }
 
 /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
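
As a cross-check of the !SMP inline above, a self-contained userspace sketch
of the same decision logic (assumptions: no irq masking, s64 approximated as
long long, names hypothetical):

    #include <assert.h>
    #include <stdbool.h>

    typedef long long s64;

    static s64 fbc_count;       /* stands in for fbc->count */

    static bool limited_add_sketch(s64 limit, s64 amount)
    {
            s64 count = fbc_count + amount;

            if (amount == 0)
                    return true;
            if ((amount > 0 && count <= limit) ||
                (amount < 0 && count >= limit)) {
                    fbc_count = count;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            fbc_count = 95;
            assert(limited_add_sketch(100, +5));    /* 100 <= 100: added */
            assert(!limited_add_sketch(100, +1));   /* 101 > 100: refused */
            fbc_count = 3;
            assert(!limited_add_sketch(0, -4));     /* -1 < 0: refused */
            assert(limited_add_sketch(0, -3));      /* 0 >= 0: added */
            return 0;
    }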
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 58a3392f471b97dbb9ac9641742314bef2c753e5..44dd133594d4d6a005321d27acbf76f9aa1d4c95 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -279,8 +279,16 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 EXPORT_SYMBOL(__percpu_counter_compare);
 
 /*
- * Compare counter, and add amount if the total is within limit.
- * Return true if amount was added, false if it would exceed limit.
+ * Compare counter, and add amount if total is: less than or equal to limit if
+ * amount is positive, or greater than or equal to limit if amount is negative.
+ * Return true if amount is added, or false if total would be beyond the limit.
+ *
+ * Negative limit is allowed, but unusual.
+ * When negative amounts (subs) are given to percpu_counter_limited_add(),
+ * the limit would most naturally be 0 - but other limits are also allowed.
+ *
+ * Overflow beyond S64_MAX is not allowed for: counter, limit and amount
+ * are all assumed to be sane (far from S64_MIN and S64_MAX).
  */
 bool __percpu_counter_limited_add(struct percpu_counter *fbc,
                                  s64 limit, s64 amount, s32 batch)
@@ -288,10 +296,10 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
        s64 count;
        s64 unknown;
        unsigned long flags;
-       bool good;
+       bool good = false;
 
-       if (amount > limit)
-               return false;
+       if (amount == 0)
+               return true;
 
        local_irq_save(flags);
        unknown = batch * num_online_cpus();
@@ -299,7 +307,8 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
 
        /* Skip taking the lock when safe */
        if (abs(count + amount) <= batch &&
-           fbc->count + unknown <= limit) {
+           ((amount > 0 && fbc->count + unknown <= limit) ||
+            (amount < 0 && fbc->count - unknown >= limit))) {
                this_cpu_add(*fbc->counters, amount);
                local_irq_restore(flags);
                return true;
@@ -309,7 +318,19 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
        count = fbc->count + amount;
 
        /* Skip percpu_counter_sum() when safe */
-       if (count + unknown > limit) {
+       if (amount > 0) {
+               if (count - unknown > limit)
+                       goto out;
+               if (count + unknown <= limit)
+                       good = true;
+       } else {
+               if (count + unknown < limit)
+                       goto out;
+               if (count - unknown >= limit)
+                       good = true;
+       }
+
+       if (!good) {
                s32 *pcount;
                int cpu;
 
@@ -317,15 +338,20 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
                        pcount = per_cpu_ptr(fbc->counters, cpu);
                        count += *pcount;
                }
+               if (amount > 0) {
+                       if (count > limit)
+                               goto out;
+               } else {
+                       if (count < limit)
+                               goto out;
+               }
+               good = true;
        }
 
-       good = count <= limit;
-       if (good) {
-               count = __this_cpu_read(*fbc->counters);
-               fbc->count += count + amount;
-               __this_cpu_sub(*fbc->counters, count);
-       }
-
+       count = __this_cpu_read(*fbc->counters);
+       fbc->count += count + amount;
+       __this_cpu_sub(*fbc->counters, count);
+out:
        raw_spin_unlock(&fbc->lock);
        local_irq_restore(flags);
        return good;
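
To make the sum-skip above concrete: unknown = batch * num_online_cpus()
bounds how far the per-cpu deltas can move the true total away from
fbc->count, so the tentative total count = fbc->count + amount can often be
classified without summing. A hedged sketch of just that ladder for the
amount > 0 arm (the amount < 0 arm mirrors it with the signs and comparisons
flipped; the helper and its names are hypothetical, not part of the patch):

    typedef long long s64;

    enum verdict { OVER_LIMIT, WITHIN_LIMIT, MUST_SUM };

    /* count is the tentative total fbc->count + amount, amount > 0 */
    static enum verdict classify_pos(s64 count, s64 unknown, s64 limit)
    {
            if (count - unknown > limit)    /* even the smallest possible total overshoots */
                    return OVER_LIMIT;      /* -> goto out, return false */
            if (count + unknown <= limit)   /* even the largest possible total fits */
                    return WITHIN_LIMIT;    /* -> good = true, sum skipped */
            return MUST_SUM;                /* only the per-cpu sum can decide */
    }

Only the MUST_SUM case pays for walking the per-cpu counters; the other two
return immediately, which is the optimization suggested by Tim Chen.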