1 From: Mingming Cao <cmm@us.ibm.com>
2 Subject: percpu counter: clean up percpu_counter_sum_and_set()
3 References: fate#303783
5 percpu_counter_sum_and_set() and percpu_counter_sum() are the same except
6 that the former updates the global counter after accounting. Since we are
7 taking the fbc->lock to calculate the precise value of the
8 counter in percpu_counter_sum() anyway, it should simply set fbc->count
9 too, as percpu_counter_sum_and_set() does.
11 This patch merges these two interfaces into one.
13 Signed-off-by: Mingming Cao <cmm@us.ibm.com>
14 Acked-by: Jan Kara <jack@suse.cz>
16 fs/ext4/balloc.c | 2 +-
17 include/linux/percpu_counter.h | 12 +++---------
18 lib/percpu_counter.c | 8 +++-----
19 3 files changed, 7 insertions(+), 15 deletions(-)
21 Index: linux-2.6.27-rc3/fs/ext4/balloc.c
22 ===================================================================
23 --- linux-2.6.27-rc3.orig/fs/ext4/balloc.c 2008-08-20 17:25:35.000000000 -0700
24 +++ linux-2.6.27-rc3/fs/ext4/balloc.c 2008-08-21 14:34:32.000000000 -0700
27 if (free_blocks - root_blocks < FBC_BATCH)
29 - percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
30 + percpu_counter_sum(&sbi->s_freeblocks_counter);
32 if (free_blocks <= root_blocks)
33 /* we don't have free space */
34 Index: linux-2.6.27-rc3/include/linux/percpu_counter.h
35 ===================================================================
36 --- linux-2.6.27-rc3.orig/include/linux/percpu_counter.h 2008-08-20 17:25:35.000000000 -0700
37 +++ linux-2.6.27-rc3/include/linux/percpu_counter.h 2008-08-21 14:34:32.000000000 -0700
39 void percpu_counter_destroy(struct percpu_counter *fbc);
40 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
41 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
42 -s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
43 +s64 __percpu_counter_sum(struct percpu_counter *fbc);
45 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
49 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
51 - s64 ret = __percpu_counter_sum(fbc, 0);
52 + s64 ret = __percpu_counter_sum(fbc);
53 return ret < 0 ? 0 : ret;
56 -static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
58 - return __percpu_counter_sum(fbc, 1);
62 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
64 - return __percpu_counter_sum(fbc, 0);
65 + return __percpu_counter_sum(fbc);
68 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
69 Index: linux-2.6.27-rc3/lib/percpu_counter.c
70 ===================================================================
71 --- linux-2.6.27-rc3.orig/lib/percpu_counter.c 2008-08-20 17:25:35.000000000 -0700
72 +++ linux-2.6.27-rc3/lib/percpu_counter.c 2008-08-21 14:34:32.000000000 -0700
74 * Add up all the per-cpu counts, return the result. This is a more accurate
75 * but much slower version of percpu_counter_read_positive()
77 -s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
78 +s64 __percpu_counter_sum(struct percpu_counter *fbc)
83 for_each_online_cpu(cpu) {
84 s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
94 spin_unlock(&fbc->lock);