From: Mingming Cao <cmm@us.ibm.com>
Subject: percpu counter: clean up percpu_counter_sum_and_set()
References: fate#303783

percpu_counter_sum_and_set() and percpu_counter_sum() are identical except
that the former updates the global counter after accounting. Since we
already take fbc->lock to calculate the precise value of the counter in
percpu_counter_sum() anyway, it should simply set fbc->count too, just as
percpu_counter_sum_and_set() does.

This patch merges the two interfaces into one.
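
For reference, here is a sketch of the consolidated helper as it reads once
the patch below is applied. It is assembled from the two lib/percpu_counter.c
hunks; the lock acquisition and the "ret = fbc->count" seed fall between the
hunks and are reconstructed from the surrounding kernel code of that era, so
treat this as a reading aid rather than verbatim source:

    s64 __percpu_counter_sum(struct percpu_counter *fbc)
    {
            s64 ret;
            int cpu;

            spin_lock(&fbc->lock);
            ret = fbc->count;                /* start from the global value */
            for_each_online_cpu(cpu) {
                    s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                    ret += *pcount;          /* fold in this CPU's delta */
                    *pcount = 0;             /* ...and reset it */
            }
            fbc->count = ret;                /* now written back unconditionally */
            spin_unlock(&fbc->lock);
            return ret;
    }
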
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Acked-by: Jan Kara <jack@suse.cz>
---
 fs/ext4/balloc.c               |    2 +-
 include/linux/percpu_counter.h |   12 +++---------
 lib/percpu_counter.c           |    8 +++-----
 3 files changed, 7 insertions(+), 15 deletions(-)

Index: linux-2.6.27-rc3/fs/ext4/balloc.c
===================================================================
--- linux-2.6.27-rc3.orig/fs/ext4/balloc.c	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/fs/ext4/balloc.c	2008-08-21 14:34:32.000000000 -0700
@@ -1624,7 +1624,7 @@
 #ifdef CONFIG_SMP
 	if (free_blocks - root_blocks < FBC_BATCH)
 		free_blocks =
-			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
+			percpu_counter_sum(&sbi->s_freeblocks_counter);
 #endif
 	if (free_blocks <= root_blocks)
 		/* we don't have free space */
Index: linux-2.6.27-rc3/include/linux/percpu_counter.h
===================================================================
--- linux-2.6.27-rc3.orig/include/linux/percpu_counter.h	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/include/linux/percpu_counter.h	2008-08-21 14:34:32.000000000 -0700
@@ -35,7 +35,7 @@
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,19 +44,13 @@
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc, 0);
+	s64 ret = __percpu_counter_sum(fbc);
 	return ret < 0 ? 0 : ret;
 }
 
-static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum(fbc, 1);
-}
-
-
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc, 0);
+	return __percpu_counter_sum(fbc);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
Index: linux-2.6.27-rc3/lib/percpu_counter.c
===================================================================
--- linux-2.6.27-rc3.orig/lib/percpu_counter.c	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/lib/percpu_counter.c	2008-08-21 14:34:32.000000000 -0700
@@ -52,7 +52,7 @@
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -62,11 +62,9 @@
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
+		*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
+	fbc->count = ret;
 
 	spin_unlock(&fbc->lock);
 	return ret;
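
Caller-side, the conversion is mechanical: any site that wanted the precise
value plus the write-back now simply calls percpu_counter_sum(). A minimal
sketch of the "approximate first, exact near the boundary" pattern that the
ext4 hunk above follows; sbi, root_blocks and the surrounding declarations
are illustrative context, while the percpu_counter calls and FBC_BATCH are
the real 2.6.27-era API:

    s64 free_blocks, root_blocks;

    /* Cheap, possibly stale read of the counter. */
    free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
    root_blocks = ext4_r_blocks_count(sbi->s_es);

    /* Within one batch of the threshold the cached value cannot be
     * trusted, so pay for the exact sum -- which, after this patch,
     * also refreshes fbc->count as a side effect. */
    if (free_blocks - root_blocks < FBC_BATCH)
            free_blocks = percpu_counter_sum(&sbi->s_freeblocks_counter);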