]> git.ipfire.org Git - people/teissler/ipfire-2.x.git/blobdiff - src/patches/suse-2.6.27.31/patches.fixes/percpu_counter_sum_cleanup.patch
Revert "Move xen patchset to new version's subdir."
[people/teissler/ipfire-2.x.git] / src / patches / suse-2.6.27.31 / patches.fixes / percpu_counter_sum_cleanup.patch
diff --git a/src/patches/suse-2.6.27.31/patches.fixes/percpu_counter_sum_cleanup.patch b/src/patches/suse-2.6.27.31/patches.fixes/percpu_counter_sum_cleanup.patch
deleted file mode 100644 (file)
index 8962ab2..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-From: Mingming Cao <cmm@us.ibm.com>
-Subject: percpu counter: clean up percpu_counter_sum_and_set()
-References: fate#303783
-
-percpu_counter_sum_and_set() and percpu_counter_sum() are the same except
-the former updates the global counter after accounting.  Since we are
-taking the fbc->lock to calculate the precise value of the
-counter in percpu_counter_sum() anyway, it should simply set fbc->count
-too, as percpu_counter_sum_and_set() does.
-
-This patch merges these two interfaces into one.
-Signed-off-by: Mingming Cao <cmm@us.ibm.com>
-Acked-by: Jan Kara <jack@suse.cz>
----
- fs/ext4/balloc.c               |    2 +-
- include/linux/percpu_counter.h |   12 +++---------
- lib/percpu_counter.c           |    8 +++-----
- 3 files changed, 7 insertions(+), 15 deletions(-)
-
-Index: linux-2.6.27-rc3/fs/ext4/balloc.c
-===================================================================
---- linux-2.6.27-rc3.orig/fs/ext4/balloc.c     2008-08-20 17:25:35.000000000 -0700
-+++ linux-2.6.27-rc3/fs/ext4/balloc.c  2008-08-21 14:34:32.000000000 -0700
-@@ -1624,7 +1624,7 @@
- #ifdef CONFIG_SMP
-       if (free_blocks - root_blocks < FBC_BATCH)
-               free_blocks =
--                      percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
-+                      percpu_counter_sum(&sbi->s_freeblocks_counter);
- #endif
-       if (free_blocks <= root_blocks)
-               /* we don't have free space */
-Index: linux-2.6.27-rc3/include/linux/percpu_counter.h
-===================================================================
---- linux-2.6.27-rc3.orig/include/linux/percpu_counter.h       2008-08-20 17:25:35.000000000 -0700
-+++ linux-2.6.27-rc3/include/linux/percpu_counter.h    2008-08-21 14:34:32.000000000 -0700
-@@ -35,7 +35,7 @@
- void percpu_counter_destroy(struct percpu_counter *fbc);
- void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
- void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
--s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
-+s64 __percpu_counter_sum(struct percpu_counter *fbc);
- static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
- {
-@@ -44,19 +44,13 @@
- static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
- {
--      s64 ret = __percpu_counter_sum(fbc, 0);
-+      s64 ret = __percpu_counter_sum(fbc);
-       return ret < 0 ? 0 : ret;
- }
--static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
--{
--      return __percpu_counter_sum(fbc, 1);
--}
--
--
- static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
- {
--      return __percpu_counter_sum(fbc, 0);
-+      return __percpu_counter_sum(fbc);
- }
- static inline s64 percpu_counter_read(struct percpu_counter *fbc)
-Index: linux-2.6.27-rc3/lib/percpu_counter.c
-===================================================================
---- linux-2.6.27-rc3.orig/lib/percpu_counter.c 2008-08-20 17:25:35.000000000 -0700
-+++ linux-2.6.27-rc3/lib/percpu_counter.c      2008-08-21 14:34:32.000000000 -0700
-@@ -52,7 +52,7 @@
-  * Add up all the per-cpu counts, return the result.  This is a more accurate
-  * but much slower version of percpu_counter_read_positive()
-  */
--s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
-+s64 __percpu_counter_sum(struct percpu_counter *fbc)
- {
-       s64 ret;
-       int cpu;
-@@ -62,11 +62,9 @@
-       for_each_online_cpu(cpu) {
-               s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
-               ret += *pcount;
--              if (set)
--                      *pcount = 0;
-+              *pcount = 0;
-       }
--      if (set)
--              fbc->count = ret;
-+      fbc->count = ret;
-       spin_unlock(&fbc->lock);
-       return ret;