percpu_counter: new function percpu_counter_sum_and_set
author    Mingming Cao <cmm@us.ibm.com>       Fri, 11 Jul 2008 23:27:31 +0000 (19:27 -0400)
committer Theodore Ts'o <tytso@mit.edu>       Fri, 11 Jul 2008 23:27:31 +0000 (19:27 -0400)
Delayed allocation needs to check the free block count at every
write. percpu_counter_read_positive() is not quite accurate enough
for that, but calling percpu_counter_sum_positive() on every write
is quite expensive.

This patch adds a new function that updates the central counter
while summing the per-cpu counters, improving the accuracy of
subsequent percpu_counter_read() calls and reducing how often the
expensive percpu_counter_sum() has to be called.
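
As a rough illustration of the sum-and-set semantics, here is a
minimal userspace sketch (not the kernel code, which takes the
counter's spinlock; demo_counter and NR_DEMO_CPUS are invented
names for this example):

#include <stdio.h>

#define NR_DEMO_CPUS 4

struct demo_counter {
	long long count;              /* central counter */
	int counters[NR_DEMO_CPUS];   /* per-cpu deltas  */
};

/* Fast but possibly stale: ignores unflushed per-cpu deltas. */
static long long demo_read(struct demo_counter *c)
{
	return c->count;
}

/* Accurate sum; with set != 0, also fold the deltas back into the
 * central count so the next demo_read() is accurate as well. */
static long long demo_sum(struct demo_counter *c, int set)
{
	long long ret = c->count;
	int cpu;

	for (cpu = 0; cpu < NR_DEMO_CPUS; cpu++) {
		ret += c->counters[cpu];
		if (set)
			c->counters[cpu] = 0;
	}
	if (set)
		c->count = ret;
	return ret;
}

int main(void)
{
	struct demo_counter c = { 100, { 3, -1, 2, 0 } };

	printf("read before: %lld\n", demo_read(&c));   /* 100 (stale) */
	printf("sum_and_set: %lld\n", demo_sum(&c, 1)); /* 104 */
	printf("read after:  %lld\n", demo_read(&c));   /* 104 (now exact) */
	return 0;
}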

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
fs/ext4/balloc.c
include/linux/percpu_counter.h
lib/percpu_counter.c

diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 25f63d8c1b3da0890901c11182839df1f453f848..6369bacf0dcb8594bb2e7bd469696b2b56d511e7 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -1621,7 +1621,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
 #ifdef CONFIG_SMP
        if (free_blocks - root_blocks < FBC_BATCH)
                free_blocks =
-                       percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
+                       percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
 #endif
        if (free_blocks - root_blocks < nblocks)
                return free_blocks - root_blocks;
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 9007ccdfc1127cfe73db03e31dd82a843f8f4fa8..20838883535704c89cf72bafe29a28d12dea45e3 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-       s64 ret = __percpu_counter_sum(fbc);
+       s64 ret = __percpu_counter_sum(fbc, 0);
        return ret < 0 ? 0 : ret;
 }
 
+static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
+{
+       return __percpu_counter_sum(fbc, 1);
+}
+
+
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-       return __percpu_counter_sum(fbc);
+       return __percpu_counter_sum(fbc, 0);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb5c096eaf5b1da239dbd5a4040ebc0..4a8ba4bf5f6f2b1c0de7d16f794d6d39cbb00d31 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 {
        s64 ret;
        int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
+               if (set)
+                       *pcount = 0;
        }
+       if (set)
+               fbc->count = ret;
+
        spin_unlock(&fbc->lock);
        return ret;
 }
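
For reference, the ext4 hunk above uses the new function in a
check-cheap-then-sum pattern; a condensed sketch of that pattern
follows (have_room() and limit are illustrative names, not symbols
from the patch):

#include <linux/percpu_counter.h>

static int have_room(struct percpu_counter *free, s64 limit)
{
	s64 n = percpu_counter_read_positive(free);

	/* The cheap read may lag behind by unflushed per-cpu deltas,
	 * so pay for the exact sum only when the result is too close
	 * to call; sum_and_set also refreshes the central count, so
	 * the next cheap read is accurate again. */
	if (n - limit < FBC_BATCH)
		n = percpu_counter_sum_and_set(free);

	return n >= limit;
}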