path: root/lib/percpu_counter.c
author    Mingming Cao <cmm@us.ibm.com>    2008-07-12 01:27:31 +0200
committer Theodore Ts'o <tytso@mit.edu>    2008-07-12 01:27:31 +0200
commit    e8ced39d5e8911c662d4d69a342b9d053eaaac4e (patch)
tree      cd4d643bfbd37c67ff9bd2feff2c0b477a56f117 /lib/percpu_counter.c
parent    ext4: Add delayed allocation support in data=writeback mode (diff)
percpu_counter: new function percpu_counter_sum_and_set
Delayed allocation needs to check the number of free blocks on every write. percpu_counter_read_positive() is not quite accurate enough for that, but calling the accurate percpu_counter_sum_positive() frequently is quite expensive.

This patch adds a new function that updates the central counter while summing the per-cpu counters, which improves the accuracy of subsequent percpu_counter_read() calls and reduces how often the expensive percpu_counter_sum() has to be called.

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
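The diffstat below is limited to lib/percpu_counter.c, so the header side of the change is not shown here. A minimal sketch of what the percpu_counter_sum_and_set() wrapper named in the subject presumably looks like, assuming it simply follows the existing __percpu_counter_sum() call convention with set enabled:

	/* Sketch only, assuming the wrapper just passes set == 1:
	 * sum all per-cpu deltas and fold the total into fbc->count,
	 * so later percpu_counter_read() calls see a fresh value. */
	static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
	{
		return __percpu_counter_sum(fbc, 1);
	}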
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--  lib/percpu_counter.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb5..4a8ba4bf5f6f 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 {
 	s64 ret;
 	int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
+		if (set)
+			*pcount = 0;
 	}
+	if (set)
+		fbc->count = ret;
+
 	spin_unlock(&fbc->lock);
 	return ret;
 }
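For illustration only, a hypothetical caller in a delayed-allocation write path (the function name and its logic here are assumptions, not part of this patch) might pair the cheap approximate read with the new synchronizing sum:

	/* Hypothetical sketch: does the filesystem have nblocks free?
	 * The cheap percpu_counter_read_positive() answers most calls
	 * despite its per-cpu staleness; only when it reports a
	 * shortfall do we pay for the accurate sum, which also
	 * refreshes fbc->count for subsequent cheap reads. */
	static int has_free_blocks(struct percpu_counter *free_blocks, s64 nblocks)
	{
		if (percpu_counter_read_positive(free_blocks) >= nblocks)
			return 1;
		return percpu_counter_sum_and_set(free_blocks) >= nblocks;
	}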