author		Matthew Wilcox (Oracle) <willy@infradead.org>	2021-03-20 21:34:54 +0100
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-10-18 13:49:39 +0200
commit		be5f1797523004d0d9aaee81a523d86e3b890007 (patch)
tree		79cabae66b7506020d1b220c29a0c1b642867030 /lib
parent		mm/writeback: Rename __add_wb_stat() to wb_stat_mod() (diff)
flex_proportions: Allow N events instead of 1
When batching events (such as writing back N pages in a single I/O), it
is better to do one flex_proportion operation instead of N.  There is
only one caller of __fprop_inc_percpu_max() and it's the one we're going
to change in the next patch, so rename it instead of adding a
compatibility wrapper.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
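As an illustration of the batching win, here is a minimal sketch of a
hypothetical caller (the writeback_done() helper and its nr_pages
parameter are invented for this example; only __fprop_inc_percpu() and
__fprop_add_percpu() come from flex_proportions itself):

	/*
	 * Hypothetical caller that has just written back nr_pages pages
	 * in a single I/O.  Before this patch it needed N operations:
	 *
	 *	while (nr_pages--)
	 *		__fprop_inc_percpu(fg, fl);
	 *
	 * With __fprop_add_percpu() the whole batch is one operation:
	 */
	static void writeback_done(struct fprop_global *fg,
				   struct fprop_local_percpu *fl,
				   long nr_pages)
	{
		__fprop_add_percpu(fg, fl, nr_pages);
	}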
Diffstat (limited to 'lib')
-rw-r--r--	lib/flex_proportions.c	28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 451543937524..53e7eb1dd76c 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -217,11 +217,12 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 }
 
 /* Event of type pl happened */
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+		long nr)
 {
 	fprop_reflect_period_percpu(p, pl);
-	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
-	percpu_counter_add(&p->events, 1);
+	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
+	percpu_counter_add(&p->events, nr);
 }
 
 void fprop_fraction_percpu(struct fprop_global *p,
@@ -253,20 +254,29 @@ void fprop_fraction_percpu(struct fprop_global *p,
 }
 
 /*
- * Like __fprop_inc_percpu() except that event is counted only if the given
+ * Like __fprop_add_percpu() except that event is counted only if the given
  * type has fraction smaller than @max_frac/FPROP_FRAC_BASE
  */
-void __fprop_inc_percpu_max(struct fprop_global *p,
-			    struct fprop_local_percpu *pl, int max_frac)
+void __fprop_add_percpu_max(struct fprop_global *p,
+		struct fprop_local_percpu *pl, int max_frac, long nr)
 {
 	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
 		unsigned long numerator, denominator;
+		s64 tmp;
 
 		fprop_fraction_percpu(p, pl, &numerator, &denominator);
-		if (numerator >
-		    (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
+		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
+		tmp = (u64)denominator * max_frac -
+			((u64)numerator << FPROP_FRAC_SHIFT);
+		if (tmp < 0) {
+			/* Maximum fraction already exceeded? */
 			return;
+		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
+			/* Add just enough for the fraction to saturate */
+			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
+					FPROP_FRAC_BASE - max_frac);
+		}
 	}
 
-	__fprop_inc_percpu(p, pl);
+	__fprop_add_percpu(p, pl, nr);
 }
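For reference, the clamp follows from the fact that counting nr events
adds nr to both the local count (numerator) and the total count
(denominator).  Keeping the new fraction at or below
max_frac/FPROP_FRAC_BASE means requiring
nr * (FPROP_FRAC_BASE - max_frac) <= den * max_frac - (num << FPROP_FRAC_SHIFT),
and the right-hand side is exactly 'tmp'.  The following is a standalone
userspace sketch of the same arithmetic, not kernel code: it uses stdint
types in place of u64/s64, plain division in place of div_u64(), and the
clamp_events() name and the FPROP_FRAC_SHIFT value of 10 are assumptions
for illustration:

	#include <stdint.h>

	#define FPROP_FRAC_SHIFT	10
	#define FPROP_FRAC_BASE		(1UL << FPROP_FRAC_SHIFT)

	/*
	 * Return how many of 'nr' events may be counted without pushing
	 * numerator/denominator above max_frac/FPROP_FRAC_BASE,
	 * mirroring the clamp in __fprop_add_percpu_max() above.
	 */
	static long clamp_events(uint64_t numerator, uint64_t denominator,
				 int max_frac, long nr)
	{
		/* tmp = den * max_frac - num * FPROP_FRAC_BASE */
		int64_t tmp = (int64_t)(denominator * max_frac) -
			      (int64_t)(numerator << FPROP_FRAC_SHIFT);

		if (tmp < 0)
			return 0;	/* already over the limit */
		if (tmp < nr * (long)(FPROP_FRAC_BASE - max_frac))
			/* ceiling division: just enough to saturate */
			nr = (tmp + (long)(FPROP_FRAC_BASE - max_frac) - 1) /
			     (long)(FPROP_FRAC_BASE - max_frac);
		return nr;
	}

For example, with numerator 1, denominator 4, max_frac 512 (half of the
1024 base) and nr 10, tmp is 4 * 512 - 1 * 1024 = 1024, which is smaller
than 10 * 512, so nr is clamped to ceil(1024 / 512) = 2; afterwards the
fraction (1 + 2) / (4 + 2) sits exactly at the 50% limit.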