author    Linus Torvalds <torvalds@linux-foundation.org> 2017-07-06 17:59:41 +0200
committer Linus Torvalds <torvalds@linux-foundation.org> 2017-07-06 17:59:41 +0200
commit    a4c20b9a574b9720acf6c647eaff5e7e1e688086 (patch)
tree      d25c1d51c6f55de593028fde1276ea9976214014 /lib
parent    Merge branch 'parisc-4.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent    percpu: fix static checker warnings in pcpu_destroy_chunk (diff)
Merge branch 'for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo:
 "These are the percpu changes for the v4.13-rc1 merge window. There are
  a couple of visibility-related changes - tracepoints and allocator
  stats through debugfs - along with __ro_after_init markings and a
  cosmetic rename in percpu_counter.

  Please note that the simple O(#elements_in_the_chunk) area allocator
  used by the percpu allocator is again showing scalability issues,
  primarily with bpf allocating and freeing large numbers of counters.
  Dennis is working on the replacement allocator, and the percpu
  allocator will be seeing increased churn in the coming cycles."

* 'for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: fix static checker warnings in pcpu_destroy_chunk
  percpu: fix early calls for spinlock in pcpu_stats
  percpu: resolve err may not be initialized in pcpu_alloc
  percpu_counter: Rename __percpu_counter_add to percpu_counter_add_batch
  percpu: add tracepoint support for percpu memory
  percpu: expose statistics about percpu memory via debugfs
  percpu: migrate percpu data structures to internal header
  percpu: add missing lockdep_assert_held to func pcpu_free_area
  mark most percpu globals as __ro_after_init
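For context on the "cosmetic rename" mentioned above, here is a minimal, hypothetical sketch of how kernel code exercises the renamed API, percpu_counter_add_batch() (formerly __percpu_counter_add()). The counter name, batch size, and example function are illustrative only, not taken from this series:

#include <linux/percpu_counter.h>
#include <linux/printk.h>
#include <linux/gfp.h>

/* Illustrative counter; any percpu_counter user works the same way. */
static struct percpu_counter events;

static int example_init(void)
{
	int err = percpu_counter_init(&events, 0, GFP_KERNEL);
	if (err)
		return err;

	/*
	 * Old spelling: __percpu_counter_add(&events, 1, 64);
	 * Each CPU accumulates locally and folds into the shared count
	 * only once its local delta reaches the batch (here 64).
	 */
	percpu_counter_add_batch(&events, 1, 64);

	/* percpu_counter_add() keeps its name and uses the default batch. */
	percpu_counter_add(&events, 1);

	/* read() is the fast approximation; sum() folds in all per-CPU deltas. */
	pr_info("approx=%lld exact=%lld\n",
		percpu_counter_read(&events), percpu_counter_sum(&events));

	percpu_counter_destroy(&events);
	return 0;
}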
Diffstat (limited to 'lib')
-rw-r--r--  lib/flex_proportions.c  6
-rw-r--r--  lib/percpu_counter.c    4
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 		if (val < (nr_cpu_ids * PROP_BATCH))
 			val = percpu_counter_sum(&pl->events);
 
-		__percpu_counter_add(&pl->events,
+		percpu_counter_add_batch(&pl->events,
 			-val + (val >> (period-pl->period)), PROP_BATCH);
 	} else
 		percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
 {
 	fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
 		return;
 	} else
 		fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
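The flex_proportions callers above pair a caller-specified batch (PROP_BATCH) on the local counter with a plain percpu_counter_add() on the global one. As a sketch of why only the three-argument entry point is renamed, the default-batch wrapper in include/linux/percpu_counter.h reads roughly as follows after this series (reconstructed here for illustration; it is not part of the lib/ diff shown):

/* Reconstructed sketch, modeled on include/linux/percpu_counter.h. */
extern int percpu_counter_batch;	/* default batch, scaled with CPU count */

void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch);

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	/* The common helper keeps its name; it just supplies the default batch. */
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}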
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..8ee7e5ec21be 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate