author    Tejun Heo <tj@kernel.org>    2016-09-16 21:49:34 +0200
committer Tejun Heo <tj@kernel.org>    2016-09-17 19:18:21 +0200
commit    eac0337af12b6a55f08c69429400d6530d602dff (patch)
tree      a154b887be6b2a848d2ca5a83c1efda5a8918c8b /mm
parent    power, workqueue: remove keventd_up() usage (diff)
slab, workqueue: remove keventd_up() usage
Now that workqueue can handle work item queueing from very early during
boot, there is no need to gate schedule_delayed_work_on() while
!keventd_up().  Remove it.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
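For context, a sketch of how start_cpu_timer() reads after this patch,
reconstructed from the hunk below.  The final argument to
schedule_delayed_work_on() falls outside the hunk's trailing context and is
assumed here to be __round_jiffies_relative(HZ, cpu), as in mainline
mm/slab.c.

static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * The keventd_up() gate is gone: work items may now be queued from
	 * very early boot, so the only remaining guard is "not yet set up".
	 */
	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		/* Delay argument assumed from mainline, see note above. */
		schedule_delayed_work_on(cpu, reap_work,
					 __round_jiffies_relative(HZ, cpu));
	}
}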
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	| 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index b67271024135..dc69b6b625b1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -550,12 +550,7 @@ static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
-	/*
-	 * When this gets called from do_initcalls via cpucache_init(),
-	 * init_workqueues() has already run, so keventd will be setup
-	 * at that time.
-	 */
-	if (keventd_up() && reap_work->work.func == NULL) {
+	if (reap_work->work.func == NULL) {
 		init_reap_node(cpu);
 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,