author     Heiko Carstens <heiko.carstens@de.ibm.com>   2006-06-25 14:49:10 +0200
committer  Linus Torvalds <torvalds@g5.osdl.org>        2006-06-25 19:01:22 +0200
commit     fc75cdfa5b43ac4d3232b490800cd35063adafd3 (patch)
tree       0529ffd9633667457d5340a09b3cb352d09436d2 /kernel
parent     [PATCH] fs: sys_poll with timeout -1 bug fix (diff)
download   linux-fc75cdfa5b43ac4d3232b490800cd35063adafd3.tar.xz
           linux-fc75cdfa5b43ac4d3232b490800cd35063adafd3.zip
[PATCH] cpu hotplug: fix CPU_UP_CANCELED handling
If a cpu hotplug callback fails on CPU_UP_PREPARE, all callbacks will be
called with CPU_UP_CANCELED.  A few of these callbacks assume that on
CPU_UP_PREPARE a pointer to a task has been stored in a percpu array.
This assumption does not hold if CPU_UP_PREPARE fails, and the following
calls to kthread_bind() in CPU_UP_CANCELED will cause an addressing
exception because a NULL pointer is passed.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
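The pattern being fixed is the same in all four files below: a per-cpu kthread is
created during CPU_UP_PREPARE and its task_struct pointer is stored in a per-cpu
variable, and CPU_UP_CANCELED then rebinds that thread to an online cpu before
stopping it, without checking whether the creation step ever ran.  A minimal sketch
of the corrected notifier shape, loosely modeled on the ksoftirqd callback of this
kernel era (the example_thread per-cpu variable, example_thread_fn and
example_cpu_callback names are illustrative only, not part of the patch):

#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, example_thread);	/* illustrative */

static int example_thread_fn(void *unused)
{
	/* Placeholder thread body: sleep until asked to stop. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		p = kthread_create(example_thread_fn, hcpu,
				   "example/%d", hotcpu);
		if (IS_ERR(p))
			return NOTIFY_BAD;	/* per-cpu pointer stays NULL */
		kthread_bind(p, hotcpu);
		per_cpu(example_thread, hotcpu) = p;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/*
		 * CPU_UP_PREPARE may have failed in this or an earlier
		 * callback, in which case no thread was ever created for
		 * this cpu.  kthread_bind(NULL, ...) would oops, so check
		 * the per-cpu pointer first -- this is what the patch adds.
		 */
		if (!per_cpu(example_thread, hotcpu))
			break;
		/* Unbind from the not-yet-online cpu so it can run ... */
		kthread_bind(per_cpu(example_thread, hotcpu),
			     any_online_cpu(cpu_online_map));
		/* ... then stop and forget it. */
		kthread_stop(per_cpu(example_thread, hotcpu));
		per_cpu(example_thread, hotcpu) = NULL;
		break;
#endif
	}
	return NOTIFY_OK;
}

The workqueue.c hunk differs only in that the check sits inside a loop over all
workqueues, so it uses continue rather than break.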
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c        2
-rw-r--r--   kernel/softirq.c      2
-rw-r--r--   kernel/softlockup.c   2
-rw-r--r--   kernel/workqueue.c    2
4 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f8d540b324ca..f06d059edef5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4756,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!cpu_rq(cpu)->migration_thread)
+			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 336f92d64e2e..9e2f1c6e73d7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,6 +470,8 @@ static int cpu_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(ksoftirqd, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 2c1be1163edc..b5c3b94e01ce 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -127,6 +127,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(watchdog_task, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f869aff6bc0c..565cf7a1febd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -590,6 +590,8 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));