author | Gautham R Shenoy <ego@in.ibm.com> | 2007-05-09 11:34:03 +0200 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 21:30:51 +0200 |
commit | baaca49f415b25fdbe2a8f3c22b39929e450fbfd (patch) | |
tree | b152b4235fc07fe56619bd3a2e975e5ca90b8c5b /kernel/cpu.c | |
parent | Extend notifier_call_chain to count nr_calls made (diff) | |
Define and use new events, CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE
This is an attempt to provide an alternative mechanism for postponing
a hotplug event, instead of using a global mechanism like lock_cpu_hotplug.
The proposal is to add two new events, namely CPU_LOCK_ACQUIRE and
CPU_LOCK_RELEASE. The notifications for these two events are sent out
before and after a CPU hotplug operation, respectively.
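The new events sit alongside the existing CPU_* notifier events in
include/linux/notifier.h. As a rough sketch of the definitions (the numeric
values below are an assumption for illustration, not quoted from the header):

```c
/* Sketch only: the 0x0008/0x0009 codes are assumed here, chosen as the
 * next free CPU_* values after CPU_DEAD; see include/linux/notifier.h
 * for the actual definitions. */
#define CPU_LOCK_ACQUIRE	0x0008	/* Acquire all hotcpu locks */
#define CPU_LOCK_RELEASE	0x0009	/* Release all hotcpu locks */
```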
During the CPU_LOCK_ACQUIRE event, a cpu-hotplug-aware subsystem is
expected to acquire its per-subsystem hotcpu mutex (e.g. workqueue_mutex
in kernel/workqueue.c).
During the CPU_LOCK_RELEASE event, the cpu-hotplug-aware subsystem is
expected to release that per-subsystem hotcpu mutex.
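As an illustration, a cpu-hotplug-aware subsystem's CPU notifier would handle
the two new events roughly as below. This is only a sketch loosely modeled on
the workqueue notifier in kernel/workqueue.c; the rest of that callback is
abbreviated and its actual changes are not part of this diff:

```c
/* Sketch: loosely modeled on workqueue_cpu_callback() in
 * kernel/workqueue.c; the remaining CPU_* cases are omitted. */
static int workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_LOCK_ACQUIRE:
		/* Sent before CPU_UP_PREPARE/CPU_DOWN_PREPARE: take the
		 * per-subsystem hotcpu mutex for the whole operation. */
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		/* Sent after CPU_ONLINE/CPU_DEAD/CPU_DOWN_FAILED/
		 * CPU_UP_CANCELED: drop the mutex again. */
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;
	}

	/* Handling of the other CPU_* events is unchanged (omitted). */
	return NOTIFY_OK;
}
```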
The reasons for defining new events, as opposed to reusing existing events
like CPU_UP_PREPARE/CPU_UP_CANCELED/CPU_ONLINE for locking/unlocking of
per-subsystem hotcpu mutexes, are as follows (the resulting ordering is
sketched after this list):
- CPU_LOCK_ACQUIRE: All hotcpu mutexes are taken before subsystems
start handling pre-hotplug events like CPU_UP_PREPARE/CPU_DOWN_PREPARE
etc., thus ensuring clean handling of these events.
- CPU_LOCK_RELEASE: The hotcpu mutexes are released only after
all subsystems have handled post-hotplug events like CPU_DOWN_FAILED,
CPU_DEAD, CPU_ONLINE etc., thereby ensuring that there are no subsequent
clashes amongst the interdependent subsystems once a CPU hotplug operation
has completed.
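Put together, a CPU offline under this scheme delivers notifications in the
following order (a simplified sketch; a failed offline delivers
CPU_DOWN_FAILED instead of CPU_DEAD, but the CPU_LOCK_* bracketing stays the
same):

```
CPU_LOCK_ACQUIRE   every registered subsystem takes its hotcpu mutex
CPU_DOWN_PREPARE   pre-hotplug handling, all mutexes already held
   ... the CPU is actually taken down ...
CPU_DEAD           post-hotplug handling, mutexes still held
CPU_LOCK_RELEASE   every subsystem drops its hotcpu mutex
```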
This patch also uses __raw_notifier_call_chain in _cpu_up to take care
of the dependency between the two consecutive calls to
raw_notifier_call_chain.
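For reference, the prototype added by the parent patch ("Extend
notifier_call_chain to count nr_calls made") looks roughly like this; the
exact declaration lives in include/linux/notifier.h and is an assumption
here, inferred from how the diff below calls it:

```c
/* Call at most nr_to_call notifiers on the chain (-1 means all) and,
 * if nr_calls is non-NULL, report how many were actually invoked. */
int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			      unsigned long val, void *v,
			      int nr_to_call, int *nr_calls);
```

In _cpu_up below, CPU_UP_PREPARE is sent with nr_to_call == -1 and &nr_calls,
and on failure CPU_UP_CANCELED is resent with nr_to_call == nr_calls, so the
cancellation only reaches the notifiers that actually saw the prepare event.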
[akpm@linux-foundation.org: fix a bug]
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r-- | kernel/cpu.c | 19 |
1 file changed, 14 insertions, 5 deletions
```diff
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 36e70845cfc3..48810498b355 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -132,12 +132,15 @@ static int _cpu_down(unsigned int cpu)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE,
+					(void *)(long)cpu);
 	err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
 		printk("%s: attempt to take down CPU %u failed\n",
 				__FUNCTION__, cpu);
-		return -EINVAL;
+		err = -EINVAL;
+		goto out_release;
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
@@ -185,6 +188,9 @@ out_thread:
 	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
+out_release:
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE,
+						(void *)(long)cpu);
 	return err;
 }
 
@@ -206,13 +212,15 @@ int cpu_down(unsigned int cpu)
 /* Requires cpu_add_remove_lock to be held */
 static int __cpuinit _cpu_up(unsigned int cpu)
 {
-	int ret;
+	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
 
 	if (cpu_online(cpu) || !cpu_present(cpu))
 		return -EINVAL;
 
-	ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
+	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu,
+							-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
@@ -233,8 +241,9 @@ static int __cpuinit _cpu_up(unsigned int cpu)
 
 out_notify:
 	if (ret != 0)
-		raw_notifier_call_chain(&cpu_chain,
-				CPU_UP_CANCELED, hcpu);
+		__raw_notifier_call_chain(&cpu_chain,
+				CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	return ret;
 }
```