author		Ashok Raj <ashok.raj@intel.com>		2005-11-28 22:43:46 +0100
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-28 23:42:23 +0100
commit		a9d9baa1e819b2f92f9cfa5240f766c535e636a6
tree		0ae15e5b1071b395affa0ac9abf6fd746ad60b0e
parent		[PATCH] Workaround for gcc 2.96 (undefined references)
[PATCH] clean up lock_cpu_hotplug() in cpufreq
Some callers in the cpufreq hotplug notify path call lock_cpu_hotplug() at the
lowest level, but the lock is already held during the cpu_up() and cpu_down()
calls when the notifications are broadcast to the registered clients.
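To make the deadlock concrete, here is a minimal user-space sketch of the call
chain described above (build with cc -pthread). It is an illustration only: a
POSIX semaphore stands in for the kernel's cpucontrol semaphore, and the names
cpu_down_sim() and cpufreq_notify_callback() are invented for the example;
sem_trywait() is used so the program reports the would-be deadlock instead of
hanging.

/* Illustration only: a plain, non-recursive lock taken twice by one thread. */
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t cpucontrol_sim;		/* stands in for the cpucontrol semaphore */

/* Models the lowest-level cpufreq call taking the hotplug lock unconditionally. */
static void cpufreq_notify_callback(void)
{
	if (sem_trywait(&cpucontrol_sim) == -1 && errno == EAGAIN) {
		/* A blocking sem_wait() here would never return: the holder is
		 * this very thread, still inside cpu_down_sim() below. */
		printf("nested lock attempt: would deadlock\n");
		return;
	}
	printf("nested lock attempt: acquired\n");
	sem_post(&cpucontrol_sim);
}

/* Models cpu_down(): take the lock, then broadcast to registered clients. */
static void cpu_down_sim(void)
{
	sem_wait(&cpucontrol_sim);	/* lock_cpu_hotplug() */
	cpufreq_notify_callback();	/* notifier chain runs with the lock held */
	sem_post(&cpucontrol_sim);	/* unlock_cpu_hotplug() */
}

int main(void)
{
	sem_init(&cpucontrol_sim, 0, 1);
	cpu_down_sim();			/* prints "would deadlock" */
	sem_destroy(&cpucontrol_sim);
	return 0;
}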
Ideally we would call preempt_disable() in the highest caller and make sure we
don't sleep on the way down to the cpufreq_driver->target() calls, but those
calls are too intertwined and cumbersome to clean up.
Hence we consistently use lock_cpu_hotplug() and unlock_cpu_hotplug() in
all places.
- Removed the export of the cpucontrol semaphore and made it static.
- Removed explicit uses of up()/down() in favour of lock_cpu_hotplug() and
  unlock_cpu_hotplug(), so we can track callers in the same thread context
  and just keep a refcount instead of calling down() again, which would
  deadlock (see the sketch after this list).
- Removed the current_in_cpu_hotplug() uses.
- Removed PF_HOTPLUG_CPU from sched.h, which had been introduced as a
  temporary workaround for current_in_cpu_hotplug().
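The refcounting in the second item can be sketched in user space as follows.
This is only an illustration of the owner/depth idea (the real code is in the
kernel/cpu.c hunk below): pthreads stand in for the cpucontrol semaphore and
the current task pointer, the *_sim() names are invented for the example, and
the demo is kept single-threaded for brevity.

/* Illustration only: recursive-by-owner locking via an owner id and a depth count. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpucontrol_sim = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lock_owner;		/* valid only while lock_depth > 0 */
static int lock_depth;

static void lock_cpu_hotplug_sim(void)
{
	/* Take the underlying lock only if this thread does not already own it. */
	if (lock_depth == 0 || !pthread_equal(lock_owner, pthread_self()))
		pthread_mutex_lock(&cpucontrol_sim);
	lock_owner = pthread_self();
	lock_depth++;
}

static void unlock_cpu_hotplug_sim(void)
{
	/* Only the outermost unlock by the owner releases the real lock. */
	if (--lock_depth == 0)
		pthread_mutex_unlock(&cpucontrol_sim);
}

int main(void)
{
	lock_cpu_hotplug_sim();		/* e.g. cpu_down(): depth = 1 */
	lock_cpu_hotplug_sim();		/* nested cpufreq callback: depth = 2, no second down() */
	unlock_cpu_hotplug_sim();	/* depth back to 1, lock still held */
	unlock_cpu_hotplug_sim();	/* depth 0: lock released */
	printf("nested lock/unlock completed, final depth = %d\n", lock_depth);
	return 0;
}

The point is that only the thread that actually took cpucontrol drops it, and
only when its nesting depth returns to zero, so everyone in the notify path
can call lock_cpu_hotplug()/unlock_cpu_hotplug() unconditionally.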
Tested with insmod of cpufreq_stat.ko and logical CPU online/offline to make
sure we don't run into any hangs.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Cc: Zwane Mwaikambo <zwane@linuxpower.ca>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	drivers/cpufreq/cpufreq.c	| 12
-rw-r--r--	include/linux/cpu.h		|  7
-rw-r--r--	include/linux/sched.h		|  1
-rw-r--r--	kernel/cpu.c			| 83
4 files changed, 54 insertions, 49 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1c0f62d0f938..815902c2c856 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1113,21 +1113,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 {
 	int retval = -EINVAL;
 
-	/*
-	 * If we are already in context of hotplug thread, we dont need to
-	 * acquire the hotplug lock. Otherwise acquire cpucontrol to prevent
-	 * hotplug from removing this cpu that we are working on.
-	 */
-	if (!current_in_cpu_hotplug())
-		lock_cpu_hotplug();
-
+	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
-	if (!current_in_cpu_hotplug())
-		unlock_cpu_hotplug();
+	unlock_cpu_hotplug();
 	return retval;
 }
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 43c44530ef9d..0ed1d4853c69 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -65,10 +65,9 @@ extern struct sysdev_class cpu_sysdev_class;
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
-extern struct semaphore cpucontrol;
-#define lock_cpu_hotplug()	down(&cpucontrol)
-#define unlock_cpu_hotplug()	up(&cpucontrol)
-#define lock_cpu_hotplug_interruptible() down_interruptible(&cpucontrol)
+extern void lock_cpu_hotplug(void);
+extern void unlock_cpu_hotplug(void);
+extern int lock_cpu_hotplug_interruptible(void);
 #define hotcpu_notifier(fn, pri) {				\
 	static struct notifier_block fn##_nb =			\
 		{ .notifier_call = fn, .priority = pri };	\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2038bd27b041..b0ad6f30679e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -908,7 +908,6 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
 #define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
 #define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
-#define PF_HOTPLUG_CPU	0x01000000	/* Currently performing CPU hotplug */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34e5..e882c6babf41 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
 #include <asm/semaphore.h>
 
 /* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
 
 static struct notifier_block *cpu_chain;
 
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
 
-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
 {
-	return (current->flags & PF_HOTPLUG_CPU);
+	int ret = 0;
+
+	if (lock_cpu_hotplug_owner != current) {
+		if (interruptible)
+			ret = down_interruptible(&cpucontrol);
+		else
+			down(&cpucontrol);
+	}
+
+	/*
+	 * Set only if we succeed in locking
+	 */
+	if (!ret) {
+		lock_cpu_hotplug_depth++;
+		lock_cpu_hotplug_owner = current;
+	}
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
 
+void lock_cpu_hotplug(void)
+{
+	__lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+
+void unlock_cpu_hotplug(void)
+{
+	if (--lock_cpu_hotplug_depth == 0) {
+		lock_cpu_hotplug_owner = NULL;
+		up(&cpucontrol);
+	}
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+	return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif	/* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 	ret = notifier_chain_register(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	down(&cpucontrol);
+	lock_cpu_hotplug();
 	notifier_chain_unregister(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug. Callers can check if cpucontrol
-	 * is already acquired by current thread, and if so not cause
-	 * a dead lock by not acquiring the lock
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
 	unlock_cpu_hotplug();
 	return err;
 }
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
 	int ret;
 	void *hcpu = (void *)(long)cpu;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 
 	if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug.
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
 	if (ret != 0)
 		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }