author     Linus Torvalds <torvalds@linux-foundation.org>  2009-03-31 03:00:26 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-03-31 03:00:26 +0200
commit     d17abcd5417d84cfa8a225160481203a37dc81d4 (patch)
tree       74ce2c425c5c6550acec90bc92c8a3f735f0d257 /kernel
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest-an... (diff)
parent     oprofile: Thou shalt not call __exit functions from __init functions (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask:
  oprofile: Thou shalt not call __exit functions from __init functions
  cpumask: remove the now-obsoleted pcibus_to_cpumask(): generic
  cpumask: remove cpumask_t from core
  cpumask: convert rcutorture.c
  cpumask: use new cpumask_ functions in core code.
  cpumask: remove references to struct irqaction's mask field.
  cpumask: use mm_cpumask() wrapper: kernel/fork.c
  cpumask: use set_cpu_active in init/main.c
  cpumask: remove node_to_first_cpu
  cpumask: fix seq_bitmap_*() functions.
  cpumask: remove dangerous CPU_MASK_ALL_PTR, &CPU_MASK_ALL
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c            6
-rw-r--r--  kernel/fork.c           2
-rw-r--r--  kernel/kmod.c           2
-rw-r--r--  kernel/kthread.c        4
-rw-r--r--  kernel/rcutorture.c    25
-rw-r--r--  kernel/sched_cpupri.h   2
-rw-r--r--  kernel/stop_machine.c   2
-rw-r--r--  kernel/workqueue.c      6
8 files changed, 29 insertions, 20 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 79e40f00dcb8..395b6974dc8d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -281,7 +281,7 @@ int __ref cpu_down(unsigned int cpu)
goto out;
}
- cpu_clear(cpu, cpu_active_map);
+ set_cpu_active(cpu, false);
/*
* Make sure the all cpus did the reschedule and are not
@@ -296,7 +296,7 @@ int __ref cpu_down(unsigned int cpu)
err = _cpu_down(cpu, 0);
if (cpu_online(cpu))
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
out:
cpu_maps_update_done();
@@ -333,7 +333,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
goto out_notify;
BUG_ON(!cpu_online(cpu));
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
/* Now call notifier in preparation. */
raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
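
The cpu.c hunks replace open-coded bit twiddling on the global cpu_active_map with the set_cpu_active() accessor. A minimal sketch of the substitution, with the surrounding hotplug logic elided (an illustration of the API, not the kernel's actual cpu_down()):

    /* Old style: direct bit operations on the global map. */
    cpu_clear(cpu, cpu_active_map);   /* mark CPU inactive */
    cpu_set(cpu, cpu_active_map);     /* mark CPU active   */

    /* New style: one accessor; the bool selects set vs. clear. */
    set_cpu_active(cpu, false);       /* mark CPU inactive */
    set_cpu_active(cpu, true);        /* mark CPU active   */
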
diff --git a/kernel/fork.c b/kernel/fork.c
index 6715ebc3761d..47c15840a381 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -284,7 +284,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
- cpus_clear(mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
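
The fork.c change stops touching mm->cpu_vm_mask directly and goes through the mm_cpumask() wrapper, which returns a struct cpumask * and so hides how the mask is stored inside mm_struct. A sketch of the pattern, assuming only what the hunk above shows:

    /* Old: callers hard-code the field and its cpumask_t type. */
    cpus_clear(mm->cpu_vm_mask);

    /* New: the wrapper decouples callers from the representation,
     * so the field can later change layout without touching them. */
    cpumask_clear(mm_cpumask(mm));
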
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a27a5f64443d..f0c8f545180d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -167,7 +167,7 @@ static int ____call_usermodehelper(void *data)
}
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(current, cpu_all_mask);
/*
* Our parent is keventd, which runs with elevated scheduling priority.
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4fbc456f393d..84bbadd4d021 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -110,7 +110,7 @@ static void create_kthread(struct kthread_create_info *create)
*/
sched_setscheduler(create->result, SCHED_NORMAL, &param);
set_user_nice(create->result, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(create->result, cpu_all_mask);
}
complete(&create->done);
}
@@ -240,7 +240,7 @@ int kthreadd(void *unused)
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
set_user_nice(tsk, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(tsk, cpu_all_mask);
current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
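
kmod.c and kthread.c make the same one-line substitution. CPU_MASK_ALL_PTR took the address of a full CPU_MASK_ALL constant, a pattern this series removes as dangerous because a cpumask_t becomes very large with big NR_CPUS; cpu_all_mask is a single shared const struct cpumask * provided by the core. Sketch:

    /* Old: pointer to a full cpumask_t constant. */
    set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);

    /* New: one shared, read-only all-CPUs mask. */
    set_cpus_allowed_ptr(tsk, cpu_all_mask);
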
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7c4142a79f0a..9b4a975a4b4a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers = 0;
static struct list_head rcu_torture_removed;
+static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test = 0;
@@ -889,10 +890,9 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask;
int i;
- cpus_setall(tmp_mask);
+ cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void)
}
if (rcu_idle_cpu != -1)
- cpu_clear(rcu_idle_cpu, tmp_mask);
+ cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
- set_cpus_allowed_ptr(current, &tmp_mask);
+ set_cpus_allowed_ptr(current, shuffle_tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (writer_task)
- set_cpus_allowed_ptr(writer_task, &tmp_mask);
+ set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
if (stats_task)
- set_cpus_allowed_ptr(stats_task, &tmp_mask);
+ set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
@@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void)
if (shuffler_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
kthread_stop(shuffler_task);
+ free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
@@ -1190,10 +1191,18 @@ rcu_torture_init(void)
}
if (test_no_idle_hz) {
rcu_idle_cpu = num_online_cpus() - 1;
+
+ if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+ firsterr = -ENOMEM;
+ VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
+ goto unwind;
+ }
+
/* Create the shuffler thread */
shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
"rcu_torture_shuffle");
if (IS_ERR(shuffler_task)) {
+ free_cpumask_var(shuffle_tmp_mask);
firsterr = PTR_ERR(shuffler_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
shuffler_task = NULL;
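
The rcutorture.c conversion is the canonical cpumask_var_t pattern from this series: the on-stack cpumask_t (potentially kilobytes with a large NR_CPUS) becomes a mask with an explicit allocate/use/free lifecycle. A condensed sketch of that lifecycle; my_init(), my_shuffle(), and my_cleanup() are hypothetical stand-ins for the rcu_torture_* functions above:

    static cpumask_var_t shuffle_tmp_mask;

    static int my_init(void)
    {
            /* Allocates the mask when CONFIG_CPUMASK_OFFSTACK=y;
             * otherwise a no-op over an embedded array. */
            if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL))
                    return -ENOMEM;
            return 0;
    }

    static void my_shuffle(int idle_cpu)
    {
            cpumask_setall(shuffle_tmp_mask);
            if (idle_cpu != -1)
                    cpumask_clear_cpu(idle_cpu, shuffle_tmp_mask);
            set_cpus_allowed_ptr(current, shuffle_tmp_mask);
    }

    static void my_cleanup(void)
    {
            free_cpumask_var(shuffle_tmp_mask);   /* pairs with alloc */
    }

Note the error path in rcu_torture_init() above: if kthread_run() fails after the allocation succeeded, the mask is freed before unwinding, matching the free added to rcu_torture_cleanup().
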
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 642a94ef8a0a..9a7e859b8fbf 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -25,7 +25,7 @@ struct cpupri {
#ifdef CONFIG_SMP
int cpupri_find(struct cpupri *cp,
- struct task_struct *p, cpumask_t *lowest_mask);
+ struct task_struct *p, struct cpumask *lowest_mask);
void cpupri_set(struct cpupri *cp, int cpu, int pri);
int cpupri_init(struct cpupri *cp, bool bootmem);
void cpupri_cleanup(struct cpupri *cp);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 74541ca49536..912823e2a11b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -44,7 +44,7 @@ static DEFINE_MUTEX(setup_lock);
static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
-static const cpumask_t *active_cpus;
+static const struct cpumask *active_cpus;
static void *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
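
sched_cpupri.h and stop_machine.c carry the same mechanical change: cpumask_t is a typedef for struct cpumask, and spelling out the struct in declarations detaches them from the typedef this series is phasing out of core code. No behavior changes; taken straight from the hunks above:

    /* Old prototype, tied to the cpumask_t typedef: */
    int cpupri_find(struct cpupri *cp, struct task_struct *p,
                    cpumask_t *lowest_mask);

    /* New prototype, same types underneath, typedef-free: */
    int cpupri_find(struct cpupri *cp, struct task_struct *p,
                    struct cpumask *lowest_mask);
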
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509b40d3..9aedd9fd825b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -416,7 +416,7 @@ void flush_workqueue(struct workqueue_struct *wq)
might_sleep();
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +547,7 @@ static void wait_on_work(struct work_struct *work)
wq = cwq->wq;
cpu_map = wq_cpu_map(wq);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -911,7 +911,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
list_del(&wq->list);
spin_unlock(&workqueue_lock);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
cpu_maps_update_done();
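
In workqueue.c, for_each_cpu() takes the mask by pointer, so the dereference that for_each_cpu_mask_nr() forced on callers disappears. A sketch of the iteration as it appears in flush_workqueue() above (wq internals elided, assuming only the identifiers shown in the hunks):

    const struct cpumask *cpu_map = wq_cpu_map(wq);
    int cpu;

    /* Old: the macro took a cpumask_t by value. */
    for_each_cpu_mask_nr(cpu, *cpu_map)
            flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));

    /* New: pointer-based iterator from the struct cpumask API. */
    for_each_cpu(cpu, cpu_map)
            flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
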