author     Rusty Russell <rusty@rustcorp.com.au>	2009-03-16 05:10:39 +0100
committer  Rusty Russell <rusty@rustcorp.com.au>	2009-03-16 05:10:39 +0100
commit     81f1adf01224f5c0be5f90f43664f799c1f7bb2d
tree       b38dbf1bc7e144a44ec38a6b1787fa870eac367f	/arch/sparc/kernel/smp_64.c
parent     cpumask: remove dangerous CPU_MASK_ALL_PTR, &CPU_MASK_ALL.: sparc
cpumask: use mm_cpumask() wrapper: sparc
Makes code futureproof against the impending change to mm->cpu_vm_mask.
It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
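
For readers unfamiliar with the wrapper, the idea looks roughly like the sketch below. This is a simplified, userspace-style illustration, not the kernel's actual definitions: the struct layout, the NR_CPUS stand-in, and the helper bodies are assumptions made for the example; only the names mm_cpumask(), cpumask_copy(), cpumask_of() and the pointer-taking calling convention come from the patch itself.

/* Simplified illustration only -- not the kernel's real definitions. */
#include <string.h>

#define ILLUSTRATIVE_NR_CPUS 64	/* stand-in for CONFIG_NR_CPUS */

/* Stand-in for struct cpumask: a plain bitmap. */
struct cpumask {
	unsigned long bits[ILLUSTRATIVE_NR_CPUS / (8 * sizeof(unsigned long))];
};

struct mm_struct {
	/*
	 * Today an embedded mask; the impending change could store this
	 * differently (e.g. out of line), which is why callers should not
	 * touch the field directly.
	 */
	struct cpumask cpu_vm_mask;
};

/*
 * Accessor: callers get a pointer and never name cpu_vm_mask directly,
 * so only this one helper changes when the field's representation changes.
 */
static inline struct cpumask *mm_cpumask(struct mm_struct *mm)
{
	return &mm->cpu_vm_mask;
}

/* Pointer-taking op in the style of the newer cpumask_* API. */
static inline void cpumask_copy(struct cpumask *dst, const struct cpumask *src)
{
	memcpy(dst->bits, src->bits, sizeof(dst->bits));
}

int main(void)
{
	struct mm_struct mm = { { { 0 } } };
	struct cpumask online = { { 1 } };	/* pretend only CPU 0 is set */

	/* Old style: direct struct assignment, tied to the embedded layout. */
	/* mm.cpu_vm_mask = online; */

	/* New style: works however cpu_vm_mask ends up being stored. */
	cpumask_copy(mm_cpumask(&mm), &online);
	return 0;
}

With callers written against mm_cpumask() and pointer-taking ops, changing how cpu_vm_mask is stored only requires updating the accessor, not every architecture's flush and cross-call paths.
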
Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
 arch/sparc/kernel/smp_64.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4e17eec41478..2de937c7232e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -850,7 +850,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	smp_call_function_many(&mm->cpu_vm_mask, tsb_sync, mm, 1);
+	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
@@ -1055,13 +1055,13 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	int cpu = get_cpu();
 
 	if (atomic_read(&mm->mm_users) == 1) {
-		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 		goto local_flush_and_out;
 	}
 
 	smp_cross_call_masked(&xcall_flush_tlb_mm,
 			      ctx, 0, 0,
-			      &mm->cpu_vm_mask);
+			      mm_cpumask(mm));
 
 local_flush_and_out:
 	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
@@ -1075,11 +1075,11 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	int cpu = get_cpu();
 
 	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
-		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 	else
 		smp_cross_call_masked(&xcall_flush_tlb_pending,
 				      ctx, nr, (unsigned long) vaddrs,
-				      &mm->cpu_vm_mask);
+				      mm_cpumask(mm));
 
 	__flush_tlb_pending(ctx, nr, vaddrs);
 