| author    | David S. Miller <davem@davemloft.net> | 2008-08-05 01:56:15 +0200 |
| committer | David S. Miller <davem@davemloft.net> | 2008-08-05 01:56:15 +0200 |
| commit    | ae583885bfd07474789059cdef399289bd66c8d0 (patch) |
| tree      | bae0bad634f2dc560e8aed5727243989d52374b6 /arch/sparc64 |
| parent    | sparc64: Kill error_mask from hypervisor_xcall_deliver(). (diff) |
sparc64: Remove all cpumask_t local variables in xcall dispatch.
All of the xcall delivery implementation is cpumask agnostic, so
we can pass around pointers to const cpumask_t objects everywhere.
The sad remaining case is the argument to arch_send_call_function_ipi().
Signed-off-by: David S. Miller <davem@davemloft.net>
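For illustration only, here is a minimal standalone sketch of the pattern this change adopts: the dispatch routine takes a pointer to a const mask and skips the sending CPU itself, so callers never copy the mask into a local variable. The mask_t type and the deliver_to_mask()/mask_test() helpers are hypothetical stand-ins, not the kernel's cpumask API.

/* Sketch: pass a const mask pointer instead of an on-stack copy.
 * Standalone C, assumed names only -- not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define NCPUS 64

/* Hypothetical stand-in for cpumask_t: one bit per CPU. */
typedef struct { uint64_t bits; } mask_t;

static int mask_test(const mask_t *m, int cpu)
{
	return (int)((m->bits >> cpu) & 1);
}

/* Analogue of xcall_deliver(): works entirely through the const
 * pointer and skips the sending CPU, rather than clearing it from
 * a private copy of the mask.
 */
static void deliver_to_mask(uint64_t data0, const mask_t *mask, int this_cpu)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu == this_cpu || !mask_test(mask, cpu))
			continue;
		printf("deliver 0x%llx to cpu %d\n",
		       (unsigned long long)data0, cpu);
	}
}

int main(void)
{
	mask_t online = { .bits = 0xfULL };	/* CPUs 0-3 "online" */

	/* Caller passes &online directly; no mask_t local, no copy. */
	deliver_to_mask(0x1234, &online, /* this_cpu */ 0);
	return 0;
}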
Diffstat (limited to 'arch/sparc64')
| -rw-r--r-- | arch/sparc64/kernel/smp.c | 33 |

1 file changed, 9 insertions, 24 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index ac8996ec97be..27b81775a4de 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -787,21 +787,17 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
  * except self.  Really, there are only two cases currently,
  * "&cpu_online_map" and "&mm->cpu_vm_mask".
  */
-static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask_p)
+static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 {
 	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
-	int this_cpu = get_cpu();
-	cpumask_t mask;
 
-	mask = *mask_p;
-	if (mask_p != &cpu_online_map)
-		cpus_and(mask, mask, cpu_online_map);
-	cpu_clear(this_cpu, mask);
-
-	xcall_deliver(data0, data1, data2, &mask);
-	/* NOTE: Caller runs local copy on master. */
+	xcall_deliver(data0, data1, data2, mask);
+}
 
-	put_cpu();
+/* Send cross call to all processors except self. */
+static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
+{
+	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
 }
 
 extern unsigned long xcall_sync_tick;
@@ -827,10 +823,6 @@ void arch_send_call_function_single_ipi(int cpu)
 		      &cpumask_of_cpu(cpu));
 }
 
-/* Send cross call to all processors except self. */
-#define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map)
-
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
@@ -900,7 +892,6 @@ static inline void __local_flush_dcache_page(struct page *page)
 
 void smp_flush_dcache_page_impl(struct page *page, int cpu)
 {
-	cpumask_t mask = cpumask_of_cpu(cpu);
 	int this_cpu;
 
 	if (tlb_type == hypervisor)
@@ -929,7 +920,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 		}
 		if (data0) {
 			xcall_deliver(data0, __pa(pg_addr),
-				      (u64) pg_addr, &mask);
+				      (u64) pg_addr, &cpumask_of_cpu(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
 			atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -941,7 +932,6 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
-	cpumask_t mask = cpu_online_map;
 	void *pg_addr;
 	int this_cpu;
 	u64 data0;
@@ -951,13 +941,9 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 
 	this_cpu = get_cpu();
 
-	cpu_clear(this_cpu, mask);
-
 #ifdef CONFIG_DEBUG_DCFLUSH
 	atomic_inc(&dcpage_flushes);
 #endif
-	if (cpus_empty(mask))
-		goto flush_self;
 	data0 = 0;
 	pg_addr = page_address(page);
 	if (tlb_type == spitfire) {
@@ -971,12 +957,11 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &mask);
+			      (u64) pg_addr, &cpu_online_map);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
 	}
-flush_self:
 	__local_flush_dcache_page(page);
 
 	put_cpu();