From 3d4422332711ef48ef0f132f1fcbfcbd56c7f3d1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 26 Jun 2008 11:21:34 +0200 Subject: Add generic helpers for arch IPI function calls This adds kernel/smp.c which contains helpers for IPI function calls. In addition to supporting the existing smp_call_function() in a more efficient manner, it also adds a more scalable variant called smp_call_function_single() for calling a given function on a single CPU only. The core of this is based on the x86-64 patch from Nick Piggin, lots of changes since then. "Alan D. Brunelle" has contributed lots of fixes and suggestions as well. Also thanks to Paul E. McKenney for reviewing RCU usage and getting rid of the data allocation fallback deadlock. Acked-by: Ingo Molnar Reviewed-by: Paul E. McKenney Signed-off-by: Jens Axboe --- arch/Kconfig | 3 + arch/sparc64/kernel/smp.c | 11 +- include/linux/smp.h | 35 ++++- init/main.c | 2 + kernel/Makefile | 1 + kernel/smp.c | 383 ++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 428 insertions(+), 7 deletions(-) create mode 100644 kernel/smp.c diff --git a/arch/Kconfig b/arch/Kconfig index 3ea332b009e5..ad89a33d8c6e 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -39,3 +39,6 @@ config HAVE_KRETPROBES config HAVE_DMA_ATTRS def_bool n + +config USE_GENERIC_SMP_HELPERS + def_bool n diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index fa63c68a1819..b82d017a1744 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -816,8 +816,9 @@ extern unsigned long xcall_call_function; * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ -static int smp_call_function_mask(void (*func)(void *info), void *info, - int nonatomic, int wait, cpumask_t mask) +static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, + int nonatomic, int wait, + cpumask_t mask) { struct call_data_struct data; int cpus; @@ -855,8 +856,8 @@ out_unlock: int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) { - return smp_call_function_mask(func, info, nonatomic, wait, - cpu_online_map); + return sparc64_smp_call_function_mask(func, info, nonatomic, wait, + cpu_online_map); } void smp_call_function_client(int irq, struct pt_regs *regs) @@ -893,7 +894,7 @@ static void tsb_sync(void *info) void smp_tsb_sync(struct mm_struct *mm) { - smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); + sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); } extern unsigned long xcall_flush_tlb_mm; diff --git a/include/linux/smp.h b/include/linux/smp.h index 55232ccf9cfd..eac3e062250f 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -7,9 +7,19 @@ */ #include <linux/errno.h> +#include <linux/list.h> +#include <linux/cpumask.h> +#include <linux/spinlock.h> extern void cpu_idle(void); +struct call_single_data { + struct list_head list; + void (*func) (void *info); + void *info; + unsigned int flags; +}; + #ifdef CONFIG_SMP #include <asm/smp.h> @@ -53,9 +63,28 @@ extern void smp_cpus_done(unsigned int max_cpus); * Call a function on all other processors */ int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); - +int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, + int wait); int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, int retry, int wait); +void __smp_call_function_single(int cpuid, struct call_single_data *data); + +/* + * Generic and arch helpers + */ +#ifdef CONFIG_USE_GENERIC_SMP_HELPERS +void
generic_smp_call_function_single_interrupt(void); +void generic_smp_call_function_interrupt(void); +void init_call_single_data(void); +void ipi_call_lock(void); +void ipi_call_unlock(void); +void ipi_call_lock_irq(void); +void ipi_call_unlock_irq(void); +#else +static inline void init_call_single_data(void) +{ +} +#endif /* * Call a function on all processors */ @@ -112,7 +141,9 @@ static inline void smp_send_reschedule(int cpu) { } }) #define smp_call_function_mask(mask, func, info, wait) \ (up_smp_call_function(func, info)) - +static inline void init_call_single_data(void) +{ +} #endif /* !SMP */ /* diff --git a/init/main.c b/init/main.c index f7fb20021d48..1efcccff1bdb 100644 --- a/init/main.c +++ b/init/main.c @@ -31,6 +31,7 @@ #include <linux/kernel_stat.h> #include <linux/start_kernel.h> #include <linux/security.h> +#include <linux/smp.h> #include <linux/workqueue.h> #include <linux/profile.h> #include <linux/rcupdate.h> @@ -779,6 +780,7 @@ static void __init do_pre_smp_initcalls(void) { extern int spawn_ksoftirqd(void); + init_call_single_data(); migration_init(); spawn_ksoftirqd(); if (!nosoftlockup) diff --git a/kernel/Makefile b/kernel/Makefile index 1c9938addb9d..9fa57976f252 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += cpu.o spinlock.o +obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_PROVE_LOCKING) += spinlock.o obj-$(CONFIG_UID16) += uid16.o diff --git a/kernel/smp.c b/kernel/smp.c new file mode 100644 index 000000000000..f77b75c027ad --- /dev/null +++ b/kernel/smp.c @@ -0,0 +1,383 @@ +/* + * Generic helpers for smp ipi calls + * + * (C) Jens Axboe 2008 + * + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/percpu.h> +#include <linux/rcupdate.h> +#include <linux/smp.h> + +static DEFINE_PER_CPU(struct call_single_queue, call_single_queue); +static LIST_HEAD(call_function_queue); +__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock); + +enum { + CSD_FLAG_WAIT = 0x01, + CSD_FLAG_ALLOC = 0x02, +}; + +struct call_function_data { + struct call_single_data csd; + spinlock_t lock; + unsigned int refs; + cpumask_t cpumask; + struct rcu_head rcu_head; +}; + +struct call_single_queue { + struct list_head list; + spinlock_t lock; +}; + +void __cpuinit init_call_single_data(void) +{ + int i; + + for_each_possible_cpu(i) { + struct call_single_queue *q = &per_cpu(call_single_queue, i); + + spin_lock_init(&q->lock); + INIT_LIST_HEAD(&q->list); + } +} + +static void csd_flag_wait(struct call_single_data *data) +{ + /* Wait for response */ + do { + /* + * We need to see the flags store in the IPI handler + */ + smp_mb(); + if (!(data->flags & CSD_FLAG_WAIT)) + break; + cpu_relax(); + } while (1); +} + +/* + * Insert a previously allocated call_single_data element for execution + * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ */ +static void generic_exec_single(int cpu, struct call_single_data *data) +{ + struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); + int wait = data->flags & CSD_FLAG_WAIT, ipi; + unsigned long flags; + + spin_lock_irqsave(&dst->lock, flags); + ipi = list_empty(&dst->list); + list_add_tail(&data->list, &dst->list); + spin_unlock_irqrestore(&dst->lock, flags); + + if (ipi) + arch_send_call_function_single_ipi(cpu); + + if (wait) + csd_flag_wait(data); +} + +static void rcu_free_call_data(struct rcu_head *head) +{ + struct call_function_data *data; + + data = container_of(head, struct call_function_data, rcu_head); + + kfree(data); +} + +/* + * Invoked by arch to handle an IPI for call function. Must be called with + * interrupts disabled. + */ +void generic_smp_call_function_interrupt(void) +{ + struct call_function_data *data; + int cpu = get_cpu(); + + /* + * It's ok to use list_for_each_rcu() here even though we may delete + * 'pos', since list_del_rcu() doesn't clear ->next + */ + rcu_read_lock(); + list_for_each_entry_rcu(data, &call_function_queue, csd.list) { + int refs; + + if (!cpu_isset(cpu, data->cpumask)) + continue; + + data->csd.func(data->csd.info); + + spin_lock(&data->lock); + cpu_clear(cpu, data->cpumask); + WARN_ON(data->refs == 0); + data->refs--; + refs = data->refs; + spin_unlock(&data->lock); + + if (refs) + continue; + + spin_lock(&call_function_lock); + list_del_rcu(&data->csd.list); + spin_unlock(&call_function_lock); + + if (data->csd.flags & CSD_FLAG_WAIT) { + /* + * serialize stores to data with the flag clear + * and wakeup + */ + smp_wmb(); + data->csd.flags &= ~CSD_FLAG_WAIT; + } else + call_rcu(&data->rcu_head, rcu_free_call_data); + } + rcu_read_unlock(); + + put_cpu(); +} + +/* + * Invoked by arch to handle an IPI for call function single. Must be called + * from the arch with interrupts disabled. + */ +void generic_smp_call_function_single_interrupt(void) +{ + struct call_single_queue *q = &__get_cpu_var(call_single_queue); + LIST_HEAD(list); + + /* + * Need to see other stores to list head for checking whether + * list is empty without holding q->lock + */ + smp_mb(); + while (!list_empty(&q->list)) { + unsigned int data_flags; + + spin_lock(&q->lock); + list_replace_init(&q->list, &list); + spin_unlock(&q->lock); + + while (!list_empty(&list)) { + struct call_single_data *data; + + data = list_entry(list.next, struct call_single_data, + list); + list_del(&data->list); + + /* + * 'data' can be invalid after this call if + * flags == 0 (when called through + * generic_exec_single(), so save them away before + * making the call. + */ + data_flags = data->flags; + + data->func(data->info); + + if (data_flags & CSD_FLAG_WAIT) { + smp_wmb(); + data->flags &= ~CSD_FLAG_WAIT; + } else if (data_flags & CSD_FLAG_ALLOC) + kfree(data); + } + /* + * See comment on outer loop + */ + smp_mb(); + } +} + +/* + * smp_call_function_single - Run a function on a specific CPU + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @retry: Unused + * @wait: If true, wait until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. Note that @wait + * will be implicitly turned on in case of allocation failures, since + * we fall back to on-stack allocation. 
+ */ +int smp_call_function_single(int cpu, void (*func) (void *info), void *info, + int retry, int wait) +{ + struct call_single_data d; + unsigned long flags; + /* prevent preemption and reschedule on another processor */ + int me = get_cpu(); + + /* Can deadlock when called with interrupts disabled */ + WARN_ON(irqs_disabled()); + + if (cpu == me) { + local_irq_save(flags); + func(info); + local_irq_restore(flags); + } else { + struct call_single_data *data = NULL; + + if (!wait) { + data = kmalloc(sizeof(*data), GFP_ATOMIC); + if (data) + data->flags = CSD_FLAG_ALLOC; + } + if (!data) { + data = &d; + data->flags = CSD_FLAG_WAIT; + } + + data->func = func; + data->info = info; + generic_exec_single(cpu, data); + } + + put_cpu(); + return 0; +} +EXPORT_SYMBOL(smp_call_function_single); + +/** + * __smp_call_function_single(): Run a function on another CPU + * @cpu: The CPU to run on. + * @data: Pre-allocated and setup data structure + * + * Like smp_call_function_single(), but allow caller to pass in a pre-allocated + * data structure. Useful for embedding @data inside other structures, for + * instance. + * + */ +void __smp_call_function_single(int cpu, struct call_single_data *data) +{ + /* Can deadlock when called with interrupts disabled */ + WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled()); + + generic_exec_single(cpu, data); +} + +/** + * smp_call_function_mask(): Run a function on a set of other CPUs. + * @mask: The set of cpus to run on. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait (atomically) until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. + * + * If @wait is true, then returns once @func has returned. Note that @wait + * will be implicitly turned on in case of allocation failures, since + * we fall back to on-stack allocation. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler or from a bottom half handler. Preemption + * must be disabled when calling this function. + */ +int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, + int wait) +{ + struct call_function_data d; + struct call_function_data *data = NULL; + cpumask_t allbutself; + unsigned long flags; + int cpu, num_cpus; + + /* Can deadlock when called with interrupts disabled */ + WARN_ON(irqs_disabled()); + + cpu = smp_processor_id(); + allbutself = cpu_online_map; + cpu_clear(cpu, allbutself); + cpus_and(mask, mask, allbutself); + num_cpus = cpus_weight(mask); + + /* + * If zero CPUs, return. If just a single CPU, turn this request + * into a targetted single call instead since it's faster. 
+ */ + if (!num_cpus) + return 0; + else if (num_cpus == 1) { + cpu = first_cpu(mask); + return smp_call_function_single(cpu, func, info, 0, wait); + } + + if (!wait) { + data = kmalloc(sizeof(*data), GFP_ATOMIC); + if (data) + data->csd.flags = CSD_FLAG_ALLOC; + } + if (!data) { + data = &d; + data->csd.flags = CSD_FLAG_WAIT; + } + + spin_lock_init(&data->lock); + data->csd.func = func; + data->csd.info = info; + data->refs = num_cpus; + data->cpumask = mask; + + spin_lock_irqsave(&call_function_lock, flags); + list_add_tail_rcu(&data->csd.list, &call_function_queue); + spin_unlock_irqrestore(&call_function_lock, flags); + + /* Send a message to all CPUs in the map */ + arch_send_call_function_ipi(mask); + + /* optionally wait for the CPUs to complete */ + if (wait) + csd_flag_wait(&data->csd); + + return 0; +} +EXPORT_SYMBOL(smp_call_function_mask); + +/** + * smp_call_function(): Run a function on all other CPUs. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @natomic: Unused + * @wait: If true, wait (atomically) until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. + * + * If @wait is true, then returns once @func has returned; otherwise + * it returns just before the target cpu calls @func. In case of allocation + * failure, @wait will be implicitly turned on. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler or from a bottom half handler. + */ +int smp_call_function(void (*func)(void *), void *info, int natomic, int wait) +{ + int ret; + + preempt_disable(); + ret = smp_call_function_mask(cpu_online_map, func, info, wait); + preempt_enable(); + return ret; +} +EXPORT_SYMBOL(smp_call_function); + +void ipi_call_lock(void) +{ + spin_lock(&call_function_lock); +} + +void ipi_call_unlock(void) +{ + spin_unlock(&call_function_lock); +} + +void ipi_call_lock_irq(void) +{ + spin_lock_irq(&call_function_lock); +} + +void ipi_call_unlock_irq(void) +{ + spin_unlock_irq(&call_function_lock); +} -- cgit v1.2.3 From 3b16cf874861436725c43ba0b68bdd799297be7c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 26 Jun 2008 11:21:54 +0200 Subject: x86: convert to generic helpers for IPI function calls This converts x86, x86-64, and xen to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). 
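After this conversion, the arch-side contract is deliberately small: supply the two IPI send hooks in smp_ops and call the generic dispatchers from the IPI vector handlers. As a hedged illustration (not part of the patch; send_my_ipi() and ack_my_ipi() are hypothetical stand-ins for a platform's low-level primitives), a minimal conversion has this shape:

	/*
	 * Editorial sketch, not part of this patch. send_my_ipi() and
	 * ack_my_ipi() are made-up platform primitives; the generic_*
	 * dispatchers are the real helpers added in kernel/smp.c.
	 */
	static void my_send_call_func_single_ipi(int cpu)
	{
		/* the generic code has already queued the csd for this cpu */
		send_my_ipi(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
	}

	static void my_send_call_func_ipi(cpumask_t mask)
	{
		send_my_ipi(mask, CALL_FUNCTION_VECTOR);
	}

	void my_call_function_single_interrupt(void)
	{
		ack_my_ipi();
		irq_enter();
		/* drain this CPU's call_single_queue */
		generic_smp_call_function_single_interrupt();
		irq_exit();
	}
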
Acked-by: Ingo Molnar Acked-by: Jeremy Fitzhardinge Signed-off-by: Jens Axboe --- arch/x86/Kconfig | 1 + arch/x86/kernel/apic_32.c | 4 + arch/x86/kernel/entry_64.S | 3 + arch/x86/kernel/i8259_64.c | 4 + arch/x86/kernel/smp.c | 158 ++++------------------------- arch/x86/kernel/smpboot.c | 4 +- arch/x86/kernel/smpcommon.c | 56 ---------- arch/x86/mach-voyager/voyager_smp.c | 94 ++++------------- arch/x86/xen/enlighten.c | 4 +- arch/x86/xen/mmu.c | 2 +- arch/x86/xen/smp.c | 133 +++++++++--------------- arch/x86/xen/xen-ops.h | 9 +- include/asm-x86/hw_irq_32.h | 1 + include/asm-x86/hw_irq_64.h | 2 + include/asm-x86/mach-default/entry_arch.h | 1 + include/asm-x86/mach-default/irq_vectors.h | 1 + include/asm-x86/mach-voyager/entry_arch.h | 2 +- include/asm-x86/mach-voyager/irq_vectors.h | 4 +- include/asm-x86/smp.h | 21 ++-- include/asm-x86/xen/events.h | 1 + 20 files changed, 125 insertions(+), 380 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e0edaaa6920a..2f3fbebf51d8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -168,6 +168,7 @@ config GENERIC_PENDING_IRQ config X86_SMP bool depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) + select USE_GENERIC_SMP_HELPERS default y config X86_32_SMP diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 4b99b1bdeb6c..71017f71f4bc 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1358,6 +1358,10 @@ void __init smp_intr_init(void) /* IPI for generic function call */ set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); + + /* IPI for single call function */ + set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, + call_function_single_interrupt); } #endif diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 556a8df522a7..6d1fe270a96d 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -711,6 +711,9 @@ END(invalidate_interrupt\num) ENTRY(call_function_interrupt) apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt END(call_function_interrupt) +ENTRY(call_function_single_interrupt) + apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt +END(call_function_single_interrupt) ENTRY(irq_move_cleanup_interrupt) apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt END(irq_move_cleanup_interrupt) diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c index fa57a1568508..00d2ccdc69f8 100644 --- a/arch/x86/kernel/i8259_64.c +++ b/arch/x86/kernel/i8259_64.c @@ -494,6 +494,10 @@ void __init native_init_IRQ(void) /* IPI for generic function call */ set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); + /* IPI for generic single function call */ + set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, + call_function_single_interrupt); + /* Low priority IPI to cleanup after moving an irq */ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); #endif diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 0cb7aadc87cd..575aa3d7248a 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu) send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); } -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. 
- */ -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -}; - -void lock_ipi_call_lock(void) +void native_send_call_func_single_ipi(int cpu) { - spin_lock_irq(&call_lock); -} - -void unlock_ipi_call_lock(void) -{ - spin_unlock_irq(&call_lock); -} - -static struct call_data_struct *call_data; - -static void __smp_call_function(void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - struct call_data_struct data; - int cpus = num_online_cpus() - 1; - - if (!cpus) - return; - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); - - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_allbutself(CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); + send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); } - -/** - * smp_call_function_mask(): Run a function on a set of other CPUs. - * @mask: The set of cpus to run on. Must not include the current cpu. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -static int -native_smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) +void native_send_call_func_ipi(cpumask_t mask) { - struct call_data_struct data; cpumask_t allbutself; - int cpus; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - /* Holding any lock stops cpus from going down. 
*/ - spin_lock(&call_lock); allbutself = cpu_online_map; cpu_clear(smp_processor_id(), allbutself); - cpus_and(mask, mask, allbutself); - cpus = cpus_weight(mask); - - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - wmb(); - - /* Send a message to other CPUs */ if (cpus_equal(mask, allbutself) && cpus_equal(cpu_online_map, cpu_callout_map)) send_IPI_allbutself(CALL_FUNCTION_VECTOR); else send_IPI_mask(mask, CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - spin_unlock(&call_lock); - - return 0; } static void stop_this_cpu(void *dummy) @@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy) static void native_smp_send_stop(void) { - int nolock; unsigned long flags; if (reboot_force) return; - /* Don't deadlock on the call lock in panic */ - nolock = !spin_trylock(&call_lock); + smp_call_function(stop_this_cpu, NULL, 0, 0); local_irq_save(flags); - __smp_call_function(stop_this_cpu, NULL, 0, 0); - if (!nolock) - spin_unlock(&call_lock); disable_local_APIC(); local_irq_restore(flags); } @@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs) void smp_call_function_interrupt(struct pt_regs *regs) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - ack_APIC_irq(); - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ irq_enter(); - (*func)(info); + generic_smp_call_function_interrupt(); #ifdef CONFIG_X86_32 __get_cpu_var(irq_stat).irq_call_count++; #else add_pda(irq_call_count, 1); #endif irq_exit(); +} - if (wait) { - mb(); - atomic_inc(&call_data->finished); - } +void smp_call_function_single_interrupt(void) +{ + ack_APIC_irq(); + irq_enter(); + generic_smp_call_function_single_interrupt(); +#ifdef CONFIG_X86_32 + __get_cpu_var(irq_stat).irq_call_count++; +#else + add_pda(irq_call_count, 1); +#endif + irq_exit(); } struct smp_ops smp_ops = { @@ -338,7 +219,8 @@ struct smp_ops smp_ops = { .smp_send_stop = native_smp_send_stop, .smp_send_reschedule = native_smp_send_reschedule, - .smp_call_function_mask = native_smp_call_function_mask, + + .send_call_func_ipi = native_send_call_func_ipi, + .send_call_func_single_ipi = native_send_call_func_single_ipi, }; EXPORT_SYMBOL_GPL(smp_ops); - diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 56078d61c793..89647898f546 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -345,7 +345,7 @@ static void __cpuinit start_secondary(void *unused) * lock helps us to not include this cpu in a currently in progress * smp_call_function(). 
*/ - lock_ipi_call_lock(); + ipi_call_lock_irq(); #ifdef CONFIG_X86_64 spin_lock(&vector_lock); @@ -357,7 +357,7 @@ static void __cpuinit start_secondary(void *unused) spin_unlock(&vector_lock); #endif cpu_set(smp_processor_id(), cpu_online_map); - unlock_ipi_call_lock(); + ipi_call_unlock_irq(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; setup_secondary_clock(); diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 3449064d141a..99941b37eca0 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c @@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu) per_cpu(cpu_number, cpu) = cpu; } #endif - -/** - * smp_call_function(): Run a function on all other CPUs. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: Unused. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - return smp_call_function_mask(cpu_online_map, func, info, wait); -} -EXPORT_SYMBOL(smp_call_function); - -/** - * smp_call_function_single - Run a function on a specific CPU - * @cpu: The target CPU. Cannot be the calling CPU. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: Unused. - * @wait: If true, wait until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - */ -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - /* prevent preemption and reschedule on another processor */ - int ret; - int me = get_cpu(); - if (cpu == me) { - local_irq_disable(); - func(info); - local_irq_enable(); - put_cpu(); - return 0; - } - - ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); - - put_cpu(); - return ret; -} -EXPORT_SYMBOL(smp_call_function_single); diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 8acbf0cdf1a5..cb34407a9930 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -955,94 +955,24 @@ static void smp_stop_cpu_function(void *dummy) halt(); } -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - volatile unsigned long started; - volatile unsigned long finished; - int wait; -}; - -static struct call_data_struct *call_data; - /* execute a thread on a new CPU. The function to be called must be * previously set up. 
This is used to schedule a function for * execution on all CPUs - set up the function then broadcast a * function_interrupt CPI to come here on each CPU */ static void smp_call_function_interrupt(void) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - /* must take copy of wait because call_data may be replaced - * unless the function is waiting for us to finish */ - int wait = call_data->wait; - __u8 cpu = smp_processor_id(); - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - if (!test_and_clear_bit(cpu, &call_data->started)) { - /* If the bit wasn't set, this could be a replay */ - printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion" - " with no call pending\n", cpu); - return; - } - /* - * At this point the info structure may be out of scope unless wait==1 - */ irq_enter(); - (*func) (info); + generic_smp_call_function_interrupt(); __get_cpu_var(irq_stat).irq_call_count++; irq_exit(); - if (wait) { - mb(); - clear_bit(cpu, &call_data->finished); - } } -static int -voyager_smp_call_function_mask(cpumask_t cpumask, - void (*func) (void *info), void *info, int wait) +static void smp_call_function_single_interrupt(void) { - struct call_data_struct data; - u32 mask = cpus_addr(cpumask)[0]; - - mask &= ~(1 << smp_processor_id()); - - if (!mask) - return 0; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - data.started = mask; - data.wait = wait; - if (wait) - data.finished = mask; - - spin_lock(&call_lock); - call_data = &data; - wmb(); - /* Send a message to all other CPUs and wait for them to respond */ - send_CPI(mask, VIC_CALL_FUNCTION_CPI); - - /* Wait for response */ - while (data.started) - barrier(); - - if (wait) - while (data.finished) - barrier(); - - spin_unlock(&call_lock); - - return 0; + irq_enter(); + generic_smp_call_function_single_interrupt(); + __get_cpu_var(irq_stat).irq_call_count++; + irq_exit(); } /* Sorry about the name. 
In an APIC based system, the APICs @@ -1099,6 +1029,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs) smp_call_function_interrupt(); } +void smp_qic_call_function_single_interrupt(struct pt_regs *regs) +{ + ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI); + smp_call_function_single_interrupt(); +} + void smp_vic_cpi_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -1119,6 +1055,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs) smp_enable_irq_interrupt(); if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) smp_call_function_interrupt(); + if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu])) + smp_call_function_single_interrupt(); set_irq_regs(old_regs); } @@ -1862,5 +1800,7 @@ struct smp_ops smp_ops = { .smp_send_stop = voyager_smp_send_stop, .smp_send_reschedule = voyager_smp_send_reschedule, - .smp_call_function_mask = voyager_smp_call_function_mask, + + .send_call_func_ipi = native_send_call_func_ipi, + .send_call_func_single_ipi = native_send_call_func_single_ipi, }; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index f09c1c69c37a..8e317782fe37 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1108,7 +1108,9 @@ static const struct smp_ops xen_smp_ops __initdata = { .smp_send_stop = xen_smp_send_stop, .smp_send_reschedule = xen_smp_send_reschedule, - .smp_call_function_mask = xen_smp_call_function_mask, + + .send_call_func_ipi = xen_smp_send_call_function_ipi, + .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi, }; #endif /* CONFIG_SMP */ diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index df40bf74ea75..5c01590380bc 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -558,7 +558,7 @@ static void drop_mm_ref(struct mm_struct *mm) } if (!cpus_empty(mask)) - xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); + smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); } #else static void drop_mm_ref(struct mm_struct *mm) diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 94e69000f982..b3786e749b8e 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -36,27 +36,14 @@ #include "mmu.h" static cpumask_t xen_cpu_initialized_map; -static DEFINE_PER_CPU(int, resched_irq) = -1; -static DEFINE_PER_CPU(int, callfunc_irq) = -1; -static DEFINE_PER_CPU(int, debug_irq) = -1; - -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - */ -static DEFINE_SPINLOCK(call_lock); -struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -}; +static DEFINE_PER_CPU(int, resched_irq); +static DEFINE_PER_CPU(int, callfunc_irq); +static DEFINE_PER_CPU(int, callfuncsingle_irq); +static DEFINE_PER_CPU(int, debug_irq) = -1; static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); - -static struct call_data_struct *call_data; +static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); /* * Reschedule call back. 
Nothing to do, @@ -122,6 +109,17 @@ static int xen_smp_intr_init(unsigned int cpu) goto fail; per_cpu(debug_irq, cpu) = rc; + callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, + cpu, + xen_call_function_single_interrupt, + IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, + callfunc_name, + NULL); + if (rc < 0) + goto fail; + per_cpu(callfuncsingle_irq, cpu) = rc; + return 0; fail: @@ -131,6 +129,9 @@ static int xen_smp_intr_init(unsigned int cpu) unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); if (per_cpu(debug_irq, cpu) >= 0) unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); + if (per_cpu(callfuncsingle_irq, cpu) >= 0) + unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); + return rc; } @@ -338,7 +339,6 @@ void xen_smp_send_reschedule(int cpu) xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); } - static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) { unsigned cpu; @@ -349,83 +349,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) xen_send_IPI_one(cpu, vector); } +void xen_smp_send_call_function_ipi(cpumask_t mask) +{ + int cpu; + + xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); + + /* Make sure other vcpus get a chance to run if they need to. */ + for_each_cpu_mask(cpu, mask) { + if (xen_vcpu_stolen(cpu)) { + HYPERVISOR_sched_op(SCHEDOP_yield, 0); + break; + } + } +} + +void xen_smp_send_call_function_single_ipi(int cpu) +{ + xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); +} + static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ irq_enter(); - (*func)(info); + generic_smp_call_function_interrupt(); __get_cpu_var(irq_stat).irq_call_count++; irq_exit(); - if (wait) { - mb(); /* commit everything before setting finished */ - atomic_inc(&call_data->finished); - } - return IRQ_HANDLED; } -int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait) +static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) { - struct call_data_struct data; - int cpus, cpu; - bool yield; - - /* Holding any lock stops cpus from going down. */ - spin_lock(&call_lock); - - cpu_clear(smp_processor_id(), mask); - - cpus = cpus_weight(mask); - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); /* write everything before IPI */ - - /* Send a message to other CPUs and wait for them to respond */ - xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); - - /* Make sure other vcpus get a chance to run if they need to. 
*/ - yield = false; - for_each_cpu_mask(cpu, mask) - if (xen_vcpu_stolen(cpu)) - yield = true; - - if (yield) - HYPERVISOR_sched_op(SCHEDOP_yield, 0); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus || - (wait && atomic_read(&data.finished) != cpus)) - cpu_relax(); - - spin_unlock(&call_lock); + irq_enter(); + generic_smp_call_function_single_interrupt(); + __get_cpu_var(irq_stat).irq_call_count++; + irq_exit(); - return 0; + return IRQ_HANDLED; } diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index f1063ae08037..a636ab5e1341 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -46,13 +46,8 @@ void xen_smp_cpus_done(unsigned int max_cpus); void xen_smp_send_stop(void); void xen_smp_send_reschedule(int cpu); -int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic, - int wait); -int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait); - -int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait); +void xen_smp_send_call_function_ipi(cpumask_t mask); +void xen_smp_send_call_function_single_ipi(int cpu); /* Declare an asm function, along with symbols needed to make it diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h index ea88054e03f3..a87b1320c78f 100644 --- a/include/asm-x86/hw_irq_32.h +++ b/include/asm-x86/hw_irq_32.h @@ -32,6 +32,7 @@ extern void (*const interrupt[NR_IRQS])(void); void reschedule_interrupt(void); void invalidate_interrupt(void); void call_function_interrupt(void); +void call_function_single_interrupt(void); #endif #ifdef CONFIG_X86_LOCAL_APIC diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h index 0062ef390f67..fe657812d4df 100644 --- a/include/asm-x86/hw_irq_64.h +++ b/include/asm-x86/hw_irq_64.h @@ -68,6 +68,7 @@ #define ERROR_APIC_VECTOR 0xfe #define RESCHEDULE_VECTOR 0xfd #define CALL_FUNCTION_VECTOR 0xfc +#define CALL_FUNCTION_SINGLE_VECTOR 0xfb /* fb free - please don't readd KDB here because it's useless (hint - think what a NMI bit does to a vector) */ #define THERMAL_APIC_VECTOR 0xfa @@ -102,6 +103,7 @@ void spurious_interrupt(void); void error_interrupt(void); void reschedule_interrupt(void); void call_function_interrupt(void); +void call_function_single_interrupt(void); void irq_move_cleanup_interrupt(void); void invalidate_interrupt0(void); void invalidate_interrupt1(void); diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h index bc861469bdba..9283b60a1dd2 100644 --- a/include/asm-x86/mach-default/entry_arch.h +++ b/include/asm-x86/mach-default/entry_arch.h @@ -13,6 +13,7 @@ BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) +BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) #endif /* diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h index 881c63ca61ad..ed7d4955c653 100644 --- a/include/asm-x86/mach-default/irq_vectors.h +++ b/include/asm-x86/mach-default/irq_vectors.h @@ -48,6 +48,7 @@ #define INVALIDATE_TLB_VECTOR 0xfd #define RESCHEDULE_VECTOR 0xfc #define CALL_FUNCTION_VECTOR 0xfb +#define CALL_FUNCTION_SINGLE_VECTOR 0xfa #define THERMAL_APIC_VECTOR 0xf0 /* diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h index 4a1e1e8c10b6..ae52624b5937 100644 --- 
a/include/asm-x86/mach-voyager/entry_arch.h +++ b/include/asm-x86/mach-voyager/entry_arch.h @@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI); BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); - +BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI); diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h index 165421f5821c..fda57ad37b5d 100644 --- a/include/asm-x86/mach-voyager/irq_vectors.h +++ b/include/asm-x86/mach-voyager/irq_vectors.h @@ -33,6 +33,7 @@ #define VIC_RESCHEDULE_CPI 4 #define VIC_ENABLE_IRQ_CPI 5 #define VIC_CALL_FUNCTION_CPI 6 +#define VIC_CALL_FUNCTION_SINGLE_CPI 7 /* Now the QIC CPIs: Since we don't need the two initial levels, * these are 2 less than the VIC CPIs */ @@ -42,9 +43,10 @@ #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) +#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET) #define VIC_START_FAKE_CPI VIC_TIMER_CPI -#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI +#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI /* this is the SYS_INT CPI. */ #define VIC_SYS_INT 8 diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 1ebaa5cd3112..e3c24807b59b 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -59,9 +59,9 @@ struct smp_ops { void (*smp_send_stop)(void); void (*smp_send_reschedule)(int cpu); - int (*smp_call_function_mask)(cpumask_t mask, - void (*func)(void *info), void *info, - int wait); + + void (*send_call_func_ipi)(cpumask_t mask); + void (*send_call_func_single_ipi)(int cpu); }; /* Globals due to paravirt */ @@ -103,17 +103,22 @@ static inline void smp_send_reschedule(int cpu) smp_ops.smp_send_reschedule(cpu); } -static inline int smp_call_function_mask(cpumask_t mask, - void (*func) (void *info), void *info, - int wait) +static inline void arch_send_call_function_single_ipi(int cpu) +{ + smp_ops.send_call_func_single_ipi(cpu); +} + +static inline void arch_send_call_function_ipi(cpumask_t mask) { - return smp_ops.smp_call_function_mask(mask, func, info, wait); + smp_ops.send_call_func_ipi(mask); } void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); +void native_send_call_func_ipi(cpumask_t mask); +void native_send_call_func_single_ipi(int cpu); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); @@ -202,7 +207,5 @@ extern void cpu_uninit(void); #endif extern void smp_alloc_memory(void); -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h index 596312a7bfc9..f8d57ea1f05f 100644 --- a/include/asm-x86/xen/events.h +++ b/include/asm-x86/xen/events.h @@ -4,6 +4,7 @@ enum ipi_vector { XEN_RESCHEDULE_VECTOR, XEN_CALL_FUNCTION_VECTOR, + XEN_CALL_FUNCTION_SINGLE_VECTOR, XEN_NR_IPIS, }; -- cgit v1.2.3 From b7d7a2404f80386307ccc0cde63d8d2a5e3bc85c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 26 Jun 2008 11:22:13 +0200 Subject: powerpc: convert to generic helpers for IPI function calls This converts ppc to use the new 
helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). ppc loses the timeout functionality of smp_call_function_mask() with this change, as the generic code does not provide that. Acked-by: Paul Mackerras Signed-off-by: Jens Axboe --- arch/powerpc/Kconfig | 1 + arch/powerpc/kernel/smp.c | 234 +++----------------------------- arch/powerpc/platforms/cell/interrupt.c | 1 + arch/powerpc/platforms/ps3/smp.c | 7 +- arch/powerpc/platforms/pseries/xics.c | 6 +- arch/powerpc/sysdev/mpic.c | 2 +- include/asm-powerpc/smp.h | 8 +- 7 files changed, 33 insertions(+), 226 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3934e2659407..852d40c29637 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -110,6 +110,7 @@ config PPC select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_LMB + select USE_GENERIC_SMP_HELPERS if SMP config EARLY_PRINTK bool diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 1457aa0a08f1..37a5ab410dcc 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops; static volatile unsigned int cpu_callin_map[NR_CPUS]; -void smp_call_function_interrupt(void); - int smt_enabled_at_boot = 1; -static int ipi_fail_ok; - static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; #ifdef CONFIG_PPC64 @@ -99,12 +95,15 @@ void smp_message_recv(int msg) { switch(msg) { case PPC_MSG_CALL_FUNCTION: - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); break; case PPC_MSG_RESCHEDULE: /* XXX Do we have to do this? */ set_need_resched(); break; + case PPC_MSG_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; case PPC_MSG_DEBUGGER_BREAK: if (crash_ipi_function_ptr) { crash_ipi_function_ptr(get_irq_regs()); @@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu) smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); } +void arch_send_call_function_single_ipi(int cpu) +{ + smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); +} + +void arch_send_call_function_ipi(cpumask_t mask) +{ + unsigned int cpu; + + for_each_cpu_mask(cpu, mask) + smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); +} + #ifdef CONFIG_DEBUGGER void smp_send_debugger_break(int cpu) { @@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy) ; } -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - * Stolen from the i386 version. - */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); - -static struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -} *call_data; - -/* delay of at least 8 seconds */ -#define SMP_CALL_TIMEOUT 8 - -/* - * These functions send a 'generic call function' IPI to other online - * CPUS in the system. - * - * [SUMMARY] Run a function on other CPUs. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * currently unused. - * If true, wait (atomically) until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. Does not return until - * remote CPUs are nearly ready to execute <> or are or have executed. - * is a cpu map of the cpus to send IPI to. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. 
- */ -static int __smp_call_function_map(void (*func) (void *info), void *info, - int nonatomic, int wait, cpumask_t map) -{ - struct call_data_struct data; - int ret = -1, num_cpus; - int cpu; - u64 timeout; - - if (unlikely(smp_ops == NULL)) - return ret; - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - /* remove 'self' from the map */ - if (cpu_isset(smp_processor_id(), map)) - cpu_clear(smp_processor_id(), map); - - /* sanity check the map, remove any non-online processors. */ - cpus_and(map, map, cpu_online_map); - - num_cpus = cpus_weight(map); - if (!num_cpus) - goto done; - - call_data = &data; - smp_wmb(); - /* Send a message to all CPUs in the map */ - for_each_cpu_mask(cpu, map) - smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); - - timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec; - - /* Wait for indication that they have received the message */ - while (atomic_read(&data.started) != num_cpus) { - HMT_low(); - if (get_tb() >= timeout) { - printk("smp_call_function on cpu %d: other cpus not " - "responding (%d)\n", smp_processor_id(), - atomic_read(&data.started)); - if (!ipi_fail_ok) - debugger(NULL); - goto out; - } - } - - /* optionally wait for the CPUs to complete */ - if (wait) { - while (atomic_read(&data.finished) != num_cpus) { - HMT_low(); - if (get_tb() >= timeout) { - printk("smp_call_function on cpu %d: other " - "cpus not finishing (%d/%d)\n", - smp_processor_id(), - atomic_read(&data.finished), - atomic_read(&data.started)); - debugger(NULL); - goto out; - } - } - } - - done: - ret = 0; - - out: - call_data = NULL; - HMT_medium(); - return ret; -} - -static int __smp_call_function(void (*func)(void *info), void *info, - int nonatomic, int wait) -{ - int ret; - spin_lock(&call_lock); - ret =__smp_call_function_map(func, info, nonatomic, wait, - cpu_online_map); - spin_unlock(&call_lock); - return ret; -} - -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - return __smp_call_function(func, info, nonatomic, wait); -} -EXPORT_SYMBOL(smp_call_function); - -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - cpumask_t map = CPU_MASK_NONE; - int ret = 0; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - if (!cpu_online(cpu)) - return -EINVAL; - - cpu_set(cpu, map); - if (cpu != get_cpu()) { - spin_lock(&call_lock); - ret = __smp_call_function_map(func, info, nonatomic, wait, map); - spin_unlock(&call_lock); - } else { - local_irq_disable(); - func(info); - local_irq_enable(); - } - put_cpu(); - return ret; -} -EXPORT_SYMBOL(smp_call_function_single); - void smp_send_stop(void) { - int nolock; - - /* It's OK to fail sending the IPI, since the alternative is to - * be stuck forever waiting on the other CPU to take the interrupt. - * - * It's better to at least continue and go through reboot, since this - * function is usually called at panic or reboot time in the first - * place. 
- */ - ipi_fail_ok = 1; - - /* Don't deadlock in case we got called through panic */ - nolock = !spin_trylock(&call_lock); - __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map); - if (!nolock) - spin_unlock(&call_lock); -} - -void smp_call_function_interrupt(void) -{ - void (*func) (void *info); - void *info; - int wait; - - /* call_data will be NULL if the sender timed out while - * waiting on us to receive the call. - */ - if (!call_data) - return; - - func = call_data->func; - info = call_data->info; - wait = call_data->wait; - - if (!wait) - smp_mb__before_atomic_inc(); - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ - (*func)(info); - if (wait) { - smp_mb__before_atomic_inc(); - atomic_inc(&call_data->finished); - } + smp_call_function(stop_this_cpu, NULL, 0, 0); } extern struct gettimeofday_struct do_gtod; @@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused) secondary_cpu_time_init(); - spin_lock(&call_lock); + ipi_call_lock(); cpu_set(cpu, cpu_online_map); - spin_unlock(&call_lock); + ipi_call_unlock(); local_irq_enable(); diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 5bf7df146022..2d5bb22d6c09 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -218,6 +218,7 @@ void iic_request_IPIs(void) { iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); + iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); #ifdef CONFIG_DEBUGGER iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); #endif /* CONFIG_DEBUGGER */ diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index f0b12f212363..a0927a3bacb7 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c @@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu) * to index needs to be setup. 
*/ - BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); - BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); - BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); + BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); + BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); + BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2); + BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); for (i = 0; i < MSG_COUNT; i++) { result = ps3_event_receive_port_setup(cpu, &virqs[i]); diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index ebebc28fe895..0fc830f576f5 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -383,13 +383,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu) mb(); smp_message_recv(PPC_MSG_RESCHEDULE); } -#if 0 - if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, + if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, &xics_ipi_message[cpu].value)) { mb(); - smp_message_recv(PPC_MSG_MIGRATE_TASK); + smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); } -#endif #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, &xics_ipi_message[cpu].value)) { diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 7680001676a6..6c90c95b454e 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -1494,7 +1494,7 @@ void mpic_request_ipis(void) static char *ipi_names[] = { "IPI0 (call function)", "IPI1 (reschedule)", - "IPI2 (unused)", + "IPI2 (call function single)", "IPI3 (debugger break)", }; BUG_ON(mpic == NULL); diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h index 505f35bacaa9..c663a1fa77c5 100644 --- a/include/asm-powerpc/smp.h +++ b/include/asm-powerpc/smp.h @@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); * in /proc/interrupts will be wrong!!! --Troy */ #define PPC_MSG_CALL_FUNCTION 0 #define PPC_MSG_RESCHEDULE 1 -/* This is unused now */ -#if 0 -#define PPC_MSG_MIGRATE_TASK 2 -#endif +#define PPC_MSG_CALL_FUNC_SINGLE 2 #define PPC_MSG_DEBUGGER_BREAK 3 void smp_init_iSeries(void); @@ -117,6 +114,9 @@ extern void smp_generic_take_timebase(void); extern struct smp_ops_t *smp_ops; +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From f27b433ef32a77c8cb76f018507453df7c03e552 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 26 Jun 2008 11:22:30 +0200 Subject: ia64: convert to generic helpers for IPI function calls This converts ia64 to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). Cc: Tony Luck Signed-off-by: Jens Axboe --- arch/ia64/Kconfig | 1 + arch/ia64/kernel/smp.c | 250 +++------------------------------------------ arch/ia64/kernel/smpboot.c | 4 +- include/asm-ia64/smp.h | 8 +- 4 files changed, 19 insertions(+), 244 deletions(-) diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 16be41446b5b..18bcc10903b4 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING config SMP bool "Symmetric multi-processing support" + select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, say N. 
If you have a system with more diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 983296f1c813..19152dcbf6e4 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -60,25 +60,9 @@ static struct local_tlb_flush_counts { static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; - -/* - * Structure and data for smp_call_function(). This is designed to minimise static memory - * requirements. It also looks cleaner. - */ -static __cacheline_aligned DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - long wait; - atomic_t started; - atomic_t finished; -}; - -static volatile struct call_data_struct *call_data; - #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 +#define IPI_CALL_FUNC_SINGLE 2 #define IPI_KDUMP_CPU_STOP 3 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ @@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation); extern void cpu_halt (void); -void -lock_ipi_calllock(void) -{ - spin_lock_irq(&call_lock); -} - -void -unlock_ipi_calllock(void) -{ - spin_unlock_irq(&call_lock); -} - -static inline void -handle_call_data(void) -{ - struct call_data_struct *data; - void (*func)(void *info); - void *info; - int wait; - - /* release the 'pointer lock' */ - data = (struct call_data_struct *)call_data; - func = data->func; - info = data->info; - wait = data->wait; - - mb(); - atomic_inc(&data->started); - /* At this point the structure may be gone unless wait is true. */ - (*func)(info); - - /* Notify the sending CPU that the task is done. */ - mb(); - if (wait) - atomic_inc(&data->finished); -} - static void stop_this_cpu(void) { @@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id) ops &= ~(1 << which); switch (which) { - case IPI_CALL_FUNC: - handle_call_data(); - break; - case IPI_CPU_STOP: stop_this_cpu(); break; + case IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; + case IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; #ifdef CONFIG_KEXEC case IPI_KDUMP_CPU_STOP: unw_init_running(kdump_cpu_freeze, NULL); @@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id) return IRQ_HANDLED; } + + /* * Called with preemption disabled. */ @@ -360,190 +311,15 @@ smp_flush_tlb_mm (struct mm_struct *mm) on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1); } -/* - * Run a function on a specific CPU - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * Currently unused. - * If true, wait until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. - * - * Does not return until the remote CPU is nearly ready to execute - * or is or has executed. 
- */ - -int -smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - struct call_data_struct data; - int cpus = 1; - int me = get_cpu(); /* prevent preemption and reschedule on another processor */ - - if (cpuid == me) { - local_irq_disable(); - func(info); - local_irq_enable(); - put_cpu(); - return 0; - } - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - spin_lock_bh(&call_lock); - - call_data = &data; - mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ - send_IPI_single(cpuid, IPI_CALL_FUNC); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - call_data = NULL; - - spin_unlock_bh(&call_lock); - put_cpu(); - return 0; -} -EXPORT_SYMBOL(smp_call_function_single); - -/** - * smp_call_function_mask(): Run a function on a set of other CPUs. - * The set of cpus to run on. Must not include the current cpu. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * If true, wait (atomically) until function - * has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) +void arch_send_call_function_single_ipi(int cpu) { - struct call_data_struct data; - cpumask_t allbutself; - int cpus; - - spin_lock(&call_lock); - allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); - - cpus_and(mask, mask, allbutself); - cpus = cpus_weight(mask); - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/ - - /* Send a message to other CPUs */ - if (cpus_equal(mask, allbutself)) - send_IPI_allbutself(IPI_CALL_FUNC); - else - send_IPI_mask(mask, IPI_CALL_FUNC); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - call_data = NULL; - - spin_unlock(&call_lock); - return 0; - + send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); } -EXPORT_SYMBOL(smp_call_function_mask); -/* - * this function sends a 'generic call function' IPI to all other CPUs - * in the system. - */ - -/* - * [SUMMARY] Run a function on all other CPUs. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * currently unused. - * If true, wait (atomically) until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. - * - * Does not return until remote CPUs are nearly ready to execute or are or have - * executed. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. 
- */ -int -smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) +void arch_send_call_function_ipi(cpumask_t mask) { - struct call_data_struct data; - int cpus; - - spin_lock(&call_lock); - cpus = num_online_cpus() - 1; - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ - send_IPI_allbutself(IPI_CALL_FUNC); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - call_data = NULL; - - spin_unlock(&call_lock); - return 0; + send_IPI_mask(mask, IPI_CALL_FUNC); } -EXPORT_SYMBOL(smp_call_function); /* * this function calls the 'stop' function on all other CPUs in the system. diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index d7ad42b77d41..eaa1b6795a13 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -395,14 +395,14 @@ smp_callin (void) fix_b0_for_bsp(); - lock_ipi_calllock(); + ipi_call_lock_irq(); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ __setup_vector_irq(cpuid); cpu_set(cpuid, cpu_online_map); per_cpu(cpu_state, cpuid) = CPU_ONLINE; spin_unlock(&vector_lock); - unlock_ipi_calllock(); + ipi_call_unlock_irq(); smp_setup_percpu_timer(); diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index ec5f355fb7e3..27731e032ee9 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -38,9 +38,6 @@ ia64_get_lid (void) return lid.f.id << 8 | lid.f.eid; } -extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait); - #define hard_smp_processor_id() ia64_get_lid() #ifdef CONFIG_SMP @@ -124,11 +121,12 @@ extern void __init init_smp_config (void); extern void smp_do_timer (struct pt_regs *regs); extern void smp_send_reschedule (int cpu); -extern void lock_ipi_calllock(void); -extern void unlock_ipi_calllock(void); extern void identify_siblings (struct cpuinfo_ia64 *); extern int is_multithreading_enabled(void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + #else /* CONFIG_SMP */ #define cpu_logical_id(i) 0 -- cgit v1.2.3 From c524a1d8914408fd57241d9542fa2d402f004a33 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 10 Jun 2008 20:47:29 +0200 Subject: alpha: convert to generic helpers for IPI function calls This converts alpha to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). 
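As with the other conversions in this series, the per-architecture work boils down to providing two small send hooks and forwarding the IPI receive path into the generic code. The sketch below illustrates that pattern under stated assumptions: send_ipi_message(), handle_ipi_message(), and the IPI_CALL_FUNC* vector numbers are stand-ins for whatever primitive and vector space a given platform actually has, so treat it as a minimal sketch of the glue, not any one architecture's code.

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical platform primitive and vector numbers -- placeholders only */
enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
};
extern void send_ipi_message(cpumask_t mask, enum ipi_message_type op);

/* Invoked by kernel/smp.c when a cross-call targets a mask of CPUs */
void arch_send_call_function_ipi(cpumask_t mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

/* Invoked by kernel/smp.c for the single-CPU fast path */
void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

/* The arch IPI demultiplexer forwards both vectors to the generic handlers */
static void handle_ipi_message(enum ipi_message_type op)
{
        switch (op) {
        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;
        case IPI_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        default:
                break;
        }
}

Platforms that cannot spare a second vector can route both cases through one IPI and call both generic handlers from it, as the mips conversion later in this series does.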
Signed-off-by: Jens Axboe --- arch/alpha/Kconfig | 1 + arch/alpha/kernel/core_marvel.c | 6 +- arch/alpha/kernel/smp.c | 170 +++------------------------------------- include/asm-alpha/smp.h | 3 +- 4 files changed, 16 insertions(+), 164 deletions(-) diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 729cdbdf8036..dbe8c280fea9 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -528,6 +528,7 @@ config ARCH_MAY_HAVE_PC_FDC config SMP bool "Symmetric multi-processing support" depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL + select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index b04f1feb1dda..ced4aae8b804 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write) #ifdef CONFIG_SMP if (smp_processor_id() != boot_cpuid) - smp_call_function_on_cpu(__marvel_access_rtc, - &rtc_access, 1, 1, - cpumask_of_cpu(boot_cpuid)); + smp_call_function_single(boot_cpuid, + __marvel_access_rtc, + &rtc_access, 1, 1); else __marvel_access_rtc(&rtc_access); #else diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 2525692db0ab..95c905be9154 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -62,6 +62,7 @@ static struct { enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, + IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, }; @@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation) wripir(i); } -/* Structure and data for smp_call_function. This is designed to - minimize static memory requirements. Plus it looks cleaner. */ - -struct smp_call_struct { - void (*func) (void *info); - void *info; - long wait; - atomic_t unstarted_count; - atomic_t unfinished_count; -}; - -static struct smp_call_struct *smp_call_function_data; - -/* Atomicly drop data into a shared pointer. The pointer is free if - it is initially locked. If retry, spin until free. */ - -static int -pointer_lock (void *lock, void *data, int retry) -{ - void *old, *tmp; - - mb(); - again: - /* Compare and swap with zero. */ - asm volatile ( - "1: ldq_l %0,%1\n" - " mov %3,%2\n" - " bne %0,2f\n" - " stq_c %2,%1\n" - " beq %2,1b\n" - "2:" - : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp) - : "r"(data) - : "memory"); - - if (old == 0) - return 0; - if (! retry) - return -EBUSY; - - while (*(void **)lock) - barrier(); - goto again; -} - void handle_ipi(struct pt_regs *regs) { @@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs) break; case IPI_CALL_FUNC: - { - struct smp_call_struct *data; - void (*func)(void *info); - void *info; - int wait; - - data = smp_call_function_data; - func = data->func; - info = data->info; - wait = data->wait; - - /* Notify the sending CPU that the data has been - received, and execution is about to begin. */ - mb(); - atomic_dec (&data->unstarted_count); - - /* At this point the structure may be gone unless - wait is true. */ - (*func)(info); - - /* Notify the sending CPU that the task is done. 
*/ - mb(); - if (wait) atomic_dec (&data->unfinished_count); + generic_smp_call_function_interrupt(); + break; + + case IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); break; - } case IPI_CPU_STOP: halt(); @@ -700,102 +637,15 @@ smp_send_stop(void) send_ipi_message(to_whom, IPI_CPU_STOP); } -/* - * Run a function on all other CPUs. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * If true, keep retrying until ready. - * If true, wait until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. - * - * Does not return until remote CPUs are nearly ready to execute - * or are or have executed. - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ - -int -smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry, - int wait, cpumask_t to_whom) +void arch_send_call_function_ipi(cpumask_t mask) { - struct smp_call_struct data; - unsigned long timeout; - int num_cpus_to_call; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - data.wait = wait; - - cpu_clear(smp_processor_id(), to_whom); - num_cpus_to_call = cpus_weight(to_whom); - - atomic_set(&data.unstarted_count, num_cpus_to_call); - atomic_set(&data.unfinished_count, num_cpus_to_call); - - /* Acquire the smp_call_function_data mutex. */ - if (pointer_lock(&smp_call_function_data, &data, retry)) - return -EBUSY; - - /* Send a message to the requested CPUs. */ - send_ipi_message(to_whom, IPI_CALL_FUNC); - - /* Wait for a minimal response. */ - timeout = jiffies + HZ; - while (atomic_read (&data.unstarted_count) > 0 - && time_before (jiffies, timeout)) - barrier(); - - /* If there's no response yet, log a message but allow a longer - * timeout period -- if we get a response this time, log - * a message saying when we got it.. - */ - if (atomic_read(&data.unstarted_count) > 0) { - long start_time = jiffies; - printk(KERN_ERR "%s: initial timeout -- trying long wait\n", - __func__); - timeout = jiffies + 30 * HZ; - while (atomic_read(&data.unstarted_count) > 0 - && time_before(jiffies, timeout)) - barrier(); - if (atomic_read(&data.unstarted_count) <= 0) { - long delta = jiffies - start_time; - printk(KERN_ERR - "%s: response %ld.%ld seconds into long wait\n", - __func__, delta / HZ, - (100 * (delta - ((delta / HZ) * HZ))) / HZ); - } - } - - /* We either got one or timed out -- clear the lock. */ - mb(); - smp_call_function_data = NULL; - - /* - * If after both the initial and long timeout periods we still don't - * have a response, something is very wrong... - */ - BUG_ON(atomic_read (&data.unstarted_count) > 0); - - /* Wait for a complete response, if needed. 
*/ - if (wait) { - while (atomic_read (&data.unfinished_count) > 0) - barrier(); - } - - return 0; + send_ipi_message(mask, IPI_CALL_FUNC); } -EXPORT_SYMBOL(smp_call_function_on_cpu); -int -smp_call_function (void (*func) (void *info), void *info, int retry, int wait) +void arch_send_call_function_single_ipi(int cpu) { - return smp_call_function_on_cpu (func, info, retry, wait, - cpu_online_map); + send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); } -EXPORT_SYMBOL(smp_call_function); static void ipi_imb(void *ignored) diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h index 286e1d844f63..2f60a362d75e 100644 --- a/include/asm-alpha/smp.h +++ b/include/asm-alpha/smp.h @@ -47,7 +47,8 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS]; extern int smp_num_cpus; #define cpu_possible_map cpu_present_map -int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); #else /* CONFIG_SMP */ -- cgit v1.2.3 From f6dd9fa5a75a3dae16c6843e74e56bf75be51c7c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 10 Jun 2008 20:48:30 +0200 Subject: arm: convert to generic helpers for IPI function calls This converts arm to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). Fixups and testing done by Catalin Marinas Cc: Russell King Signed-off-by: Jens Axboe --- arch/arm/Kconfig | 1 + arch/arm/kernel/smp.c | 157 +++++--------------------------------------------- include/asm-arm/smp.h | 3 + 3 files changed, 19 insertions(+), 142 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b786e68914d4..c72dae633f60 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -650,6 +650,7 @@ source "kernel/time/Kconfig" config SMP bool "Symmetric Multi-Processing (EXPERIMENTAL)" depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP) + select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index eefae1de334c..6344466b2113 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -68,20 +68,10 @@ enum ipi_msg_type { IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC, + IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, }; -struct smp_call_struct { - void (*func)(void *info); - void *info; - int wait; - cpumask_t pending; - cpumask_t unfinished; -}; - -static struct smp_call_struct * volatile smp_call_function_data; -static DEFINE_SPINLOCK(smp_call_function_lock); - int __cpuinit __cpu_up(unsigned int cpu) { struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); @@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) local_irq_restore(flags); } -/* - * You must not call this function with disabled interrupts, from a - * hardware interrupt handler, nor from a bottom half handler. 
- */ -static int smp_call_function_on_cpu(void (*func)(void *info), void *info, - int retry, int wait, cpumask_t callmap) -{ - struct smp_call_struct data; - unsigned long timeout; - int ret = 0; - - data.func = func; - data.info = info; - data.wait = wait; - - cpu_clear(smp_processor_id(), callmap); - if (cpus_empty(callmap)) - goto out; - - data.pending = callmap; - if (wait) - data.unfinished = callmap; - - /* - * try to get the mutex on smp_call_function_data - */ - spin_lock(&smp_call_function_lock); - smp_call_function_data = &data; - - send_ipi_message(callmap, IPI_CALL_FUNC); - - timeout = jiffies + HZ; - while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) - barrier(); - - /* - * did we time out? - */ - if (!cpus_empty(data.pending)) { - /* - * this may be causing our panic - report it - */ - printk(KERN_CRIT - "CPU%u: smp_call_function timeout for %p(%p)\n" - " callmap %lx pending %lx, %swait\n", - smp_processor_id(), func, info, *cpus_addr(callmap), - *cpus_addr(data.pending), wait ? "" : "no "); - - /* - * TRACE - */ - timeout = jiffies + (5 * HZ); - while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) - barrier(); - - if (cpus_empty(data.pending)) - printk(KERN_CRIT " RESOLVED\n"); - else - printk(KERN_CRIT " STILL STUCK\n"); - } - - /* - * whatever happened, we're done with the data, so release it - */ - smp_call_function_data = NULL; - spin_unlock(&smp_call_function_lock); - - if (!cpus_empty(data.pending)) { - ret = -ETIMEDOUT; - goto out; - } - - if (wait) - while (!cpus_empty(data.unfinished)) - barrier(); - out: - - return 0; -} - -int smp_call_function(void (*func)(void *info), void *info, int retry, - int wait) +void arch_send_call_function_ipi(cpumask_t mask) { - return smp_call_function_on_cpu(func, info, retry, wait, - cpu_online_map); + send_ipi_message(mask, IPI_CALL_FUNC); } -EXPORT_SYMBOL_GPL(smp_call_function); -int smp_call_function_single(int cpu, void (*func)(void *info), void *info, - int retry, int wait) +void arch_send_call_function_single_ipi(int cpu) { - /* prevent preemption and reschedule on another processor */ - int current_cpu = get_cpu(); - int ret = 0; - - if (cpu == current_cpu) { - local_irq_disable(); - func(info); - local_irq_enable(); - } else - ret = smp_call_function_on_cpu(func, info, retry, wait, - cpumask_of_cpu(cpu)); - - put_cpu(); - - return ret; + send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); } -EXPORT_SYMBOL_GPL(smp_call_function_single); void show_ipi_list(struct seq_file *p) { @@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs) } #endif -/* - * ipi_call_function - handle IPI from smp_call_function() - * - * Note that we copy data out of the cross-call structure and then - * let the caller know that we're here and have done with their data - */ -static void ipi_call_function(unsigned int cpu) -{ - struct smp_call_struct *data = smp_call_function_data; - void (*func)(void *info) = data->func; - void *info = data->info; - int wait = data->wait; - - cpu_clear(cpu, data->pending); - - func(info); - - if (wait) - cpu_clear(cpu, data->unfinished); -} - static DEFINE_SPINLOCK(stop_lock); /* @@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs) break; case IPI_CALL_FUNC: - ipi_call_function(cpu); + generic_smp_call_function_interrupt(); + break; + + case IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); break; case IPI_CPU_STOP: @@ -662,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier) } static int 
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait, - cpumask_t mask) +on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask) { int ret = 0; preempt_disable(); - ret = smp_call_function_on_cpu(func, info, retry, wait, mask); + ret = smp_call_function_mask(mask, func, info, wait); if (cpu_isset(smp_processor_id(), mask)) func(info); @@ -738,7 +611,7 @@ void flush_tlb_mm(struct mm_struct *mm) { cpumask_t mask = mm->cpu_vm_mask; - on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask); + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) @@ -749,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) ta.ta_vma = vma; ta.ta_start = uaddr; - on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask); + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask); } void flush_tlb_kernel_page(unsigned long kaddr) @@ -771,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma, ta.ta_start = start; ta.ta_end = end; - on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask); + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h index af99636db400..7fffa2404b8e 100644 --- a/include/asm-arm/smp.h +++ b/include/asm-arm/smp.h @@ -101,6 +101,9 @@ extern void platform_cpu_die(unsigned int cpu); extern int platform_cpu_kill(unsigned int cpu); extern void platform_cpu_enable(unsigned int cpu); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + /* * Local timer interrupt handling function (can be IPI'ed). */ -- cgit v1.2.3 From 7b7426c8a615cf61df9a77b9df7d5b75d91e3fa0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 10 Jun 2008 20:49:30 +0200 Subject: m32r: convert to generic helpers for IPI function calls This converts m32r to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). Not tested, not even compiled. Cc: Hirokazu Takata Signed-off-by: Jens Axboe --- arch/m32r/Kconfig | 1 + arch/m32r/kernel/m32r_ksyms.c | 3 - arch/m32r/kernel/smp.c | 128 +++++------------------------------------- arch/m32r/kernel/traps.c | 3 +- include/asm-m32r/smp.h | 4 ++ 5 files changed, 20 insertions(+), 119 deletions(-) diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index de153de2ea9f..a5f864c445b2 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -296,6 +296,7 @@ config PREEMPT config SMP bool "Symmetric multi-processing support" + select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c index e6709fe950ba..16bcb189a383 100644 --- a/arch/m32r/kernel/m32r_ksyms.c +++ b/arch/m32r/kernel/m32r_ksyms.c @@ -43,9 +43,6 @@ EXPORT_SYMBOL(dcache_dummy); #endif EXPORT_SYMBOL(cpu_data); -/* Global SMP stuff */ -EXPORT_SYMBOL(smp_call_function); - /* TLB flushing */ EXPORT_SYMBOL(smp_flush_tlb_page); #endif diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index c837bc13b015..74eb7bcd5a40 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -34,22 +34,6 @@ /* Data structures and variables */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* - * Structure and data for smp_call_function(). 
This is designed to minimise - * static memory requirements. It also looks cleaner. - */ -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); - -static struct call_data_struct *call_data; - /* * For flush_cache_all() */ @@ -96,9 +80,6 @@ void smp_invalidate_interrupt(void); void smp_send_stop(void); static void stop_this_cpu(void *); -int smp_call_function(void (*) (void *), void *, int, int); -void smp_call_function_interrupt(void); - void smp_send_timer(void); void smp_ipi_timer_interrupt(struct pt_regs *); void smp_local_timer_interrupt(void); @@ -565,86 +546,14 @@ static void stop_this_cpu(void *dummy) for ( ; ; ); } -/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Call function Routines */ -/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ - -/*==========================================================================* - * Name: smp_call_function - * - * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs - * in the system. - * - * Born on Date: 2002.02.05 - * - * Arguments: *func - The function to run. This must be fast and - * non-blocking. - * *info - An arbitrary pointer to pass to the function. - * nonatomic - currently unused. - * wait - If true, wait (atomically) until function has - * completed on other CPUs. - * - * Returns: 0 on success, else a negative status code. Does not return - * until remote CPUs are nearly ready to execute <> or - * are or have executed. - * - * Cautions: You must not call this function with disabled interrupts or - * from a hardware interrupt handler, you may call it from a - * bottom half handler. - * - * Modification log: - * Date Who Description - * ---------- --- -------------------------------------------------------- - * - *==========================================================================*/ -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) +void arch_send_call_function_ipi(cpumask_t mask) { - struct call_data_struct data; - int cpus; - -#ifdef DEBUG_SMP - unsigned long flags; - __save_flags(flags); - if (!(flags & 0x0040)) /* Interrupt Disable NONONO */ - BUG(); -#endif /* DEBUG_SMP */ - - /* Holding any lock stops cpus from going down. 
*/ - spin_lock(&call_lock); - cpus = num_online_cpus() - 1; - - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); - - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_allbutself(CALL_FUNCTION_IPI, 0); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - barrier(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - barrier(); - spin_unlock(&call_lock); + send_IPI_mask(mask, CALL_FUNCTION_IPI, 0); +} - return 0; +void arch_send_call_function_single_ipi(int cpu) +{ + send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0); } /*==========================================================================* @@ -666,27 +575,16 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic, *==========================================================================*/ void smp_call_function_interrupt(void) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ irq_enter(); - (*func)(info); + generic_smp_call_function_interrupt(); irq_exit(); +} - if (wait) { - mb(); - atomic_inc(&call_data->finished); - } +void smp_call_function_single_interrupt(void) +{ + irq_enter(); + generic_smp_call_function_single_interrupt(); + irq_exit(); } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 89ba4a0b5d51..46159a4e644b 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -40,6 +40,7 @@ extern void smp_invalidate_interrupt(void); extern void smp_call_function_interrupt(void); extern void smp_ipi_timer_interrupt(void); extern void smp_flush_cache_all_interrupt(void); +extern void smp_call_function_single_interrupt(void); /* * for Boot AP function @@ -103,7 +104,7 @@ void set_eit_vector_entries(void) eit_vector[186] = (unsigned long)smp_call_function_interrupt; eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt; eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt; - eit_vector[189] = 0; + eit_vector[189] = (unsigned long)smp_call_function_single_interrupt; eit_vector[190] = 0; eit_vector[191] = 0; #endif diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h index 078e1a51a042..c5dd66916692 100644 --- a/include/asm-m32r/smp.h +++ b/include/asm-m32r/smp.h @@ -89,6 +89,9 @@ static __inline__ unsigned int num_booting_cpus(void) extern void smp_send_timer(void); extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + #endif /* not __ASSEMBLY__ */ #define NO_PROC_ID (0xff) /* No processor magic marker */ @@ -104,6 +107,7 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); #define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0) #define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0) #define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0) +#define CALL_FUNC_SINGLE_IPI (M32R_IRQ_IPI6-M32R_IRQ_IPI0) #define IPI_SHIFT (0) #define 
NR_IPIS (8) -- cgit v1.2.3 From 2f304c0a0a55072b80957580f1b66256a615d8da Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Jun 2008 10:45:23 +0200 Subject: mips: convert to generic helpers for IPI function calls This converts mips to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). Not tested, but it compiles. mips shares the same IPI for smp_call_function() and smp_call_function_single(), since not all mips platforms have enough available IPIs to support separate setups. Cc: Ralf Baechle Signed-off-by: Jens Axboe --- arch/mips/Kconfig | 1 + arch/mips/kernel/smp.c | 141 ++++-------------------------------------------- arch/mips/kernel/smtc.c | 1 - include/asm-mips/smp.h | 13 ++--- 4 files changed, 15 insertions(+), 141 deletions(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e5a7c5d96364..ea70d5a225ca 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1763,6 +1763,7 @@ config SMP bool "Multi-Processing support" depends on SYS_SUPPORTS_SMP select IRQ_PER_CPU + select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index cdf87a9dd4ba..c75b26cb61df 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void) cpu_idle(); } -DEFINE_SPINLOCK(smp_call_lock); - -struct call_data_struct *call_data; - -/* - * Run a function on all other CPUs. - * - * cpuset_t of all processors to run the function on. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * If true, keep retrying until ready. - * If true, wait until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. - * - * Does not return until remote CPUs are nearly ready to execute - * or are or have executed.
- * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler: - * - * CPU A CPU B - * Disable interrupts - * smp_call_function() - * Take call_lock - * Send IPIs - * Wait for all cpus to acknowledge IPI - * CPU A has not responded, spin waiting - * for cpu A to respond, holding call_lock - * smp_call_function() - * Spin waiting for call_lock - * Deadlock Deadlock - */ -int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), - void *info, int retry, int wait) +void arch_send_call_function_ipi(cpumask_t mask) { - struct call_data_struct data; - int cpu = smp_processor_id(); - int cpus; - - /* - * Can die spectacularly if this CPU isn't yet marked online - */ - BUG_ON(!cpu_online(cpu)); - - cpu_clear(cpu, mask); - cpus = cpus_weight(mask); - if (!cpus) - return 0; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - spin_lock(&smp_call_lock); - call_data = &data; - smp_mb(); - - /* Send a message to all other CPUs and wait for them to respond */ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); - - /* Wait for response */ - /* FIXME: lock-up detection, backtrace on lock-up */ - while (atomic_read(&data.started) != cpus) - barrier(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - barrier(); - call_data = NULL; - spin_unlock(&smp_call_lock); - - return 0; } -int smp_call_function(void (*func) (void *info), void *info, int retry, - int wait) +/* + * We reuse the same vector for the single IPI + */ +void arch_send_call_function_single_ipi(int cpu) { - return smp_call_function_mask(cpu_online_map, func, info, retry, wait); + mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION); } -EXPORT_SYMBOL(smp_call_function); +/* + * Call into both interrupt handlers, as we share the IPI for them + */ void smp_call_function_interrupt(void) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function. - */ - smp_mb(); - atomic_inc(&call_data->started); - - /* - * At this point the info structure may be out of scope unless wait==1. 
- */ irq_enter(); - (*func)(info); + generic_smp_call_function_single_interrupt(); + generic_smp_call_function_interrupt(); irq_exit(); - - if (wait) { - smp_mb(); - atomic_inc(&call_data->finished); - } -} - -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int retry, int wait) -{ - int ret, me; - - /* - * Can die spectacularly if this CPU isn't yet marked online - */ - if (!cpu_online(cpu)) - return 0; - - me = get_cpu(); - BUG_ON(!cpu_online(me)); - - if (cpu == me) { - local_irq_disable(); - func(info); - local_irq_enable(); - put_cpu(); - return 0; - } - - ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry, - wait); - - put_cpu(); - return 0; } -EXPORT_SYMBOL(smp_call_function_single); static void stop_this_cpu(void *dummy) { diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 3e863186cd22..a516286532ab 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void) /* Return from interrupt should be enough to cause scheduler check */ } - static void ipi_call_interrupt(void) { /* Invoke generic function invocation code in smp.c */ diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h index 84fef1aeec0c..0ff5b523ea77 100644 --- a/include/asm-mips/smp.h +++ b/include/asm-mips/smp.h @@ -35,16 +35,6 @@ extern int __cpu_logical_map[NR_CPUS]; #define NO_PROC_ID (-1) -struct call_data_struct { - void (*func)(void *); - void *info; - atomic_t started; - atomic_t finished; - int wait; -}; - -extern struct call_data_struct *call_data; - #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ #define SMP_CALL_FUNCTION 0x2 @@ -67,4 +57,7 @@ static inline void smp_send_reschedule(int cpu) extern asmlinkage void smp_call_function_interrupt(void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + #endif /* __ASM_SMP_H */ -- cgit v1.2.3 From dbcf4787d816a4694ec83b5fde1a947c3ce74d57 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 10 Jun 2008 20:50:56 +0200 Subject: parisc: convert to generic helpers for IPI function calls This converts parisc to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). Tested by Kyle, seems to work. Cc: Matthew Wilcox Cc: Grant Grundler Signed-off-by: Kyle McMartin Signed-off-by: Jens Axboe --- arch/parisc/Kconfig | 1 + arch/parisc/kernel/smp.c | 134 ++++++++--------------------------------------- include/asm-parisc/smp.h | 3 ++ 3 files changed, 25 insertions(+), 113 deletions(-) diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index bc7a19da6245..a7d4fd353c2b 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -199,6 +199,7 @@ endchoice config SMP bool "Symmetric multi-processing support" + select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. 
If diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 85fc7754ec25..126105c76a44 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map); DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; -struct smp_call_struct { - void (*func) (void *info); - void *info; - long wait; - atomic_t unstarted_count; - atomic_t unfinished_count; -}; -static volatile struct smp_call_struct *smp_call_function_data; - enum ipi_message_type { IPI_NOP=0, IPI_RESCHEDULE=1, IPI_CALL_FUNC, + IPI_CALL_FUNC_SINGLE, IPI_CPU_START, IPI_CPU_STOP, IPI_CPU_TEST @@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id) case IPI_CALL_FUNC: smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); - { - volatile struct smp_call_struct *data; - void (*func)(void *info); - void *info; - int wait; - - data = smp_call_function_data; - func = data->func; - info = data->info; - wait = data->wait; - - mb(); - atomic_dec ((atomic_t *)&data->unstarted_count); - - /* At this point, *data can't - * be relied upon. - */ - - (*func)(info); - - /* Notify the sending CPU that the - * task is done. - */ - mb(); - if (wait) - atomic_dec ((atomic_t *)&data->unfinished_count); - } + generic_smp_call_function_interrupt(); + break; + + case IPI_CALL_FUNC_SINGLE: + smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu); + generic_smp_call_function_single_interrupt(); break; case IPI_CPU_START: @@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op) spin_unlock_irqrestore(lock, flags); } +static void +send_IPI_mask(cpumask_t mask, enum ipi_message_type op) +{ + int cpu; + + for_each_cpu_mask(cpu, mask) + ipi_send(cpu, op); +} static inline void send_IPI_single(int dest_cpu, enum ipi_message_type op) @@ -295,86 +274,15 @@ smp_send_all_nop(void) send_IPI_allbutself(IPI_NOP); } - -/** - * Run a function on all other CPUs. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * If true, keep retrying until ready. - * If true, wait until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. - * - * Does not return until remote CPUs are nearly ready to execute - * or have executed. 
- */ - -int -smp_call_function (void (*func) (void *info), void *info, int retry, int wait) +void arch_send_call_function_ipi(cpumask_t mask) { - struct smp_call_struct data; - unsigned long timeout; - static DEFINE_SPINLOCK(lock); - int retries = 0; - - if (num_online_cpus() < 2) - return 0; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - /* can also deadlock if IPIs are disabled */ - WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0); - - - data.func = func; - data.info = info; - data.wait = wait; - atomic_set(&data.unstarted_count, num_online_cpus() - 1); - atomic_set(&data.unfinished_count, num_online_cpus() - 1); - - if (retry) { - spin_lock (&lock); - while (smp_call_function_data != 0) - barrier(); - } - else { - spin_lock (&lock); - if (smp_call_function_data) { - spin_unlock (&lock); - return -EBUSY; - } - } - - smp_call_function_data = &data; - spin_unlock (&lock); - - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_allbutself(IPI_CALL_FUNC); - - retry: - /* Wait for response */ - timeout = jiffies + HZ; - while ( (atomic_read (&data.unstarted_count) > 0) && - time_before (jiffies, timeout) ) - barrier (); - - if (atomic_read (&data.unstarted_count) > 0) { - printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n", - smp_processor_id(), ++retries); - goto retry; - } - /* We either got one or timed out. Release the lock */ - - mb(); - smp_call_function_data = NULL; - - while (wait && atomic_read (&data.unfinished_count) > 0) - barrier (); - - return 0; + send_IPI_mask(mask, IPI_CALL_FUNC); } -EXPORT_SYMBOL(smp_call_function); +void arch_send_call_function_single_ipi(int cpu) +{ + send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); +} /* * Flush all other CPU's tlb and then mine. Do this with on_each_cpu() diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h index 306f4950e32e..398cdbaf4e54 100644 --- a/include/asm-parisc/smp.h +++ b/include/asm-parisc/smp.h @@ -30,6 +30,9 @@ extern cpumask_t cpu_online_map; extern void smp_send_reschedule(int cpu); extern void smp_send_all_nop(void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + #endif /* !ASSEMBLY */ /* -- cgit v1.2.3 From 490f5de52a87063fcb40e3b22f61b0779603ff6d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 10 Jun 2008 20:52:59 +0200 Subject: sh: convert to generic helpers for IPI function calls This converts sh to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). Not tested, but it compiles. Acked-by: Paul Mundt Signed-off-by: Jens Axboe --- arch/sh/Kconfig | 1 + arch/sh/kernel/smp.c | 48 ++++++++---------------------------------------- include/asm-sh/smp.h | 14 ++++---------- 3 files changed, 13 insertions(+), 50 deletions(-) diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9a854c8e5274..3e7384f4619c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -688,6 +688,7 @@ config CRASH_DUMP config SMP bool "Symmetric multi-processing support" depends on SYS_SUPPORTS_SMP + select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. 
If diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 5d039d168f57..2ed8dceb297b 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map); cpumask_t cpu_online_map; EXPORT_SYMBOL(cpu_online_map); -static atomic_t cpus_booted = ATOMIC_INIT(0); - -/* - * Run specified function on a particular processor. - */ -void __smp_call_function(unsigned int cpu); - static inline void __init smp_store_cpu_info(unsigned int cpu) { struct sh_cpuinfo *c = cpu_data + cpu; @@ -178,42 +171,17 @@ void smp_send_stop(void) smp_call_function(stop_this_cpu, 0, 1, 0); } -struct smp_fn_call_struct smp_fn_call = { - .lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock), - .finished = ATOMIC_INIT(0), -}; - -/* - * The caller of this wants the passed function to run on every cpu. If wait - * is set, wait until all cpus have finished the function before returning. - * The lock is here to protect the call structure. - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int smp_call_function(void (*func)(void *info), void *info, int retry, int wait) +void arch_send_call_function_ipi(cpumask_t mask) { - unsigned int nr_cpus = atomic_read(&cpus_booted); - int i; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - spin_lock(&smp_fn_call.lock); - - atomic_set(&smp_fn_call.finished, 0); - smp_fn_call.fn = func; - smp_fn_call.data = info; - - for (i = 0; i < nr_cpus; i++) - if (i != smp_processor_id()) - plat_send_ipi(i, SMP_MSG_FUNCTION); - - if (wait) - while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1)); + int cpu; - spin_unlock(&smp_fn_call.lock); + for_each_cpu_mask(cpu, mask) + plat_send_ipi(cpu, SMP_MSG_FUNCTION); +} - return 0; +void arch_send_call_function_single_ipi(int cpu) +{ + plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); } /* Not really SMP stuff ... */ diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h index 9c8d34b07ebf..593343cd26ee 100644 --- a/include/asm-sh/smp.h +++ b/include/asm-sh/smp.h @@ -26,18 +26,10 @@ extern int __cpu_logical_map[NR_CPUS]; #define NO_PROC_ID (-1) -struct smp_fn_call_struct { - spinlock_t lock; - atomic_t finished; - void (*fn)(void *); - void *data; -}; - -extern struct smp_fn_call_struct smp_fn_call; - #define SMP_MSG_FUNCTION 0 #define SMP_MSG_RESCHEDULE 1 -#define SMP_MSG_NR 2 +#define SMP_MSG_FUNCTION_SINGLE 2 +#define SMP_MSG_NR 3 void plat_smp_setup(void); void plat_prepare_cpus(unsigned int max_cpus); @@ -46,6 +38,8 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point); void plat_send_ipi(unsigned int cpu, unsigned int message); int plat_register_ipi_handler(unsigned int message, void (*handler)(void *), void *arg); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); #else -- cgit v1.2.3 From 8691e5a8f691cc2a4fda0651e8d307aaba0e7d68 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 6 Jun 2008 11:18:06 +0200 Subject: smp_call_function: get rid of the unused nonatomic/retry argument It's never used and the comments refer to nonatomic and retry interchangeably. So get rid of it.
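For callers the change is mechanical: the unused third argument of smp_call_function() and the unused fourth argument of smp_call_function_single() drop out, leaving only the wait flag. A minimal before/after sketch, assuming a made-up caller (drain_local_stats() and drain_all_stats() are illustrative names, not taken from the diff below):

#include <linux/smp.h>

/* Hypothetical cross-call target; must be fast and non-blocking */
static void drain_local_stats(void *info)
{
}

/* Must be called with interrupts enabled, not from IRQ or bottom-half context */
static void drain_all_stats(void *info)
{
        /* old API: smp_call_function(func, info, nonatomic, wait) */
        /* smp_call_function(drain_local_stats, info, 0, 1); */

        /* new API: only the wait flag remains */
        smp_call_function(drain_local_stats, info, 1);

        /* the single-CPU variant loses its retry argument the same way */
        smp_call_function_single(0, drain_local_stats, info, 1);
}

This is why the diffstat below touches so many files across so many architectures: every call site simply drops one argument.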
Acked-by: Jeremy Fitzhardinge Signed-off-by: Jens Axboe --- arch/alpha/kernel/core_marvel.c | 2 +- arch/alpha/kernel/smp.c | 6 +++--- arch/alpha/oprofile/common.c | 6 +++--- arch/arm/oprofile/op_model_mpcore.c | 2 +- arch/arm/vfp/vfpmodule.c | 2 +- arch/cris/arch-v32/kernel/smp.c | 5 ++--- arch/ia64/kernel/mca.c | 2 +- arch/ia64/kernel/palinfo.c | 2 +- arch/ia64/kernel/perfmon.c | 2 +- arch/ia64/kernel/process.c | 2 +- arch/ia64/kernel/smpboot.c | 2 +- arch/ia64/kernel/uncached.c | 5 ++--- arch/ia64/sn/kernel/sn2/sn_hwperf.c | 2 +- arch/m32r/kernel/smp.c | 4 ++-- arch/mips/kernel/smp.c | 4 ++-- arch/mips/mm/c-r4k.c | 18 +++++++++--------- arch/mips/pmc-sierra/yosemite/prom.c | 2 +- arch/mips/sibyte/cfe/setup.c | 2 +- arch/mips/sibyte/sb1250/prom.c | 2 +- arch/powerpc/kernel/smp.c | 2 +- arch/s390/appldata/appldata_base.c | 4 ++-- arch/s390/kernel/smp.c | 16 ++++++---------- arch/s390/kernel/time.c | 4 ++-- arch/sh/kernel/smp.c | 10 +++++----- arch/sparc64/kernel/smp.c | 12 ++++-------- arch/um/kernel/smp.c | 3 +-- arch/x86/kernel/cpu/mtrr/main.c | 4 ++-- arch/x86/kernel/cpuid.c | 2 +- arch/x86/kernel/ldt.c | 2 +- arch/x86/kernel/nmi_32.c | 2 +- arch/x86/kernel/nmi_64.c | 2 +- arch/x86/kernel/smp.c | 2 +- arch/x86/kernel/vsyscall_64.c | 2 +- arch/x86/kvm/vmx.c | 2 +- arch/x86/kvm/x86.c | 2 +- arch/x86/lib/msr-on-cpu.c | 8 ++++---- arch/x86/mach-voyager/voyager_smp.c | 2 +- arch/x86/xen/smp.c | 2 +- drivers/acpi/processor_idle.c | 2 +- drivers/cpuidle/cpuidle.c | 2 +- include/asm-alpha/smp.h | 2 +- include/asm-sparc/smp.h | 2 +- include/linux/smp.h | 8 ++++---- kernel/smp.c | 6 ++---- kernel/softirq.c | 2 +- kernel/time/tick-broadcast.c | 2 +- net/core/flow.c | 2 +- net/iucv/iucv.c | 14 +++++++------- virt/kvm/kvm_main.c | 6 +++--- 49 files changed, 95 insertions(+), 108 deletions(-) diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index ced4aae8b804..04dcc5e5d4c1 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -662,7 +662,7 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write) if (smp_processor_id() != boot_cpuid) smp_call_function_single(boot_cpuid, __marvel_access_rtc, - &rtc_access, 1, 1); + &rtc_access, 1); else __marvel_access_rtc(&rtc_access); #else diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 95c905be9154..44114c8dbb2a 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -710,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm) } } - if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) { + if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) { printk(KERN_CRIT "flush_tlb_mm: timed out\n"); } @@ -763,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) data.mm = mm; data.addr = addr; - if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) { + if (smp_call_function(ipi_flush_tlb_page, &data, 1)) { printk(KERN_CRIT "flush_tlb_page: timed out\n"); } @@ -815,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page, } } - if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) { + if (smp_call_function(ipi_flush_icache_page, mm, 1)) { printk(KERN_CRIT "flush_icache_page: timed out\n"); } diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c index 9fc0eeb4f0ab..7c3d5ec6ec67 100644 --- a/arch/alpha/oprofile/common.c +++ b/arch/alpha/oprofile/common.c @@ -65,7 +65,7 @@ op_axp_setup(void) model->reg_setup(®, ctr, &sys); /* Configure the registers on all cpus. 
*/ - (void)smp_call_function(model->cpu_setup, ®, 0, 1); + (void)smp_call_function(model->cpu_setup, ®, 1); model->cpu_setup(®); return 0; } @@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy) static int op_axp_start(void) { - (void)smp_call_function(op_axp_cpu_start, NULL, 0, 1); + (void)smp_call_function(op_axp_cpu_start, NULL, 1); op_axp_cpu_start(NULL); return 0; } @@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy) static void op_axp_stop(void) { - (void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1); + (void)smp_call_function(op_axp_cpu_stop, NULL, 1); op_axp_cpu_stop(NULL); } diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c index 74fae6045650..4458705021e0 100644 --- a/arch/arm/oprofile/op_model_mpcore.c +++ b/arch/arm/oprofile/op_model_mpcore.c @@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void)) data.ret = 0; preempt_disable(); - smp_call_function(em_func, &data, 1, 1); + smp_call_function(em_func, &data, 1); em_func(&data); preempt_enable(); diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 32455c633f1c..c0d2c9bb952b 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -352,7 +352,7 @@ static int __init vfp_init(void) else if (vfpsid & FPSID_NODOUBLE) { printk("no double precision support\n"); } else { - smp_call_function(vfp_enable, NULL, 1, 1); + smp_call_function(vfp_enable, NULL, 1); VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ printk("implementor %02x architecture %d part %02x variant %x rev %x\n", diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index a9c3334e46c9..952a24b2f5a9 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -194,7 +194,7 @@ void stop_this_cpu(void* dummy) /* Other calls */ void smp_send_stop(void) { - smp_call_function(stop_this_cpu, NULL, 1, 0); + smp_call_function(stop_this_cpu, NULL, 0); } int setup_profiling_timer(unsigned int multiplier) @@ -316,8 +316,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask) * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. 
*/ -int smp_call_function(void (*func)(void *info), void *info, - int nonatomic, int wait) +int smp_call_function(void (*func)(void *info), void *info, int wait) { cpumask_t cpu_mask = CPU_MASK_ALL; struct call_data_struct data; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 705176b434b3..9cd818cc7008 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb, case CPU_ONLINE: case CPU_ONLINE_FROZEN: smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, - NULL, 1, 0); + NULL, 0); break; } return NOTIFY_OK; diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 9dc00f7fe10e..e5c57f413ca2 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) /* will send IPI to other CPU and wait for completion of remote call */ - if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) { + if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) { printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: " "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret); return 0; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 7714a97b0104..9baa48255c12 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) int ret; DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); - ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1); + ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1); DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); } #endif /* CONFIG_SMP */ diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index a3a34b4eb038..fabaf08d9a69 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -286,7 +286,7 @@ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ - smp_call_function(do_nothing, NULL, 0, 1); + smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index eaa1b6795a13..9d1d429c6c59 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master) go[MASTER] = 1; - if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) { + if (smp_call_function_single(master, sync_master, NULL, 0) < 0) { printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); return; } diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index e77995a6e3ed..8eff8c1d40a6 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { atomic_set(&uc_pool->status, 0); - status = smp_call_function(uncached_ipi_visibility, uc_pool, - 0, 1); + status = smp_call_function(uncached_ipi_visibility, uc_pool, 1); if (status || atomic_read(&uc_pool->status)) goto failed; } else if (status != PAL_VISIBILITY_OK) @@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) if (status != PAL_STATUS_SUCCESS) goto failed; atomic_set(&uc_pool->status, 0); - status = 
smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1); + status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1); if (status || atomic_read(&uc_pool->status)) goto failed; diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 8cc0c4753d89..636588e7e068 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) if (use_ipi) { /* use an interprocessor interrupt to call SAL */ smp_call_function_single(cpu, sn_hwperf_call_sal, - op_info, 1, 1); + op_info, 1); } else { /* migrate the task before calling SAL */ diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index 74eb7bcd5a40..7577f971ea4e 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -212,7 +212,7 @@ void smp_flush_tlb_all(void) local_irq_save(flags); __flush_tlb_all(); local_irq_restore(flags); - smp_call_function(flush_tlb_all_ipi, NULL, 1, 1); + smp_call_function(flush_tlb_all_ipi, NULL, 1); preempt_enable(); } @@ -505,7 +505,7 @@ void smp_invalidate_interrupt(void) *==========================================================================*/ void smp_send_stop(void) { - smp_call_function(stop_this_cpu, NULL, 1, 0); + smp_call_function(stop_this_cpu, NULL, 0); } /*==========================================================================* diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index c75b26cb61df..7a9ae830be86 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -167,7 +167,7 @@ static void stop_this_cpu(void *dummy) void smp_send_stop(void) { - smp_call_function(stop_this_cpu, NULL, 1, 0); + smp_call_function(stop_this_cpu, NULL, 0); } void __init smp_cpus_done(unsigned int max_cpus) @@ -266,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm) static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) { #ifndef CONFIG_MIPS_MT_SMTC - smp_call_function(func, info, 1, 1); + smp_call_function(func, info, 1); #endif } diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 27096751ddce..71df3390c07b 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -43,12 +43,12 @@ * primary cache. 
*/ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, - int retry, int wait) + int wait) { preempt_disable(); #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) - smp_call_function(func, info, retry, wait); + smp_call_function(func, info, wait); #endif func(info); preempt_enable(); @@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args) static void r4k___flush_cache_all(void) { - r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); + r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1); } static inline int has_valid_asid(const struct mm_struct *mm) @@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma, int exec = vma->vm_flags & VM_EXEC; if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) - r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); + r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1); } static inline void local_r4k_flush_cache_mm(void * args) @@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm) if (!cpu_has_dc_aliases) return; - r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); + r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1); } struct flush_cache_page_args { @@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma, args.addr = addr; args.pfn = pfn; - r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); + r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1); } static inline void local_r4k_flush_data_cache_page(void * addr) @@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr) local_r4k_flush_data_cache_page((void *)addr); else r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, - 1, 1); + 1); } struct flush_icache_range_args { @@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end) args.start = start; args.end = end; - r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); + r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1); instruction_hazard(); } @@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg) static void r4k_flush_cache_sigtramp(unsigned long addr) { - r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); + r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1); } static void r4k_flush_icache_all(void) diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c index 35dc435846a6..cf4c868715ac 100644 --- a/arch/mips/pmc-sierra/yosemite/prom.c +++ b/arch/mips/pmc-sierra/yosemite/prom.c @@ -64,7 +64,7 @@ static void prom_exit(void) #ifdef CONFIG_SMP if (smp_processor_id()) /* CPU 1 */ - smp_call_function(prom_cpu0_exit, NULL, 1, 1); + smp_call_function(prom_cpu0_exit, NULL, 1); #endif prom_cpu0_exit(NULL); } diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c index 33fce826f8bf..fd9604d5555a 100644 --- a/arch/mips/sibyte/cfe/setup.c +++ b/arch/mips/sibyte/cfe/setup.c @@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg) if (!reboot_smp) { /* Get CPU 0 to do the cfe_exit */ reboot_smp = 1; - smp_call_function(cfe_linux_exit, arg, 1, 0); + smp_call_function(cfe_linux_exit, arg, 0); } } else { printk("Passing control back to CFE...\n"); diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c index cf8f6b3de86c..65b1af66b674 100644 --- a/arch/mips/sibyte/sb1250/prom.c +++ b/arch/mips/sibyte/sb1250/prom.c @@ -66,7 +66,7 @@ static void prom_linux_exit(void) { #ifdef CONFIG_SMP if (smp_processor_id()) { - 
smp_call_function(prom_cpu0_exit, NULL, 1, 1); + smp_call_function(prom_cpu0_exit, NULL, 1); } #endif while(1); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 37a5ab410dcc..5191b46a611e 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -168,7 +168,7 @@ static void stop_this_cpu(void *dummy) void smp_send_stop(void) { - smp_call_function(stop_this_cpu, NULL, 0, 0); + smp_call_function(stop_this_cpu, NULL, 0); } extern struct gettimeofday_struct do_gtod; diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index ad40729bec3d..837a3b3e7759 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -209,7 +209,7 @@ __appldata_vtimer_setup(int cmd) per_cpu(appldata_timer, i).expires = per_cpu_interval; smp_call_function_single(i, add_virt_timer_periodic, &per_cpu(appldata_timer, i), - 0, 1); + 1); } appldata_timer_active = 1; P_INFO("Monitoring timer started.\n"); @@ -236,7 +236,7 @@ __appldata_vtimer_setup(int cmd) args.timer = &per_cpu(appldata_timer, i); args.expires = per_cpu_interval; smp_call_function_single(i, __appldata_mod_vtimer_wrap, - &args, 0, 1); + &args, 1); } } } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 5d4fa4b1c74c..276b105fb2a4 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -109,7 +109,7 @@ static void do_call_function(void) } static void __smp_call_function_map(void (*func) (void *info), void *info, - int nonatomic, int wait, cpumask_t map) + int wait, cpumask_t map) { struct call_data_struct data; int cpu, local = 0; @@ -162,7 +162,6 @@ out: * smp_call_function: * @func: the function to run; this must be fast and non-blocking * @info: an arbitrary pointer to pass to the function - * @nonatomic: unused * @wait: if true, wait (atomically) until function has completed on other CPUs * * Run a function on all other CPUs. @@ -170,15 +169,14 @@ out: * You must not call this function with disabled interrupts, from a * hardware interrupt handler or from a bottom half. */ -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) +int smp_call_function(void (*func) (void *info), void *info, int wait) { cpumask_t map; spin_lock(&call_lock); map = cpu_online_map; cpu_clear(smp_processor_id(), map); - __smp_call_function_map(func, info, nonatomic, wait, map); + __smp_call_function_map(func, info, wait, map); spin_unlock(&call_lock); return 0; } @@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function); * @cpu: the CPU where func should run * @func: the function to run; this must be fast and non-blocking * @info: an arbitrary pointer to pass to the function - * @nonatomic: unused * @wait: if true, wait (atomically) until function has completed on other CPUs * * Run a function on one processor. @@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function); * hardware interrupt handler or from a bottom half. 
*/ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) + int wait) { spin_lock(&call_lock); - __smp_call_function_map(func, info, nonatomic, wait, - cpumask_of_cpu(cpu)); + __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); spin_unlock(&call_lock); return 0; } @@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, { spin_lock(&call_lock); cpu_clear(smp_processor_id(), mask); - __smp_call_function_map(func, info, 0, wait, mask); + __smp_call_function_map(func, info, wait, mask); spin_unlock(&call_lock); return 0; } diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 7aec676fefd5..bf7bf2c2236a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -690,7 +690,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) */ memset(&etr_sync, 0, sizeof(etr_sync)); preempt_disable(); - smp_call_function(etr_sync_cpu_start, NULL, 0, 0); + smp_call_function(etr_sync_cpu_start, NULL, 0); local_irq_disable(); etr_enable_sync_clock(); @@ -729,7 +729,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) rc = -EAGAIN; } local_irq_enable(); - smp_call_function(etr_sync_cpu_end,NULL,0,0); + smp_call_function(etr_sync_cpu_end,NULL,0); preempt_enable(); return rc; } diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 2ed8dceb297b..71781ba2675b 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -168,7 +168,7 @@ static void stop_this_cpu(void *unused) void smp_send_stop(void) { - smp_call_function(stop_this_cpu, 0, 1, 0); + smp_call_function(stop_this_cpu, 0, 0); } void arch_send_call_function_ipi(cpumask_t mask) @@ -223,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm) preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { - smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1); + smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); } else { int i; for (i = 0; i < num_online_cpus(); i++) @@ -260,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma, fd.vma = vma; fd.addr1 = start; fd.addr2 = end; - smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1); + smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); } else { int i; for (i = 0; i < num_online_cpus(); i++) @@ -303,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) fd.vma = vma; fd.addr1 = page; - smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1); + smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); } else { int i; for (i = 0; i < num_online_cpus(); i++) @@ -327,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr) fd.addr1 = asid; fd.addr2 = vaddr; - smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1); + smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); local_flush_tlb_one(asid, vaddr); } diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index b82d017a1744..c099d96f1239 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -807,7 +807,6 @@ extern unsigned long xcall_call_function; * smp_call_function(): Run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. - * @nonatomic: currently unused. * @wait: If true, wait (atomically) until function has completed on other CPUs. * * Returns 0 on success, else a negative status code. 
Does not return until @@ -817,8 +816,7 @@ extern unsigned long xcall_call_function; * hardware interrupt handler or from a bottom half handler. */ static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, - int nonatomic, int wait, - cpumask_t mask) + int wait, cpumask_t mask) { struct call_data_struct data; int cpus; @@ -853,11 +851,9 @@ out_unlock: return 0; } -int smp_call_function(void (*func)(void *info), void *info, - int nonatomic, int wait) +int smp_call_function(void (*func)(void *info), void *info, int wait) { - return sparc64_smp_call_function_mask(func, info, nonatomic, wait, - cpu_online_map); + return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map); } void smp_call_function_client(int irq, struct pt_regs *regs) @@ -894,7 +890,7 @@ static void tsb_sync(void *info) void smp_tsb_sync(struct mm_struct *mm) { - sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); + sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask); } extern unsigned long xcall_flush_tlb_mm; diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index e1062ec36d40..be2d50c3aa95 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c @@ -214,8 +214,7 @@ void smp_call_function_slave(int cpu) atomic_inc(&scf_finished); } -int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, - int wait) +int smp_call_function(void (*_func)(void *info), void *_info, int wait) { int cpus = num_online_cpus() - 1; int i; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 6a1e278d9323..290652cefddb 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -222,7 +222,7 @@ static void set_mtrr(unsigned int reg, unsigned long base, atomic_set(&data.gate,0); /* Start the ball rolling on other CPUs */ - if (smp_call_function(ipi_handler, &data, 1, 0) != 0) + if (smp_call_function(ipi_handler, &data, 0) != 0) panic("mtrr: timed out waiting for other CPUs\n"); local_irq_save(flags); @@ -822,7 +822,7 @@ void mtrr_ap_init(void) */ void mtrr_save_state(void) { - smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); + smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); } static int __init mtrr_init_finialize(void) diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index daff52a62248..336dd43c9158 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -95,7 +95,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, for (; count; count -= 16) { cmd.eax = pos; cmd.ecx = pos >> 32; - smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); + smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); if (copy_to_user(tmp, &cmd, 16)) return -EFAULT; tmp += 16; diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 0224c3637c73..cb0a6398c64b 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) load_LDT(pc); mask = cpumask_of_cpu(smp_processor_id()); if (!cpus_equal(current->mm->cpu_vm_mask, mask)) - smp_call_function(flush_ldt, NULL, 1, 1); + smp_call_function(flush_ldt, NULL, 1); preempt_enable(); #else load_LDT(pc); diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index 84160f74eeb0..5562dab0bd20 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c @@ -87,7 +87,7 @@ int __init check_nmi_watchdog(void) #ifdef CONFIG_SMP if (nmi_watchdog == NMI_LOCAL_APIC) - smp_call_function(nmi_cpu_busy, (void 
*)&endflag, 0, 0); + smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); #endif for_each_possible_cpu(cpu) diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c index 5a29ded994fa..2f1e4f503c9e 100644 --- a/arch/x86/kernel/nmi_64.c +++ b/arch/x86/kernel/nmi_64.c @@ -96,7 +96,7 @@ int __init check_nmi_watchdog(void) #ifdef CONFIG_SMP if (nmi_watchdog == NMI_LOCAL_APIC) - smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); + smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); #endif for (cpu = 0; cpu < NR_CPUS; cpu++) diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 575aa3d7248a..56546e8a13ac 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -164,7 +164,7 @@ static void native_smp_send_stop(void) if (reboot_force) return; - smp_call_function(stop_this_cpu, NULL, 0, 0); + smp_call_function(stop_this_cpu, NULL, 0); local_irq_save(flags); disable_local_APIC(); local_irq_restore(flags); diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 61efa2f7d564..0a03d57f9b3b 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -278,7 +278,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) { long cpu = (long)arg; if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) - smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); + smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); return NOTIFY_DONE; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 540e95179074..5534fe59b5fc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx) { if (vmx->vcpu.cpu == -1) return; - smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); + smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); vmx->launched = 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63a77caa59f1..0faa2546b1cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) * So need not to call smp_call_function_single() in that case. 
*/ if (vcpu->guest_mode && vcpu->cpu != cpu) - smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); + smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0); put_cpu(); } diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 57d043fa893e..d5a2b39f882b 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c @@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) rv.msr_no = msr_no; if (safe) { - smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); err = rv.err; } else { - smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); } *l = rv.l; *h = rv.h; @@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) rv.l = l; rv.h = h; if (safe) { - smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); err = rv.err; } else { - smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); } return err; diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index cb34407a9930..04f596eab749 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -1113,7 +1113,7 @@ int safe_smp_processor_id(void) /* broadcast a halt to all other CPUs */ static void voyager_smp_send_stop(void) { - smp_call_function(smp_stop_cpu_function, NULL, 1, 1); + smp_call_function(smp_stop_cpu_function, NULL, 1); } /* this function is triggered in time.c when a clock tick fires diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index b3786e749b8e..a1651d029ea8 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -331,7 +331,7 @@ static void stop_self(void *v) void xen_smp_send_stop(void) { - smp_call_function(stop_self, NULL, 0, 0); + smp_call_function(stop_self, NULL, 0); } void xen_smp_send_reschedule(int cpu) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 556ee1585192..4976e5db2b3f 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1339,7 +1339,7 @@ static void smp_callback(void *v) static int acpi_processor_latency_notify(struct notifier_block *b, unsigned long l, void *v) { - smp_call_function(smp_callback, NULL, 0, 1); + smp_call_function(smp_callback, NULL, 1); return NOTIFY_OK; } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 23554b676d6e..5405769020a1 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -340,7 +340,7 @@ static void smp_callback(void *v) static int cpuidle_latency_notify(struct notifier_block *b, unsigned long l, void *v) { - smp_call_function(smp_callback, NULL, 0, 1); + smp_call_function(smp_callback, NULL, 1); return NOTIFY_OK; } diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h index 2f60a362d75e..544c69af8168 100644 --- a/include/asm-alpha/smp.h +++ b/include/asm-alpha/smp.h @@ -53,7 +53,7 @@ extern void arch_send_call_function_ipi(cpumask_t mask); #else /* CONFIG_SMP */ #define hard_smp_processor_id() 0 -#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; }) +#define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; }) #endif /* CONFIG_SMP */ diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h index e6d561599726..b61e74bea06a 100644 --- a/include/asm-sparc/smp.h +++ b/include/asm-sparc/smp.h 
@@ -72,7 +72,7 @@ static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } -static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) +static inline int smp_call_function(void (*func)(void *info), void *info, int wait) { xc1((smpfunc_t)func, (unsigned long)info); return 0; diff --git a/include/linux/smp.h b/include/linux/smp.h index eac3e062250f..338cad1b9548 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -62,11 +62,11 @@ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ -int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); +int smp_call_function(void(*func)(void *info), void *info, int wait); int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, int wait); int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, - int retry, int wait); + int wait); void __smp_call_function_single(int cpuid, struct call_single_data *data); /* @@ -119,7 +119,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) { return 0; } -#define smp_call_function(func, info, retry, wait) \ +#define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) #define on_each_cpu(func,info,retry,wait) \ ({ \ @@ -131,7 +131,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) static inline void smp_send_reschedule(int cpu) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) -#define smp_call_function_single(cpuid, func, info, retry, wait) \ +#define smp_call_function_single(cpuid, func, info, wait) \ ({ \ WARN_ON(cpuid != 0); \ local_irq_disable(); \ diff --git a/kernel/smp.c b/kernel/smp.c index f77b75c027ad..7e0432a4a0e2 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -195,7 +195,6 @@ void generic_smp_call_function_single_interrupt(void) * smp_call_function_single - Run a function on a specific CPU * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. - * @retry: Unused * @wait: If true, wait until function has completed on other CPUs. * * Returns 0 on success, else a negative status code. Note that @wait @@ -203,7 +202,7 @@ void generic_smp_call_function_single_interrupt(void) * we fall back to on-stack allocation. */ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int retry, int wait) + int wait) { struct call_single_data d; unsigned long flags; @@ -339,7 +338,6 @@ EXPORT_SYMBOL(smp_call_function_mask); * smp_call_function(): Run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. - * @natomic: Unused * @wait: If true, wait (atomically) until function has completed on other CPUs. * * Returns 0 on success, else a negative status code. @@ -351,7 +349,7 @@ EXPORT_SYMBOL(smp_call_function_mask); * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. 
*/ -int smp_call_function(void (*func)(void *), void *info, int natomic, int wait) +int smp_call_function(void (*func)(void *), void *info, int wait) { int ret; diff --git a/kernel/softirq.c b/kernel/softirq.c index 36e061740047..d73afb4764ef 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -679,7 +679,7 @@ int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait) int ret = 0; preempt_disable(); - ret = smp_call_function(func, info, retry, wait); + ret = smp_call_function(func, info, wait); local_irq_disable(); func(info); local_irq_enable(); diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 57a1f02e5ec0..75e718539dcb 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -266,7 +266,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu) "offline CPU #%d\n", *oncpu); else smp_call_function_single(*oncpu, tick_do_broadcast_on_off, - &reason, 1, 1); + &reason, 1); } /* diff --git a/net/core/flow.c b/net/core/flow.c index 19991175fdeb..5cf81052d044 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -298,7 +298,7 @@ void flow_cache_flush(void) init_completion(&info.completion); local_bh_disable(); - smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0); + smp_call_function(flow_cache_flush_per_cpu, &info, 0); flow_cache_flush_tasklet((unsigned long)&info); local_bh_enable(); diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 918970762131..94d5a45c3a57 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -480,7 +480,7 @@ static void iucv_setmask_mp(void) if (cpu_isset(cpu, iucv_buffer_cpumask) && !cpu_isset(cpu, iucv_irq_cpumask)) smp_call_function_single(cpu, iucv_allow_cpu, - NULL, 0, 1); + NULL, 1); preempt_enable(); } @@ -498,7 +498,7 @@ static void iucv_setmask_up(void) cpumask = iucv_irq_cpumask; cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); for_each_cpu_mask(cpu, cpumask) - smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1); + smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); } /** @@ -523,7 +523,7 @@ static int iucv_enable(void) rc = -EIO; preempt_disable(); for_each_online_cpu(cpu) - smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); + smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); preempt_enable(); if (cpus_empty(iucv_buffer_cpumask)) /* No cpu could declare an iucv buffer. */ @@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, case CPU_ONLINE_FROZEN: case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: - smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); + smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: @@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, if (cpus_empty(cpumask)) /* Can't offline last IUCV enabled cpu. */ return NOTIFY_BAD; - smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1); + smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); if (cpus_empty(iucv_irq_cpumask)) smp_call_function_single(first_cpu(iucv_buffer_cpumask), - iucv_allow_cpu, NULL, 0, 1); + iucv_allow_cpu, NULL, 1); break; } return NOTIFY_OK; @@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void) * pending interrupts force them to the work queue by calling * an empty function on all cpus. 
*/ - smp_call_function(__iucv_cleanup_queue, NULL, 0, 1); + smp_call_function(__iucv_cleanup_queue, NULL, 1); spin_lock_irq(&iucv_queue_lock); list_for_each_entry_safe(p, n, &iucv_task_queue, list) { /* Remove stale work items from the task queue. */ diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2d29e260da3d..ea1f595f8a87 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1266,12 +1266,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, case CPU_UP_CANCELED: printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", cpu); - smp_call_function_single(cpu, hardware_disable, NULL, 0, 1); + smp_call_function_single(cpu, hardware_disable, NULL, 1); break; case CPU_ONLINE: printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", cpu); - smp_call_function_single(cpu, hardware_enable, NULL, 0, 1); + smp_call_function_single(cpu, hardware_enable, NULL, 1); break; } return NOTIFY_OK; @@ -1474,7 +1474,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size, for_each_online_cpu(cpu) { smp_call_function_single(cpu, kvm_arch_check_processor_compat, - &r, 0, 1); + &r, 1); if (r < 0) goto out_free_1; } -- cgit v1.2.3 From 15c8b6c1aaaf1c4edd67e2f02e4d8e1bd1a51c0d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 9 May 2008 09:39:44 +0200 Subject: on_each_cpu(): kill unused 'retry' parameter It's not even passed on to smp_call_function() anymore, since that was removed. So kill it. Acked-by: Jeremy Fitzhardinge Reviewed-by: Paul E. McKenney Signed-off-by: Jens Axboe --- arch/alpha/kernel/process.c | 2 +- arch/alpha/kernel/smp.c | 4 ++-- arch/arm/kernel/smp.c | 6 +++--- arch/ia64/kernel/mca.c | 4 ++-- arch/ia64/kernel/perfmon.c | 4 ++-- arch/ia64/kernel/smp.c | 4 ++-- arch/mips/kernel/irq-rm9000.c | 4 ++-- arch/mips/kernel/smp.c | 4 ++-- arch/mips/oprofile/common.c | 6 +++--- arch/parisc/kernel/cache.c | 6 +++--- arch/parisc/kernel/smp.c | 2 +- arch/parisc/mm/init.c | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/kernel/tau_6xx.c | 4 ++-- arch/powerpc/kernel/time.c | 2 +- arch/powerpc/mm/slice.c | 2 +- arch/powerpc/oprofile/common.c | 6 +++--- arch/s390/kernel/smp.c | 6 +++--- arch/s390/kernel/time.c | 2 +- arch/sh/kernel/smp.c | 4 ++-- arch/sparc64/mm/hugetlbpage.c | 2 +- arch/x86/kernel/cpu/mcheck/mce_64.c | 6 +++--- arch/x86/kernel/cpu/mcheck/non-fatal.c | 2 +- arch/x86/kernel/cpu/perfctr-watchdog.c | 4 ++-- arch/x86/kernel/io_apic_32.c | 2 +- arch/x86/kernel/io_apic_64.c | 2 +- arch/x86/kernel/nmi_32.c | 4 ++-- arch/x86/kernel/nmi_64.c | 4 ++-- arch/x86/kernel/tlb_32.c | 2 +- arch/x86/kernel/tlb_64.c | 2 +- arch/x86/kernel/vsyscall_64.c | 2 +- arch/x86/kvm/vmx.c | 2 +- arch/x86/mach-voyager/voyager_smp.c | 2 +- arch/x86/mm/pageattr.c | 4 ++-- arch/x86/oprofile/nmi_int.c | 10 +++++----- drivers/char/agp/generic.c | 2 +- drivers/lguest/x86/core.c | 4 ++-- fs/buffer.c | 2 +- include/linux/smp.h | 4 ++-- kernel/hrtimer.c | 2 +- kernel/profile.c | 6 +++--- kernel/rcupdate.c | 2 +- kernel/softirq.c | 2 +- mm/page_alloc.c | 2 +- mm/slab.c | 4 ++-- mm/slub.c | 2 +- net/iucv/iucv.c | 2 +- virt/kvm/kvm_main.c | 8 ++++---- 48 files changed, 84 insertions(+), 84 deletions(-) diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 96ed82fd9eef..351407e07e71 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -160,7 +160,7 @@ common_shutdown(int mode, char *restart_cmd) struct halt_info args; args.mode = mode; args.restart_cmd = restart_cmd; - on_each_cpu(common_shutdown_1, &args, 1, 0); + 
on_each_cpu(common_shutdown_1, &args, 0); } void diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 44114c8dbb2a..83df541650fc 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -657,7 +657,7 @@ void smp_imb(void) { /* Must wait other processors to flush their icache before continue. */ - if (on_each_cpu(ipi_imb, NULL, 1, 1)) + if (on_each_cpu(ipi_imb, NULL, 1)) printk(KERN_CRIT "smp_imb: timed out\n"); } EXPORT_SYMBOL(smp_imb); @@ -673,7 +673,7 @@ flush_tlb_all(void) { /* Although we don't have any data to pass, we do want to synchronize with the other processors. */ - if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) { + if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) { printk(KERN_CRIT "flush_tlb_all: timed out\n"); } } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 6344466b2113..5a7c09564d13 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -604,7 +604,7 @@ static inline void ipi_flush_tlb_kernel_range(void *arg) void flush_tlb_all(void) { - on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1); + on_each_cpu(ipi_flush_tlb_all, NULL, 1); } void flush_tlb_mm(struct mm_struct *mm) @@ -631,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) ta.ta_start = kaddr; - on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1); + on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); } void flush_tlb_range(struct vm_area_struct *vma, @@ -654,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) ta.ta_start = start; ta.ta_end = end; - on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1); + on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); } diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 9cd818cc7008..7dd96c127177 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy) static void ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) { - on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); + on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0); } /* @@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) static void ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) { - on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); + on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0); } /* diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 9baa48255c12..19d4493c6193 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) } /* save the current system wide pmu states */ - ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1); + ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1); if (ret) { DPRINT(("on_each_cpu() failed: %d\n", ret)); goto cleanup_reserve; @@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) pfm_alt_intr_handler = NULL; - ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1); + ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1); if (ret) { DPRINT(("on_each_cpu() failed: %d\n", ret)); } diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 19152dcbf6e4..3676468612b6 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -285,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask) void smp_flush_tlb_all (void) { - on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1); + on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1); } void @@ -308,7 +308,7 @@ smp_flush_tlb_mm (struct mm_struct *mm) * anyhow, 
and once a CPU is interrupted, the cost of local_flush_tlb_all() is * rather trivial. */ - on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1); + on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1); } void arch_send_call_function_single_ipi(int cpu) diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index ed9febe63d72..b47e4615ec12 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c @@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args) static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) { - on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1); + on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1); return 0; } @@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args) static void rm9k_perfcounter_irq_shutdown(unsigned int irq) { - on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1); + on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1); } static struct irq_chip rm9k_irq_controller = { diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 7a9ae830be86..4410f172b8ab 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -246,7 +246,7 @@ static void flush_tlb_all_ipi(void *info) void flush_tlb_all(void) { - on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1); + on_each_cpu(flush_tlb_all_ipi, NULL, 1); } static void flush_tlb_mm_ipi(void *mm) @@ -366,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) .addr2 = end, }; - on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1); + on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); } static void flush_tlb_page_ipi(void *info) diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c index b5f6f71b27bc..dd2fbd6645c1 100644 --- a/arch/mips/oprofile/common.c +++ b/arch/mips/oprofile/common.c @@ -27,7 +27,7 @@ static int op_mips_setup(void) model->reg_setup(ctr); /* Configure the registers on all cpus. */ - on_each_cpu(model->cpu_setup, NULL, 0, 1); + on_each_cpu(model->cpu_setup, NULL, 1); return 0; } @@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root) static int op_mips_start(void) { - on_each_cpu(model->cpu_start, NULL, 0, 1); + on_each_cpu(model->cpu_start, NULL, 1); return 0; } @@ -66,7 +66,7 @@ static int op_mips_start(void) static void op_mips_stop(void) { /* Disable performance monitoring for all counters. 
*/ - on_each_cpu(model->cpu_stop, NULL, 0, 1); + on_each_cpu(model->cpu_stop, NULL, 1); } int __init oprofile_arch_init(struct oprofile_operations *ops) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index e10d25d2d9c9..5259d8c20676 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -51,12 +51,12 @@ static struct pdc_btlb_info btlb_info __read_mostly; void flush_data_cache(void) { - on_each_cpu(flush_data_cache_local, NULL, 1, 1); + on_each_cpu(flush_data_cache_local, NULL, 1); } void flush_instruction_cache(void) { - on_each_cpu(flush_instruction_cache_local, NULL, 1, 1); + on_each_cpu(flush_instruction_cache_local, NULL, 1); } #endif @@ -515,7 +515,7 @@ static void cacheflush_h_tmp_function(void *dummy) void flush_cache_all(void) { - on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1); + on_each_cpu(cacheflush_h_tmp_function, NULL, 1); } void flush_cache_mm(struct mm_struct *mm) diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 126105c76a44..d47f3975c9c6 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -292,7 +292,7 @@ void arch_send_call_function_single_ipi(int cpu) void smp_flush_tlb_all(void) { - on_each_cpu(flush_tlb_all_local, NULL, 1, 1); + on_each_cpu(flush_tlb_all_local, NULL, 1); } /* diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ce0da689a89d..b4d6c8777ed0 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -1053,7 +1053,7 @@ void flush_tlb_all(void) do_recycle++; } spin_unlock(&sid_lock); - on_each_cpu(flush_tlb_all_local, NULL, 1, 1); + on_each_cpu(flush_tlb_all_local, NULL, 1); if (do_recycle) { spin_lock(&sid_lock); recycle_sids(recycle_ndirty,recycle_dirty_array); diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 34843c318419..647f3e8677dc 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) /* Call function on all CPUs. 
One of us will make the * rtas call */ - if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0)) + if (on_each_cpu(rtas_percpu_suspend_me, &data, 0)) data.error = -EINVAL; wait_for_completion(&done); diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index 368a4934f7ee..c3a56d65c5a9 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused) /* schedule ourselves to be run again */ mod_timer(&tau_timer, jiffies + shrink_timer) ; - on_each_cpu(tau_timeout, NULL, 1, 0); + on_each_cpu(tau_timeout, NULL, 0); } /* @@ -234,7 +234,7 @@ int __init TAU_init(void) tau_timer.expires = jiffies + shrink_timer; add_timer(&tau_timer); - on_each_cpu(TAU_init_smp, NULL, 1, 0); + on_each_cpu(TAU_init_smp, NULL, 0); printk("Thermal assist unit "); #ifdef CONFIG_TAU_INT diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 73401e83739a..f1a38a6c1e2d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -322,7 +322,7 @@ void snapshot_timebases(void) { if (!cpu_has_feature(CPU_FTR_PURR)) return; - on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); + on_each_cpu(snapshot_tb_and_purr, NULL, 1); } /* diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index ad928edafb0a..2bd12d965db1 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -218,7 +218,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz mb(); /* XXX this is sub-optimal but will do for now */ - on_each_cpu(slice_flush_segments, mm, 0, 1); + on_each_cpu(slice_flush_segments, mm, 1); #ifdef CONFIG_SPU_BASE spu_flush_all_slbs(mm); #endif diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index 4908dc98f9ca..17807acb05d9 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c @@ -65,7 +65,7 @@ static int op_powerpc_setup(void) /* Configure the registers on all cpus. If an error occurs on one * of the cpus, op_per_cpu_rc will be set to the error */ - on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1); + on_each_cpu(op_powerpc_cpu_setup, NULL, 1); out: if (op_per_cpu_rc) { /* error on setup release the performance counter hardware */ @@ -100,7 +100,7 @@ static int op_powerpc_start(void) if (model->global_start) return model->global_start(ctr); if (model->start) { - on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1); + on_each_cpu(op_powerpc_cpu_start, NULL, 1); return op_per_cpu_rc; } return -EIO; /* No start function is defined for this @@ -115,7 +115,7 @@ static inline void op_powerpc_cpu_stop(void *dummy) static void op_powerpc_stop(void) { if (model->stop) - on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1); + on_each_cpu(op_powerpc_cpu_stop, NULL, 1); if (model->global_stop) model->global_stop(); } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 276b105fb2a4..b6781030cfbd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -299,7 +299,7 @@ static void smp_ptlb_callback(void *info) void smp_ptlb_all(void) { - on_each_cpu(smp_ptlb_callback, NULL, 0, 1); + on_each_cpu(smp_ptlb_callback, NULL, 1); } EXPORT_SYMBOL(smp_ptlb_all); #endif /* ! 
CONFIG_64BIT */ @@ -347,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit) memset(&parms.orvals, 0, sizeof(parms.orvals)); memset(&parms.andvals, 0xff, sizeof(parms.andvals)); parms.orvals[cr] = 1 << bit; - on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); + on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_set_bit); @@ -361,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit) memset(&parms.orvals, 0, sizeof(parms.orvals)); memset(&parms.andvals, 0xff, sizeof(parms.andvals)); parms.andvals[cr] = ~(1L << bit); - on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); + on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_clear_bit); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index bf7bf2c2236a..6037ed2b7471 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -909,7 +909,7 @@ static void etr_work_fn(struct work_struct *work) if (!eacr.ea) { /* Both ports offline. Reset everything. */ eacr.dp = eacr.es = eacr.sl = 0; - on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); + on_each_cpu(etr_disable_sync_clock, NULL, 1); del_timer_sync(&etr_timer); etr_update_eacr(eacr); set_bit(ETR_FLAG_EACCES, &etr_flags); diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 71781ba2675b..60c50841143e 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -197,7 +197,7 @@ static void flush_tlb_all_ipi(void *info) void flush_tlb_all(void) { - on_each_cpu(flush_tlb_all_ipi, 0, 1, 1); + on_each_cpu(flush_tlb_all_ipi, 0, 1); } static void flush_tlb_mm_ipi(void *mm) @@ -284,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) fd.addr1 = start; fd.addr2 = end; - on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1); + on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1); } static void flush_tlb_page_ipi(void *info) diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 6cfab2e4d340..ebefd2a14375 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -344,7 +344,7 @@ void hugetlb_prefault_arch_hook(struct mm_struct *mm) * also executing in this address space. */ mm->context.sparc64_ctx_val = ctx; - on_each_cpu(context_reload, mm, 0, 0); + on_each_cpu(context_reload, mm, 0); } spin_unlock(&ctx_alloc_lock); } diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index e07e8c068ae0..43b7cb594912 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -363,7 +363,7 @@ static void mcheck_check_cpu(void *info) static void mcheck_timer(struct work_struct *work) { - on_each_cpu(mcheck_check_cpu, NULL, 1, 1); + on_each_cpu(mcheck_check_cpu, NULL, 1); /* * Alert userspace if needed. If we logged an MCE, reduce the @@ -612,7 +612,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, * Collect entries that were still getting written before the * synchronize. 
*/ - on_each_cpu(collect_tscs, cpu_tsc, 1, 1); + on_each_cpu(collect_tscs, cpu_tsc, 1); for (i = next; i < MCE_LOG_LEN; i++) { if (mcelog.entry[i].finished && mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { @@ -737,7 +737,7 @@ static void mce_restart(void) if (next_interval) cancel_delayed_work(&mcheck_work); /* Timer race is harmless here */ - on_each_cpu(mce_init, NULL, 1, 1); + on_each_cpu(mce_init, NULL, 1); next_interval = check_interval * HZ; if (next_interval) schedule_delayed_work(&mcheck_work, diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index 00ccb6c14ec2..cc1fccdd31e0 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c @@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); static void mce_work_fn(struct work_struct *work) { - on_each_cpu(mce_checkregs, NULL, 1, 1); + on_each_cpu(mce_checkregs, NULL, 1); schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); } diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index f9ae93adffe5..58043f06d7e2 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -180,7 +180,7 @@ void disable_lapic_nmi_watchdog(void) if (atomic_read(&nmi_active) <= 0) return; - on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); + on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); wd_ops->unreserve(); BUG_ON(atomic_read(&nmi_active) != 0); @@ -202,7 +202,7 @@ void enable_lapic_nmi_watchdog(void) return; } - on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); + on_each_cpu(setup_apic_nmi_watchdog, NULL, 1); touch_nmi_watchdog(); } diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 4dc8600d9d20..720640ff36ca 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -1565,7 +1565,7 @@ void /*__init*/ print_local_APIC(void * dummy) void print_all_local_APICs (void) { - on_each_cpu(print_local_APIC, NULL, 1, 1); + on_each_cpu(print_local_APIC, NULL, 1); } void /*__init*/ print_PIC(void) diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index ef1a8dfcc529..4504c7f50012 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -1146,7 +1146,7 @@ void __apicdebuginit print_local_APIC(void * dummy) void print_all_local_APICs (void) { - on_each_cpu(print_local_APIC, NULL, 1, 1); + on_each_cpu(print_local_APIC, NULL, 1); } void __apicdebuginit print_PIC(void) diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index 5562dab0bd20..11008e0857c0 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c @@ -218,7 +218,7 @@ static void __acpi_nmi_enable(void *__unused) void acpi_nmi_enable(void) { if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); + on_each_cpu(__acpi_nmi_enable, NULL, 1); } static void __acpi_nmi_disable(void *__unused) @@ -232,7 +232,7 @@ static void __acpi_nmi_disable(void *__unused) void acpi_nmi_disable(void) { if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); + on_each_cpu(__acpi_nmi_disable, NULL, 1); } void setup_apic_nmi_watchdog(void *unused) diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c index 2f1e4f503c9e..bbdcb17b3dfe 100644 --- a/arch/x86/kernel/nmi_64.c +++ b/arch/x86/kernel/nmi_64.c @@ -225,7 +225,7 @@ static void __acpi_nmi_enable(void *__unused) void acpi_nmi_enable(void) { if 
(atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); + on_each_cpu(__acpi_nmi_enable, NULL, 1); } static void __acpi_nmi_disable(void *__unused) @@ -239,7 +239,7 @@ static void __acpi_nmi_disable(void *__unused) void acpi_nmi_disable(void) { if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); + on_each_cpu(__acpi_nmi_disable, NULL, 1); } void setup_apic_nmi_watchdog(void *unused) diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index 9bb2363851af..fec1ecedc9b7 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info) void flush_tlb_all(void) { - on_each_cpu(do_flush_tlb_all, NULL, 1, 1); + on_each_cpu(do_flush_tlb_all, NULL, 1); } diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index a1f07d793202..184a367516d3 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -270,5 +270,5 @@ static void do_flush_tlb_all(void *info) void flush_tlb_all(void) { - on_each_cpu(do_flush_tlb_all, NULL, 1, 1); + on_each_cpu(do_flush_tlb_all, NULL, 1); } diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 0a03d57f9b3b..0dcae19ed627 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -301,7 +301,7 @@ static int __init vsyscall_init(void) #ifdef CONFIG_SYSCTL register_sysctl_table(kernel_root_table2); #endif - on_each_cpu(cpu_vsyscall_init, NULL, 0, 1); + on_each_cpu(cpu_vsyscall_init, NULL, 1); hotcpu_notifier(cpu_vsyscall_notifier, 0); return 0; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5534fe59b5fc..10ce6ee4c491 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2968,7 +2968,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->vmcs) { - on_each_cpu(__vcpu_clear, vmx, 0, 1); + on_each_cpu(__vcpu_clear, vmx, 1); free_vmcs(vmx->vmcs); vmx->vmcs = NULL; } diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 04f596eab749..abea08459a73 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -1072,7 +1072,7 @@ static void do_flush_tlb_all(void *info) /* flush the TLB of every active CPU in the system */ void flush_tlb_all(void) { - on_each_cpu(do_flush_tlb_all, 0, 1, 1); + on_each_cpu(do_flush_tlb_all, 0, 1); } /* used to set up the trampoline for other CPUs when the memory manager diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 60bcb5b6a37e..9b836ba9dedd 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -106,7 +106,7 @@ static void cpa_flush_all(unsigned long cache) { BUG_ON(irqs_disabled()); - on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1); + on_each_cpu(__cpa_flush_all, (void *) cache, 1); } static void __cpa_flush_range(void *arg) @@ -127,7 +127,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) BUG_ON(irqs_disabled()); WARN_ON(PAGE_ALIGN(start) != start); - on_each_cpu(__cpa_flush_range, NULL, 1, 1); + on_each_cpu(__cpa_flush_range, NULL, 1); if (!cache) return; diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index cc48d3fde545..3238ad32ffd8 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -218,8 +218,8 @@ static int nmi_setup(void) } } - on_each_cpu(nmi_save_registers, NULL, 0, 1); - on_each_cpu(nmi_cpu_setup, NULL, 0, 1); + on_each_cpu(nmi_save_registers, 
NULL, 1); + on_each_cpu(nmi_cpu_setup, NULL, 1); nmi_enabled = 1; return 0; } @@ -271,7 +271,7 @@ static void nmi_shutdown(void) { struct op_msrs *msrs = &__get_cpu_var(cpu_msrs); nmi_enabled = 0; - on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); + on_each_cpu(nmi_cpu_shutdown, NULL, 1); unregister_die_notifier(&profile_exceptions_nb); model->shutdown(msrs); free_msrs(); @@ -285,7 +285,7 @@ static void nmi_cpu_start(void *dummy) static int nmi_start(void) { - on_each_cpu(nmi_cpu_start, NULL, 0, 1); + on_each_cpu(nmi_cpu_start, NULL, 1); return 0; } @@ -297,7 +297,7 @@ static void nmi_cpu_stop(void *dummy) static void nmi_stop(void) { - on_each_cpu(nmi_cpu_stop, NULL, 0, 1); + on_each_cpu(nmi_cpu_stop, NULL, 1); } struct op_counter_config counter_config[OP_MAX_COUNTER]; diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 564daaa6c7d0..eaa1a355bb32 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -1249,7 +1249,7 @@ static void ipi_handler(void *null) void global_cache_flush(void) { - if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0) + if (on_each_cpu(ipi_handler, NULL, 1) != 0) panic(PFX "timed out waiting for the other CPUs!\n"); } EXPORT_SYMBOL(global_cache_flush); diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 2e554a4ab337..95dfda52b4f9 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -478,7 +478,7 @@ void __init lguest_arch_host_init(void) cpu_had_pge = 1; /* adjust_pge is a helper function which sets or unsets the PGE * bit on its CPU, depending on the argument (0 == unset). */ - on_each_cpu(adjust_pge, (void *)0, 0, 1); + on_each_cpu(adjust_pge, (void *)0, 1); /* Turn off the feature in the global feature set. */ clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); } @@ -493,7 +493,7 @@ void __exit lguest_arch_host_fini(void) if (cpu_had_pge) { set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); /* adjust_pge's argument "1" means set PGE. 
*/ - on_each_cpu(adjust_pge, (void *)1, 0, 1); + on_each_cpu(adjust_pge, (void *)1, 1); } put_online_cpus(); } diff --git a/fs/buffer.c b/fs/buffer.c index a073f3f4f013..5c23ef560d01 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1464,7 +1464,7 @@ static void invalidate_bh_lru(void *arg) void invalidate_bh_lrus(void) { - on_each_cpu(invalidate_bh_lru, NULL, 1, 1); + on_each_cpu(invalidate_bh_lru, NULL, 1); } EXPORT_SYMBOL_GPL(invalidate_bh_lrus); diff --git a/include/linux/smp.h b/include/linux/smp.h index 338cad1b9548..55261101d09a 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -89,7 +89,7 @@ static inline void init_call_single_data(void) /* * Call a function on all processors */ -int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait); +int on_each_cpu(void (*func) (void *info), void *info, int wait); #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 @@ -121,7 +121,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) } #define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) -#define on_each_cpu(func,info,retry,wait) \ +#define on_each_cpu(func,info,wait) \ ({ \ local_irq_disable(); \ func(info); \ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 421be5fe5cc7..50e8616d7955 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -623,7 +623,7 @@ static void retrigger_next_event(void *arg) void clock_was_set(void) { /* Retrigger the CPU local events everywhere */ - on_each_cpu(retrigger_next_event, NULL, 0, 1); + on_each_cpu(retrigger_next_event, NULL, 1); } /* diff --git a/kernel/profile.c b/kernel/profile.c index ae7ead82cbc9..58926411eb2a 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -252,7 +252,7 @@ static void profile_flip_buffers(void) mutex_lock(&profile_flip_mutex); j = per_cpu(cpu_profile_flip, get_cpu()); put_cpu(); - on_each_cpu(__profile_flip_buffers, NULL, 0, 1); + on_each_cpu(__profile_flip_buffers, NULL, 1); for_each_online_cpu(cpu) { struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; for (i = 0; i < NR_PROFILE_HIT; ++i) { @@ -275,7 +275,7 @@ static void profile_discard_flip_buffers(void) mutex_lock(&profile_flip_mutex); i = per_cpu(cpu_profile_flip, get_cpu()); put_cpu(); - on_each_cpu(__profile_flip_buffers, NULL, 0, 1); + on_each_cpu(__profile_flip_buffers, NULL, 1); for_each_online_cpu(cpu) { struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); @@ -558,7 +558,7 @@ static int __init create_hash_tables(void) out_cleanup: prof_on = 0; smp_mb(); - on_each_cpu(profile_nop, NULL, 0, 1); + on_each_cpu(profile_nop, NULL, 1); for_each_online_cpu(cpu) { struct page *page; diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index c09605f8d16c..6addab5e6d88 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -127,7 +127,7 @@ void rcu_barrier(void) * until all the callbacks are queued. 
*/ rcu_read_lock(); - on_each_cpu(rcu_barrier_func, NULL, 0, 1); + on_each_cpu(rcu_barrier_func, NULL, 1); rcu_read_unlock(); wait_for_completion(&rcu_barrier_completion); mutex_unlock(&rcu_barrier_mutex); diff --git a/kernel/softirq.c b/kernel/softirq.c index d73afb4764ef..c159fd094772 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -674,7 +674,7 @@ __init int spawn_ksoftirqd(void) /* * Call a function on all processors */ -int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait) +int on_each_cpu(void (*func) (void *info), void *info, int wait) { int ret = 0; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2f552955a02f..53242344a774 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -918,7 +918,7 @@ void drain_local_pages(void *arg) */ void drain_all_pages(void) { - on_each_cpu(drain_local_pages, NULL, 0, 1); + on_each_cpu(drain_local_pages, NULL, 1); } #ifdef CONFIG_HIBERNATION diff --git a/mm/slab.c b/mm/slab.c index 046607f05f3e..0772abb412b9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2454,7 +2454,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep) struct kmem_list3 *l3; int node; - on_each_cpu(do_drain, cachep, 1, 1); + on_each_cpu(do_drain, cachep, 1); check_irq_on(); for_each_online_node(node) { l3 = cachep->nodelists[node]; @@ -3939,7 +3939,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, } new->cachep = cachep; - on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); + on_each_cpu(do_ccupdate_local, (void *)new, 1); check_irq_on(); cachep->batchcount = batchcount; diff --git a/mm/slub.c b/mm/slub.c index 0987d1cd943c..44715eb70c06 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1497,7 +1497,7 @@ static void flush_cpu_slab(void *d) static void flush_all(struct kmem_cache *s) { #ifdef CONFIG_SMP - on_each_cpu(flush_cpu_slab, s, 1, 1); + on_each_cpu(flush_cpu_slab, s, 1); #else unsigned long flags; diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 94d5a45c3a57..a178e27e7b1a 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -545,7 +545,7 @@ out: */ static void iucv_disable(void) { - on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1); + on_each_cpu(iucv_retrieve_cpu, NULL, 1); kfree(iucv_path_table); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ea1f595f8a87..d4eae6af0738 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1286,7 +1286,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val, * in vmx root mode. 
*/ printk(KERN_INFO "kvm: exiting hardware virtualization\n"); - on_each_cpu(hardware_disable, NULL, 0, 1); + on_each_cpu(hardware_disable, NULL, 1); } return NOTIFY_OK; } @@ -1479,7 +1479,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size, goto out_free_1; } - on_each_cpu(hardware_enable, NULL, 0, 1); + on_each_cpu(hardware_enable, NULL, 1); r = register_cpu_notifier(&kvm_cpu_notifier); if (r) goto out_free_2; @@ -1525,7 +1525,7 @@ out_free_3: unregister_reboot_notifier(&kvm_reboot_notifier); unregister_cpu_notifier(&kvm_cpu_notifier); out_free_2: - on_each_cpu(hardware_disable, NULL, 0, 1); + on_each_cpu(hardware_disable, NULL, 1); out_free_1: kvm_arch_hardware_unsetup(); out_free_0: @@ -1547,7 +1547,7 @@ void kvm_exit(void) sysdev_class_unregister(&kvm_sysdev_class); unregister_reboot_notifier(&kvm_reboot_notifier); unregister_cpu_notifier(&kvm_cpu_notifier); - on_each_cpu(hardware_disable, NULL, 0, 1); + on_each_cpu(hardware_disable, NULL, 1); kvm_arch_hardware_unsetup(); kvm_arch_exit(); kvm_exit_debug(); -- cgit v1.2.3 From 127a237a1ff49fa5b8e00af91e841598aeea3513 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 27 Jun 2008 11:48:22 +0200 Subject: fix "smp_call_function: get rid of the unused nonatomic/retry argument" fix: arch/x86/kernel/process.c: In function 'cpu_idle_wait': arch/x86/kernel/process.c:64: error: too many arguments to function 'smp_call_function' Signed-off-by: Ingo Molnar --- arch/x86/kernel/process.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ba370dc8685b..2dad8fef391c 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -61,7 +61,7 @@ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ - smp_call_function(do_nothing, NULL, 0, 1); + smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); -- cgit v1.2.3 From ce0d1b6f73870878aae622b72e85fe8f7a16b51c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 27 Jun 2008 11:50:32 +0200 Subject: fix: "smp_call_function: get rid of the unused nonatomic/retry argument" fix: kernel/smp.c: In function 'smp_call_function_mask': kernel/smp.c:303: error: too many arguments to function 'smp_call_function_single' Signed-off-by: Ingo Molnar --- kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/smp.c b/kernel/smp.c index 7e0432a4a0e2..4f582b257eba 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -300,7 +300,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, return 0; else if (num_cpus == 1) { cpu = first_cpu(mask); - return smp_call_function_single(cpu, func, info, 0, wait); + return smp_call_function_single(cpu, func, info, wait); } if (!wait) { -- cgit v1.2.3 From 8b604d520799a995946437d041f46bae7d5bcc8c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 27 Jun 2008 11:52:45 +0200 Subject: fix: "smp_call_function: get rid of the unused nonatomic/retry argument" drivers/char/sysrq.c: In function 'sysrq_showregs_othercpus': drivers/char/sysrq.c:218: error: too many arguments to function 'smp_call_function' Signed-off-by: Ingo Molnar --- drivers/char/sysrq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index dbce1263bdff..8fdfe9c871e3 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -215,7 +215,7 @@ static void showacpu(void *dummy) static void sysrq_showregs_othercpus(struct work_struct *dummy) { - 
smp_call_function(showacpu, NULL, 0, 0); + smp_call_function(showacpu, NULL, 0); } static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); -- cgit v1.2.3 From 392096e98fd55e54035978fe03796fca8d26a574 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 3 Jul 2008 17:10:07 +1000 Subject: generic-ipi: fix linux-next tree build failure Today's linux-next build (powerpc ppc64_defconfig) failed like this: arch/powerpc/mm/tlb_64.c: In function 'pgtable_free_now': arch/powerpc/mm/tlb_64.c:66: error: too many arguments to function 'smp_call_function' arch/powerpc/kernel/machine_kexec_64.c: In function 'kexec_prepare_cpus': arch/powerpc/kernel/machine_kexec_64.c:175: error: too many arguments to function 'smp_call_function' Signed-off-by: Stephen Rothwell Acked-by: Jens Axboe Cc: Paul Mackerras Cc: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/machine_kexec_64.c | 2 +- arch/powerpc/mm/tlb_64.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 704375bda73a..b732b5f8e356 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -172,7 +172,7 @@ static void kexec_prepare_cpus(void) { int my_cpu, i, notified=-1; - smp_call_function(kexec_smp_down, NULL, 0, /* wait */0); + smp_call_function(kexec_smp_down, NULL, /* wait */0); my_cpu = get_cpu(); /* check the others cpus are now down (via paca hw cpu id == -1) */ diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c index e2d867ce1c7e..69ad829a7fa3 100644 --- a/arch/powerpc/mm/tlb_64.c +++ b/arch/powerpc/mm/tlb_64.c @@ -66,7 +66,7 @@ static void pgtable_free_now(pgtable_free_t pgf) { pte_freelist_forced_free++; - smp_call_function(pte_free_smp_sync, NULL, 0, 1); + smp_call_function(pte_free_smp_sync, NULL, 1); pgtable_free(pgf); } -- cgit v1.2.3 From ba8dd03ac09f51a69c154b8cb508b701d713a2cd Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 4 Jul 2008 11:26:40 +0200 Subject: generic-ipi: fix s390 build bug forgot to remove #include from linux/smp.h while fixing the original s390 build bug. Patch below fixes this build bug caused by header inclusion dependencies: CC kernel/timer.o In file included from include/linux/spinlock.h:87, from include/linux/smp.h:11, from include/linux/kernel_stat.h:4, from kernel/timer.c:22: include/asm/spinlock.h: In function '__raw_spin_lock': include/asm/spinlock.h:69: error: implicit declaration of function 'smp_processor_id' Signed-off-by: Heiko Carstens Signed-off-by: Ingo Molnar --- include/linux/smp.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/smp.h b/include/linux/smp.h index 55261101d09a..48262f86c969 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -8,7 +8,6 @@ #include #include -#include #include extern void cpu_idle(void); -- cgit v1.2.3 From 5e374fb62621aca9522f76c2317c9acda75a8e88 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 1 Jul 2008 13:12:04 +0200 Subject: generic-ipi: fixlet create proper stackframe. 
From 5e374fb62621aca9522f76c2317c9acda75a8e88 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 1 Jul 2008 13:12:04 +0200
Subject: generic-ipi: fixlet

create proper stackframe.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 56546e8a13ac..361b7a4c640c 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -198,7 +198,7 @@ void smp_call_function_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
-void smp_call_function_single_interrupt(void)
+void smp_call_function_single_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 	irq_enter();
--
cgit v1.2.3


From ca201c8230de336c3684aa3f3422d0c3f02bcef9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 10 Jul 2008 12:33:20 +0200
Subject: x86, visws: fix generic-ipi build

fix:

  arch/x86/kernel/built-in.o: In function `smp_intr_init':
  (.init.text+0x49e2): undefined reference to `call_function_single_interrupt'

Caused by include/asm-x86/mach-visws/entry_arch.h getting out of sync
with the include/asm-x86/mach-default/entry_arch.h file it derives from.

Copy the default file over - next step will be to simply include the
default file.

Signed-off-by: Ingo Molnar
---
 include/asm-x86/mach-visws/entry_arch.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
index b183fa6d83d9..9283b60a1dd2 100644
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ b/include/asm-x86/mach-visws/entry_arch.h
@@ -1,3 +1,9 @@
+/*
+ * This file is designed to contain the BUILD_INTERRUPT specifications for
+ * all of the extra named interrupt vectors used by the architecture.
+ * Usually this is the Inter Process Interrupts (IPIs)
+ */
+
 /*
  * The following vectors are part of the Linux architecture, there
  * is no hardware IRQ pin equivalent for them, they are triggered
@@ -7,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 #endif
 
 /*
@@ -20,4 +27,9 @@ BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+
+#ifdef CONFIG_X86_MCE_P4THERMAL
+BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
+#endif
+
 #endif
--
cgit v1.2.3
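
The BUILD_INTERRUPT() line added above only generates the low-level entry stub
for CALL_FUNCTION_SINGLE_VECTOR; that stub then has to land in a C handler
which forwards to the generic helper in kernel/smp.c. Pieced together from the
hunks visible in this series, the x86 handler has roughly the following shape;
this is a simplified sketch, not the complete kernel function:

  /* reached via the entry stub for CALL_FUNCTION_SINGLE_VECTOR */
  void smp_call_function_single_interrupt(struct pt_regs *regs)
  {
          ack_APIC_irq();         /* acknowledge the IPI at the APIC */
          irq_enter();            /* switch to hardirq accounting */
          generic_smp_call_function_single_interrupt();
          irq_exit();
  }

The pt_regs argument is unused in the C body; the "generic-ipi: fixlet" above
adds it, presumably so that the handler's frame matches what the interrupt
entry stubs set up for every other vector.
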
From 42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 10 Jul 2008 12:35:46 +0200
Subject: x86, visws: use mach-default/entry_arch.h

mach-default/entry_arch.h is exactly the same file as
mach-visws/entry_arch.h, so include the former from the latter; that
way updates to the generic one get picked up by VISWS as well.

Signed-off-by: Ingo Molnar
---
 include/asm-x86/mach-visws/entry_arch.h | 34 ++-------------------------------
 1 file changed, 2 insertions(+), 32 deletions(-)

diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
index 9283b60a1dd2..86be554342d4 100644
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ b/include/asm-x86/mach-visws/entry_arch.h
@@ -1,35 +1,5 @@
 /*
- * This file is designed to contain the BUILD_INTERRUPT specifications for
- * all of the extra named interrupt vectors used by the architecture.
- * Usually this is the Inter Process Interrupts (IPIs)
+ * VISWS uses the standard Linux entry points:
  */
 
-/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
- */
-#ifdef CONFIG_X86_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-#endif
-
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-
-#ifdef CONFIG_X86_MCE_P4THERMAL
-BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
-#endif
-
-#endif
+#include "../mach-default/entry_arch.h"
--
cgit v1.2.3
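
With the arch plumbing settled, the series leaves callers two ways into the
single-CPU path: the synchronous smp_call_function_single(), and
__smp_call_function_single() with a caller-owned call_single_data for contexts
that cannot allocate memory. A sketch of the preallocated form; poke(),
poke_csd and poke_cpu() are hypothetical names, and the reuse rule in the
comment follows from the helper's contract rather than from quoted kernel
documentation:

  #include <linux/smp.h>

  /* runs on the target CPU in hardware interrupt context */
  static void poke(void *info)
  {
  }

  /* caller-owned descriptor, so the IPI path allocates nothing */
  static struct call_single_data poke_csd = {
          .func   = poke,
          .info   = NULL,
          .flags  = 0,    /* no CSD_FLAG_WAIT: fire-and-forget */
  };

  static void poke_cpu(int cpu)
  {
          /*
           * Useful where kmalloc() would be unsafe; poke_csd must not
           * be reused until the previous IPI has run on the target CPU.
           */
          __smp_call_function_single(cpu, &poke_csd);
  }
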