Diffstat (limited to 'arch/x86/kernel/ipi.c')
-rw-r--r--  arch/x86/kernel/ipi.c | 176
1 file changed, 75 insertions(+), 101 deletions(-)
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 285bbf8831fa..dbf5445727a9 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -17,147 +17,121 @@
 #include <asm/mmu_context.h>
 #include <asm/apic.h>
 #include <asm/proto.h>
+#include <asm/ipi.h>
 
-#ifdef CONFIG_X86_32
-#include <mach_apic.h>
-#include <mach_ipi.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
+void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
 {
-	unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
-	switch (vector) {
-	default:
-		icr |= APIC_DM_FIXED | vector;
-		break;
-	case NMI_VECTOR:
-		icr |= APIC_DM_NMI;
-		break;
+	unsigned long query_cpu;
+	unsigned long flags;
+
+	/*
+	 * Hack. The clustered APIC addressing mode doesn't allow us to send
+	 * to an arbitrary mask, so I do a unicast to each CPU instead.
+	 * - mbligh
+	 */
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask) {
+		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	return icr;
+	local_irq_restore(flags);
 }
 
-static inline int __prepare_ICR2(unsigned int mask)
+void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+						 int vector)
 {
-	return SET_APIC_DEST_FIELD(mask);
-}
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int query_cpu;
+	unsigned long flags;
 
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-	/*
-	 * Subtle. In the case of the 'never do double writes' workaround
-	 * we have to lock out interrupts to be safe. As we don't care
-	 * of the value read we use an atomic rmw access to avoid costly
-	 * cli/sti. Otherwise we use an even cheaper single atomic write
-	 * to the APIC.
-	 */
-	unsigned int cfg;
+	/* See Hack comment above */
 
-	/*
-	 * Wait for idle.
-	 */
-	apic_wait_icr_idle();
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask) {
+		if (query_cpu == this_cpu)
+			continue;
+		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+				 query_cpu), vector, APIC_DEST_PHYSICAL);
+	}
+	local_irq_restore(flags);
+}
 
-	/*
-	 * No need to touch the target chip field
-	 */
-	cfg = __prepare_ICR(shortcut, vector);
+void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+						 int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
 
 	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
+	 * Hack. The clustered APIC addressing mode doesn't allow us to send
+	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
+	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
-	apic_write(APIC_ICR, cfg);
-}
 
-void send_IPI_self(int vector)
-{
-	__send_IPI_shortcut(APIC_DEST_SELF, vector);
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		__default_send_IPI_dest_field(
+			apic->cpu_to_logical_apicid(query_cpu), vector,
+			apic->dest_logical);
+	local_irq_restore(flags);
 }
 
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
+void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+						 int vector)
 {
-	unsigned long cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	if (unlikely(vector == NMI_VECTOR))
-		safe_apic_wait_icr_idle();
-	else
-		apic_wait_icr_idle();
-
-	/*
-	 * prepare target chip field
-	 */
-	cfg = __prepare_ICR2(mask);
-	apic_write(APIC_ICR2, cfg);
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
 
-	/*
-	 * program the ICR
-	 */
-	cfg = __prepare_ICR(0, vector);
+	/* See Hack comment above */
 
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write(APIC_ICR, cfg);
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask) {
+		if (query_cpu == this_cpu)
+			continue;
+		__default_send_IPI_dest_field(
+			apic->cpu_to_logical_apicid(query_cpu), vector,
+			apic->dest_logical);
+	}
+	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_X86_32
+
 /*
  * This is only used on smaller machines.
  */
-void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
+void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 {
 	unsigned long mask = cpumask_bits(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-	__send_IPI_dest_field(mask, vector);
+	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
+void default_send_IPI_allbutself(int vector)
 {
-	unsigned long flags;
-	unsigned int query_cpu;
-
 	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-	 * should be modified to do 1 message per cluster ID - mbligh
+	 * if there are no other CPUs in the system then we get an APIC send
+	 * error if we try to broadcast, thus avoid sending IPIs in this case.
 	 */
+	if (!(num_online_cpus() > 1))
+		return;
 
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
-	local_irq_restore(flags);
+	__default_local_send_IPI_allbutself(vector);
 }
 
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+void default_send_IPI_all(int vector)
 {
-	unsigned long flags;
-	unsigned int query_cpu;
-	unsigned int this_cpu = smp_processor_id();
-
-	/* See Hack comment above */
+	__default_local_send_IPI_all(vector);
+}
 
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		if (query_cpu != this_cpu)
-			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-					      vector);
-	local_irq_restore(flags);
+void default_send_IPI_self(int vector)
+{
+	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
 }
 
 /* must come after the send_IPI functions above for inlining */
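For readers following the refactoring: the patch folds the old __prepare_ICR()/__prepare_ICR2() pair into a single __default_send_IPI_dest_field() helper (moved to <asm/ipi.h>) that takes the destination mode as an argument instead of hard-coding APIC_DEST_LOGICAL, which is what lets the same loop body serve both the _phys and _logical variants above. The sketch below is reconstructed from the removed lines only, to show the ICR programming sequence in one place; the actual in-tree helper may differ in detail.

/*
 * Sketch of the ICR write sequence, reconstructed from the removed
 * __send_IPI_dest_field(); dest_mode is the new parameter and would be
 * APIC_DEST_PHYSICAL or apic->dest_logical in the callers above.
 */
static inline void sketch_send_IPI_dest_field(unsigned int dest_id,
					      int vector,
					      unsigned int dest_mode)
{
	unsigned long cfg;

	/*
	 * Wait until the APIC has accepted any previous IPI. NMIs use
	 * the checked wait, exactly as the removed code did.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		apic_wait_icr_idle();

	/*
	 * The target APIC ID lives in bits 56-63 of the 64-bit ICR,
	 * i.e. the high word written via APIC_ICR2.
	 */
	cfg = SET_APIC_DEST_FIELD(dest_id);
	apic_write(APIC_ICR2, cfg);

	/*
	 * Delivery mode, vector and destination mode; the write to the
	 * low ICR word fires off the IPI.
	 */
	if (vector == NMI_VECTOR)
		cfg = dest_mode | APIC_DM_NMI;
	else
		cfg = dest_mode | APIC_DM_FIXED | vector;
	apic_write(APIC_ICR, cfg);
}

Note that the helper itself does not disable interrupts: every caller in the patch wraps its per-CPU loop in local_irq_save()/local_irq_restore(), since the two-register ICR2/ICR write sequence must not be interleaved with another IPI send on the same CPU.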