author     Thomas Gleixner <tglx@linutronix.de>       2008-01-30 13:30:36 +0100
committer  Ingo Molnar <mingo@elte.hu>                2008-01-30 13:30:36 +0100
commit     ae9d983be1eefac4b5efad69a188e7ac89a75797 (patch)
tree       ecdf7ad736e1fe98dff2277649b573135d1381fd /include/asm-x86
parent     x86: merge mpspec variants (diff)
download   linux-ae9d983be1eefac4b5efad69a188e7ac89a75797.tar.xz
           linux-ae9d983be1eefac4b5efad69a188e7ac89a75797.zip
x86: cleanup smp.h variants
Bring the smp.h variants into sync to prepare merging and
paravirt support.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/apic.h        |   1
-rw-r--r--  include/asm-x86/mpspec.h      |   3
-rw-r--r--  include/asm-x86/smp_32.h      | 117
-rw-r--r--  include/asm-x86/smp_64.h      | 133
-rw-r--r--  include/asm-x86/topology_64.h |   2
5 files changed, 104 insertions, 152 deletions
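
[Editor's note, not part of the commit] The smp_32.h hunks below keep struct smp_ops and its inline wrappers as the single entry point for the SMP boot callbacks, which is what the commit message means by preparing paravirt support: a paravirtualized guest can later install its own callbacks in place of the native_* ones. The minimal C sketch that follows only illustrates that ops-struct indirection; every name in it (demo_smp_ops, native_demo_cpu_up, demo_cpu_up) is invented for the example and does not occur in the patch.

#include <stdio.h>

/* Table of boot hooks, analogous to struct smp_ops in smp_32.h. */
struct demo_smp_ops {
        int (*cpu_up)(unsigned int cpu);
};

/* Default bare-metal implementation, analogous to native_cpu_up(). */
static int native_demo_cpu_up(unsigned int cpu)
{
        printf("booting cpu %u the native way\n", cpu);
        return 0;
}

/* Filled with the native callbacks; a paravirt backend would overwrite it early. */
static struct demo_smp_ops demo_smp_ops = {
        .cpu_up = native_demo_cpu_up,
};

/* Callers always go through the table, like the inline wrappers in smp_32.h. */
static inline int demo_cpu_up(unsigned int cpu)
{
        return demo_smp_ops.cpu_up(cpu);
}

int main(void)
{
        return demo_cpu_up(1);
}
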
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index d0a221fa1fc3..18d932dff476 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -42,6 +42,7 @@ extern int local_apic_timer_disabled;
 extern int apic_runs_main_timer;
 extern int ioapic_force;
+extern int disable_apic;
 extern int disable_apic_timer;
 extern unsigned boot_cpu_id;
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index a2a6b2ea4259..781ad74ab9e9 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -13,8 +13,11 @@ extern int quad_local_to_mp_bus_id[NR_CPUS/4][4];
 extern unsigned int def_to_bigsmp;
 extern int apic_version[MAX_APICS];
+extern u8 apicid_2_node[];
 extern int pic_mode;
 
+#define MAX_APICID 256
+
 #else
 
 #define MAX_MP_BUSSES 256
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index e10b7affdfe5..c69e960429cc 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -1,51 +1,41 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <linux/init.h>
+
 /*
  * We need the APIC definitions automatically as part of 'smp.h'
  */
-#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+# include <asm/mpspec.h>
+# include <asm/apic.h>
+# ifdef CONFIG_X86_IO_APIC
+# include <asm/io_apic.h>
+# endif
 #endif
 
-#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
-#include <linux/bitops.h>
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#ifdef CONFIG_X86_IO_APIC
-#include <asm/io_apic.h>
-#endif
-#endif
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_callin_map;
 
-#define BAD_APICID 0xFFu
-#ifdef CONFIG_SMP
-#ifndef __ASSEMBLY__
+extern int smp_num_siblings;
+extern unsigned int num_processors;
 
-/*
- * Private routines/data
- */
-
 extern void smp_alloc_memory(void);
-extern int pic_mode;
-extern int smp_num_siblings;
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
 
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 
-#define MAX_APICID 256
 extern u8 __initdata x86_cpu_to_apicid_init[];
 extern void *x86_cpu_to_apicid_ptr;
-DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
-
-#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-extern void set_cpu_sibling_map(int cpu);
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(u8, cpu_llc_id);
+DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void cpu_exit_clear(void);
@@ -53,6 +43,9 @@ extern void cpu_uninit(void);
 extern void remove_siblinginfo(int cpu);
 #endif
 
+/* Globals due to paravirt */
+extern void set_cpu_sibling_map(int cpu);
+
 struct smp_ops
 {
 	void (*smp_prepare_boot_cpu)(void);
@@ -67,6 +60,7 @@ struct smp_ops
 			      int wait);
 };
 
+#ifdef CONFIG_SMP
 extern struct smp_ops smp_ops;
 
 static inline void smp_prepare_boot_cpu(void)
@@ -107,10 +101,12 @@ int native_cpu_up(unsigned int cpunum);
 void native_smp_cpus_done(unsigned int max_cpus);
 
 #ifndef CONFIG_PARAVIRT
-#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
-do { } while (0)
+#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
 #endif
 
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
 /*
  * This function is needed by all SMP systems. It must _always_ be valid
  * from the initial startup. We map APIC_BASE very early in page_setup(),
@@ -119,9 +115,11 @@ do { } while (0)
 DECLARE_PER_CPU(int, cpu_number);
 #define raw_smp_processor_id() (x86_read_percpu(cpu_number))
 
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_possible_map;
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
+
+extern int safe_smp_processor_id(void);
+
+void __cpuinit smp_store_cpu_info(int id);
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
@@ -129,56 +127,39 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-extern int safe_smp_processor_id(void);
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-extern unsigned int num_processors;
-
-void __cpuinit smp_store_cpu_info(int id);
-
-#endif /* !__ASSEMBLY__ */
-
 #else /* CONFIG_SMP */
 #define safe_smp_processor_id() 0
 #define cpu_physical_id(cpu) boot_cpu_physical_apicid
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-#endif /* CONFIG_SMP */
-
-#ifndef __ASSEMBLY__
+#endif /* !CONFIG_SMP */
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-#ifdef APIC_DEFINITION
+static __inline int logical_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
+}
+
+# ifdef APIC_DEFINITION
 extern int hard_smp_processor_id(void);
-#else
-#include <mach_apicdef.h>
+# else
+# include <mach_apicdef.h>
 static inline int hard_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
 }
-#endif /* APIC_DEFINITION */
+# endif /* APIC_DEFINITION */
 
 #else /* CONFIG_X86_LOCAL_APIC */
 
-#ifndef CONFIG_SMP
-#define hard_smp_processor_id() 0
-#endif
+# ifndef CONFIG_SMP
+# define hard_smp_processor_id() 0
+# endif
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-extern u8 apicid_2_node[];
-
-#ifdef CONFIG_X86_LOCAL_APIC
-static __inline int logical_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-}
-#endif
-#endif
-
+#endif /* !ASSEMBLY */
 #endif
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index ab612b0ff270..2feddda91e12 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -1,130 +1,99 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#include <linux/threads.h>
 #include <linux/cpumask.h>
-#include <linux/bitops.h>
 #include <linux/init.h>
-extern int disable_apic;
 
-#include <asm/mpspec.h>
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/thread_info.h>
-
-#ifdef CONFIG_SMP
-
+#include <asm/mpspec.h>
 #include <asm/pda.h>
+#include <asm/thread_info.h>
 
-struct pt_regs;
-
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_initialized;
 
-/*
- * Private routines/data
- */
-
+extern int smp_num_siblings;
+extern unsigned int num_processors;
+
 extern void smp_alloc_memory(void);
-extern volatile unsigned long smp_invalidate_needed;
 extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
-extern int smp_num_siblings;
-extern void smp_send_reschedule(int cpu);
+
 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 				  void *info, int wait);
 
-/*
- * cpu_sibling_map and cpu_core_map now live
- * in the per cpu area
- *
- * extern cpumask_t cpu_sibling_map[NR_CPUS];
- * extern cpumask_t cpu_core_map[NR_CPUS];
- */
+extern u8 __initdata x86_cpu_to_apicid_init[];
+extern void *x86_cpu_to_apicid_ptr;
+extern u8 bios_cpu_apicid[];
+
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u8, cpu_llc_id);
+DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
 
-#define SMP_TRAMPOLINE_BASE 0x6000
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-
-static inline int num_booting_cpus(void)
+static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	return cpus_weight(cpu_callout_map);
+	if (mps_cpu < NR_CPUS)
+		return (int)bios_cpu_apicid[mps_cpu];
+	else
+		return BAD_APICID;
 }
 
-#define raw_smp_processor_id() read_pda(cpunumber)
+#ifdef CONFIG_SMP
+
+#define SMP_TRAMPOLINE_BASE 0x6000
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern void prefill_possible_map(void);
-extern unsigned num_processors;
 extern unsigned __cpuinitdata disabled_cpus;
 
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-#endif /* CONFIG_SMP */
-
-#define safe_smp_processor_id() smp_processor_id()
+#define raw_smp_processor_id() read_pda(cpunumber)
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
 
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-}
+#define stack_smp_processor_id() \
+	({ \
+	struct thread_info *ti; \
+	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+	ti->cpu; \
+})
 
 /*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
+ * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
+ * scheduling and IPI sending and compresses data structures.
  */
-extern u8 __initdata x86_cpu_to_apicid_init[];
-extern void *x86_cpu_to_apicid_ptr;
-DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */
-extern u8 bios_cpu_apicid[];
-
-static inline int cpu_present_to_apicid(int mps_cpu)
+static inline int num_booting_cpus(void)
 {
-	if (mps_cpu < NR_CPUS)
-		return (int)bios_cpu_apicid[mps_cpu];
-	else
-		return BAD_APICID;
+	return cpus_weight(cpu_callout_map);
 }
 
-#ifndef CONFIG_SMP
+extern void smp_send_reschedule(int cpu);
+
+#else /* CONFIG_SMP */
+
+extern unsigned int boot_cpu_id;
+#define cpu_physical_id(cpu) boot_cpu_id
 #define stack_smp_processor_id() 0
-#define cpu_logical_map(x) (x)
-#else
-#include <asm/thread_info.h>
-#define stack_smp_processor_id() \
-({ \
-	struct thread_info *ti; \
-	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
-	ti->cpu; \
-})
-#endif
+
+#endif /* !CONFIG_SMP */
+
+#define safe_smp_processor_id() smp_processor_id()
 
 static __inline int logical_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
+}
+
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
 }
 
-#ifdef CONFIG_SMP
-#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-#else
-extern unsigned int boot_cpu_id;
-#define cpu_physical_id(cpu) boot_cpu_id
-#endif /* !CONFIG_SMP */
 #endif
diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h
index a718dda037e0..407b22d4e3b0 100644
--- a/include/asm-x86/topology_64.h
+++ b/include/asm-x86/topology_64.h
@@ -7,8 +7,6 @@
 #include <asm/mpspec.h>
 #include <linux/bitops.h>
 
-extern cpumask_t cpu_online_map;
-
 extern unsigned char cpu_to_node[];
 extern cpumask_t node_to_cpumask[];
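
[Editor's note, not part of the commit] The stack_smp_processor_id() macro that moves around in smp_64.h relies on the kernel stack being THREAD_SIZE-aligned: masking %rsp with CURRENT_MASK (~(THREAD_SIZE - 1)) yields the base of the stack, where struct thread_info and its cpu field live. The user-space sketch below only demonstrates that masking trick; the names and the 8 KiB stand-in for the kernel stack are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_STACK_SIZE 8192UL                  /* stands in for THREAD_SIZE */
#define DEMO_MASK (~(DEMO_STACK_SIZE - 1))      /* stands in for CURRENT_MASK */

struct demo_thread_info {
        int cpu;                                /* like thread_info->cpu */
};

int main(void)
{
        /* Aligned block whose first bytes hold the thread_info. */
        void *stack = aligned_alloc(DEMO_STACK_SIZE, DEMO_STACK_SIZE);
        if (!stack)
                return 1;
        ((struct demo_thread_info *)stack)->cpu = 3;

        /* Any address inside the block, e.g. a stack slot in use. */
        uintptr_t sp = (uintptr_t)stack + 4096;

        /* Mask back down to the base; this is what the andq in the macro does. */
        struct demo_thread_info *ti = (struct demo_thread_info *)(sp & DEMO_MASK);
        printf("cpu = %d\n", ti->cpu);

        free(stack);
        return 0;
}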