Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/alternative.h             |  13
-rw-r--r--  include/asm-i386/apic.h                    |  15
-rw-r--r--  include/asm-i386/atomic.h                  |   6
-rw-r--r--  include/asm-i386/boot.h                    |   6
-rw-r--r--  include/asm-i386/bugs.h                    |   4
-rw-r--r--  include/asm-i386/cpu.h                     |   3
-rw-r--r--  include/asm-i386/cpufeature.h              |   8
-rw-r--r--  include/asm-i386/current.h                 |   7
-rw-r--r--  include/asm-i386/delay.h                   |  13
-rw-r--r--  include/asm-i386/desc.h                    |  95
-rw-r--r--  include/asm-i386/e820.h                    |   5
-rw-r--r--  include/asm-i386/elf.h                     |   2
-rw-r--r--  include/asm-i386/genapic.h                 |   2
-rw-r--r--  include/asm-i386/i387.h                    |   5
-rw-r--r--  include/asm-i386/io.h                      |   8
-rw-r--r--  include/asm-i386/irq.h                     |   5
-rw-r--r--  include/asm-i386/irq_regs.h                |  28
-rw-r--r--  include/asm-i386/irqflags.h                |  42
-rw-r--r--  include/asm-i386/mach-default/setup_arch.h |   2
-rw-r--r--  include/asm-i386/math_emu.h                |   1
-rw-r--r--  include/asm-i386/mmu_context.h             |   8
-rw-r--r--  include/asm-i386/module.h                  |  10
-rw-r--r--  include/asm-i386/mpspec_def.h              |   2
-rw-r--r--  include/asm-i386/msr.h                     |  18
-rw-r--r--  include/asm-i386/nmi.h                     |   8
-rw-r--r--  include/asm-i386/page.h                    |   8
-rw-r--r--  include/asm-i386/paravirt.h                | 505
-rw-r--r--  include/asm-i386/pda.h                     | 100
-rw-r--r--  include/asm-i386/percpu.h                  |  25
-rw-r--r--  include/asm-i386/pgtable-2level.h          |  10
-rw-r--r--  include/asm-i386/pgtable-3level.h          |  45
-rw-r--r--  include/asm-i386/pgtable.h                 |  18
-rw-r--r--  include/asm-i386/processor.h               | 204
-rw-r--r--  include/asm-i386/ptrace.h                  |   2
-rw-r--r--  include/asm-i386/segment.h                 |   7
-rw-r--r--  include/asm-i386/setup.h                   |   1
-rw-r--r--  include/asm-i386/smp.h                     |   3
-rw-r--r--  include/asm-i386/spinlock.h                |  19
-rw-r--r--  include/asm-i386/suspend.h                 |   8
-rw-r--r--  include/asm-i386/system.h                  |  16
-rw-r--r--  include/asm-i386/thread_info.h             |  10
-rw-r--r--  include/asm-i386/time.h                    |  41
-rw-r--r--  include/asm-i386/tlbflush.h                |  18
-rw-r--r--  include/asm-i386/unwind.h                  |  13
-rw-r--r--  include/asm-i386/vm86.h                    |  17
45 files changed, 1114 insertions(+), 272 deletions(-)
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b01a7ec409ce..b8fa9557c532 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -4,7 +4,7 @@
#ifdef __KERNEL__
#include <asm/types.h>
-
+#include <linux/stddef.h>
#include <linux/types.h>
struct alt_instr {
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {}
#define LOCK_PREFIX ""
#endif
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
#endif /* _I386_ALTERNATIVE_H */
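
The apply_paravirt() hook declared above is the boot-time consumer of the
.parainstructions records emitted by paravirt.h later in this series. A
minimal sketch of what such a consumer looks like, assuming a nop_out()
helper that pads with NOPs (an illustration, not the patch's actual
arch/i386/kernel/alternative.c code):

	void apply_paravirt(struct paravirt_patch *start,
			    struct paravirt_patch *end)
	{
		struct paravirt_patch *p;

		for (p = start; p < end; p++) {
			/* backend rewrites up to p->len bytes in place... */
			unsigned used = paravirt_ops.patch(p->instrtype,
							   p->clobbers,
							   p->instr, p->len);
			/* ...and the remainder is padded with NOPs */
			nop_out(p->instr + used, p->len - used);
		}
	}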
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index b9529578fc37..41a44319905f 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -37,18 +37,27 @@ extern void generic_apic_probe(void);
/*
* Basic functions accessing APICs.
*/
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define apic_write native_apic_write
+#define apic_write_atomic native_apic_write_atomic
+#define apic_read native_apic_read
+#endif
-static __inline void apic_write(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write(unsigned long reg,
+ unsigned long v)
{
*((volatile unsigned long *)(APIC_BASE+reg)) = v;
}
-static __inline void apic_write_atomic(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write_atomic(unsigned long reg,
+ unsigned long v)
{
xchg((volatile unsigned long *)(APIC_BASE+reg), v);
}
-static __inline unsigned long apic_read(unsigned long reg)
+static __inline fastcall unsigned long native_apic_read(unsigned long reg)
{
return *((volatile unsigned long *)(APIC_BASE+reg));
}
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index a6c024e2506f..c57441bb2905 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -187,9 +187,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
/* Modern 486+ processor */
__i = i;
__asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
+ LOCK_PREFIX "xaddl %0, %1"
+ :"+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
#ifdef CONFIG_M386
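
For reference, xaddl atomically exchanges its two operands and then adds,
so after the instruction `i` holds the counter's old value and `i + __i`
is the post-addition value. A usage sketch:

	atomic_t v = ATOMIC_INIT(5);
	int n = atomic_add_return(3, &v);	/* n == 8, v.counter == 8 */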
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index 96b228e6e79c..8ce79a6fa891 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -12,4 +12,8 @@
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */
-#endif
+/* Physical address where kernel should be loaded. */
+#define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
+#endif /* _LINUX_BOOT_H */
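
LOAD_PHYSICAL_ADDR rounds the traditional 1 MiB load address up to the
configured alignment. A worked example, assuming CONFIG_PHYSICAL_ALIGN is
0x200000 (2 MiB):

	(0x100000 + 0x200000 - 1) & ~(0x200000 - 1)
		= 0x2fffff & 0xffe00000
		= 0x200000

With a 1 MiB alignment the expression degenerates to 0x100000, the
historical default.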
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 592ffeeda45e..38f1aebbbdb5 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -21,6 +21,7 @@
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
+#include <asm/paravirt.h>
static int __init no_halt(char *s)
{
@@ -91,6 +92,9 @@ static void __init check_fpu(void)
static void __init check_hlt(void)
{
+ if (paravirt_enabled())
+ return;
+
printk(KERN_INFO "Checking 'hlt' instruction... ");
if (!boot_cpu_data.hlt_works_ok) {
printk("disabled\n");
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index b1bc7b1b64b0..9d914e1e4aad 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -13,6 +13,9 @@ struct i386_cpu {
extern int arch_register_cpu(int num);
#ifdef CONFIG_HOTPLUG_CPU
extern void arch_unregister_cpu(int);
+extern int enable_cpu_hotplug;
+#else
+#define enable_cpu_hotplug 0
#endif
DECLARE_PER_CPU(int, cpu_state);
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index d314ebb3d59e..3f92b94e0d75 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -31,7 +31,7 @@
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_DS (0*32+21) /* Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
@@ -73,6 +73,8 @@
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -134,6 +136,10 @@
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
+#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#endif /* __ASM_I386_CPUFEATURE_H */
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index 3cbbecd79016..5252ee0f6d7a 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -1,13 +1,14 @@
#ifndef _I386_CURRENT_H
#define _I386_CURRENT_H
-#include <linux/thread_info.h>
+#include <asm/pda.h>
+#include <linux/compiler.h>
struct task_struct;
-static __always_inline struct task_struct * get_current(void)
+static __always_inline struct task_struct *get_current(void)
{
- return current_thread_info()->task;
+ return read_pda(pcurrent);
}
#define current get_current()
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index b1c7650dc7b9..32d6678d0bbf 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -7,6 +7,7 @@
* Delay routines calling functions in arch/i386/lib/delay.c
*/
+/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
@@ -15,13 +16,23 @@ extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
+#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
+#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
+
+#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
+
+#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
__udelay(n))
-
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
+#endif
void use_tsc_delay(void);
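
The magic constants implement a 32.32 fixed-point conversion: scaling
microseconds by 2**32 / 10**6 = 4294.97, rounded up to 4295 = 0x10c7,
lets __const_udelay() later multiply by a loops-per-second figure and
shift right by 32 instead of dividing. A standalone check of the
arithmetic (hypothetical user-space code, not from this patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long xloops = 10ULL * 0x10c7; /* udelay(10) */
		/* converting back: xloops * 10**6 >> 32 recovers ~10 us */
		printf("%llu\n", (xloops * 1000000ULL) >> 32); /* prints 10 */
		return 0;
	}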
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 5874ef119ffd..f398cc456448 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -4,8 +4,6 @@
#include <asm/ldt.h>
#include <asm/segment.h>
-#define CPU_16BIT_STACK_SIZE 1024
-
#ifndef __ASSEMBLY__
#include <linux/preempt.h>
@@ -16,8 +14,6 @@
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-
struct Xgt_desc_struct {
unsigned short size;
unsigned long address __attribute__((packed));
@@ -33,11 +29,6 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
}
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
extern struct desc_struct idt_table[];
extern void set_intr_gate(unsigned int irq, void * addr);
@@ -64,8 +55,10 @@ static inline void pack_gate(__u32 *a, __u32 *b,
#define DESCTYPE_DPL3 0x60 /* DPL-3 */
#define DESCTYPE_S 0x10 /* !system */
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
@@ -88,6 +81,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
#undef C
}
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
{
__u32 *lp = (__u32 *)((char *)dt + entry*8);
@@ -95,9 +92,25 @@ static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entr
*(lp+1) = entry_b;
}
-#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define set_ldt native_set_ldt
+#endif /* CONFIG_PARAVIRT */
+
+static inline fastcall void native_set_ldt(const void *addr,
+ unsigned int entries)
+{
+ if (likely(entries == 0))
+ __asm__ __volatile__("lldt %w0"::"q" (0));
+ else {
+ unsigned cpu = smp_processor_id();
+ __u32 a, b;
+
+ pack_descriptor(&a, &b, (unsigned long)addr,
+ entries * sizeof(struct desc_struct) - 1,
+ DESCTYPE_LDT, 0);
+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+ __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+ }
+}
static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
{
@@ -115,14 +128,6 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo
write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
}
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
-{
- __u32 a, b;
- pack_descriptor(&a, &b, (unsigned long)addr,
- entries * sizeof(struct desc_struct) - 1,
- DESCTYPE_LDT, 0);
- write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
-}
#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
@@ -153,35 +158,22 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entri
static inline void clear_LDT(void)
{
- int cpu = get_cpu();
-
- set_ldt_desc(cpu, &default_ldt[0], 5);
- load_LDT_desc();
- put_cpu();
+ set_ldt(NULL, 0);
}
/*
* load one particular LDT into the current CPU
*/
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock(mm_context_t *pc)
{
- void *segments = pc->ldt;
- int count = pc->size;
-
- if (likely(!count)) {
- segments = &default_ldt[0];
- count = 5;
- }
-
- set_ldt_desc(cpu, segments, count);
- load_LDT_desc();
+ set_ldt(pc->ldt, pc->size);
}
static inline void load_LDT(mm_context_t *pc)
{
- int cpu = get_cpu();
- load_LDT_nolock(pc, cpu);
- put_cpu();
+ preempt_disable();
+ load_LDT_nolock(pc);
+ preempt_enable();
}
static inline unsigned long get_desc_base(unsigned long *desc)
@@ -193,6 +185,29 @@ static inline unsigned long get_desc_base(unsigned long *desc)
return base;
}
+#else /* __ASSEMBLY__ */
+
+/*
+ * GET_DESC_BASE reads the descriptor base of the specified segment.
+ *
+ * Args:
+ * idx - descriptor index
+ * gdt - GDT pointer
+ * base - 32bit register to which the base will be written
+ * lo_w - lo word of the "base" register
+ * lo_b - lo byte of the "base" register
+ * hi_b - hi byte of the low word of the "base" register
+ *
+ * Example:
+ * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+ * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
+ */
+#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
+ movb idx*8+4(gdt), lo_b; \
+ movb idx*8+7(gdt), hi_b; \
+ shll $16, base; \
+ movw idx*8+2(gdt), lo_w;
+
#endif /* !__ASSEMBLY__ */
#endif
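
GET_DESC_BASE works because a GDT descriptor scatters the 32-bit segment
base across descriptor bytes 2, 3, 4 and 7. The same reassembly in C
(an illustrative helper, not part of the patch):

	static unsigned long desc_base(const unsigned char *desc)
	{
		return desc[2] | (desc[3] << 8) |	  /* base 15..0  */
		       (desc[4] << 16) |		  /* base 23..16 */
		       ((unsigned long)desc[7] << 24);	  /* base 31..24 */
	}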
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index f7514fb6e8e4..395077aba583 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -38,6 +38,11 @@ extern struct e820map e820;
extern int e820_all_mapped(unsigned long start, unsigned long end,
unsigned type);
+extern void find_max_pfn(void);
+extern void register_bootmem_low_pages(unsigned long max_low_pfn);
+extern void register_memory(void);
+extern void limit_regions(unsigned long long size);
+extern void print_memory_map(char *who);
#endif/*!__ASSEMBLY__*/
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 3a05436f31c0..45d21a0c95bf 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -91,7 +91,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
pr_reg[7] = regs->xds; \
pr_reg[8] = regs->xes; \
savesegment(fs,pr_reg[9]); \
- savesegment(gs,pr_reg[10]); \
+ pr_reg[10] = regs->xgs; \
pr_reg[11] = regs->orig_eax; \
pr_reg[12] = regs->eip; \
pr_reg[13] = regs->xcs; \
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index 8ffbb0f07457..fd2be593b06e 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -122,6 +122,6 @@ struct genapic {
APICFUNC(phys_pkg_id) \
}
-extern struct genapic *genapic;
+extern struct genapic *genapic, apic_default;
#endif
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index bc1d6edae1ed..434936c732d6 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -76,7 +76,9 @@ static inline void __save_init_fpu( struct task_struct *tsk )
#define __unlazy_fpu( tsk ) do { \
if (task_thread_info(tsk)->status & TS_USEDFPU) \
- save_init_fpu( tsk ); \
+ save_init_fpu( tsk ); \
+ else \
+ tsk->fpu_counter = 0; \
} while (0)
#define __clear_fpu( tsk ) \
@@ -118,6 +120,7 @@ static inline void save_init_fpu( struct task_struct *tsk )
extern unsigned short get_fpu_cwd( struct task_struct *tsk );
extern unsigned short get_fpu_swd( struct task_struct *tsk );
extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
+extern asmlinkage void math_state_restore(void);
/*
* Signal frame handlers...
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 68df0dc3ab8f..86ff5e83be2f 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -256,11 +256,11 @@ static inline void flush_write_buffers(void)
#endif /* __KERNEL__ */
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#if defined(CONFIG_PARAVIRT)
+#include <asm/paravirt.h>
#else
+
#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-#endif
static inline void slow_down_io(void) {
__asm__ __volatile__(
@@ -271,6 +271,8 @@ static inline void slow_down_io(void) {
: : );
}
+#endif
+
#ifdef CONFIG_X86_NUMAQ
extern void *xquad_portio; /* Where the IO area was mapped */
#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 331726b41128..11761cdaae19 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -37,8 +37,13 @@ static __inline__ int irq_canonicalize(int irq)
extern int irqbalance_disable(char *str);
#endif
+extern void quirk_intel_irqbalance(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(cpumask_t map);
#endif
+void init_IRQ(void);
+void __init native_init_IRQ(void);
+
#endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h
index 3dd9c0b70270..a1b3f7f594a2 100644
--- a/include/asm-i386/irq_regs.h
+++ b/include/asm-i386/irq_regs.h
@@ -1 +1,27 @@
-#include <asm-generic/irq_regs.h>
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the PDA.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_I386_IRQ_REGS_H
+#define _ASM_I386_IRQ_REGS_H
+
+#include <asm/pda.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return read_pda(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = read_pda(irq_regs);
+ write_pda(irq_regs, new_regs);
+
+ return old_regs;
+}
+
+#endif /* _ASM_I386_IRQ_REGS_H */
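
The intended usage nests: an interrupt entry point installs the new frame
and restores the old one on the way out, so any handler below it can call
get_irq_regs(). A sketch of such a caller:

	fastcall unsigned int do_IRQ(struct pt_regs *regs)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		/* ... dispatch; handlers may call get_irq_regs() ... */

		set_irq_regs(old_regs);	/* restore the interrupted frame */
		return 1;
	}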
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index e1bdb97c07fa..17b18cf4fe9d 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,9 @@
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#ifndef __ASSEMBLY__
static inline unsigned long __raw_local_save_flags(void)
@@ -25,9 +28,6 @@ static inline unsigned long __raw_local_save_flags(void)
return flags;
}
-#define raw_local_save_flags(flags) \
- do { (flags) = __raw_local_save_flags(); } while (0)
-
static inline void raw_local_irq_restore(unsigned long flags)
{
__asm__ __volatile__(
@@ -66,18 +66,6 @@ static inline void halt(void)
__asm__ __volatile__("hlt": : :"memory");
}
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
- return !(flags & (1 << 9));
-}
-
-static inline int raw_irqs_disabled(void)
-{
- unsigned long flags = __raw_local_save_flags();
-
- return raw_irqs_disabled_flags(flags);
-}
-
/*
* For spinlocks, etc:
*/
@@ -90,9 +78,33 @@ static inline unsigned long __raw_local_irq_save(void)
return flags;
}
+#else
+#define DISABLE_INTERRUPTS(clobbers) cli
+#define ENABLE_INTERRUPTS(clobbers) sti
+#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+#define INTERRUPT_RETURN iret
+#define GET_CR0_INTO_EAX movl %cr0, %eax
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef __ASSEMBLY__
+#define raw_local_save_flags(flags) \
+ do { (flags) = __raw_local_save_flags(); } while (0)
+
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (1 << 9));
+}
+
+static inline int raw_irqs_disabled(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ return raw_irqs_disabled_flags(flags);
+}
#endif /* __ASSEMBLY__ */
/*
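
Bit 9 of EFLAGS is IF, the interrupt-enable flag, which is why
raw_irqs_disabled_flags() tests `1 << 9`. The save/restore pair nests
safely; a usage sketch:

	unsigned long flags;

	raw_local_irq_save(flags);	/* record IF, then disable */
	/* ... critical section ... */
	raw_local_irq_restore(flags);	/* re-enables only if IF was set */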
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h
index fb42099e7bd4..605e3ccb991b 100644
--- a/include/asm-i386/mach-default/setup_arch.h
+++ b/include/asm-i386/mach-default/setup_arch.h
@@ -2,4 +2,6 @@
/* no action for generic */
+#ifndef ARCH_SETUP
#define ARCH_SETUP
+#endif
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
index 697673b555ce..a4b0aa3320e6 100644
--- a/include/asm-i386/math_emu.h
+++ b/include/asm-i386/math_emu.h
@@ -21,6 +21,7 @@ struct info {
long ___eax;
long ___ds;
long ___es;
+ long ___fs;
long ___orig_eax;
long ___eip;
long ___cs;
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 62b7bf184094..68ff102d6f5e 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -44,7 +44,7 @@ static inline void switch_mm(struct mm_struct *prev,
* load the LDT, if the LDT is different:
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context, cpu);
+ load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
@@ -56,14 +56,14 @@ static inline void switch_mm(struct mm_struct *prev,
* tlb flush IPI delivery. We must reload %cr3.
*/
load_cr3(next->pgd);
- load_LDT_nolock(&next->context, cpu);
+ load_LDT_nolock(&next->context);
}
}
#endif
}
-#define deactivate_mm(tsk, mm) \
- asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+#define deactivate_mm(tsk, mm) \
+ asm("movl %0,%%fs": :"r" (0));
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL)
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index 424661d25bd3..02f8f541cbe0 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -20,6 +20,8 @@ struct mod_arch_specific
#define MODULE_PROC_FAMILY "586TSC "
#elif defined CONFIG_M586MMX
#define MODULE_PROC_FAMILY "586MMX "
+#elif defined CONFIG_MCORE2
+#define MODULE_PROC_FAMILY "CORE2 "
#elif defined CONFIG_M686
#define MODULE_PROC_FAMILY "686 "
#elif defined CONFIG_MPENTIUMII
@@ -60,18 +62,12 @@ struct mod_arch_specific
#error unknown processor family
#endif
-#ifdef CONFIG_REGPARM
-#define MODULE_REGPARM "REGPARM "
-#else
-#define MODULE_REGPARM ""
-#endif
-
#ifdef CONFIG_4KSTACKS
#define MODULE_STACKSIZE "4KSTACKS "
#else
#define MODULE_STACKSIZE ""
#endif
-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
#endif /* _ASM_I386_MODULE_H */
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index 76feedf85a8a..13bafb16e7af 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -97,7 +97,6 @@ struct mpc_config_bus
#define BUSTYPE_TC "TC"
#define BUSTYPE_VME "VME"
#define BUSTYPE_XPRESS "XPRESS"
-#define BUSTYPE_NEC98 "NEC98"
struct mpc_config_ioapic
{
@@ -182,7 +181,6 @@ enum mp_bustype {
MP_BUS_EISA,
MP_BUS_PCI,
MP_BUS_MCA,
- MP_BUS_NEC98
};
#endif
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 62b76cd96957..5679d4993072 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,10 @@
#ifndef __ASM_MSR_H
#define __ASM_MSR_H
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
/*
* Access to machine-specific registers (available on 586 and better only)
* Note: the rd* operations modify the parameters directly (without using
@@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
__asm__ __volatile__("rdpmc" \
: "=a" (low), "=d" (high) \
: "c" (counter))
+#endif /* !CONFIG_PARAVIRT */
/* symbolic names for some interesting MSRs */
/* Intel defined MSRs. */
@@ -141,6 +146,10 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_IA32_MC0_ADDR 0x402
#define MSR_IA32_MC0_MISC 0x403
+#define MSR_IA32_PEBS_ENABLE 0x3f1
+#define MSR_IA32_DS_AREA 0x600
+#define MSR_IA32_PERF_CAPABILITIES 0x345
+
/* Pentium IV performance counter MSRs */
#define MSR_P4_BPU_PERFCTR0 0x300
#define MSR_P4_BPU_PERFCTR1 0x301
@@ -284,4 +293,13 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_TMTA_LRTI_READOUT 0x80868018
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x309
+#define MSR_CORE_PERF_FIXED_CTR1 0x30a
+#define MSR_CORE_PERF_FIXED_CTR2 0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+
#endif /* __ASM_MSR_H */
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 269d315719ca..b04333ea6f31 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,6 +5,9 @@
#define ASM_NMI_H
#include <linux/pm.h>
+#include <asm/irq.h>
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
/**
* do_nmi_callback
@@ -42,4 +45,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
+#endif
+
#endif /* ASM_NMI_H */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index f5bf544c729a..fd3f64ace248 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -52,6 +52,7 @@ typedef struct { unsigned long long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pmd(x) ((pmd_t) { (x) } )
#define HPAGE_SHIFT 21
+#include <asm-generic/pgtable-nopud.h>
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
@@ -59,6 +60,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
#define pte_val(x) ((x).pte_low)
#define HPAGE_SHIFT 22
+#include <asm-generic/pgtable-nopmd.h>
#endif
#define PTE_MASK PAGE_MASK
@@ -112,18 +114,18 @@ extern int page_is_ram(unsigned long pagenr);
#ifdef __ASSEMBLY__
#define __PAGE_OFFSET CONFIG_PAGE_OFFSET
-#define __PHYSICAL_START CONFIG_PHYSICAL_START
#else
#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
-#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
#endif
-#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+/* __pa_symbol should be used for C visible symbols.
+ This seems to be the official gcc blessed way to do such arithmetic. */
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
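
__pa()/__va() remain pure offset arithmetic against the kernel's linear
mapping. With the default CONFIG_PAGE_OFFSET of 0xc0000000, for example:

	__pa(0xc0100000) == 0x00100000	/* kernel text at 1 MiB physical */
	__va(0x00100000) == (void *)0xc0100000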
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
new file mode 100644
index 000000000000..9f06265065f4
--- /dev/null
+++ b/include/asm-i386/paravirt.h
@@ -0,0 +1,505 @@
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+/* Various instructions on x86 need to be replaced for
+ * para-virtualization: those hooks are defined here. */
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_PARAVIRT
+/* These are the most performance critical ops, so we want to be able to patch
+ * callers */
+#define PARAVIRT_IRQ_DISABLE 0
+#define PARAVIRT_IRQ_ENABLE 1
+#define PARAVIRT_RESTORE_FLAGS 2
+#define PARAVIRT_SAVE_FLAGS 3
+#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
+#define PARAVIRT_INTERRUPT_RETURN 5
+#define PARAVIRT_STI_SYSEXIT 6
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0x0
+#define CLBR_EAX 0x1
+#define CLBR_ECX 0x2
+#define CLBR_EDX 0x4
+#define CLBR_ANY 0x7
+
+#ifndef __ASSEMBLY__
+struct thread_struct;
+struct Xgt_desc_struct;
+struct tss_struct;
+struct mm_struct;
+struct paravirt_ops
+{
+ unsigned int kernel_rpl;
+ int paravirt_enabled;
+ const char *name;
+
+ /*
+ * Patch may replace one of the defined code sequences with arbitrary
+ * code, subject to the same register constraints. This generally
+ * means the code is not free to clobber any registers other than EAX.
+ * The patch function should return the number of bytes of code
+ * generated, as we nop pad the rest in generic code.
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+
+ void (*arch_setup)(void);
+ char *(*memory_setup)(void);
+ void (*init_IRQ)(void);
+
+ void (*banner)(void);
+
+ unsigned long (*get_wallclock)(void);
+ int (*set_wallclock)(unsigned long);
+ void (*time_init)(void);
+
+ /* All the function pointers here are declared as "fastcall"
+ so that we get a specific register-based calling
+ convention. This makes it easier to implement inline
+ assembler replacements. */
+
+ void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx);
+
+ unsigned long (fastcall *get_debugreg)(int regno);
+ void (fastcall *set_debugreg)(int regno, unsigned long value);
+
+ void (fastcall *clts)(void);
+
+ unsigned long (fastcall *read_cr0)(void);
+ void (fastcall *write_cr0)(unsigned long);
+
+ unsigned long (fastcall *read_cr2)(void);
+ void (fastcall *write_cr2)(unsigned long);
+
+ unsigned long (fastcall *read_cr3)(void);
+ void (fastcall *write_cr3)(unsigned long);
+
+ unsigned long (fastcall *read_cr4_safe)(void);
+ unsigned long (fastcall *read_cr4)(void);
+ void (fastcall *write_cr4)(unsigned long);
+
+ unsigned long (fastcall *save_fl)(void);
+ void (fastcall *restore_fl)(unsigned long);
+ void (fastcall *irq_disable)(void);
+ void (fastcall *irq_enable)(void);
+ void (fastcall *safe_halt)(void);
+ void (fastcall *halt)(void);
+ void (fastcall *wbinvd)(void);
+
+ /* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+ u64 (fastcall *read_msr)(unsigned int msr, int *err);
+ int (fastcall *write_msr)(unsigned int msr, u64 val);
+
+ u64 (fastcall *read_tsc)(void);
+ u64 (fastcall *read_pmc)(void);
+
+ void (fastcall *load_tr_desc)(void);
+ void (fastcall *load_gdt)(const struct Xgt_desc_struct *);
+ void (fastcall *load_idt)(const struct Xgt_desc_struct *);
+ void (fastcall *store_gdt)(struct Xgt_desc_struct *);
+ void (fastcall *store_idt)(struct Xgt_desc_struct *);
+ void (fastcall *set_ldt)(const void *desc, unsigned entries);
+ unsigned long (fastcall *store_tr)(void);
+ void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu);
+ void (fastcall *write_ldt_entry)(void *dt, int entrynum,
+ u32 low, u32 high);
+ void (fastcall *write_gdt_entry)(void *dt, int entrynum,
+ u32 low, u32 high);
+ void (fastcall *write_idt_entry)(void *dt, int entrynum,
+ u32 low, u32 high);
+ void (fastcall *load_esp0)(struct tss_struct *tss,
+ struct thread_struct *thread);
+
+ void (fastcall *set_iopl_mask)(unsigned mask);
+
+ void (fastcall *io_delay)(void);
+ void (*const_udelay)(unsigned long loops);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ void (fastcall *apic_write)(unsigned long reg, unsigned long v);
+ void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v);
+ unsigned long (fastcall *apic_read)(unsigned long reg);
+#endif
+
+ void (fastcall *flush_tlb_user)(void);
+ void (fastcall *flush_tlb_kernel)(void);
+ void (fastcall *flush_tlb_single)(u32 addr);
+
+ void (fastcall *set_pte)(pte_t *ptep, pte_t pteval);
+ void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
+ void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+ void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+ void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+#ifdef CONFIG_X86_PAE
+ void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval);
+ void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+ void (fastcall *set_pud)(pud_t *pudp, pud_t pudval);
+ void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (fastcall *pmd_clear)(pmd_t *pmdp);
+#endif
+
+ /* These two are jmp to, not actually called. */
+ void (fastcall *irq_enable_sysexit)(void);
+ void (fastcall *iret)(void);
+};
+
+/* Mark a paravirt probe function. */
+#define paravirt_probe(fn) \
+ static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
+ __attribute__((__section__(".paravirtprobe"))) = fn
+
+extern struct paravirt_ops paravirt_ops;
+
+#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
+
+static inline void load_esp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ paravirt_ops.load_esp0(tss, thread);
+}
+
+#define ARCH_SETUP paravirt_ops.arch_setup();
+static inline unsigned long get_wallclock(void)
+{
+ return paravirt_ops.get_wallclock();
+}
+
+static inline int set_wallclock(unsigned long nowtime)
+{
+ return paravirt_ops.set_wallclock(nowtime);
+}
+
+static inline void do_time_init(void)
+{
+ return paravirt_ops.time_init();
+}
+
+/* The paravirtualized CPUID instruction. */
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ paravirt_ops.cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
+#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
+
+#define clts() paravirt_ops.clts()
+
+#define read_cr0() paravirt_ops.read_cr0()
+#define write_cr0(x) paravirt_ops.write_cr0(x)
+
+#define read_cr2() paravirt_ops.read_cr2()
+#define write_cr2(x) paravirt_ops.write_cr2(x)
+
+#define read_cr3() paravirt_ops.read_cr3()
+#define write_cr3(x) paravirt_ops.write_cr3(x)
+
+#define read_cr4() paravirt_ops.read_cr4()
+#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
+#define write_cr4(x) paravirt_ops.write_cr4(x)
+
+static inline void raw_safe_halt(void)
+{
+ paravirt_ops.safe_halt();
+}
+
+static inline void halt(void)
+{
+ paravirt_ops.safe_halt();
+}
+#define wbinvd() paravirt_ops.wbinvd()
+
+#define get_kernel_rpl() (paravirt_ops.kernel_rpl)
+
+#define rdmsr(msr,val1,val2) do { \
+ int _err; \
+ u64 _l = paravirt_ops.read_msr(msr,&_err); \
+ val1 = (u32)_l; \
+ val2 = _l >> 32; \
+} while(0)
+
+#define wrmsr(msr,val1,val2) do { \
+ u64 _l = ((u64)(val2) << 32) | (val1); \
+ paravirt_ops.write_msr((msr), _l); \
+} while(0)
+
+#define rdmsrl(msr,val) do { \
+ int _err; \
+ val = paravirt_ops.read_msr((msr),&_err); \
+} while(0)
+
+#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
+#define wrmsr_safe(msr,a,b) ({ \
+ u64 _l = ((u64)(b) << 32) | (a); \
+ paravirt_ops.write_msr((msr),_l); \
+})
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({ \
+ int _err; \
+ u64 _l = paravirt_ops.read_msr(msr,&_err); \
+ (*a) = (u32)_l; \
+ (*b) = _l >> 32; \
+ _err; })
+
+#define rdtsc(low,high) do { \
+ u64 _l = paravirt_ops.read_tsc(); \
+ low = (u32)_l; \
+ high = _l >> 32; \
+} while(0)
+
+#define rdtscl(low) do { \
+ u64 _l = paravirt_ops.read_tsc(); \
+ low = (int)_l; \
+} while(0)
+
+#define rdtscll(val) (val = paravirt_ops.read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) do { \
+ u64 _l = paravirt_ops.read_pmc(); \
+ low = (u32)_l; \
+ high = _l >> 32; \
+} while(0)
+
+#define load_TR_desc() (paravirt_ops.load_tr_desc())
+#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
+#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
+#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
+#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
+#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
+#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
+#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
+#define write_ldt_entry(dt, entry, low, high) \
+ (paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
+#define write_gdt_entry(dt, entry, low, high) \
+ (paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
+#define write_idt_entry(dt, entry, low, high) \
+ (paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
+#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void) {
+ paravirt_ops.io_delay();
+#ifdef REALLY_SLOW_IO
+ paravirt_ops.io_delay();
+ paravirt_ops.io_delay();
+ paravirt_ops.io_delay();
+#endif
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Basic functions accessing APICs.
+ */
+static inline void apic_write(unsigned long reg, unsigned long v)
+{
+ paravirt_ops.apic_write(reg,v);
+}
+
+static inline void apic_write_atomic(unsigned long reg, unsigned long v)
+{
+ paravirt_ops.apic_write_atomic(reg,v);
+}
+
+static inline unsigned long apic_read(unsigned long reg)
+{
+ return paravirt_ops.apic_read(reg);
+}
+#endif
+
+
+#define __flush_tlb() paravirt_ops.flush_tlb_user()
+#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
+#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ paravirt_ops.set_pte(ptep, pteval);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
+{
+ paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ paravirt_ops.set_pmd(pmdp, pmdval);
+}
+
+static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+ paravirt_ops.pte_update(mm, addr, ptep);
+}
+
+static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+ paravirt_ops.pte_update_defer(mm, addr, ptep);
+}
+
+#ifdef CONFIG_X86_PAE
+static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
+{
+ paravirt_ops.set_pte_atomic(ptep, pteval);
+}
+
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ paravirt_ops.set_pte_present(mm, addr, ptep, pte);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pudval)
+{
+ paravirt_ops.set_pud(pudp, pudval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ paravirt_ops.pte_clear(mm, addr, ptep);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ paravirt_ops.pmd_clear(pmdp);
+}
+#endif
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch {
+ u8 *instr; /* original instructions */
+ u8 instrtype; /* type of this instruction */
+ u8 len; /* length of original instruction */
+ u16 clobbers; /* what registers you may clobber */
+};
+
+#define paravirt_alt(insn_string, typenum, clobber) \
+ "771:\n\t" insn_string "\n" "772:\n" \
+ ".pushsection .parainstructions,\"a\"\n" \
+ " .long 771b\n" \
+ " .byte " __stringify(typenum) "\n" \
+ " .byte 772b-771b\n" \
+ " .short " __stringify(clobber) "\n" \
+ ".popsection"
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+ unsigned long f;
+
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%1;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_SAVE_FLAGS, CLBR_NONE)
+ : "=a"(f): "m"(paravirt_ops.save_fl)
+ : "memory", "cc");
+ return f;
+}
+
+static inline void raw_local_irq_restore(unsigned long f)
+{
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%1;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
+ : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
+ : "memory", "cc");
+}
+
+static inline void raw_local_irq_disable(void)
+{
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%0;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+ : : "m" (paravirt_ops.irq_disable)
+ : "memory", "eax", "cc");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%0;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+ : : "m" (paravirt_ops.irq_enable)
+ : "memory", "eax", "cc");
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+ unsigned long f;
+
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%1; pushl %%eax;"
+ "call *%2; popl %%eax;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
+ CLBR_NONE)
+ : "=a"(f)
+ : "m" (paravirt_ops.save_fl),
+ "m" (paravirt_ops.irq_disable)
+ : "memory", "cc");
+ return f;
+}
+
+#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \
+ "call *paravirt_ops+%c[irq_disable];" \
+ "popl %%edx; popl %%ecx", \
+ PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+
+#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \
+ "call *paravirt_ops+%c[irq_enable];" \
+ "popl %%edx; popl %%ecx", \
+ PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+#define CLI_STI_CLOBBERS , "%eax"
+#define CLI_STI_INPUT_ARGS \
+ , \
+ [irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \
+ [irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
+
+#else /* __ASSEMBLY__ */
+
+#define PARA_PATCH(ptype, clobbers, ops) \
+771:; \
+ ops; \
+772:; \
+ .pushsection .parainstructions,"a"; \
+ .long 771b; \
+ .byte ptype; \
+ .byte 772b-771b; \
+ .short clobbers; \
+ .popsection
+
+#define INTERRUPT_RETURN \
+ PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY, \
+ jmp *%cs:paravirt_ops+PARAVIRT_iret)
+
+#define DISABLE_INTERRUPTS(clobbers) \
+ PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers, \
+ pushl %ecx; pushl %edx; \
+ call *paravirt_ops+PARAVIRT_irq_disable; \
+ popl %edx; popl %ecx) \
+
+#define ENABLE_INTERRUPTS(clobbers) \
+ PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers, \
+ pushl %ecx; pushl %edx; \
+ call *%cs:paravirt_ops+PARAVIRT_irq_enable; \
+ popl %edx; popl %ecx)
+
+#define ENABLE_INTERRUPTS_SYSEXIT \
+ PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY, \
+ jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
+
+#define GET_CR0_INTO_EAX \
+ call *paravirt_ops+PARAVIRT_read_cr0
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+#endif /* __ASM_PARAVIRT_H */
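
A backend activates all of this by filling in paravirt_ops before any hook
is used. A deliberately skeletal example (the "demo" names are
hypothetical; a real backend must leave every hook it does not override
pointing at a valid native implementation):

	static fastcall void demo_irq_disable(void)
	{
		/* ask the hypervisor to mask virtual interrupts */
	}

	static asmlinkage void __init demo_probe(void)
	{
		paravirt_ops.name = "demo";
		paravirt_ops.paravirt_enabled = 1;
		paravirt_ops.kernel_rpl = 1;	/* kernel runs in ring 1 */
		paravirt_ops.irq_disable = demo_irq_disable;
		/* ... and so on for the remaining hooks ... */
	}
	paravirt_probe(demo_probe);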
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h
new file mode 100644
index 000000000000..2ba2736aa109
--- /dev/null
+++ b/include/asm-i386/pda.h
@@ -0,0 +1,100 @@
+/*
+ Per-processor Data Areas
+ Jeremy Fitzhardinge <jeremy@goop.org> 2006
+ Based on asm-x86_64/pda.h by Andi Kleen.
+ */
+#ifndef _I386_PDA_H
+#define _I386_PDA_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct i386_pda
+{
+ struct i386_pda *_pda; /* pointer to self */
+
+ int cpu_number;
+ struct task_struct *pcurrent; /* current process */
+ struct pt_regs *irq_regs;
+};
+
+extern struct i386_pda *_cpu_pda[];
+
+#define cpu_pda(i) (_cpu_pda[i])
+
+#define pda_offset(field) offsetof(struct i386_pda, field)
+
+extern void __bad_pda_field(void);
+
+/* This variable is never instantiated. It is only used as a stand-in
+ for the real per-cpu PDA memory, so that gcc can understand what
+ memory operations the inline asms() below are performing. This
+ eliminates the need to make the asms volatile or have memory
+ clobbers, so gcc can readily analyse them. */
+extern struct i386_pda _proxy_pda;
+
+#define pda_to_op(op,field,val) \
+ do { \
+ typedef typeof(_proxy_pda.field) T__; \
+ if (0) { T__ tmp__; tmp__ = (val); } \
+ switch (sizeof(_proxy_pda.field)) { \
+ case 1: \
+ asm(op "b %1,%%gs:%c2" \
+ : "+m" (_proxy_pda.field) \
+ :"ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ case 2: \
+ asm(op "w %1,%%gs:%c2" \
+ : "+m" (_proxy_pda.field) \
+ :"ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ case 4: \
+ asm(op "l %1,%%gs:%c2" \
+ : "+m" (_proxy_pda.field) \
+ :"ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ default: __bad_pda_field(); \
+ } \
+ } while (0)
+
+#define pda_from_op(op,field) \
+ ({ \
+ typeof(_proxy_pda.field) ret__; \
+ switch (sizeof(_proxy_pda.field)) { \
+ case 1: \
+ asm(op "b %%gs:%c1,%0" \
+ : "=r" (ret__) \
+ : "i" (pda_offset(field)), \
+ "m" (_proxy_pda.field)); \
+ break; \
+ case 2: \
+ asm(op "w %%gs:%c1,%0" \
+ : "=r" (ret__) \
+ : "i" (pda_offset(field)), \
+ "m" (_proxy_pda.field)); \
+ break; \
+ case 4: \
+ asm(op "l %%gs:%c1,%0" \
+ : "=r" (ret__) \
+ : "i" (pda_offset(field)), \
+ "m" (_proxy_pda.field)); \
+ break; \
+ default: __bad_pda_field(); \
+ } \
+ ret__; })
+
+/* Return a pointer to a pda field */
+#define pda_addr(field) \
+ ((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \
+ pda_offset(field)))
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
+
+#endif /* _I386_PDA_H */
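
Each accessor compiles down to a single %gs-relative instruction at the
field's fixed offset. With the layout above (4-byte self pointer first),
read_pda(cpu_number) becomes roughly `movl %gs:4, <reg>`. Usage sketch:

	int cpu = read_pda(cpu_number);	/* movl %gs:4, <reg> */
	write_pda(irq_regs, NULL);	/* movl $0, %gs:12   */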
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index 5764afa4b6a4..510ae1d3486c 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -1,6 +1,31 @@
#ifndef __ARCH_I386_PERCPU__
#define __ARCH_I386_PERCPU__
+#ifndef __ASSEMBLY__
#include <asm-generic/percpu.h>
+#else
+
+/*
+ * PER_CPU finds an address of a per-cpu variable.
+ *
+ * Args:
+ * var - variable name
+ * cpu - 32bit register containing the current CPU number
+ *
+ * The resulting address is stored in the "cpu" argument.
+ *
+ * Example:
+ * PER_CPU(cpu_gdt_descr, %ebx)
+ */
+#ifdef CONFIG_SMP
+#define PER_CPU(var, cpu) \
+ movl __per_cpu_offset(,cpu,4), cpu; \
+ addl $per_cpu__/**/var, cpu;
+#else /* ! SMP */
+#define PER_CPU(var, cpu) \
+ movl $per_cpu__/**/var, cpu;
+#endif /* SMP */
+
+#endif /* !__ASSEMBLY__ */
#endif /* __ARCH_I386_PERCPU__ */
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 8d8d3b9ecdb0..38c3fcc0676d 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -1,8 +1,6 @@
#ifndef _I386_PGTABLE_2LEVEL_H
#define _I386_PGTABLE_2LEVEL_H
-#include <asm-generic/pgtable-nopmd.h>
-
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
@@ -13,17 +11,19 @@
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
+#ifndef CONFIG_PARAVIRT
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
+#define raw_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index c2d701ea35be..7a2318f38303 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -1,8 +1,6 @@
#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H
-#include <asm-generic/pgtable-nopud.h>
-
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs.
@@ -44,6 +42,7 @@ static inline int pte_exec_kernel(pte_t pte)
return pte_x(pte);
}
+#ifndef CONFIG_PARAVIRT
/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
* not attempt to update the pte. In places where this is
@@ -81,25 +80,6 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte
(*(pudptr) = (pudval))
/*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed...
- * We do not let the generic code free and clear pgd entries due to
- * this erratum.
- */
-static inline void pud_clear (pud_t * pud) { }
-
-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
-
-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
- pmd_index(address))
-
-/*
* For PTEs and PDEs, we must clear the P-bit first when clearing a page table
* entry, so clear the bottom half first and enforce ordering with a compiler
* barrier.
@@ -118,9 +98,28 @@ static inline void pmd_clear(pmd_t *pmd)
smp_wmb();
*(tmp + 1) = 0;
}
+#endif
+
+/*
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+ * the TLB via cr3 if the top-level pgd is changed...
+ * We do not let the generic code free and clear pgd entries due to
+ * this erratum.
+ */
+static inline void pud_clear (pud_t * pud) { }
+
+#define pud_page(pud) \
+((struct page *) __va(pud_val(pud) & PAGE_MASK))
+
+#define pud_page_vaddr(pud) \
+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+ pmd_index(address))
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index bfee7ddfff53..e6a4723f0eb1 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
+#include <asm/paravirt.h>
#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
@@ -246,6 +247,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
# include <asm/pgtable-2level.h>
#endif
+#ifndef CONFIG_PARAVIRT
/*
* Rules for using pte_update - it must be called after any PTE update which
* has not been done using the set_pte / clear_pte interfaces. It is used by
@@ -261,7 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
-
+#endif
/*
* We only update the dirty/accessed state if we set
@@ -275,7 +277,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
do { \
if (dirty) { \
(ptep)->pte_low = (entry).pte_low; \
- pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
} while (0)
@@ -305,7 +307,7 @@ do { \
__dirty = pte_dirty(*(ptep)); \
if (__dirty) { \
clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \
- pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
__dirty; \
@@ -318,12 +320,20 @@ do { \
__young = pte_young(*(ptep)); \
if (__young) { \
clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \
- pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
__young; \
})
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = raw_ptep_get_and_clear(ptep);
+ pte_update(mm, addr, ptep);
+ return pte;
+}
+
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index e0ddca94d50c..a52d65440429 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -20,6 +20,7 @@
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
+#include <linux/init.h>
/* flag for disabling the tsc */
extern int tsc_disable;
@@ -72,6 +73,7 @@ struct cpuinfo_x86 {
#endif
unsigned char x86_max_cores; /* cpuid returned max cores value */
unsigned char apicid;
+ unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
unsigned char booted_cores; /* number of cores as seen by OS */
__u8 phys_proc_id; /* Physical processor id. */
@@ -111,6 +113,8 @@ extern struct cpuinfo_x86 cpu_data[];
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;
+void __init cpu_detect(struct cpuinfo_x86 *c);
+
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
@@ -143,8 +147,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
+static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
__asm__("cpuid"
@@ -155,59 +159,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
: "0" (*eax), "2" (*ecx));
}
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-{
- *eax = op;
- *ecx = 0;
- __cpuid(eax, ebx, ecx, edx);
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
- int *edx)
-{
- *eax = op;
- *ecx = count;
- __cpuid(eax, ebx, ecx, edx);
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return edx;
-}
-
#define load_cr3(pgdir) write_cr3(__pa(pgdir))
/*
@@ -473,6 +424,7 @@ struct thread_struct {
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
+ .gs = __KERNEL_PDA, \
}
/*
@@ -489,18 +441,9 @@ struct thread_struct {
.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
}
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
- tss->esp0 = thread->esp0;
- /* This can only happen when SEP is enabled, no need to test "SEP"arately */
- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
- tss->ss1 = thread->sysenter_cs;
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
- }
-}
-
#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
+ __asm__("movl %0,%%fs": :"r" (0)); \
+ regs->xgs = 0; \
set_fs(USER_DS); \
regs->xds = __USER_DS; \
regs->xes = __USER_DS; \
@@ -510,33 +453,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
regs->esp = new_esp; \
} while (0)
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register) \
- __asm__("movl %%db" #register ", %0" \
- :"=r" (var))
-#define set_debugreg(value, register) \
- __asm__("movl %0,%%db" #register \
- : /* no output */ \
- :"r" (value))
-
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static inline void set_iopl_mask(unsigned mask)
-{
- unsigned int reg;
- __asm__ __volatile__ ("pushfl;"
- "popl %0;"
- "andl %1, %0;"
- "orl %2, %0;"
- "pushl %0;"
- "popfl"
- : "=&r" (reg)
- : "i" (~X86_EFLAGS_IOPL), "r" (mask));
-}
-
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
@@ -628,6 +544,105 @@ static inline void rep_nop(void)
#define cpu_relax() rep_nop()
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+ tss->esp0 = thread->esp0;
+ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+ tss->ss1 = thread->sysenter_cs;
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ }
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register) \
+ __asm__("movl %%db" #register ", %0" \
+ :"=r" (var))
+#define set_debugreg(value, register) \
+ __asm__("movl %0,%%db" #register \
+ : /* no output */ \
+ :"r" (value))
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static fastcall inline void native_set_iopl_mask(unsigned mask)
+{
+ unsigned int reg;
+ __asm__ __volatile__ ("pushfl;"
+ "popl %0;"
+ "andl %1, %0;"
+ "orl %2, %0;"
+ "pushl %0;"
+ "popfl"
+ : "=&r" (reg)
+ : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
+/*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+ *eax = op;
+ *ecx = 0;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+ int *edx)
+{
+ *eax = op;
+ *ecx = count;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return edx;
+}
+
/* generic versions from gas */
#define GENERIC_NOP1 ".byte 0x90\n"
#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
@@ -727,4 +742,7 @@ extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+extern int init_gdt(int cpu, struct task_struct *idle);
+extern void secondary_cpu_init(void);
+
#endif /* __ASM_I386_PROCESSOR_H */
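
/*
 * Illustrative sketch (not part of the patch): how the cpuid helpers
 * above behave. Leaf 0 returns the maximum basic leaf in EAX and the
 * vendor string in EBX/EDX/ECX; %ecx is cleared first for CPUs that
 * would otherwise leave it stale. User-space C, i386/x86-64 only; the
 * inline asm mirrors what native_cpuid() is expected to do.
 */
#include <stdio.h>
#include <string.h>

static inline void demo_cpuid(unsigned int op, unsigned int *eax,
			      unsigned int *ebx, unsigned int *ecx,
			      unsigned int *edx)
{
	*eax = op;
	*ecx = 0;	/* clear ECX for CPUs that do not set or clear it */
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	demo_cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* vendor string order: EBX, EDX, ECX */
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	printf("max basic leaf %u, vendor \"%s\"\n", eax, vendor);
	return 0;
}
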
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index d505f501077a..bdbc894339b4 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -16,6 +16,8 @@ struct pt_regs {
long eax;
int xds;
int xes;
+ /* int xfs; */
+ int xgs;
long orig_eax;
long eip;
int xcs;
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index b7ab59685ba7..3c796af33776 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -39,7 +39,7 @@
* 25 - APM BIOS support
*
* 26 - ESPFIX small SS
- * 27 - unused
+ * 27 - PDA [ per-cpu private data area ]
* 28 - unused
* 29 - unused
* 30 - unused
@@ -74,6 +74,9 @@
#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+#define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15)
+#define __KERNEL_PDA (GDT_ENTRY_PDA * 8)
+
#define GDT_ENTRY_DOUBLEFAULT_TSS 31
/*
@@ -128,5 +131,7 @@
#define SEGMENT_LDT 0x4
#define SEGMENT_GDT 0x0
+#ifndef CONFIG_PARAVIRT
#define get_kernel_rpl() 0
#endif
+#endif
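
/*
 * Worked example (assuming GDT_ENTRY_KERNEL_BASE is 12, as in this
 * tree): an x86 segment selector is (index << 3) | TI | RPL, so
 * multiplying a GDT index by 8 yields a GDT selector with RPL 0.
 * That is why GDT entry 27 above gives __KERNEL_PDA == 0xd8.
 */
#include <stdio.h>

int main(void)
{
	unsigned int gdt_entry_kernel_base = 12;		/* from segment.h */
	unsigned int gdt_entry_pda = gdt_entry_kernel_base + 15; /* 27 */
	unsigned int kernel_pda = gdt_entry_pda * 8;	/* index<<3, TI=0, RPL=0 */

	printf("GDT_ENTRY_PDA = %u, __KERNEL_PDA = 0x%x\n",
	       gdt_entry_pda, kernel_pda);		/* 27, 0xd8 */
	return 0;
}
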
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index c5b504bfbaad..67659dbaf120 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_SIZE];
struct e820entry;
char * __init machine_specific_memory_setup(void);
+char *memory_setup(void);
int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index bd59c1508e71..64fe624c02ca 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
+#include <asm/pda.h>
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -56,7 +57,7 @@ extern void cpu_uninit(void);
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (read_pda(cpu_number))
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_callin_map;
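
/*
 * Sketch of why this change helps (assumptions: a struct i386_pda
 * with a cpu_number field, and %gs pointing at the per-cpu PDA, as
 * set up elsewhere in this series). read_pda(cpu_number) can compile
 * down to a single %gs-relative load, so smp_processor_id() no longer
 * has to derive the CPU from the stack via current_thread_info().
 * Kernel context assumed; hypothetical demo_* names.
 */
struct demo_pda {
	int cpu_number;
};

static inline int demo_read_cpu_number(void)
{
	int cpu;

	/* one instruction: movl %gs:0, <reg> (offset 0 within the PDA) */
	asm("movl %%gs:%c1, %0"
	    : "=r" (cpu)
	    : "i" (__builtin_offsetof(struct demo_pda, cpu_number)));
	return cpu;
}
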
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index c18b71fae6b3..d3bcebed60ca 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,8 +7,14 @@
#include <asm/processor.h>
#include <linux/compiler.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#define CLI_STRING "cli"
#define STI_STRING "sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
+#endif /* CONFIG_PARAVIRT */
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -53,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
{
asm volatile(
"\n1:\t"
- LOCK_PREFIX " ; decb %0\n\t"
+ LOCK_PREFIX " ; decb %[slock]\n\t"
"jns 5f\n"
"2:\t"
- "testl $0x200, %1\n\t"
+ "testl $0x200, %[flags]\n\t"
"jz 4f\n\t"
STI_STRING "\n"
"3:\t"
"rep;nop\n\t"
- "cmpb $0, %0\n\t"
+ "cmpb $0, %[slock]\n\t"
"jle 3b\n\t"
CLI_STRING "\n\t"
"jmp 1b\n"
"4:\t"
"rep;nop\n\t"
- "cmpb $0, %0\n\t"
+ "cmpb $0, %[slock]\n\t"
"jg 1b\n\t"
"jmp 4b\n"
"5:\n\t"
- : "+m" (lock->slock) : "r" (flags) : "memory");
+ : [slock] "+m" (lock->slock)
+ : [flags] "r" (flags)
+ CLI_STI_INPUT_ARGS
+ : "memory" CLI_STI_CLOBBERS);
}
#endif
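
/*
 * Why the switch to named asm operands above: with positional %0/%1,
 * appending CLI_STI_INPUT_ARGS to the input list would renumber the
 * operands and silently break the template; %[slock] and %[flags]
 * stay valid however many operands a paravirt build appends. A
 * minimal standalone sketch of the named-operand form (plain C,
 * hypothetical demo_* name):
 */
static inline int demo_dec_and_test_negative(int *counter)
{
	unsigned char went_negative;

	asm volatile("lock; decl %[cnt]\n\t"
		     "sets %[neg]"
		     : [cnt] "+m" (*counter), [neg] "=qm" (went_negative)
		     : : "memory");
	return went_negative;	/* nonzero if the decrement went below zero */
}
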
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index c1da5caafaf7..8dbaafe611ff 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -12,12 +12,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
struct saved_context {
u16 es, fs, gs, ss;
unsigned long cr0, cr2, cr3, cr4;
- u16 gdt_pad;
- u16 gdt_limit;
- unsigned long gdt_base;
- u16 idt_pad;
- u16 idt_limit;
- unsigned long idt_base;
+ struct Xgt_desc_struct gdt;
+ struct Xgt_desc_struct idt;
u16 ldt;
u16 tss;
unsigned long tr;
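
/*
 * For reference, a sketch of the shape being substituted in: the
 * open-coded pad/limit/base triples are replaced by the kernel's
 * existing descriptor-table pointer type, whose essential layout
 * matches the operand of sgdt/sidt -- a 16-bit limit followed by a
 * 32-bit linear base. See desc.h for the real Xgt_desc_struct
 * definition; this demo_* struct is only illustrative.
 */
struct demo_desc_ptr {
	unsigned short size;	/* limit: size of the table minus one */
	unsigned long address;	/* 32-bit linear base address */
} __attribute__((packed));
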
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6dabbcd6e6a..a6d20d9a1a30 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,6 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
#define savesegment(seg, value) \
asm volatile("mov %%" #seg ",%0":"=rm" (value))
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#define read_cr0() ({ \
unsigned int __dummy; \
__asm__ __volatile__( \
@@ -139,17 +142,18 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
#define write_cr4(x) \
__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
-/*
- * Clear and set 'TS' bit respectively
- */
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory")
+
+/* Clear the 'TS' bit */
#define clts() __asm__ __volatile__ ("clts")
+#endif /* CONFIG_PARAVIRT */
+
+/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
#endif /* __KERNEL__ */
-#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory")
-
static inline unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
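
/*
 * Note on stts() staying outside the #ifdef above: CR0 bit 3
 * (value 8) is TS, and stts() is composed from read_cr0()/write_cr0(),
 * which a paravirt build can already intercept. Sketch of the bit
 * arithmetic only (plain C; the CR0 value is just a typical
 * protected-mode example, an assumption for illustration):
 */
#include <stdio.h>

#define X86_CR0_TS 0x00000008	/* task switched */

int main(void)
{
	unsigned long cr0 = 0x80050033;	/* e.g. PE|MP|ET|NE|WP|AM|PG */

	cr0 |= X86_CR0_TS;		/* what stts() does via write_cr0() */
	printf("CR0 with TS set: 0x%lx\n", cr0);
	return 0;
}
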
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 54d6d7aea938..46d32ad92082 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -95,15 +95,7 @@ static inline struct thread_info *current_thread_info(void)
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) \
- ({ \
- struct thread_info *ret; \
- \
- ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \
- if (ret) \
- memset(ret, 0, THREAD_SIZE); \
- ret; \
- })
+#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
#else
#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
#endif
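
/*
 * The kzalloc() one-liner is behaviourally equivalent to the removed
 * kmalloc()+memset() pair: it returns zeroed memory, or NULL on
 * failure. Rough user-space sketch of that contract (malloc stands
 * in for kmalloc(size, GFP_KERNEL); not the real mm/ implementation):
 */
#include <stdlib.h>
#include <string.h>

static inline void *demo_kzalloc(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);	/* callers may rely on zeroed memory */
	return p;
}
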
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
new file mode 100644
index 000000000000..ea8065af825a
--- /dev/null
+++ b/include/asm-i386/time.h
@@ -0,0 +1,41 @@
+#ifndef _ASMi386_TIME_H
+#define _ASMi386_TIME_H
+
+#include <linux/efi.h>
+#include "mach_time.h"
+
+static inline unsigned long native_get_wallclock(void)
+{
+ unsigned long retval;
+
+ if (efi_enabled)
+ retval = efi_get_time();
+ else
+ retval = mach_get_cmos_time();
+
+ return retval;
+}
+
+static inline int native_set_wallclock(unsigned long nowtime)
+{
+ int retval;
+
+ if (efi_enabled)
+ retval = efi_set_rtc_mmss(nowtime);
+ else
+ retval = mach_set_rtc_mmss(nowtime);
+
+ return retval;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+
+#define get_wallclock() native_get_wallclock()
+#define set_wallclock(x) native_set_wallclock(x)
+#define do_time_init() time_init_hook()
+
+#endif /* CONFIG_PARAVIRT */
+
+#endif
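
/*
 * Sketch of the override pattern this new header follows (all demo_*
 * names hypothetical): callers always use get_wallclock(); a native
 * build resolves it directly to native_get_wallclock(), while a
 * paravirt build can route it through a hook the hypervisor installs.
 */
#include <stdio.h>

static unsigned long demo_native_get_time(void) { return 1234; }

/* a hypervisor backend could install its own function here */
static unsigned long (*demo_get_time_op)(void) = demo_native_get_time;

#define demo_get_wallclock() (demo_get_time_op())

int main(void)
{
	printf("wallclock: %lu\n", demo_get_wallclock());
	return 0;
}
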
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index 360648b0f2b3..4dd82840d53b 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -4,7 +4,15 @@
#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() \
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+#define __native_flush_tlb() \
do { \
unsigned int tmpreg; \
\
@@ -19,7 +27,7 @@
* Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often.
*/
-#define __flush_tlb_global() \
+#define __native_flush_tlb_global() \
do { \
unsigned int tmpreg, cr4, cr4_orig; \
\
@@ -36,6 +44,9 @@
: "memory"); \
} while (0)
+#define __native_flush_tlb_single(addr) \
+ __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
+
# define __flush_tlb_all() \
do { \
if (cpu_has_pge) \
@@ -46,9 +57,6 @@
#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-#define __flush_tlb_single(addr) \
- __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
-
#ifdef CONFIG_X86_INVLPG
# define __flush_tlb_one(addr) __flush_tlb_single(addr)
#else
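
/*
 * What the renamed natives do, in brief: __native_flush_tlb() reloads
 * CR3 (dropping all non-global TLB entries), __native_flush_tlb_global()
 * additionally toggles CR4.PGE to drop global entries, and
 * __native_flush_tlb_single() evicts one entry with invlpg. Sketch of
 * the CR3-reload case (kernel context assumed; CPL 0 only, not
 * runnable from user space):
 */
static inline void demo_flush_tlb(void)
{
	unsigned long cr3;

	asm volatile("movl %%cr3, %0\n\t"	/* read current CR3 */
		     "movl %0, %%cr3"		/* write it back: full flush */
		     : "=r" (cr3) : : "memory");
}
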
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 5031d693b89d..aa2c931e30db 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -71,6 +71,7 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
info->regs.xss = __KERNEL_DS;
info->regs.xds = __USER_DS;
info->regs.xes = __USER_DS;
+ info->regs.xgs = __KERNEL_PDA;
}
extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
@@ -78,17 +79,13 @@ extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
void *arg),
void *arg);
-static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
{
-#if 0 /* This can only work when selector register and EFLAGS saves/restores
- are properly annotated (and tracked in UNW_REGISTER_INFO). */
- return user_mode_vm(&info->regs);
-#else
- return info->regs.eip < PAGE_OFFSET
+ return user_mode_vm(&info->regs)
+ || info->regs.eip < PAGE_OFFSET
|| (info->regs.eip >= __fix_to_virt(FIX_VDSO)
- && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
+ && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
|| info->regs.esp < PAGE_OFFSET;
-#endif
}
#else
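
/*
 * The user-mode heuristic above, sketched with typical i386 constants
 * (PAGE_OFFSET 0xc0000000, 4k pages, vDSO fixmap traditionally at
 * 0xffffe000 -- all assumptions for illustration): an EIP or ESP
 * below the kernel mapping, or an EIP inside the vDSO page, counts as
 * user mode. The real version first also checks the saved CS/EFLAGS
 * via user_mode_vm(), omitted here.
 */
#include <stdio.h>

#define DEMO_PAGE_OFFSET 0xc0000000UL
#define DEMO_PAGE_SIZE   4096UL

static int demo_user_mode(unsigned long eip, unsigned long esp,
			  unsigned long vdso)
{
	return eip < DEMO_PAGE_OFFSET
		|| (eip >= vdso && eip < vdso + DEMO_PAGE_SIZE)
		|| esp < DEMO_PAGE_OFFSET;
}

int main(void)
{
	/* a typical user frame: prints 1 */
	printf("%d\n", demo_user_mode(0x08048000, 0xbfff0000, 0xffffe000));
	return 0;
}
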
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 952fd6957380..a5edf517b992 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -145,26 +145,13 @@ struct vm86plus_struct {
* at the end of the structure. Look at ptrace.h to see the "normal"
* setup. For user space layout see 'struct vm86_regs' above.
*/
+#include <asm/ptrace.h>
struct kernel_vm86_regs {
/*
* normal regs, with special meaning for the segment descriptors..
*/
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- long __null_ds;
- long __null_es;
- long orig_eax;
- long eip;
- unsigned short cs, __csh;
- long eflags;
- long esp;
- unsigned short ss, __ssh;
+ struct pt_regs pt;
/*
* these are specific to v86 mode:
*/