author | David Woodhouse <dwmw2@infradead.org> | 2007-07-23 11:20:10 +0200
committer | David Woodhouse <dwmw2@infradead.org> | 2007-07-23 11:20:10 +0200
commit | 39fe5434cb9de5da40510028b17b96bc4eb312b3 (patch)
tree | 7a02a317b9ad57da51ca99887c119e779ccf3f13 /include/asm-x86_64
parent | [JFFS2] Add declaration of jffs2_lzo_{init,exit} to compr.h (diff)
parent | Linux 2.6.23-rc1 (diff)
download | linux-39fe5434cb9de5da40510028b17b96bc4eb312b3.tar.xz linux-39fe5434cb9de5da40510028b17b96bc4eb312b3.zip
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/asm-x86_64')
47 files changed, 411 insertions, 456 deletions
diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h index 7255cde06538..e789300e41a5 100644 --- a/include/asm-x86_64/a.out.h +++ b/include/asm-x86_64/a.out.h @@ -21,7 +21,8 @@ struct exec #ifdef __KERNEL__ #include <linux/thread_info.h> -#define STACK_TOP TASK_SIZE +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE64 #endif #endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h index a29f05087a31..1da8f49c0fe2 100644 --- a/include/asm-x86_64/acpi.h +++ b/include/asm-x86_64/acpi.h @@ -29,6 +29,7 @@ #ifdef __KERNEL__ #include <acpi/pdc_intel.h> +#include <asm/numa.h> #define COMPILER_DEPENDENT_INT64 long long #define COMPILER_DEPENDENT_UINT64 unsigned long long @@ -141,6 +142,16 @@ extern int acpi_pci_disabled; extern int acpi_skip_timer_override; extern int acpi_use_timer_override; +#ifdef CONFIG_ACPI_NUMA +extern void __init acpi_fake_nodes(const struct bootnode *fake_nodes, + int num_nodes); +#else +static inline void acpi_fake_nodes(const struct bootnode *fake_nodes, + int num_nodes) +{ +} +#endif + #endif /*__KERNEL__*/ #endif /*_ASM_ACPI_H*/ diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h index a09427640764..ab161e810151 100644 --- a/include/asm-x86_64/alternative.h +++ b/include/asm-x86_64/alternative.h @@ -5,6 +5,41 @@ #include <linux/types.h> #include <linux/stddef.h> + +/* + * Alternative inline assembly for SMP. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. + * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + " .align 8\n" \ + " .quad 661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +#else /* ! CONFIG_SMP */ +#define LOCK_PREFIX "" +#endif + +/* This must be included *after* the definition of LOCK_PREFIX */ #include <asm/cpufeature.h> struct alt_instr { @@ -108,39 +143,6 @@ static inline void alternatives_smp_switch(int smp) {} */ #define ASM_OUTPUT2(a, b) a, b -/* - * Alternative inline assembly for SMP. - * - * The LOCK_PREFIX macro defined here replaces the LOCK and - * LOCK_PREFIX macros used everywhere in the source tree. - * - * SMP alternatives use the same data structures as the other - * alternatives and the X86_FEATURE_UP flag to indicate the case of a - * UP system running a SMP kernel. The existing apply_alternatives() - * works fine for patching a SMP kernel for UP. - * - * The SMP alternative tables can be kept after boot and contain both - * UP and SMP versions of the instructions to allow switching back to - * SMP at runtime, when hotplugging in a new CPU, which is especially - * useful in virtualized environments. 
- * - * The very common lock prefix is handled as special case in a - * separate table which is a pure address list without replacement ptr - * and size information. That keeps the table sizes small. - */ - -#ifdef CONFIG_SMP -#define LOCK_PREFIX \ - ".section .smp_locks,\"a\"\n" \ - " .align 8\n" \ - " .quad 661f\n" /* address */ \ - ".previous\n" \ - "661:\n\tlock; " - -#else /* ! CONFIG_SMP */ -#define LOCK_PREFIX "" -#endif - struct paravirt_patch; #ifdef CONFIG_PARAVIRT void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); @@ -152,4 +154,6 @@ apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) #define __parainstructions_end NULL #endif +extern void text_poke(void *addr, unsigned char *opcode, int len); + #endif /* _X86_64_ALTERNATIVE_H */ diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index 45e9fca1febc..85125ef3c414 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -83,8 +83,10 @@ extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void setup_apic_routing(void); -extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, - unsigned char msg_type, unsigned char mask); +extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, + unsigned char msg_type, unsigned char mask); + +extern int apic_is_clustered_box(void); #define K8_APIC_EXT_LVT_BASE 0x500 #define K8_APIC_EXT_INT_MSG_FIX 0x0 diff --git a/include/asm-x86_64/auxvec.h b/include/asm-x86_64/auxvec.h index 2403c4cfced2..1d5ab0d03950 100644 --- a/include/asm-x86_64/auxvec.h +++ b/include/asm-x86_64/auxvec.h @@ -1,4 +1,6 @@ #ifndef __ASM_X86_64_AUXVEC_H #define __ASM_X86_64_AUXVEC_H +#define AT_SYSINFO_EHDR 33 + #endif diff --git a/include/asm-x86_64/boot.h b/include/asm-x86_64/boot.h index 96b228e6e79c..3c46cea8db7f 100644 --- a/include/asm-x86_64/boot.h +++ b/include/asm-x86_64/boot.h @@ -1,15 +1 @@ -#ifndef _LINUX_BOOT_H -#define _LINUX_BOOT_H - -/* Don't touch these, unless you really know what you're doing. 
*/ -#define DEF_INITSEG 0x9000 -#define DEF_SYSSEG 0x1000 -#define DEF_SETUPSEG 0x9020 -#define DEF_SYSSIZE 0x7F00 - -/* Internal svga startup constants */ -#define NORMAL_VGA 0xffff /* 80x25 mode */ -#define EXTENDED_VGA 0xfffe /* 80x50 mode */ -#define ASK_VGA 0xfffd /* ask for it at bootup */ - -#endif +#include <asm-i386/boot.h> diff --git a/include/asm-x86_64/bootparam.h b/include/asm-x86_64/bootparam.h new file mode 100644 index 000000000000..aa82e5238d82 --- /dev/null +++ b/include/asm-x86_64/bootparam.h @@ -0,0 +1 @@ +#include <asm-i386/bootparam.h> diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h index 4d5747a0923c..67f60406e2d8 100644 --- a/include/asm-x86_64/calgary.h +++ b/include/asm-x86_64/calgary.h @@ -1,7 +1,7 @@ /* * Derived from include/asm-powerpc/iommu.h * - * Copyright (C) IBM Corporation, 2006 + * Copyright IBM Corporation, 2006-2007 * * Author: Jon Mason <jdmason@us.ibm.com> * Author: Muli Ben-Yehuda <muli@il.ibm.com> @@ -31,6 +31,7 @@ #include <asm/types.h> struct iommu_table { + struct cal_chipset_ops *chip_ops; /* chipset specific funcs */ unsigned long it_base; /* mapped address of tce table */ unsigned long it_hint; /* Hint for next alloc */ unsigned long *it_map; /* A simple allocation bitmap for now */ @@ -42,6 +43,12 @@ struct iommu_table { unsigned char it_busno; /* Bus number this table belongs to */ }; +struct cal_chipset_ops { + void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev); + void (*tce_cache_blast)(struct iommu_table *tbl); + void (*dump_error_regs)(struct iommu_table *tbl); +}; + #define TCE_TABLE_SIZE_UNSPECIFIED ~0 #define TCE_TABLE_SIZE_64K 0 #define TCE_TABLE_SIZE_128K 1 diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h index 09a6b6b6b74d..5e182062e6ec 100644 --- a/include/asm-x86_64/cmpxchg.h +++ b/include/asm-x86_64/cmpxchg.h @@ -128,7 +128,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #define cmpxchg_local(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #endif diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h index b37ab8218ef0..53cb96b68a62 100644 --- a/include/asm-x86_64/compat.h +++ b/include/asm-x86_64/compat.h @@ -33,8 +33,10 @@ typedef s32 compat_key_t; typedef s32 compat_int_t; typedef s32 compat_long_t; +typedef s64 __attribute__((aligned(4))) compat_s64; typedef u32 compat_uint_t; typedef u32 compat_ulong_t; +typedef u64 __attribute__((aligned(4))) compat_u64; struct compat_timespec { compat_time_t tv_sec; diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index 0b3c686139f1..8baefc3beb2e 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h @@ -7,115 +7,24 @@ #ifndef __ASM_X8664_CPUFEATURE_H #define __ASM_X8664_CPUFEATURE_H -#define NCAPINTS 7 /* N 32-bit words worth of info */ +#include <asm-i386/cpufeature.h> -/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */ -#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ 
-#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ -#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ -#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ -#define X86_FEATURE_DS (0*32+21) /* Debug Store */ -#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ - /* of FPU context), and CR4.OSFXSR available */ -#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ -#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ -#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ -#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ -#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ - -/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ -/* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */ -#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! 
*/ - -/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ - -/* Other features, Linux-defined mapping, word 3 */ -/* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ -#define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */ -#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ -#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ -#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ -#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ -#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS (3*32+10) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS (3*32+11) /* Branch Trace Store */ - -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ -#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ -#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ -#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ -#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_CID (4*32+10) /* Context ID */ -#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ - -/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ -#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ -#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ -#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ - -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ - -#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) -#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) - -#define cpu_has_fpu 1 +#undef cpu_has_vme #define cpu_has_vme 0 -#define cpu_has_de 1 -#define cpu_has_pse 1 -#define cpu_has_tsc 1 + +#undef cpu_has_pae #define cpu_has_pae ___BUG___ -#define cpu_has_pge 1 -#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) -#define cpu_has_mtrr 1 -#define cpu_has_mmx 1 -#define cpu_has_fxsr 1 -#define cpu_has_xmm 1 -#define cpu_has_xmm2 1 -#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) -#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) + +#undef cpu_has_mp #define cpu_has_mp 1 /* XXX */ + +#undef cpu_has_k6_mtrr #define cpu_has_k6_mtrr 0 + +#undef cpu_has_cyrix_arr #define cpu_has_cyrix_arr 0 + +#undef cpu_has_centaur_mcr #define cpu_has_centaur_mcr 0 -#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) -#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) -#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) -#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) #endif /* __ASM_X8664_CPUFEATURE_H */ diff --git a/include/asm-x86_64/dmi.h 
b/include/asm-x86_64/dmi.h index 93b2b15d4325..d02e32e3c3f0 100644 --- a/include/asm-x86_64/dmi.h +++ b/include/asm-x86_64/dmi.h @@ -3,15 +3,12 @@ #include <asm/io.h> -extern void *dmi_ioremap(unsigned long addr, unsigned long size); -extern void dmi_iounmap(void *addr, unsigned long size); - #define DMI_MAX_DATA 2048 extern int dmi_alloc_index; extern char dmi_alloc_data[DMI_MAX_DATA]; -/* This is so early that there is no good way to allocate dynamic memory. +/* This is so early that there is no good way to allocate dynamic memory. Allocate data in an BSS array. */ static inline void *dmi_alloc(unsigned len) { diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h index 6216fa3f2802..3486e701bd86 100644 --- a/include/asm-x86_64/e820.h +++ b/include/asm-x86_64/e820.h @@ -11,8 +11,6 @@ #ifndef __E820_HEADER #define __E820_HEADER -#include <linux/mmzone.h> - #define E820MAP 0x2d0 /* our map */ #define E820MAX 128 /* number of entries in E820MAP */ #define E820NR 0x1e8 /* # entries in E820MAP */ @@ -30,7 +28,7 @@ struct e820entry { } __attribute__((packed)); struct e820map { - int nr_map; + u32 nr_map; struct e820entry map[E820MAX]; }; diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h index 6d24ea7c4d9d..b4fbe47f6ccd 100644 --- a/include/asm-x86_64/elf.h +++ b/include/asm-x86_64/elf.h @@ -162,6 +162,19 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); /* 1GB for 64bit, 8MB for 32bit */ #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int executable_stack); + +extern int vdso_enabled; + +#define ARCH_DLINFO \ +do if (vdso_enabled) { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ +} while (0) + #endif #endif diff --git a/include/asm-x86_64/fb.h b/include/asm-x86_64/fb.h new file mode 100644 index 000000000000..60548e651d12 --- /dev/null +++ b/include/asm-x86_64/fb.h @@ -0,0 +1,19 @@ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ +#include <linux/fb.h> +#include <linux/fs.h> +#include <asm/page.h> + +static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + unsigned long off) +{ + if (boot_cpu_data.x86 > 3) + pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; +} + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* _ASM_FB_H_ */ diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h index e90e1677531b..cdfbe4a6ae6f 100644 --- a/include/asm-x86_64/fixmap.h +++ b/include/asm-x86_64/fixmap.h @@ -22,9 +22,9 @@ * compile time, but to set the physical address only * in the boot process. * - * these 'compile-time allocated' memory buffers are - * fixed-size 4k pages. (or larger if used with an increment - * highger than 1) use fixmap_set(idx,phys) to associate + * These 'compile-time allocated' memory buffers are + * fixed-size 4k pages (or larger if used with an increment + * higher than 1). Use set_fixmap(idx,phys) to associate * physical memory with fixmap indices. 
* * TLB entries of such buffers will not be flushed across @@ -35,6 +35,8 @@ enum fixed_addresses { VSYSCALL_LAST_PAGE, VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, VSYSCALL_HPET, + FIX_DBGP_BASE, + FIX_EARLYCON_MEM_BASE, FIX_HPET_BASE, FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ FIX_IO_APIC_BASE_0, @@ -84,7 +86,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); - return __fix_to_virt(idx); + return __fix_to_virt(idx); } #endif diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h index 59a66f084611..79bb950f82c5 100644 --- a/include/asm-x86_64/hpet.h +++ b/include/asm-x86_64/hpet.h @@ -1,78 +1,18 @@ #ifndef _ASM_X8664_HPET_H #define _ASM_X8664_HPET_H 1 -/* - * Documentation on HPET can be found at: - * http://www.intel.com/ial/home/sp/pcmmspec.htm - * ftp://download.intel.com/ial/home/sp/mmts098.pdf - */ - -#define HPET_MMAP_SIZE 1024 - -#define HPET_ID 0x000 -#define HPET_PERIOD 0x004 -#define HPET_CFG 0x010 -#define HPET_STATUS 0x020 -#define HPET_COUNTER 0x0f0 -#define HPET_Tn_OFFSET 0x20 -#define HPET_Tn_CFG(n) (0x100 + (n) * HPET_Tn_OFFSET) -#define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET) -#define HPET_Tn_CMP(n) (0x108 + (n) * HPET_Tn_OFFSET) -#define HPET_T0_CFG HPET_Tn_CFG(0) -#define HPET_T0_CMP HPET_Tn_CMP(0) -#define HPET_T1_CFG HPET_Tn_CFG(1) -#define HPET_T1_CMP HPET_Tn_CMP(1) - -#define HPET_ID_VENDOR 0xffff0000 -#define HPET_ID_LEGSUP 0x00008000 -#define HPET_ID_64BIT 0x00002000 -#define HPET_ID_NUMBER 0x00001f00 -#define HPET_ID_REV 0x000000ff -#define HPET_ID_NUMBER_SHIFT 8 - -#define HPET_ID_VENDOR_SHIFT 16 -#define HPET_ID_VENDOR_8086 0x8086 - -#define HPET_CFG_ENABLE 0x001 -#define HPET_CFG_LEGACY 0x002 -#define HPET_LEGACY_8254 2 -#define HPET_LEGACY_RTC 8 - -#define HPET_TN_LEVEL 0x0002 -#define HPET_TN_ENABLE 0x0004 -#define HPET_TN_PERIODIC 0x0008 -#define HPET_TN_PERIODIC_CAP 0x0010 -#define HPET_TN_64BIT_CAP 0x0020 -#define HPET_TN_SETVAL 0x0040 -#define HPET_TN_32BIT 0x0100 -#define HPET_TN_ROUTE 0x3e00 -#define HPET_TN_FSB 0x4000 -#define HPET_TN_FSB_CAP 0x8000 - -#define HPET_TN_ROUTE_SHIFT 9 +#include <asm-i386/hpet.h> #define HPET_TICK_RATE (HZ * 100000UL) -extern int is_hpet_enabled(void); extern int hpet_rtc_timer_init(void); -extern int apic_is_clustered_box(void); extern int hpet_arch_init(void); extern int hpet_timer_stop_set_go(unsigned long tick); extern int hpet_reenable(void); extern unsigned int hpet_calibrate_tsc(void); extern int hpet_use_timer; -extern unsigned long hpet_address; extern unsigned long hpet_period; extern unsigned long hpet_tick; -#ifdef CONFIG_HPET_EMULATE_RTC -extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); -extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); -extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); -extern int hpet_set_periodic_freq(unsigned long freq); -extern int hpet_rtc_dropped_irq(void); -extern int hpet_rtc_timer_init(void); -#endif /* CONFIG_HPET_EMULATE_RTC */ - #endif diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h index 6153ae5df2e8..09dfc18a6dd0 100644 --- a/include/asm-x86_64/hw_irq.h +++ b/include/asm-x86_64/hw_irq.h @@ -95,6 +95,26 @@ #ifndef __ASSEMBLY__ + +/* Interrupt handlers registered during init_IRQ */ +void apic_timer_interrupt(void); +void spurious_interrupt(void); +void error_interrupt(void); +void reschedule_interrupt(void); +void 
call_function_interrupt(void); +void irq_move_cleanup_interrupt(void); +void invalidate_interrupt0(void); +void invalidate_interrupt1(void); +void invalidate_interrupt2(void); +void invalidate_interrupt3(void); +void invalidate_interrupt4(void); +void invalidate_interrupt5(void); +void invalidate_interrupt6(void); +void invalidate_interrupt7(void); +void thermal_interrupt(void); +void threshold_interrupt(void); +void i8254_timer_resume(void); + typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); extern void __setup_vector_irq(int cpu); diff --git a/include/asm-x86_64/hypertransport.h b/include/asm-x86_64/hypertransport.h index c16c6ff4bdd7..5cbf9fa5e0b5 100644 --- a/include/asm-x86_64/hypertransport.h +++ b/include/asm-x86_64/hypertransport.h @@ -1,42 +1 @@ -#ifndef ASM_HYPERTRANSPORT_H -#define ASM_HYPERTRANSPORT_H - -/* - * Constants for x86 Hypertransport Interrupts. - */ - -#define HT_IRQ_LOW_BASE 0xf8000000 - -#define HT_IRQ_LOW_VECTOR_SHIFT 16 -#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 -#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) - -#define HT_IRQ_LOW_DEST_ID_SHIFT 8 -#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 -#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) - -#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 -#define HT_IRQ_LOW_DM_LOGICAL 0x0000040 - -#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000 -#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020 - - -#define HT_IRQ_LOW_MT_FIXED 0x0000000 -#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004 -#define HT_IRQ_LOW_MT_SMI 0x0000008 -#define HT_IRQ_LOW_MT_NMI 0x000000c -#define HT_IRQ_LOW_MT_INIT 0x0000010 -#define HT_IRQ_LOW_MT_STARTUP 0x0000014 -#define HT_IRQ_LOW_MT_EXTINT 0x0000018 -#define HT_IRQ_LOW_MT_LINT1 0x000008c -#define HT_IRQ_LOW_MT_LINT0 0x0000098 - -#define HT_IRQ_LOW_IRQ_MASKED 0x0000001 - - -#define HT_IRQ_HIGH_DEST_ID_SHIFT 0 -#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff -#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) - -#endif /* ASM_HYPERTRANSPORT_H */ +#include <asm-i386/hypertransport.h> diff --git a/include/asm-x86_64/i8253.h b/include/asm-x86_64/i8253.h new file mode 100644 index 000000000000..015d8df07690 --- /dev/null +++ b/include/asm-x86_64/i8253.h @@ -0,0 +1,6 @@ +#ifndef __ASM_I8253_H__ +#define __ASM_I8253_H__ + +extern spinlock_t i8253_lock; + +#endif /* __ASM_I8253_H__ */ diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h index de2cd9a2303a..7475095c5061 100644 --- a/include/asm-x86_64/io.h +++ b/include/asm-x86_64/io.h @@ -144,6 +144,7 @@ extern void early_iounmap(void *addr, unsigned long size); */ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); +extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); /* * ISA I/O bus memory addresses are 1:1 with the physical address. 
diff --git a/include/asm-x86_64/iommu.h b/include/asm-x86_64/iommu.h new file mode 100644 index 000000000000..5af471f228ee --- /dev/null +++ b/include/asm-x86_64/iommu.h @@ -0,0 +1,29 @@ +#ifndef _ASM_X8664_IOMMU_H +#define _ASM_X8664_IOMMU_H 1 + +extern void pci_iommu_shutdown(void); +extern void no_iommu_init(void); +extern int force_iommu, no_iommu; +extern int iommu_detected; +#ifdef CONFIG_IOMMU +extern void gart_iommu_init(void); +extern void gart_iommu_shutdown(void); +extern void __init gart_parse_options(char *); +extern void iommu_hole_init(void); +extern int fallback_aper_order; +extern int fallback_aper_force; +extern int iommu_aperture; +extern int iommu_aperture_allowed; +extern int iommu_aperture_disabled; +extern int fix_aperture; +#else +#define iommu_aperture 0 +#define iommu_aperture_allowed 0 + +static inline void gart_iommu_shutdown(void) +{ +} + +#endif + +#endif diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h index cf5317898fb0..7db825403e01 100644 --- a/include/asm-x86_64/kprobes.h +++ b/include/asm-x86_64/kprobes.h @@ -41,7 +41,6 @@ typedef u8 kprobe_opcode_t; ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 1 diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h index 177e92b4019b..7bc030a1996d 100644 --- a/include/asm-x86_64/mce.h +++ b/include/asm-x86_64/mce.h @@ -105,6 +105,11 @@ extern atomic_t mce_entry; extern void do_machine_check(struct pt_regs *, long); +extern int mce_notify_user(void); + +extern void stop_mce(void); +extern void restart_mce(void); + #endif #endif diff --git a/include/asm-x86_64/mmu.h b/include/asm-x86_64/mmu.h index 5dc6ed79859a..d2cd4a9d984d 100644 --- a/include/asm-x86_64/mmu.h +++ b/include/asm-x86_64/mmu.h @@ -15,6 +15,7 @@ typedef struct { rwlock_t ldtlock; int size; struct semaphore sem; + void *vdso; } mm_context_t; #endif diff --git a/include/asm-x86_64/msidef.h b/include/asm-x86_64/msidef.h index 5b8acddb70fb..083ad5827e48 100644 --- a/include/asm-x86_64/msidef.h +++ b/include/asm-x86_64/msidef.h @@ -1,47 +1 @@ -#ifndef ASM_MSIDEF_H -#define ASM_MSIDEF_H - -/* - * Constants for Intel APIC based MSI messages. 
- */ - -/* - * Shifts for MSI data - */ - -#define MSI_DATA_VECTOR_SHIFT 0 -#define MSI_DATA_VECTOR_MASK 0x000000ff -#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) - -#define MSI_DATA_DELIVERY_MODE_SHIFT 8 -#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) -#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) - -#define MSI_DATA_LEVEL_SHIFT 14 -#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) -#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) - -#define MSI_DATA_TRIGGER_SHIFT 15 -#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) -#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) - -/* - * Shift/mask fields for msi address - */ - -#define MSI_ADDR_BASE_HI 0 -#define MSI_ADDR_BASE_LO 0xfee00000 - -#define MSI_ADDR_DEST_MODE_SHIFT 2 -#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT) -#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) - -#define MSI_ADDR_REDIRECTION_SHIFT 3 -#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */ -#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */ - -#define MSI_ADDR_DEST_ID_SHIFT 12 -#define MSI_ADDR_DEST_ID_MASK 0x00ffff0 -#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) - -#endif /* ASM_MSIDEF_H */ +#include <asm-i386/msidef.h> diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h index d0a7f53b1497..5fb3c0de5ccc 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h @@ -88,5 +88,7 @@ unsigned lapic_adjust_nmi_hz(unsigned hz); int lapic_watchdog_ok(void); void disable_lapic_nmi_watchdog(void); void enable_lapic_nmi_watchdog(void); +void stop_nmi(void); +void restart_nmi(void); #endif /* ASM_NMI_H */ diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index e327c830da0c..88adf1afb0a2 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h @@ -48,7 +48,8 @@ void copy_page(void *, void *); #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) -#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /* * These are used to make use of C type-checking.. diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h index 49c5e9280598..88926eb44f5c 100644 --- a/include/asm-x86_64/pci.h +++ b/include/asm-x86_64/pci.h @@ -5,6 +5,25 @@ #ifdef __KERNEL__ +struct pci_sysdata { + int node; /* NUMA node */ + void* iommu; /* IOMMU private data */ +}; + +#ifdef CONFIG_CALGARY_IOMMU +static inline void* pci_iommu(struct pci_bus *bus) +{ + struct pci_sysdata *sd = bus->sysdata; + return sd->iommu; +} + +static inline void set_pci_iommu(struct pci_bus *bus, void *val) +{ + struct pci_sysdata *sd = bus->sysdata; + sd->iommu = val; +} +#endif /* CONFIG_CALGARY_IOMMU */ + #include <linux/mm.h> /* for struct page */ /* Can be used to override the logic in pci_scan_bus for skipping @@ -54,14 +73,6 @@ extern int iommu_setup(char *opt); #if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU) -/* - * x86-64 always supports DAC, but sometimes it is useful to force - * devices through the IOMMU to get automatic sg list merging. - * Optional right now. 
- */ -extern int iommu_sac_force; -#define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force) - #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ @@ -78,8 +89,6 @@ extern int iommu_sac_force; #else /* No IOMMU */ -#define pci_dac_dma_supported(pci_dev, mask) 1 - #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) #define pci_unmap_addr(PTR, ADDR_NAME) (0) @@ -91,36 +100,6 @@ extern int iommu_sac_force; #include <asm-generic/pci-dma-compat.h> -static inline dma64_addr_t -pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) -{ - return ((dma64_addr_t) page_to_phys(page) + - (dma64_addr_t) offset); -} - -static inline struct page * -pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) -{ - return virt_to_page(__va(dma_addr)); -} - -static inline unsigned long -pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) -{ - return (dma_addr & ~PAGE_MASK); -} - -static inline void -pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) -{ -} - -static inline void -pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) -{ - flush_write_buffers(); -} - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, @@ -135,10 +114,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); -static inline void pcibios_add_platform_entries(struct pci_dev *dev) -{ -} - #endif /* __KERNEL__ */ /* generic pci stuff */ diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index c6fbb67eac90..5abd48270101 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h @@ -20,6 +20,11 @@ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __typeof__(type) per_cpu__##name \ + ____cacheline_internodealigned_in_smp + /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*({ \ extern int simple_identifier_##var(void); \ @@ -46,6 +51,8 @@ extern void setup_per_cpu_areas(void); #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h index 8bb564687860..b467be6d367f 100644 --- a/include/asm-x86_64/pgalloc.h +++ b/include/asm-x86_64/pgalloc.h @@ -4,6 +4,10 @@ #include <asm/pda.h> #include <linux/threads.h> #include <linux/mm.h> +#include <linux/quicklist.h> + +#define QUICK_PGD 0 /* We preserve special mappings over free */ +#define QUICK_PT 1 /* Other page table pages that are zero on free */ #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))) @@ -20,23 +24,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p static inline void pmd_free(pmd_t *pmd) { BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - free_page((unsigned long)pmd); + quicklist_free(QUICK_PT, NULL, pmd); } static inline pmd_t 
*pmd_alloc_one (struct mm_struct *mm, unsigned long addr) { - return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); + return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); } static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { - return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); + return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); } static inline void pud_free (pud_t *pud) { BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - free_page((unsigned long)pud); + quicklist_free(QUICK_PT, NULL, pud); } static inline void pgd_list_add(pgd_t *pgd) @@ -57,41 +61,57 @@ static inline void pgd_list_del(pgd_t *pgd) spin_unlock(&pgd_lock); } -static inline pgd_t *pgd_alloc(struct mm_struct *mm) +static inline void pgd_ctor(void *x) { unsigned boundary; - pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); - if (!pgd) - return NULL; - pgd_list_add(pgd); + pgd_t *pgd = x; + struct page *page = virt_to_page(pgd); + /* * Copy kernel pointers in from init. - * Could keep a freelist or slab cache of those because the kernel - * part never changes. */ boundary = pgd_index(__PAGE_OFFSET); - memset(pgd, 0, boundary * sizeof(pgd_t)); memcpy(pgd + boundary, - init_level4_pgt + boundary, - (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); + init_level4_pgt + boundary, + (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); + + spin_lock(&pgd_lock); + list_add(&page->lru, &pgd_list); + spin_unlock(&pgd_lock); +} + +static inline void pgd_dtor(void *x) +{ + pgd_t *pgd = x; + struct page *page = virt_to_page(pgd); + + spin_lock(&pgd_lock); + list_del(&page->lru); + spin_unlock(&pgd_lock); +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd = (pgd_t *)quicklist_alloc(QUICK_PGD, + GFP_KERNEL|__GFP_REPEAT, pgd_ctor); return pgd; } static inline void pgd_free(pgd_t *pgd) { BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); - pgd_list_del(pgd); - free_page((unsigned long)pgd); + quicklist_free(QUICK_PGD, pgd_dtor, pgd); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); + return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); } static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) { - void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); + void *p = (void *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL); + if (!p) return NULL; return virt_to_page(p); @@ -103,17 +123,22 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add static inline void pte_free_kernel(pte_t *pte) { BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); - free_page((unsigned long)pte); + quicklist_free(QUICK_PT, NULL, pte); } static inline void pte_free(struct page *pte) { - __free_page(pte); -} + quicklist_free_page(QUICK_PT, NULL, pte); +} -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) +#define __pte_free_tlb(tlb,pte) quicklist_free_page(QUICK_PT, NULL,(pte)) -#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) -#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) +#define __pmd_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x)) +#define __pud_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x)) +static inline void check_pgt_cache(void) +{ + quicklist_trim(QUICK_PGD, pgd_dtor, 25, 16); + quicklist_trim(QUICK_PT, NULL, 25, 16); +} #endif /* _X86_64_PGALLOC_H */ diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h 
index 0a71e0b9a619..c9d8764c89d1 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -266,21 +266,15 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) * Undefined behaviour if not.. */ #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) -static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; } -static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } -static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } -static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } -static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } -static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; } static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } @@ -290,13 +284,6 @@ static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & struct vm_area_struct; -static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) -{ - if (!pte_dirty(*ptep)) - return 0; - return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte); -} - static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_young(*ptep)) @@ -416,13 +403,14 @@ extern struct list_head pgd_list; extern int kern_addr_valid(unsigned long addr); +pte_t *lookup_address(unsigned long addr); + #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) #define HAVE_ARCH_UNMAPPED_AREA #define pgtable_cache_init() do { } while (0) -#define check_pgt_cache() do { } while (0) #define PAGE_AGP PAGE_KERNEL_NOCACHE #define HAVE_PAGE_AGP 1 @@ -433,7 +421,6 @@ extern int kern_addr_valid(unsigned long addr); (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? 
((o) | (~__VIRTUAL_MASK)) : (o)) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_SET_WRPROTECT diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index 461ffe4c1fcc..19525175b91c 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -83,7 +83,6 @@ struct cpuinfo_x86 { #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 #define X86_VENDOR_CENTAUR 5 -#define X86_VENDOR_RISE 6 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NUM 8 #define X86_VENDOR_UNKNOWN 0xff @@ -100,6 +99,7 @@ extern char ignore_irq13; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); +extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); extern unsigned short num_cache_leaves; @@ -368,8 +368,6 @@ static inline void sync_core(void) asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); } -#define cpu_has_fpu 1 - #define ARCH_HAS_PREFETCH static inline void prefetch(void *x) { @@ -391,17 +389,6 @@ static inline void prefetchw(void *x) #define cpu_relax() rep_nop() -/* - * NSC/Cyrix CPU indexed register access macros - */ - -#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) - -#define setCx86(reg, data) do { \ - outb((reg), 0x22); \ - outb((data), 0x23); \ -} while (0) - static inline void serialize_cpu(void) { __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 85255db1e82d..31f20ad65876 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -75,8 +75,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long en extern void early_quirks(void); extern void check_efer(void); -extern int unhandled_signal(struct task_struct *tsk, int sig); - extern void select_idle_routine(const struct cpuinfo_x86 *c); extern unsigned long table_start, table_end; @@ -85,24 +83,6 @@ extern int exception_trace; extern unsigned cpu_khz; extern unsigned tsc_khz; -extern void no_iommu_init(void); -extern int force_iommu, no_iommu; -extern int iommu_detected; -#ifdef CONFIG_IOMMU -extern void gart_iommu_init(void); -extern void __init gart_parse_options(char *); -extern void iommu_hole_init(void); -extern int fallback_aper_order; -extern int fallback_aper_force; -extern int iommu_aperture; -extern int iommu_aperture_allowed; -extern int iommu_aperture_disabled; -extern int fix_aperture; -#else -#define iommu_aperture 0 -#define iommu_aperture_allowed 0 -#endif - extern int reboot_force; extern int notsc_setup(char *); diff --git a/include/asm-x86_64/ptrace.h b/include/asm-x86_64/ptrace.h index 5ea84dbb1e9c..7f166ccb0606 100644 --- a/include/asm-x86_64/ptrace.h +++ b/include/asm-x86_64/ptrace.h @@ -1,6 +1,7 @@ #ifndef _X86_64_PTRACE_H #define _X86_64_PTRACE_H +#include <linux/compiler.h> /* For __user */ #include <asm/ptrace-abi.h> #ifndef __ASSEMBLY__ diff --git a/include/asm-x86_64/required-features.h b/include/asm-x86_64/required-features.h new file mode 100644 index 000000000000..e80d5761b00a --- /dev/null +++ b/include/asm-x86_64/required-features.h @@ -0,0 +1,46 @@ +#ifndef _ASM_REQUIRED_FEATURES_H +#define _ASM_REQUIRED_FEATURES_H 1 + +/* Define minimum CPUID feature set for kernel These bits are checked + really early to actually display a visible error 
message before the + kernel dies. Make sure to assign features to the proper mask! + + The real information is in arch/x86_64/Kconfig.cpu, this just converts + the CONFIGs into a bitmask */ + +/* x86-64 baseline features */ +#define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) +#define NEED_PSE (1<<(X86_FEATURE_PSE & 31)) +#define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) +#define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) +#define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) +#define NEED_PGE (1<<(X86_FEATURE_PGE & 31)) +#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) +#define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) +#define NEED_XMM (1<<(X86_FEATURE_XMM & 31)) +#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31)) + +#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ + NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ + NEED_XMM|NEED_XMM2) +#define SSE_MASK (NEED_XMM|NEED_XMM2) + +/* x86-64 baseline features */ +#define NEED_LM (1<<(X86_FEATURE_LM & 31)) + +#ifdef CONFIG_X86_USE_3DNOW +# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) +#else +# define NEED_3DNOW 0 +#endif + +#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) + +#define REQUIRED_MASK2 0 +#define REQUIRED_MASK3 0 +#define REQUIRED_MASK4 0 +#define REQUIRED_MASK5 0 +#define REQUIRED_MASK6 0 +#define REQUIRED_MASK7 0 + +#endif diff --git a/include/asm-x86_64/resume-trace.h b/include/asm-x86_64/resume-trace.h new file mode 100644 index 000000000000..34bf998fdf62 --- /dev/null +++ b/include/asm-x86_64/resume-trace.h @@ -0,0 +1,13 @@ +#define TRACE_RESUME(user) do { \ + if (pm_trace_enabled) { \ + void *tracedata; \ + asm volatile("movq $1f,%0\n" \ + ".section .tracedata,\"a\"\n" \ + "1:\t.word %c1\n" \ + "\t.quad %c2\n" \ + ".previous" \ + :"=r" (tracedata) \ + : "i" (__LINE__), "i" (__FILE__)); \ + generate_resume_trace(tracedata, user); \ + } \ +} while (0) diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h index adf2bf1e187c..04b8ab21328f 100644 --- a/include/asm-x86_64/segment.h +++ b/include/asm-x86_64/segment.h @@ -3,6 +3,14 @@ #include <asm/cache.h> +/* Simple and small GDT entries for booting only */ + +#define GDT_ENTRY_BOOT_CS 2 +#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) + +#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) +#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) + #define __KERNEL_CS 0x10 #define __KERNEL_DS 0x18 diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h index 9505d9f4bead..e583da7918fb 100644 --- a/include/asm-x86_64/string.h +++ b/include/asm-x86_64/string.h @@ -29,6 +29,9 @@ return (to); function. 
*/ #define __HAVE_ARCH_MEMCPY 1 +#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 +extern void *memcpy(void *to, const void *from, size_t len); +#else extern void *__memcpy(void *to, const void *from, size_t len); #define memcpy(dst,src,len) \ ({ size_t __len = (len); \ @@ -38,7 +41,7 @@ extern void *__memcpy(void *to, const void *from, size_t len); else \ __ret = __builtin_memcpy((dst),(src),__len); \ __ret; }) - +#endif #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index ead9f9a56234..02175aa1d16a 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -75,19 +75,31 @@ static inline unsigned long read_cr0(void) unsigned long cr0; asm volatile("movq %%cr0,%0" : "=r" (cr0)); return cr0; -} +} static inline void write_cr0(unsigned long val) { asm volatile("movq %0,%%cr0" :: "r" (val)); -} +} + +static inline unsigned long read_cr2(void) +{ + unsigned long cr2; + asm("movq %%cr2,%0" : "=r" (cr2)); + return cr2; +} + +static inline void write_cr2(unsigned long val) +{ + asm volatile("movq %0,%%cr2" :: "r" (val)); +} static inline unsigned long read_cr3(void) { unsigned long cr3; asm("movq %%cr3,%0" : "=r" (cr3)); return cr3; -} +} static inline void write_cr3(unsigned long val) { @@ -99,27 +111,30 @@ static inline unsigned long read_cr4(void) unsigned long cr4; asm("movq %%cr4,%0" : "=r" (cr4)); return cr4; -} +} static inline void write_cr4(unsigned long val) { asm volatile("movq %0,%%cr4" :: "r" (val) : "memory"); -} - -#define stts() write_cr0(8 | read_cr0()) +} -#define wbinvd() \ - __asm__ __volatile__ ("wbinvd": : :"memory"); +static inline unsigned long read_cr8(void) +{ + unsigned long cr8; + asm("movq %%cr8,%0" : "=r" (cr8)); + return cr8; +} -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. 
- */ -static inline void sched_cacheflush(void) +static inline void write_cr8(unsigned long val) { - wbinvd(); + asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); } +#define stts() write_cr0(8 | read_cr0()) + +#define wbinvd() \ + __asm__ __volatile__ ("wbinvd": : :"memory") + #endif /* __KERNEL__ */ #define nop() __asm__ __volatile__ ("nop") diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h index 10bb5a8ed688..33c72ef15a0c 100644 --- a/include/asm-x86_64/thread_info.h +++ b/include/asm-x86_64/thread_info.h @@ -115,6 +115,7 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ +#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ /* 16 free */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ @@ -133,6 +134,7 @@ static inline struct thread_info *stack_thread_info(void) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) +#define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) #define _TIF_IA32 (1<<TIF_IA32) #define _TIF_FORK (1<<TIF_FORK) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h index f6527e1b6c1c..6ed21f44d308 100644 --- a/include/asm-x86_64/timex.h +++ b/include/asm-x86_64/timex.h @@ -9,7 +9,6 @@ #include <asm/8253pit.h> #include <asm/msr.h> #include <asm/vsyscall.h> -#include <asm/hpet.h> #include <asm/system.h> #include <asm/processor.h> #include <asm/tsc.h> diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h index 8516225a8389..888eb4abdd07 100644 --- a/include/asm-x86_64/tlbflush.h +++ b/include/asm-x86_64/tlbflush.h @@ -92,7 +92,11 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st #endif -#define flush_tlb_kernel_range(start, end) flush_tlb_all() +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + flush_tlb_all(); +} static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 4fd6fb23953e..36e52fba7960 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -22,7 +22,7 @@ extern int __node_distance(int, int); #define parent_node(node) (node) #define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node])) #define node_to_cpumask(node) (node_to_cpumask[node]) -#define pcibus_to_node(bus) ((long)(bus->sysdata)) +#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); #define numa_node_id() read_pda(nodenumber) diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 8696f8ad401e..fc4e73f5f1fa 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -630,6 +630,8 @@ __SYSCALL(__NR_signalfd, sys_signalfd) __SYSCALL(__NR_timerfd, sys_timerfd) #define __NR_eventfd 284 __SYSCALL(__NR_eventfd, sys_eventfd) +#define __NR_fallocate 285 +__SYSCALL(__NR_fallocate, sys_fallocate) #ifndef __NO_STUBS #define __ARCH_WANT_OLD_READDIR diff --git a/include/asm-x86_64/vgtod.h b/include/asm-x86_64/vgtod.h new file mode 100644 index 000000000000..3301f0929342 --- /dev/null +++ b/include/asm-x86_64/vgtod.h @@ -0,0 +1,29 @@ +#ifndef _ASM_VGTOD_H 
+#define _ASM_VGTOD_H 1
+
+#include <asm/vsyscall.h>
+#include <linux/clocksource.h>
+
+struct vsyscall_gtod_data {
+	seqlock_t	lock;
+
+	/* open coded 'struct timespec' */
+	time_t		wall_time_sec;
+	u32		wall_time_nsec;
+
+	int		sysctl_enabled;
+	struct timezone	sys_tz;
+	struct {	/* extract of a clocksource struct */
+		cycle_t (*vread)(void);
+		cycle_t	cycle_last;
+		cycle_t	mask;
+		u32	mult;
+		u32	shift;
+	} clock;
+	struct timespec	wall_to_monotonic;
+};
+extern struct vsyscall_gtod_data __vsyscall_gtod_data
+__section_vsyscall_gtod_data;
+extern struct vsyscall_gtod_data vsyscall_gtod_data;
+
+#endif
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 82b4afe65c91..3b8ceb4af2cf 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -22,6 +22,8 @@ enum vsyscall_num {
 /* Definitions for CONFIG_GENERIC_TIME definitions */
 #define __section_vsyscall_gtod_data __attribute__ \
 	((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
+#define __section_vsyscall_clock __attribute__ \
+	((unused, __section__ (".vsyscall_clock"),aligned(16)))
 #define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
 
 #define VGETCPU_RDTSCP	1
@@ -36,7 +38,6 @@ extern volatile unsigned long __jiffies; /* kernel space (writeable) */
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
 
-extern struct vsyscall_gtod_data_t vsyscall_gtod_data;
 
 #endif /* __KERNEL__ */
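The alternative.h hunk in this merge moves the LOCK_PREFIX definition ahead of the <asm/cpufeature.h> include, and its comment block explains the SMP-alternatives scheme: every lock-prefixed instruction records its own address in the .smp_locks section so the kernel can patch the lock byte in or out at runtime. The following is a minimal, self-contained user-space sketch of that bookkeeping, not the kernel's code: DEMO_SMP stands in for CONFIG_SMP and demo_atomic_inc is a hypothetical stand-in for an atomic helper; only the LOCK_PREFIX macro body mirrors the diff above.

/*
 * Sketch of the .smp_locks bookkeeping described in the alternative.h
 * comment block.  DEMO_SMP stands in for CONFIG_SMP; the section/label
 * scheme mirrors the LOCK_PREFIX macro shown in the diff.
 * Build (x86-64, GCC/Clang): cc -DDEMO_SMP -O2 demo.c
 */
#ifdef DEMO_SMP
#define LOCK_PREFIX \
		".section .smp_locks,\"a\"\n"	\
		"  .align 8\n"			\
		"  .quad 661f\n" /* record address of the lock prefix */ \
		".previous\n"			\
		"661:\n\tlock; "
#else /* ! DEMO_SMP */
#define LOCK_PREFIX ""
#endif

/*
 * Atomically increment *v.  With DEMO_SMP defined, the "lock" byte at
 * local label 661 is listed in .smp_locks, so a patcher that walks that
 * address list could later overwrite it with a NOP on a UP machine.
 */
static inline void demo_atomic_inc(volatile int *v)
{
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (*v));
}

int main(void)
{
	int counter = 0;

	demo_atomic_inc(&counter);
	return counter == 1 ? 0 : 1;
}

Per the comment block, keeping that address list around after boot is what lets alternatives_smp_switch() re-apply or remove the lock prefixes when a CPU is hot-plugged, which is why the table is not discarded like other init data.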