Diffstat (limited to 'arch'): 230 files changed, 4611 insertions, 1779 deletions
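The largest Alpha change below folds the open-coded __xchg_u{8,16,32,64}() and __cmpxchg_u*() helpers and their _local twins out of asm/system.h into a single template, asm/xchg.h, which system.h now includes twice: first with __ASM__MB empty and ____xchg()/____cmpxchg() pasting a _local suffix onto each generated name, then, after redefining __ASM__MB to an "mb" instruction under CONFIG_SMP, pasting the plain names, so one copy of the assembly yields both the barrier and barrier-free variants. What follows is a minimal userspace sketch of that include-twice, token-pasting pattern, not the kernel code itself; the file name fetch_add_impl.h, the OP()/OP_BARRIER macros and the fetch_add_*() functions are invented for illustration (the args... named-variadic form is the same GCC extension the kernel macros use).

/* fetch_add_impl.h - the shared template, playing the role of asm/xchg.h.
 * Only meaningful when included by a file that defines OP() and OP_BARRIER. */
static inline int
OP(_int, volatile int *p, int v)
{
        int old = *p;           /* stand-in for the locked load/store-conditional retry loop */
        *p = old + v;
        OP_BARRIER;             /* memory barrier in the SMP pass, nothing in the _local pass */
        return old;
}

/* main.c - the includer, playing the role of asm/system.h */
#include <stdio.h>

#define OP_BARRIER              do { } while (0)        /* _local pass: no barrier */
#define OP(type, args...)       fetch_add ## type ## _local(args)
#include "fetch_add_impl.h"     /* emits fetch_add_int_local() */

#undef OP
#undef OP_BARRIER
#define OP_BARRIER              __sync_synchronize()    /* SMP pass: full barrier */
#define OP(type, args...)       fetch_add ## type(args)
#include "fetch_add_impl.h"     /* emits fetch_add_int() */

int main(void)
{
        volatile int x = 1;
        int a = fetch_add_int_local(&x, 2);     /* a = 1, x = 3 */
        int b = fetch_add_int(&x, 3);           /* b = 3, x = 6 */
        printf("%d %d %d\n", a, b, x);          /* prints: 1 3 6 */
        return 0;
}

Compiling main.c with gcc produces both variants from the single template, which is how system.h now obtains xchg()/cmpxchg() and xchg_local()/cmpxchg_local() from one copy of the locked sequences instead of maintaining two.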
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h index fea4ea75b79d..13cd42743810 100644 --- a/arch/alpha/include/asm/machvec.h +++ b/arch/alpha/include/asm/machvec.h @@ -80,7 +80,7 @@ struct alpha_machine_vector void (*update_irq_hw)(unsigned long, unsigned long, int); void (*ack_irq)(unsigned long); void (*device_interrupt)(unsigned long vector); - void (*machine_check)(u64 vector, u64 la); + void (*machine_check)(unsigned long vector, unsigned long la); void (*smp_callin)(void); void (*init_arch)(void); diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index 2a14302c17a3..cb04eaa6ba33 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -273,4 +273,18 @@ struct pci_dev *alpha_gendev_to_pci(struct device *dev); extern struct pci_dev *isa_bridge; +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, + size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); +extern void pci_adjust_legacy_attr(struct pci_bus *bus, + enum pci_mmap_state mmap_type); +#define HAVE_PCI_LEGACY 1 + +extern int pci_create_resource_files(struct pci_dev *dev); +extern void pci_remove_resource_files(struct pci_dev *dev); + #endif /* __ALPHA_PCI_H */ diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h index afe20fa58c99..5aa40cca4f23 100644 --- a/arch/alpha/include/asm/system.h +++ b/arch/alpha/include/asm/system.h @@ -309,518 +309,71 @@ extern int __min_ipl; #define tbia() __tbi(-2, /* no second argument */) /* - * Atomic exchange. - * Since it can be used to implement critical sections - * it must clobber "memory" (also for interrupts in UP). + * Atomic exchange routines. 
*/ -static inline unsigned long -__xchg_u8(volatile char *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " insbl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extbl %2,%4,%0\n" - " mskbl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u16(volatile short *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " inswl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extwl %2,%4,%0\n" - " mskwl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u32(volatile int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " bis $31,%3,%1\n" - " stl_c %1,%2\n" - " beq %1,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -static inline unsigned long -__xchg_u64(volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldq_l %0,%4\n" - " bis $31,%3,%1\n" - " stq_c %1,%2\n" - " beq %1,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); +#define __ASM__MB +#define ____xchg(type, args...) __xchg ## type ## _local(args) +#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) +#include <asm/xchg.h> - return val; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid xchg(). 
*/ -extern void __xchg_called_with_bad_pointer(void); - -#define __xchg(ptr, x, size) \ -({ \ - unsigned long __xchg__res; \ - volatile void *__xchg__ptr = (ptr); \ - switch (size) { \ - case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \ - case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \ - case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \ - case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \ - default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ - } \ - __xchg__res; \ -}) - -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ +#define xchg_local(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ }) -static inline unsigned long -__xchg_u8_local(volatile char *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " insbl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extbl %2,%4,%0\n" - " mskbl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u16_local(volatile short *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " inswl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extwl %2,%4,%0\n" - " mskwl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u32_local(volatile int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " bis $31,%3,%1\n" - " stl_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -static inline unsigned long -__xchg_u64_local(volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldq_l %0,%4\n" - " bis $31,%3,%1\n" - " stq_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -#define __xchg_local(ptr, x, size) \ -({ \ - unsigned long __xchg__res; \ - volatile void *__xchg__ptr = (ptr); \ - switch (size) { \ - case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \ - case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \ - case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \ - case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \ - default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ - } \ - __xchg__res; \ -}) - -#define xchg_local(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ - sizeof(*(ptr))); \ +#define cmpxchg_local(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, \ + sizeof(*(ptr))); \ }) -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. 
Success is - * indicated by comparing RETURN with OLD. - * - * The memory barrier should be placed in SMP only when we actually - * make the change. If we don't change anything (so if the returned - * prev is equal to old) then we aren't acquiring anything new and - * we don't need any memory barrier as far I can tell. - */ - -#define __HAVE_ARCH_CMPXCHG 1 - -static inline unsigned long -__cmpxchg_u8(volatile char *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " insbl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extbl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskbl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u16(volatile short *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " inswl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extwl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskwl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u32(volatile int *m, int old, int new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldl_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} +#define cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), (o), (n)); \ + }) -static inline unsigned long -__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldq_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,3f\n" #ifdef CONFIG_SMP - " mb\n" +#undef __ASM__MB +#define __ASM__MB "\tmb\n" #endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). */ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - switch (size) { - case 1: - return __cmpxchg_u8(ptr, old, new); - case 2: - return __cmpxchg_u16(ptr, old, new); - case 4: - return __cmpxchg_u32(ptr, old, new); - case 8: - return __cmpxchg_u64(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ +#undef ____xchg +#undef ____cmpxchg +#define ____xchg(type, args...) 
__xchg ##type(args) +#define ____cmpxchg(type, args...) __cmpxchg ##type(args) +#include <asm/xchg.h> + +#define xchg(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ }) -#define cmpxchg64(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg((ptr), (o), (n)); \ - }) - -static inline unsigned long -__cmpxchg_u8_local(volatile char *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " insbl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extbl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskbl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u16_local(volatile short *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " inswl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extwl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskwl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u32_local(volatile int *m, int old, int new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldl_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldq_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, - int size) -{ - switch (size) { - case 1: - return __cmpxchg_u8_local(ptr, old, new); - case 2: - return __cmpxchg_u16_local(ptr, old, new); - case 4: - return __cmpxchg_u32_local(ptr, old, new); - case 8: - return __cmpxchg_u64_local(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} -#define cmpxchg_local(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ +#define cmpxchg(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr)));\ }) -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ + +#define cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ }) +#undef __ASM__MB +#undef ____cmpxchg + +#define __HAVE_ARCH_CMPXCHG 1 #endif /* 
__ASSEMBLY__ */ diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h index c1541353ccef..f072f344497e 100644 --- a/arch/alpha/include/asm/types.h +++ b/arch/alpha/include/asm/types.h @@ -8,7 +8,12 @@ * not a major issue. However, for interoperability, libraries still * need to be careful to avoid a name clashes. */ + +#ifdef __KERNEL__ +#include <asm-generic/int-ll64.h> +#else #include <asm-generic/int-l64.h> +#endif #ifndef __ASSEMBLY__ diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 22de3b434a22..163f3053001c 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -498,13 +498,13 @@ struct exception_table_entry }; /* Returns the new pc */ -#define fixup_exception(map_reg, fixup, pc) \ +#define fixup_exception(map_reg, _fixup, pc) \ ({ \ - if ((fixup)->fixup.bits.valreg != 31) \ - map_reg((fixup)->fixup.bits.valreg) = 0; \ - if ((fixup)->fixup.bits.errreg != 31) \ - map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \ - (pc) + (fixup)->fixup.bits.nextinsn; \ + if ((_fixup)->fixup.bits.valreg != 31) \ + map_reg((_fixup)->fixup.bits.valreg) = 0; \ + if ((_fixup)->fixup.bits.errreg != 31) \ + map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \ + (pc) + (_fixup)->fixup.bits.nextinsn; \ }) diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h new file mode 100644 index 000000000000..beba1b803e0d --- /dev/null +++ b/arch/alpha/include/asm/xchg.h @@ -0,0 +1,258 @@ +#ifndef __ALPHA_SYSTEM_H +#error Do not include xchg.h directly! +#else +/* + * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code + * except that local version do not have the expensive memory barrier. + * So this file is included twice from asm/system.h. + */ + +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). 
+ */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " insbl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extbl %2,%4,%0\n" + " mskbl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " inswl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extwl %2,%4,%0\n" + " mskwl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldl_l %0,%4\n" + " bis $31,%3,%1\n" + " stl_c %1,%2\n" + " beq %1,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldq_l %0,%4\n" + " bis $31,%3,%1\n" + " stq_c %1,%2\n" + " beq %1,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid xchg(). */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____xchg(, volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + return ____xchg(_u8, ptr, x); + case 2: + return ____xchg(_u16, ptr, x); + case 4: + return ____xchg(_u32, ptr, x); + case 8: + return ____xchg(_u64, ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
+ */ + +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " insbl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extbl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskbl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " inswl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extwl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskwl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldl_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stl_c %1,%2\n" + " beq %1,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldq_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stq_c %1,%2\n" + " beq %1,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). 
*/ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new, + int size) +{ + switch (size) { + case 1: + return ____cmpxchg(_u8, ptr, old, new); + case 2: + return ____cmpxchg(_u16, ptr, old, new); + case 4: + return ____cmpxchg(_u32, ptr, old, new); + case 8: + return ____cmpxchg(_u64, ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#endif diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index b4697759a123..a427538252f8 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -12,7 +12,7 @@ obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ obj-$(CONFIG_VGA_HOSE) += console.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_PCI) += pci.o pci_iommu.o +obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o obj-$(CONFIG_SRM_ENV) += srm_env.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c index 11aee012a8ae..985e5c1681ac 100644 --- a/arch/alpha/kernel/err_ev6.c +++ b/arch/alpha/kernel/err_ev6.c @@ -157,8 +157,8 @@ ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, err_print_prefix, streamname[stream], bitsname[bits], sourcename[source]); - printk("%s Address: 0x%016lx\n" - " Syndrome[upper.lower]: %02lx.%02lx\n", + printk("%s Address: 0x%016llx\n" + " Syndrome[upper.lower]: %02llx.%02llx\n", err_print_prefix, c_addr, c2_syn, c1_syn); diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c index 68cd493f54c5..73770c6ca013 100644 --- a/arch/alpha/kernel/err_ev7.c +++ b/arch/alpha/kernel/err_ev7.c @@ -246,13 +246,13 @@ ev7_process_pal_subpacket(struct el_subpacket *header) switch(header->type) { case EL_TYPE__PAL__LOGOUT_FRAME: - printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n", + printk("%s*** MCHK occurred on LPID %ld (RBOX %llx)\n", err_print_prefix, packet->by_type.logout.whami, packet->by_type.logout.rbox_whami); el_print_timestamp(&packet->by_type.logout.timestamp); - printk("%s EXC_ADDR: %016lx\n" - " HALT_CODE: %lx\n", + printk("%s EXC_ADDR: %016llx\n" + " HALT_CODE: %llx\n", err_print_prefix, packet->by_type.logout.exc_addr, packet->by_type.logout.halt_code); diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c index 413bf37eb094..6bfd243efba3 100644 --- a/arch/alpha/kernel/err_marvel.c +++ b/arch/alpha/kernel/err_marvel.c @@ -129,7 +129,7 @@ marvel_print_po7_crrct_sym(u64 crrct_sym) printk("%s Correctable Error Symptoms:\n" - "%s Syndrome: 0x%lx\n", + "%s Syndrome: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN)); marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC)); @@ -186,7 +186,7 @@ marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask) uncrr_sym &= valid_mask; if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) - printk("%s Syndrome: 0x%lx\n", + printk("%s Syndrome: 0x%llx\n", err_print_prefix, EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); @@ -307,7 +307,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym) sprintf(opcode_str, "BlkIO"); break; default: - sprintf(opcode_str, "0x%lx\n", + sprintf(opcode_str, "0x%llx\n", EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); break; } @@ -321,7 +321,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym) opcode_str); if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) - printk("%s Packet Offset 0x%08lx\n", + printk("%s Packet Offset 0x%08llx\n", err_print_prefix, EXTRACT(ugbge_sym, 
IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); } @@ -480,8 +480,8 @@ marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) printk("%s Lost Error\n", err_print_prefix); printk("%s Failing Packet:\n" - "%s Cycle 1: %016lx\n" - "%s Cycle 2: %016lx\n", + "%s Cycle 1: %016llx\n" + "%s Cycle 2: %016llx\n", err_print_prefix, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); @@ -515,9 +515,9 @@ marvel_print_pox_tlb_err(u64 tlb_err) if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) return; - printk("%s TLB Error on index 0x%lx:\n" + printk("%s TLB Error on index 0x%llx:\n" "%s - %s\n" - "%s - Addr: 0x%016lx\n", + "%s - Addr: 0x%016llx\n", err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), err_print_prefix, @@ -579,7 +579,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt) sprintf(message, "Uncorrectable Split Write Data Error"); break; default: - sprintf(message, "%08lx\n", + sprintf(message, "%08llx\n", EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); break; } @@ -620,9 +620,9 @@ marvel_print_pox_trans_sum(u64 trans_sum) return; printk("%s Transaction Summary:\n" - "%s Command: 0x%lx - %s\n" - "%s Address: 0x%016lx%s\n" - "%s PCI-X Master Slot: 0x%lx\n", + "%s Command: 0x%llx - %s\n" + "%s Address: 0x%016llx%s\n" + "%s PCI-X Master Slot: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD), @@ -964,12 +964,12 @@ marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print) #if 0 printk("%s PORT 7 ERROR:\n" - "%s PO7_ERROR_SUM: %016lx\n" - "%s PO7_UNCRR_SYM: %016lx\n" - "%s PO7_CRRCT_SYM: %016lx\n" - "%s PO7_UGBGE_SYM: %016lx\n" - "%s PO7_ERR_PKT0: %016lx\n" - "%s PO7_ERR_PKT1: %016lx\n", + "%s PO7_ERROR_SUM: %016llx\n" + "%s PO7_UNCRR_SYM: %016llx\n" + "%s PO7_CRRCT_SYM: %016llx\n" + "%s PO7_UGBGE_SYM: %016llx\n" + "%s PO7_ERR_PKT0: %016llx\n" + "%s PO7_ERR_PKT1: %016llx\n", err_print_prefix, err_print_prefix, io->po7_error_sum, err_print_prefix, io->po7_uncrr_sym, @@ -987,12 +987,12 @@ marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print) if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) continue; - printk("%s PID %u PORT %d POx_ERR_SUM: %016lx\n", + printk("%s PID %u PORT %d POx_ERR_SUM: %016llx\n", err_print_prefix, lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); - printk("%s [ POx_FIRST_ERR: %016lx ]\n", + printk("%s [ POx_FIRST_ERR: %016llx ]\n", err_print_prefix, io->ports[i].pox_first_err); marvel_print_pox_err(io->ports[i].pox_first_err, &io->ports[i]); diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index 257449ed15ef..c7e28a88d6e3 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c @@ -107,12 +107,12 @@ titan_parse_p_serror(int which, u64 serror, int print) if (!print) return status; - printk("%s PChip %d SERROR: %016lx\n", + printk("%s PChip %d SERROR: %016llx\n", err_print_prefix, which, serror); if (serror & TITAN__PCHIP_SERROR__ECCMASK) { printk("%s %sorrectable ECC Error:\n" " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" - " Address: 0x%lx\n", + " Address: 0x%llx\n", err_print_prefix, (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C", serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], @@ -223,7 +223,7 @@ titan_parse_p_perror(int which, int port, u64 perror, int print) if (!print) return status; - printk("%s PChip %d %cPERROR: %016lx\n", + printk("%s PChip %d %cPERROR: %016llx\n", err_print_prefix, which, port ? 
'A' : 'G', perror); if (perror & TITAN__PCHIP_PERROR__IPTPW) @@ -316,7 +316,7 @@ titan_parse_p_agperror(int which, u64 agperror, int print) addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); - printk("%s PChip %d AGPERROR: %016lx\n", err_print_prefix, + printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix, which, agperror); if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) printk("%s No Window\n", err_print_prefix); @@ -597,16 +597,16 @@ privateer_process_680_frame(struct el_common *mchk_header, int print) return status; /* TODO - decode instead of just dumping... */ - printk("%s Summary Flags: %016lx\n" - " CChip DIRx: %016lx\n" - " System Management IR: %016lx\n" - " CPU IR: %016lx\n" - " Power Supply IR: %016lx\n" - " LM78 Fault Status: %016lx\n" - " System Doors: %016lx\n" - " Temperature Warning: %016lx\n" - " Fan Control: %016lx\n" - " Fatal Power Down Code: %016lx\n", + printk("%s Summary Flags: %016llx\n" + " CChip DIRx: %016llx\n" + " System Management IR: %016llx\n" + " CPU IR: %016llx\n" + " Power Supply IR: %016llx\n" + " LM78 Fault Status: %016llx\n" + " System Doors: %016llx\n" + " Temperature Warning: %016llx\n" + " Fan Control: %016llx\n" + " Fatal Power Down Code: %016llx\n", err_print_prefix, emchk->summary, emchk->c_dirx, diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c new file mode 100644 index 000000000000..6ea822e7f724 --- /dev/null +++ b/arch/alpha/kernel/pci-sysfs.c @@ -0,0 +1,366 @@ +/* + * arch/alpha/kernel/pci-sysfs.c + * + * Copyright (C) 2009 Ivan Kokshaysky + * + * Alpha PCI resource files. + * + * Loosely based on generic HAVE_PCI_MMAP implementation in + * drivers/pci/pci-sysfs.c + */ + +#include <linux/sched.h> +#include <linux/pci.h> + +static int hose_mmap_page_range(struct pci_controller *hose, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_type, int sparse) +{ + unsigned long base; + + if (mmap_type == pci_mmap_mem) + base = sparse ? hose->sparse_mem_base : hose->dense_mem_base; + else + base = sparse ? hose->sparse_io_base : hose->dense_io_base; + + vma->vm_pgoff += base >> PAGE_SHIFT; + vma->vm_flags |= (VM_IO | VM_RESERVED); + + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static int __pci_mmap_fits(struct pci_dev *pdev, int num, + struct vm_area_struct *vma, int sparse) +{ + unsigned long nr, start, size; + int shift = sparse ? 5 : 0; + + nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + start = vma->vm_pgoff; + size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d " + "(size 0x%08lx)\n", + current->comm, sparse ? " sparse" : "", start, start + nr, + pci_name(pdev), num, size); + return 0; +} + +/** + * pci_mmap_resource - map a PCI resource into user memory space + * @kobj: kobject for mapping + * @attr: struct bin_attribute for the file being mapped + * @vma: struct vm_area_struct passed into the mmap + * @sparse: address space type + * + * Use the bus mapping routines to map a PCI resource into userspace. 
+ */ +static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, + struct vm_area_struct *vma, int sparse) +{ + struct pci_dev *pdev = to_pci_dev(container_of(kobj, + struct device, kobj)); + struct resource *res = (struct resource *)attr->private; + enum pci_mmap_state mmap_type; + struct pci_bus_region bar; + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + if (res == &pdev->resource[i]) + break; + if (i >= PCI_ROM_RESOURCE) + return -ENODEV; + + if (!__pci_mmap_fits(pdev, i, vma, sparse)) + return -EINVAL; + + if (iomem_is_exclusive(res->start)) + return -EINVAL; + + pcibios_resource_to_bus(pdev, &bar, res); + vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); + mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; + + return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); +} + +static int pci_mmap_resource_sparse(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 1); +} + +static int pci_mmap_resource_dense(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 0); +} + +/** + * pci_remove_resource_files - cleanup resource files + * @dev: dev to cleanup + * + * If we created resource files for @dev, remove them from sysfs and + * free their resources. + */ +void pci_remove_resource_files(struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + struct bin_attribute *res_attr; + + res_attr = pdev->res_attr[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + + res_attr = pdev->res_attr_wc[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + } +} + +static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num) +{ + struct pci_bus_region bar; + struct pci_controller *hose = pdev->sysdata; + long dense_offset; + unsigned long sparse_size; + + pcibios_resource_to_bus(pdev, &bar, &pdev->resource[num]); + + /* All core logic chips have 4G sparse address space, except + CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM + definitions in asm/core_xxx.h files). This corresponds + to 128M or 512M of the bus space. */ + dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base); + sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000; + + return bar.end < sparse_size; +} + +static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name, + char *suffix, struct bin_attribute *res_attr, + unsigned long sparse) +{ + size_t size = pci_resource_len(pdev, num); + + sprintf(name, "resource%d%s", num, suffix); + res_attr->mmap = sparse ? pci_mmap_resource_sparse : + pci_mmap_resource_dense; + res_attr->attr.name = name; + res_attr->attr.mode = S_IRUSR | S_IWUSR; + res_attr->size = sparse ? size << 5 : size; + res_attr->private = &pdev->resource[num]; + return sysfs_create_bin_file(&pdev->dev.kobj, res_attr); +} + +static int pci_create_attr(struct pci_dev *pdev, int num) +{ + /* allocate attribute structure, piggyback attribute name */ + int retval, nlen1, nlen2 = 0, res_count = 1; + unsigned long sparse_base, dense_base; + struct bin_attribute *attr; + struct pci_controller *hose = pdev->sysdata; + char *suffix, *attr_name; + + suffix = ""; /* Assume bwx machine, normal resourceN files. 
*/ + nlen1 = 10; + + if (pdev->resource[num].flags & IORESOURCE_MEM) { + sparse_base = hose->sparse_mem_base; + dense_base = hose->dense_mem_base; + if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) { + sparse_base = 0; + suffix = "_dense"; + nlen1 = 16; /* resourceN_dense */ + } + } else { + sparse_base = hose->sparse_io_base; + dense_base = hose->dense_io_base; + } + + if (sparse_base) { + suffix = "_sparse"; + nlen1 = 17; + if (dense_base) { + nlen2 = 16; /* resourceN_dense */ + res_count = 2; + } + } + + attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC); + if (!attr) + return -ENOMEM; + + /* Create bwx, sparse or single dense file */ + attr_name = (char *)(attr + res_count); + pdev->res_attr[num] = attr; + retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr, + sparse_base); + if (retval || res_count == 1) + return retval; + + /* Create dense file */ + attr_name += nlen1; + attr++; + pdev->res_attr_wc[num] = attr; + return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0); +} + +/** + * pci_create_resource_files - create resource files in sysfs for @dev + * @dev: dev in question + * + * Walk the resources in @dev creating files for each resource available. + */ +int pci_create_resource_files(struct pci_dev *pdev) +{ + int i; + int retval; + + /* Expose the PCI resources from this device as files */ + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + + /* skip empty resources */ + if (!pci_resource_len(pdev, i)) + continue; + + retval = pci_create_attr(pdev, i); + if (retval) { + pci_remove_resource_files(pdev); + return retval; + } + } + return 0; +} + +/* Legacy I/O bus mapping stuff. */ + +static int __legacy_mmap_fits(struct pci_controller *hose, + struct vm_area_struct *vma, + unsigned long res_size, int sparse) +{ + unsigned long nr, start, size; + + nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + start = vma->vm_pgoff; + size = ((res_size - 1) >> PAGE_SHIFT) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %d " + "(size 0x%08lx)\n", + current->comm, sparse ? " sparse" : "", start, start + nr, + hose->index, size); + return 0; +} + +static inline int has_sparse(struct pci_controller *hose, + enum pci_mmap_state mmap_type) +{ + unsigned long base; + + base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base : + hose->sparse_io_base; + + return base != 0; +} + +int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_type) +{ + struct pci_controller *hose = bus->sysdata; + int sparse = has_sparse(hose, mmap_type); + unsigned long res_size; + + res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size : + bus->legacy_io->size; + if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) + return -EINVAL; + + return hose_mmap_page_range(hose, vma, mmap_type, sparse); +} + +/** + * pci_adjust_legacy_attr - adjustment of legacy file attributes + * @b: bus to create files under + * @mmap_type: I/O port or memory + * + * Adjust file name and size for sparse mappings. 
+ */ +void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type) +{ + struct pci_controller *hose = bus->sysdata; + + if (!has_sparse(hose, mmap_type)) + return; + + if (mmap_type == pci_mmap_mem) { + bus->legacy_mem->attr.name = "legacy_mem_sparse"; + bus->legacy_mem->size <<= 5; + } else { + bus->legacy_io->attr.name = "legacy_io_sparse"; + bus->legacy_io->size <<= 5; + } + return; +} + +/* Legacy I/O bus read/write functions */ +int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) +{ + struct pci_controller *hose = bus->sysdata; + + port += hose->io_space->start; + + switch(size) { + case 1: + *((u8 *)val) = inb(port); + return 1; + case 2: + if (port & 1) + return -EINVAL; + *((u16 *)val) = inw(port); + return 2; + case 4: + if (port & 3) + return -EINVAL; + *((u32 *)val) = inl(port); + return 4; + } + return -EINVAL; +} + +int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) +{ + struct pci_controller *hose = bus->sysdata; + + port += hose->io_space->start; + + switch(size) { + case 1: + outb(port, val); + return 1; + case 2: + if (port & 1) + return -EINVAL; + outw(port, val); + return 2; + case 4: + if (port & 3) + return -EINVAL; + outl(port, val); + return 4; + } + return -EINVAL; +} diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index a3b938811400..a91ba28999b5 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -168,7 +168,7 @@ pcibios_align_resource(void *data, struct resource *res, */ /* Align to multiple of size of minimum base. */ - alignto = max(0x1000UL, align); + alignto = max_t(resource_size_t, 0x1000, align); start = ALIGN(start, alignto); if (hose->sparse_mem_base && size <= 7 * 16*MB) { if (((start / (16*MB)) & 0x7) == 0) { diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index b9094da05d7a..bfb880af959d 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -247,7 +247,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, && paddr + size <= __direct_map_size) { ret = paddr + __direct_map_base; - DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n", + DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; @@ -258,7 +258,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, if (dac_allowed) { ret = paddr + alpha_mv.pci_dac_offset; - DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n", + DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; @@ -299,7 +299,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, ret = arena->dma_base + dma_ofs * PAGE_SIZE; ret += (unsigned long)cpu_addr & ~PAGE_MASK; - DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n", + DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n", cpu_addr, size, npages, ret, __builtin_return_address(0)); return ret; @@ -355,14 +355,14 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, && dma_addr < __direct_map_base + __direct_map_size) { /* Nothing to do. 
*/ - DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n", + DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); return; } if (dma_addr > 0xffffffff) { - DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n", + DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); return; } @@ -373,9 +373,9 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; if (dma_ofs * PAGE_SIZE >= arena->size) { - printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx " - " base %lx size %x\n", dma_addr, arena->dma_base, - arena->size); + printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx " + " base %llx size %x\n", + dma_addr, arena->dma_base, arena->size); return; BUG(); } @@ -394,7 +394,7 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, spin_unlock_irqrestore(&arena->lock, flags); - DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n", + DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n", dma_addr, size, npages, __builtin_return_address(0)); } EXPORT_SYMBOL(pci_unmap_single); @@ -444,7 +444,7 @@ try_again: goto try_again; } - DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n", + DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n", size, cpu_addr, *dma_addrp, __builtin_return_address(0)); return cpu_addr; @@ -464,7 +464,7 @@ pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr, pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); free_pages((unsigned long)cpu_addr, get_order(size)); - DBGA2("pci_free_consistent: [%x,%lx] from %p\n", + DBGA2("pci_free_consistent: [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); } EXPORT_SYMBOL(pci_free_consistent); @@ -551,7 +551,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, out->dma_address = paddr + __direct_map_base; out->dma_length = size; - DBGA(" sg_fill: [%p,%lx] -> direct %lx\n", + DBGA(" sg_fill: [%p,%lx] -> direct %llx\n", __va(paddr), size, out->dma_address); return 0; @@ -563,7 +563,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, out->dma_address = paddr + alpha_mv.pci_dac_offset; out->dma_length = size; - DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n", + DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n", __va(paddr), size, out->dma_address); return 0; @@ -589,7 +589,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; out->dma_length = size; - DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n", + DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n", __va(paddr), size, out->dma_address, npages); /* All virtually contiguous. We need to find the length of each @@ -752,7 +752,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, if (addr > 0xffffffff) { /* It's a DAC address -- nothing to do. */ - DBGA(" (%ld) DAC [%lx,%lx]\n", + DBGA(" (%ld) DAC [%llx,%zx]\n", sg - end + nents, addr, size); continue; } @@ -760,12 +760,12 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, if (addr >= __direct_map_base && addr < __direct_map_base + __direct_map_size) { /* Nothing to do. 
*/ - DBGA(" (%ld) direct [%lx,%lx]\n", + DBGA(" (%ld) direct [%llx,%zx]\n", sg - end + nents, addr, size); continue; } - DBGA(" (%ld) sg [%lx,%lx]\n", + DBGA(" (%ld) sg [%llx,%zx]\n", sg - end + nents, addr, size); npages = iommu_num_pages(addr, size, PAGE_SIZE); diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h index fe14c6747cd6..567f2598d090 100644 --- a/arch/alpha/kernel/proto.h +++ b/arch/alpha/kernel/proto.h @@ -20,7 +20,7 @@ struct pci_controller; extern struct pci_ops apecs_pci_ops; extern void apecs_init_arch(void); extern void apecs_pci_clr_err(void); -extern void apecs_machine_check(u64, u64); +extern void apecs_machine_check(unsigned long vector, unsigned long la_ptr); extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_cia.c */ @@ -29,7 +29,7 @@ extern void cia_init_pci(void); extern void cia_init_arch(void); extern void pyxis_init_arch(void); extern void cia_kill_arch(int); -extern void cia_machine_check(u64, u64); +extern void cia_machine_check(unsigned long vector, unsigned long la_ptr); extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_irongate.c */ @@ -42,7 +42,7 @@ extern void irongate_machine_check(u64, u64); /* core_lca.c */ extern struct pci_ops lca_pci_ops; extern void lca_init_arch(void); -extern void lca_machine_check(u64, u64); +extern void lca_machine_check(unsigned long vector, unsigned long la_ptr); extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_marvel.c */ @@ -64,7 +64,7 @@ void io7_clear_errors(struct io7 *io7); extern struct pci_ops mcpcia_pci_ops; extern void mcpcia_init_arch(void); extern void mcpcia_init_hoses(void); -extern void mcpcia_machine_check(u64, u64); +extern void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr); extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_polaris.c */ @@ -72,14 +72,14 @@ extern struct pci_ops polaris_pci_ops; extern int polaris_read_config_dword(struct pci_dev *, int, u32 *); extern int polaris_write_config_dword(struct pci_dev *, int, u32); extern void polaris_init_arch(void); -extern void polaris_machine_check(u64, u64); +extern void polaris_machine_check(unsigned long vector, unsigned long la_ptr); #define polaris_pci_tbi ((void *)0) /* core_t2.c */ extern struct pci_ops t2_pci_ops; extern void t2_init_arch(void); extern void t2_kill_arch(int); -extern void t2_machine_check(u64, u64); +extern void t2_machine_check(unsigned long vector, unsigned long la_ptr); extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_titan.c */ @@ -94,14 +94,14 @@ extern struct _alpha_agp_info *titan_agp_info(void); extern struct pci_ops tsunami_pci_ops; extern void tsunami_init_arch(void); extern void tsunami_kill_arch(int); -extern void tsunami_machine_check(u64, u64); +extern void tsunami_machine_check(unsigned long vector, unsigned long la_ptr); extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_wildfire.c */ extern struct pci_ops wildfire_pci_ops; extern void wildfire_init_arch(void); extern void wildfire_kill_arch(int); -extern void wildfire_machine_check(u64, u64); +extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr); extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); extern int wildfire_pa_to_nid(unsigned long); extern int wildfire_cpuid_to_nid(int); diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 02bee6983ce2..80df86cd746b 100644 
--- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -1255,7 +1255,7 @@ show_cpuinfo(struct seq_file *f, void *slot) platform_string(), nr_processors); #ifdef CONFIG_SMP - seq_printf(f, "cpus active\t\t: %d\n" + seq_printf(f, "cpus active\t\t: %u\n" "cpu active mask\t\t: %016lx\n", num_online_cpus(), cpus_addr(cpu_possible_map)[0]); #endif diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c index fd467b207f0f..bca5bda90cde 100644 --- a/arch/alpha/kernel/smc37c669.c +++ b/arch/alpha/kernel/smc37c669.c @@ -2542,8 +2542,8 @@ void __init SMC669_Init ( int index ) SMC37c669_display_device_info( ); #endif local_irq_restore(flags); - printk( "SMC37c669 Super I/O Controller found @ 0x%lx\n", - (unsigned long) SMC_base ); + printk( "SMC37c669 Super I/O Controller found @ 0x%p\n", + SMC_base ); } else { local_irq_restore(flags); diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c index 78ad7cd1bbd6..d12af472e1c0 100644 --- a/arch/alpha/kernel/srm_env.c +++ b/arch/alpha/kernel/srm_env.c @@ -218,7 +218,6 @@ srm_env_init(void) BASE_DIR); goto cleanup; } - base_dir->owner = THIS_MODULE; /* * Create per-name subdirectory @@ -229,7 +228,6 @@ srm_env_init(void) BASE_DIR, NAMED_DIR); goto cleanup; } - named_dir->owner = THIS_MODULE; /* * Create per-number subdirectory @@ -241,7 +239,6 @@ srm_env_init(void) goto cleanup; } - numbered_dir->owner = THIS_MODULE; /* * Create all named nodes @@ -254,7 +251,6 @@ srm_env_init(void) goto cleanup; entry->proc_entry->data = (void *) entry; - entry->proc_entry->owner = THIS_MODULE; entry->proc_entry->read_proc = srm_env_read; entry->proc_entry->write_proc = srm_env_write; @@ -275,7 +271,6 @@ srm_env_init(void) entry->id = var_num; entry->proc_entry->data = (void *) entry; - entry->proc_entry->owner = THIS_MODULE; entry->proc_entry->read_proc = srm_env_read; entry->proc_entry->write_proc = srm_env_write; } diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index e2516f9a8967..2b5caf3d9b15 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -244,12 +244,11 @@ jensen_init_arch(void) } static void -jensen_machine_check (u64 vector, u64 la) +jensen_machine_check(unsigned long vector, unsigned long la) { printk(KERN_CRIT "Machine check\n"); } - /* * The System Vector */ diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index d232e42be018..9e263256a42d 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c @@ -453,7 +453,7 @@ sable_lynx_enable_irq(unsigned int irq) sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 - printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", + printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } @@ -469,7 +469,7 @@ sable_lynx_disable_irq(unsigned int irq) sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 - printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", + printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index cefc5a355ef9..6ee7655b7568 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -623,7 +623,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, } lock_kernel(); - printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n", + printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", pc, va, opcode, reg); do_exit(SIGSEGV); diff --git 
a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index ce4e4296b954..62d4abbaa654 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -250,21 +250,3 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access, dump_dtlb(); die("Bus Error", regs, SIGKILL); } - -/* - * This functionality is currently not possible to implement because - * we're using segmentation to ensure a fixed mapping of the kernel - * virtual address space. - * - * It would be possible to implement this, but it would require us to - * disable segmentation at startup and load the kernel mappings into - * the TLB like any other pages. There will be lots of trickery to - * avoid recursive invocation of the TLB miss handler, though... - */ -#ifdef CONFIG_DEBUG_PAGEALLOC -void kernel_map_pages(struct page *page, int numpages, int enable) -{ - -} -EXPORT_SYMBOL(kernel_map_pages); -#endif diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c index 834cab7438a8..530d1393a232 100644 --- a/arch/blackfin/mm/sram-alloc.c +++ b/arch/blackfin/mm/sram-alloc.c @@ -854,7 +854,6 @@ static int __init sram_proc_init(void) printk(KERN_WARNING "unable to create /proc/sram\n"); return -1; } - ptr->owner = THIS_MODULE; ptr->read_proc = sram_proc_read; return 0; } diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c index c685ba4c3387..2b73c7a5b649 100644 --- a/arch/cris/arch-v10/kernel/time.c +++ b/arch/cris/arch-v10/kernel/time.c @@ -261,7 +261,6 @@ timer_interrupt(int irq, void *dev_id) static struct irqaction irq2 = { .handler = timer_interrupt, .flags = IRQF_SHARED | IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "timer", }; diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 9dac17334640..f59a973c97ee 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -65,7 +65,6 @@ static int send_ipi(int vector, int wait, cpumask_t cpu_mask); static struct irqaction irq_ipi = { .handler = crisv32_ipi_interrupt, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "ipi", }; diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c index 3a13dd6e0a9a..65633d0dab86 100644 --- a/arch/cris/arch-v32/kernel/time.c +++ b/arch/cris/arch-v32/kernel/time.c @@ -267,7 +267,6 @@ timer_interrupt(int irq, void *dev_id) static struct irqaction irq_timer = { .handler = timer_interrupt, .flags = IRQF_SHARED | IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "timer" }; diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c index 9e38f99bbab8..4dd9adaf115a 100644 --- a/arch/frv/kernel/irq-mb93091.c +++ b/arch/frv/kernel/irq-mb93091.c @@ -109,28 +109,24 @@ static struct irqaction fpga_irq[4] = { [0] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, - .mask = CPU_MASK_NONE, .name = "fpga.0", .dev_id = (void *) 0x0028UL, }, [1] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, - .mask = CPU_MASK_NONE, .name = "fpga.1", .dev_id = (void *) 0x0050UL, }, [2] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, - .mask = CPU_MASK_NONE, .name = "fpga.2", .dev_id = (void *) 0x1c00UL, }, [3] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, - .mask = CPU_MASK_NONE, .name = "fpga.3", .dev_id = (void *) 0x6386UL, } diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c index 3c2752ca9775..e45209031873 100644 --- a/arch/frv/kernel/irq-mb93093.c +++ b/arch/frv/kernel/irq-mb93093.c @@ -108,7 +108,6 @@ static struct 
irqaction fpga_irq[1] = { [0] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "fpga.0", .dev_id = (void *) 0x0700UL, } diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c index 7754c7338e4b..ba55ecdfb245 100644 --- a/arch/frv/kernel/irq-mb93493.c +++ b/arch/frv/kernel/irq-mb93493.c @@ -120,14 +120,12 @@ static struct irqaction mb93493_irq[2] = { [0] = { .handler = mb93493_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, - .mask = CPU_MASK_NONE, .name = "mb93493.0", .dev_id = (void *) __addr_MB93493_IQSR(0), }, [1] = { .handler = mb93493_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, - .mask = CPU_MASK_NONE, .name = "mb93493.1", .dev_id = (void *) __addr_MB93493_IQSR(1), } diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c index 69f6a4ef5d61..fb0ce7577225 100644 --- a/arch/frv/kernel/time.c +++ b/arch/frv/kernel/time.c @@ -45,7 +45,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy); static struct irqaction timer_irq = { .handler = timer_interrupt, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "timer", }; diff --git a/arch/h8300/kernel/timer/itu.c b/arch/h8300/kernel/timer/itu.c index d1c926596b08..4883ba7103a8 100644 --- a/arch/h8300/kernel/timer/itu.c +++ b/arch/h8300/kernel/timer/itu.c @@ -60,7 +60,6 @@ static struct irqaction itu_irq = { .name = "itu", .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER, - .mask = CPU_MASK_NONE, }; static const int __initdata divide_rate[] = {1, 2, 4, 8}; diff --git a/arch/h8300/kernel/timer/timer16.c b/arch/h8300/kernel/timer/timer16.c index e14271b72119..042dbb53f3fb 100644 --- a/arch/h8300/kernel/timer/timer16.c +++ b/arch/h8300/kernel/timer/timer16.c @@ -55,7 +55,6 @@ static struct irqaction timer16_irq = { .name = "timer-16", .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER, - .mask = CPU_MASK_NONE, }; static const int __initdata divide_rate[] = {1, 2, 4, 8}; diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c index 0556d7c7bea6..38be0cabef0d 100644 --- a/arch/h8300/kernel/timer/timer8.c +++ b/arch/h8300/kernel/timer/timer8.c @@ -75,7 +75,6 @@ static struct irqaction timer8_irq = { .name = "timer-8", .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER, - .mask = CPU_MASK_NONE, }; static const int __initdata divide_rate[] = {8, 64, 8192}; diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c index df7f453a9673..ad383caae196 100644 --- a/arch/h8300/kernel/timer/tpu.c +++ b/arch/h8300/kernel/timer/tpu.c @@ -65,7 +65,6 @@ static struct irqaction tpu_irq = { .name = "tpu", .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER, - .mask = CPU_MASK_NONE, }; const static int __initdata divide_rate[] = { diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 24b1ad5334cb..2bef5261d96d 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -24,6 +24,7 @@ #include <linux/major.h> #include <linux/fcntl.h> #include <linux/mm.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/capability.h> #include <linux/console.h> @@ -848,38 +849,36 @@ static int rs_open(struct tty_struct *tty, struct file * filp) * /proc fs routines.... 
*/ -static inline int line_info(char *buf, struct serial_state *state) +static inline void line_info(struct seq_file *m, struct serial_state *state) { - return sprintf(buf, "%d: uart:%s port:%lX irq:%d\n", + seq_printf(m, "%d: uart:%s port:%lX irq:%d\n", state->line, uart_config[state->type].name, state->port, state->irq); } -static int rs_read_proc(char *page, char **start, off_t off, int count, - int *eof, void *data) +static int rs_proc_show(struct seq_file *m, void *v) { - int i, len = 0, l; - off_t begin = 0; - - len += sprintf(page, "simserinfo:1.0 driver:%s\n", serial_version); - for (i = 0; i < NR_PORTS && len < 4000; i++) { - l = line_info(page + len, &rs_table[i]); - len += l; - if (len+begin > off+count) - goto done; - if (len+begin < off) { - begin += len; - len = 0; - } - } - *eof = 1; -done: - if (off >= len+begin) - return 0; - *start = page + (begin-off); - return ((count < begin+len-off) ? count : begin+len-off); + int i; + + seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version); + for (i = 0; i < NR_PORTS; i++) + line_info(m, &rs_table[i]); + return 0; } +static int rs_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, rs_proc_show, NULL); +} + +static const struct file_operations rs_proc_fops = { + .owner = THIS_MODULE, + .open = rs_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + /* * --------------------------------------------------------------------- * rs_init() and friends @@ -917,7 +916,7 @@ static const struct tty_operations hp_ops = { .start = rs_start, .hangup = rs_hangup, .wait_until_sent = rs_wait_until_sent, - .read_proc = rs_read_proc, + .proc_fops = &rs_proc_fops, }; /* diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h index c47830e26cb7..111ed5222892 100644 --- a/arch/ia64/include/asm/intrinsics.h +++ b/arch/ia64/include/asm/intrinsics.h @@ -202,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void); #ifndef __ASSEMBLY__ #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) -#define IA64_INTRINSIC_API(name) pv_cpu_ops.name +#ifdef ASM_SUPPORTED +# define IA64_INTRINSIC_API(name) paravirt_ ## name +#else +# define IA64_INTRINSIC_API(name) pv_cpu_ops.name +#endif #define IA64_INTRINSIC_MACRO(name) paravirt_ ## name #else #define IA64_INTRINSIC_API(name) ia64_native_ ## name diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h index 040bc87db930..7f2a456603cb 100644 --- a/arch/ia64/include/asm/mmu_context.h +++ b/arch/ia64/include/asm/mmu_context.h @@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm) /* re-check, now that we've got the lock: */ context = mm->context; if (context == 0) { - cpus_clear(mm->cpu_vm_mask); + cpumask_clear(mm_cpumask(mm)); if (ia64_ctx.next >= ia64_ctx.limit) { ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, ia64_ctx.max_ctx, ia64_ctx.next); @@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm) do { context = get_mmu_context(mm); - if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) - cpu_set(smp_processor_id(), mm->cpu_vm_mask); + if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); reload_context(context); /* * in the unlikely event of a TLB-flush by another thread, diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h index d2da61e4c49b..908eaef42a08 100644 --- a/arch/ia64/include/asm/module.h +++ b/arch/ia64/include/asm/module.h @@ -16,6 +16,12 @@ struct mod_arch_specific { 
struct elf64_shdr *got; /* global offset table */ struct elf64_shdr *opd; /* official procedure descriptors */ struct elf64_shdr *unwind; /* unwind-table section */ +#ifdef CONFIG_PARAVIRT + struct elf64_shdr *paravirt_bundles; + /* paravirt_alt_bundle_patch table */ + struct elf64_shdr *paravirt_insts; + /* paravirt_alt_inst_patch table */ +#endif unsigned long gp; /* global-pointer for module */ void *core_unw_table; /* core unwind-table cookie returned by unwinder */ diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h index 0a1026cca4fa..d2d46efb3e6e 100644 --- a/arch/ia64/include/asm/native/inst.h +++ b/arch/ia64/include/asm/native/inst.h @@ -30,6 +30,9 @@ #define __paravirt_work_processed_syscall_target \ ia64_work_processed_syscall +#define paravirt_fsyscall_table ia64_native_fsyscall_table +#define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down + #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK # define PARAVIRT_POISON 0xdeadbeefbaadf00d # define CLOBBER(clob) \ @@ -74,6 +77,11 @@ (pred) mov reg = psr \ CLOBBER(clob) +#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ +(pred) mov reg = ar.itc \ + CLOBBER(clob) \ + CLOBBER_PRED(pred_clob) + #define MOV_TO_IFA(reg, clob) \ mov cr.ifa = reg \ CLOBBER(clob) @@ -158,6 +166,11 @@ #define RSM_PSR_DT \ rsm psr.dt +#define RSM_PSR_BE_I(clob0, clob1) \ + rsm psr.be | psr.i \ + CLOBBER(clob0) \ + CLOBBER(clob1) + #define SSM_PSR_DT_AND_SRLZ_I \ ssm psr.dt \ ;; \ diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h new file mode 100644 index 000000000000..be16ca9311bf --- /dev/null +++ b/arch/ia64/include/asm/native/patchlist.h @@ -0,0 +1,38 @@ +/****************************************************************************** + * arch/ia64/include/asm/native/inst.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define __paravirt_start_gate_fsyscall_patchlist \ + __ia64_native_start_gate_fsyscall_patchlist +#define __paravirt_end_gate_fsyscall_patchlist \ + __ia64_native_end_gate_fsyscall_patchlist +#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ + __ia64_native_start_gate_brl_fsys_bubble_down_patchlist +#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ + __ia64_native_end_gate_brl_fsys_bubble_down_patchlist +#define __paravirt_start_gate_vtop_patchlist \ + __ia64_native_start_gate_vtop_patchlist +#define __paravirt_end_gate_vtop_patchlist \ + __ia64_native_end_gate_vtop_patchlist +#define __paravirt_start_gate_mckinley_e9_patchlist \ + __ia64_native_start_gate_mckinley_e9_patchlist +#define __paravirt_end_gate_mckinley_e9_patchlist \ + __ia64_native_end_gate_mckinley_e9_patchlist diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h index b8e6eb1090d7..8d72962ec838 100644 --- a/arch/ia64/include/asm/native/pvchk_inst.h +++ b/arch/ia64/include/asm/native/pvchk_inst.h @@ -180,6 +180,11 @@ IS_PRED_IN(pred) \ IS_RREG_OUT(reg) \ IS_RREG_CLOB(clob) +#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ + IS_PRED_IN(pred) \ + IS_PRED_CLOB(pred_clob) \ + IS_RREG_OUT(reg) \ + IS_RREG_CLOB(clob) #define MOV_TO_IFA(reg, clob) \ IS_RREG_IN(reg) \ IS_RREG_CLOB(clob) @@ -246,6 +251,9 @@ IS_RREG_CLOB(clob2) #define RSM_PSR_DT \ nop 0 +#define RSM_PSR_BE_I(clob0, clob1) \ + IS_RREG_CLOB(clob0) \ + IS_RREG_CLOB(clob1) #define SSM_PSR_DT_AND_SRLZ_I \ nop 0 #define BSW_0(clob0, clob1, clob2) \ diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index 2bf3636473fe..2eb0a981a09a 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h @@ -22,6 +22,56 @@ #ifndef __ASM_PARAVIRT_H #define __ASM_PARAVIRT_H +#ifndef __ASSEMBLY__ +/****************************************************************************** + * fsys related addresses + */ +struct pv_fsys_data { + unsigned long *fsyscall_table; + void *fsys_bubble_down; +}; + +extern struct pv_fsys_data pv_fsys_data; + +unsigned long *paravirt_get_fsyscall_table(void); +char *paravirt_get_fsys_bubble_down(void); + +/****************************************************************************** + * patchlist addresses for gate page + */ +enum pv_gate_patchlist { + PV_GATE_START_FSYSCALL, + PV_GATE_END_FSYSCALL, + + PV_GATE_START_BRL_FSYS_BUBBLE_DOWN, + PV_GATE_END_BRL_FSYS_BUBBLE_DOWN, + + PV_GATE_START_VTOP, + PV_GATE_END_VTOP, + + PV_GATE_START_MCKINLEY_E9, + PV_GATE_END_MCKINLEY_E9, +}; + +struct pv_patchdata { + unsigned long start_fsyscall_patchlist; + unsigned long end_fsyscall_patchlist; + unsigned long start_brl_fsys_bubble_down_patchlist; + unsigned long end_brl_fsys_bubble_down_patchlist; + unsigned long start_vtop_patchlist; + unsigned long end_vtop_patchlist; + unsigned long start_mckinley_e9_patchlist; + unsigned long end_mckinley_e9_patchlist; + + void *gate_section; +}; + +extern struct pv_patchdata pv_patchdata; + +unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type); +void *paravirt_get_gate_section(void); +#endif + #ifdef CONFIG_PARAVIRT_GUEST #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 @@ -68,6 +118,14 @@ struct pv_init_ops { int (*arch_setup_nomca)(void); void 
(*post_smp_prepare_boot_cpu)(void); + +#ifdef ASM_SUPPORTED + unsigned long (*patch_bundle)(void *sbundle, void *ebundle, + unsigned long type); + unsigned long (*patch_inst)(unsigned long stag, unsigned long etag, + unsigned long type); +#endif + void (*patch_branch)(unsigned long tag, unsigned long type); }; extern struct pv_init_ops pv_init_ops; @@ -210,6 +268,8 @@ struct pv_time_ops { int (*do_steal_accounting)(unsigned long *new_itm); void (*clocksource_resume)(void); + + unsigned long long (*sched_clock)(void); }; extern struct pv_time_ops pv_time_ops; @@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm) return pv_time_ops.do_steal_accounting(new_itm); } +static inline unsigned long long paravirt_sched_clock(void) +{ + return pv_time_ops.sched_clock(); +} + #endif /* !__ASSEMBLY__ */ #else diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h new file mode 100644 index 000000000000..128ff5db6e67 --- /dev/null +++ b/arch/ia64/include/asm/paravirt_patch.h @@ -0,0 +1,143 @@ +/****************************************************************************** + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __ASM_PARAVIRT_PATCH_H +#define __ASM_PARAVIRT_PATCH_H + +#ifdef __ASSEMBLY__ + + .section .paravirt_branches, "a" + .previous +#define PARAVIRT_PATCH_SITE_BR(type) \ + { \ + [1:] ; \ + br.cond.sptk.many 2f ; \ + nop.b 0 ; \ + nop.b 0;; ; \ + } ; \ + 2: \ + .xdata8 ".paravirt_branches", 1b, type + +#else + +#include <linux/stringify.h> +#include <asm/intrinsics.h> + +/* for binary patch */ +struct paravirt_patch_site_bundle { + void *sbundle; + void *ebundle; + unsigned long type; +}; + +/* label means the beginning of new bundle */ +#define paravirt_alt_bundle(instr, privop) \ + "\t998:\n" \ + "\t" instr "\n" \ + "\t999:\n" \ + "\t.pushsection .paravirt_bundles, \"a\"\n" \ + "\t.popsection\n" \ + "\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \ + __stringify(privop) "\n" + + +struct paravirt_patch_bundle_elem { + const void *sbundle; + const void *ebundle; + unsigned long type; +}; + + +struct paravirt_patch_site_inst { + unsigned long stag; + unsigned long etag; + unsigned long type; +}; + +#define paravirt_alt_inst(instr, privop) \ + "\t[998:]\n" \ + "\t" instr "\n" \ + "\t[999:]\n" \ + "\t.pushsection .paravirt_insts, \"a\"\n" \ + "\t.popsection\n" \ + "\t.xdata8 \".paravirt_insts\", 998b, 999b, " \ + __stringify(privop) "\n" + +struct paravirt_patch_site_branch { + unsigned long tag; + unsigned long type; +}; + +struct paravirt_patch_branch_target { + const void *entry; + unsigned long type; +}; + +void +__paravirt_patch_apply_branch( + unsigned long tag, unsigned long type, + const struct paravirt_patch_branch_target *entries, + unsigned int 
nr_entries); + +void +paravirt_patch_reloc_br(unsigned long tag, const void *target); + +void +paravirt_patch_reloc_brl(unsigned long tag, const void *target); + + +#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT) +unsigned long +ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type); + +unsigned long +__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type, + const struct paravirt_patch_bundle_elem *elems, + unsigned long nelems, + const struct paravirt_patch_bundle_elem **found); + +void +paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start, + const struct paravirt_patch_site_bundle *end); + +void +paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start, + const struct paravirt_patch_site_inst *end); + +void paravirt_patch_apply(void); +#else +#define paravirt_patch_apply_bundle(start, end) do { } while (0) +#define paravirt_patch_apply_inst(start, end) do { } while (0) +#define paravirt_patch_apply() do { } while (0) +#endif + +#endif /* !__ASSEMBLEY__ */ + +#endif /* __ASM_PARAVIRT_PATCH_H */ + +/* + * Local variables: + * mode: C + * c-set-style: "linux" + * c-basic-offset: 8 + * tab-width: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h index 33c8e55f5775..3d2951130b5f 100644 --- a/arch/ia64/include/asm/paravirt_privop.h +++ b/arch/ia64/include/asm/paravirt_privop.h @@ -33,7 +33,7 @@ */ struct pv_cpu_ops { - void (*fc)(unsigned long addr); + void (*fc)(void *addr); unsigned long (*thash)(unsigned long addr); unsigned long (*get_cpuid)(int index); unsigned long (*get_pmd)(int index); @@ -60,12 +60,18 @@ extern unsigned long ia64_native_getreg_func(int regnum); /* Instructions paravirtualized for performance */ /************************************************/ +#ifndef ASM_SUPPORTED +#define paravirt_ssm_i() pv_cpu_ops.ssm_i() +#define paravirt_rsm_i() pv_cpu_ops.rsm_i() +#define __paravirt_getreg() pv_cpu_ops.getreg() +#endif + /* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). * static inline function doesn't satisfy it. 
*/ #define paravirt_ssm(mask) \ do { \ if ((mask) == IA64_PSR_I) \ - pv_cpu_ops.ssm_i(); \ + paravirt_ssm_i(); \ else \ ia64_native_ssm(mask); \ } while (0) @@ -73,7 +79,7 @@ extern unsigned long ia64_native_getreg_func(int regnum); #define paravirt_rsm(mask) \ do { \ if ((mask) == IA64_PSR_I) \ - pv_cpu_ops.rsm_i(); \ + paravirt_rsm_i(); \ else \ ia64_native_rsm(mask); \ } while (0) @@ -86,7 +92,7 @@ extern unsigned long ia64_native_getreg_func(int regnum); if ((reg) == _IA64_REG_IP) \ res = ia64_native_getreg(_IA64_REG_IP); \ else \ - res = pv_cpu_ops.getreg(reg); \ + res = __paravirt_getreg(reg); \ res; \ }) @@ -112,6 +118,12 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch); #endif /* CONFIG_PARAVIRT */ +#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED) +#define paravirt_dv_serialize_data() ia64_dv_serialize_data() +#else +#define paravirt_dv_serialize_data() /* nothing */ +#endif + /* these routines utilize privilege-sensitive or performance-sensitive * privileged instructions so the code must be replaced with * paravirtualized versions */ @@ -121,4 +133,349 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch); IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) #define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) + +#if defined(CONFIG_PARAVIRT) +/****************************************************************************** + * binary patching infrastructure + */ +#define PARAVIRT_PATCH_TYPE_FC 1 +#define PARAVIRT_PATCH_TYPE_THASH 2 +#define PARAVIRT_PATCH_TYPE_GET_CPUID 3 +#define PARAVIRT_PATCH_TYPE_GET_PMD 4 +#define PARAVIRT_PATCH_TYPE_PTCGA 5 +#define PARAVIRT_PATCH_TYPE_GET_RR 6 +#define PARAVIRT_PATCH_TYPE_SET_RR 7 +#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8 +#define PARAVIRT_PATCH_TYPE_SSM_I 9 +#define PARAVIRT_PATCH_TYPE_RSM_I 10 +#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11 +#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12 + +/* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */ +#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000 +#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000 + +/* + * struct task_struct* (*ia64_switch_to)(void* next_task); + * void *ia64_leave_syscall; + * void *ia64_work_processed_syscall + * void *ia64_leave_kernel; + */ + +#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000 +#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \ + (PARAVIRT_PATCH_TYPE_BR_START + 0) +#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \ + (PARAVIRT_PATCH_TYPE_BR_START + 1) +#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \ + (PARAVIRT_PATCH_TYPE_BR_START + 2) +#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \ + (PARAVIRT_PATCH_TYPE_BR_START + 3) + +#ifdef ASM_SUPPORTED +#include <asm/paravirt_patch.h> + +/* + * pv_cpu_ops calling stub. + * normal function call convension can't be written by gcc + * inline assembly. + * + * from the caller's point of view, + * the following registers will be clobbered. + * r2, r3 + * r8-r15 + * r16, r17 + * b6, b7 + * p6-p15 + * ar.ccv + * + * from the callee's point of view , + * the following registers can be used. + * r2, r3: scratch + * r8: scratch, input argument0 and return value + * r0-r15: scratch, input argument1-5 + * b6: return pointer + * b7: scratch + * p6-p15: scratch + * ar.ccv: scratch + * + * other registers must not be changed. especially + * b0: rp: preserved. gcc ignores b0 in clobbered register. 
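/*
 * [Illustrative sketch, not part of the patch] The calling convention
 * described above amounts to an indirect call through an ia64 function
 * descriptor: __PARAVIRT_BR loads the entry point from the first word of
 * the descriptor, the callee's gp from the second, and keeps the caller's
 * gp in r16 across the call.  A C model of those steps; branch_to() and
 * current_gp are stand-ins invented here for the branch register and gp.
 */
struct ia64_fdesc {
	unsigned long ip;	/* entry address: ld8 r3 = [r2], 8 */
	unsigned long gp;	/* callee's gp:   ld8 gp = [r2]    */
};

extern void branch_to(unsigned long ip);	/* stand-in for "mov b7 = r3; br.cond.sptk.few b7" */
static unsigned long current_gp;		/* models the gp register */

static void call_via_descriptor(const struct ia64_fdesc *fd)
{
	unsigned long saved_gp = current_gp;	/* mov r16 = gp */

	current_gp = fd->gp;			/* ld8 gp = [r2] */
	branch_to(fd->ip);			/* branch to the pv_cpu_ops hook */
	current_gp = saved_gp;			/* mov gp = r16 on return */
}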
+ * r16: saved gp + */ +/* 5 bundles */ +#define __PARAVIRT_BR \ + ";;\n" \ + "{ .mlx\n" \ + "nop 0\n" \ + "movl r2 = %[op_addr]\n"/* get function pointer address */ \ + ";;\n" \ + "}\n" \ + "1:\n" \ + "{ .mii\n" \ + "ld8 r2 = [r2]\n" /* load function descriptor address */ \ + "mov r17 = ip\n" /* get ip to calc return address */ \ + "mov r16 = gp\n" /* save gp */ \ + ";;\n" \ + "}\n" \ + "{ .mii\n" \ + "ld8 r3 = [r2], 8\n" /* load entry address */ \ + "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \ + ";;\n" \ + "mov b7 = r3\n" /* set entry address */ \ + "}\n" \ + "{ .mib\n" \ + "ld8 gp = [r2]\n" /* load gp value */ \ + "mov b6 = r17\n" /* set return address */ \ + "br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \ + "}\n" \ + "1:\n" \ + "{ .mii\n" \ + "mov gp = r16\n" /* restore gp value */ \ + "nop 0\n" \ + "nop 0\n" \ + ";;\n" \ + "}\n" + +#define PARAVIRT_OP(op) \ + [op_addr] "i"(&pv_cpu_ops.op) + +#define PARAVIRT_TYPE(type) \ + PARAVIRT_PATCH_TYPE_ ## type + +#define PARAVIRT_REG_CLOBBERS0 \ + "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ + "r15", "r16", "r17" + +#define PARAVIRT_REG_CLOBBERS1 \ + "r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ + "r15", "r16", "r17" + +#define PARAVIRT_REG_CLOBBERS2 \ + "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \ + "r15", "r16", "r17" + +#define PARAVIRT_REG_CLOBBERS5 \ + "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \ + "r15", "r16", "r17" + +#define PARAVIRT_BR_CLOBBERS \ + "b6", "b7" + +#define PARAVIRT_PR_CLOBBERS \ + "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15" + +#define PARAVIRT_AR_CLOBBERS \ + "ar.ccv" + +#define PARAVIRT_CLOBBERS0 \ + PARAVIRT_REG_CLOBBERS0, \ + PARAVIRT_BR_CLOBBERS, \ + PARAVIRT_PR_CLOBBERS, \ + PARAVIRT_AR_CLOBBERS, \ + "memory" + +#define PARAVIRT_CLOBBERS1 \ + PARAVIRT_REG_CLOBBERS1, \ + PARAVIRT_BR_CLOBBERS, \ + PARAVIRT_PR_CLOBBERS, \ + PARAVIRT_AR_CLOBBERS, \ + "memory" + +#define PARAVIRT_CLOBBERS2 \ + PARAVIRT_REG_CLOBBERS2, \ + PARAVIRT_BR_CLOBBERS, \ + PARAVIRT_PR_CLOBBERS, \ + PARAVIRT_AR_CLOBBERS, \ + "memory" + +#define PARAVIRT_CLOBBERS5 \ + PARAVIRT_REG_CLOBBERS5, \ + PARAVIRT_BR_CLOBBERS, \ + PARAVIRT_PR_CLOBBERS, \ + PARAVIRT_AR_CLOBBERS, \ + "memory" + +#define PARAVIRT_BR0(op, type) \ + register unsigned long ia64_clobber asm ("r8"); \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + PARAVIRT_TYPE(type)) \ + : "=r"(ia64_clobber) \ + : PARAVIRT_OP(op) \ + : PARAVIRT_CLOBBERS0) + +#define PARAVIRT_BR0_RET(op, type) \ + register unsigned long ia64_intri_res asm ("r8"); \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + PARAVIRT_TYPE(type)) \ + : "=r"(ia64_intri_res) \ + : PARAVIRT_OP(op) \ + : PARAVIRT_CLOBBERS0) + +#define PARAVIRT_BR1(op, type, arg1) \ + register unsigned long __##arg1 asm ("r8") = arg1; \ + register unsigned long ia64_clobber asm ("r8"); \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + PARAVIRT_TYPE(type)) \ + : "=r"(ia64_clobber) \ + : PARAVIRT_OP(op), "0"(__##arg1) \ + : PARAVIRT_CLOBBERS1) + +#define PARAVIRT_BR1_RET(op, type, arg1) \ + register unsigned long ia64_intri_res asm ("r8"); \ + register unsigned long __##arg1 asm ("r8") = arg1; \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + PARAVIRT_TYPE(type)) \ + : "=r"(ia64_intri_res) \ + : PARAVIRT_OP(op), "0"(__##arg1) \ + : PARAVIRT_CLOBBERS1) + +#define PARAVIRT_BR1_VOID(op, type, arg1) \ + register void *__##arg1 asm ("r8") = arg1; \ + register unsigned long ia64_clobber asm ("r8"); \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + 
PARAVIRT_TYPE(type)) \ + : "=r"(ia64_clobber) \ + : PARAVIRT_OP(op), "0"(__##arg1) \ + : PARAVIRT_CLOBBERS1) + +#define PARAVIRT_BR2(op, type, arg1, arg2) \ + register unsigned long __##arg1 asm ("r8") = arg1; \ + register unsigned long __##arg2 asm ("r9") = arg2; \ + register unsigned long ia64_clobber1 asm ("r8"); \ + register unsigned long ia64_clobber2 asm ("r9"); \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + PARAVIRT_TYPE(type)) \ + : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \ + : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \ + : PARAVIRT_CLOBBERS2) + + +#define PARAVIRT_DEFINE_CPU_OP0(op, type) \ + static inline void \ + paravirt_ ## op (void) \ + { \ + PARAVIRT_BR0(op, type); \ + } + +#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \ + static inline unsigned long \ + paravirt_ ## op (void) \ + { \ + PARAVIRT_BR0_RET(op, type); \ + return ia64_intri_res; \ + } + +#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \ + static inline void \ + paravirt_ ## op (void *arg1) \ + { \ + PARAVIRT_BR1_VOID(op, type, arg1); \ + } + +#define PARAVIRT_DEFINE_CPU_OP1(op, type) \ + static inline void \ + paravirt_ ## op (unsigned long arg1) \ + { \ + PARAVIRT_BR1(op, type, arg1); \ + } + +#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \ + static inline unsigned long \ + paravirt_ ## op (unsigned long arg1) \ + { \ + PARAVIRT_BR1_RET(op, type, arg1); \ + return ia64_intri_res; \ + } + +#define PARAVIRT_DEFINE_CPU_OP2(op, type) \ + static inline void \ + paravirt_ ## op (unsigned long arg1, \ + unsigned long arg2) \ + { \ + PARAVIRT_BR2(op, type, arg1, arg2); \ + } + + +PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC); +PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) +PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID) +PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD) +PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA) +PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR) +PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR) +PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I) +PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I) +PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I) +PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE) + +static inline void +paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1, + unsigned long val2, unsigned long val3, + unsigned long val4) +{ + register unsigned long __val0 asm ("r8") = val0; + register unsigned long __val1 asm ("r9") = val1; + register unsigned long __val2 asm ("r10") = val2; + register unsigned long __val3 asm ("r11") = val3; + register unsigned long __val4 asm ("r14") = val4; + + register unsigned long ia64_clobber0 asm ("r8"); + register unsigned long ia64_clobber1 asm ("r9"); + register unsigned long ia64_clobber2 asm ("r10"); + register unsigned long ia64_clobber3 asm ("r11"); + register unsigned long ia64_clobber4 asm ("r14"); + + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, + PARAVIRT_TYPE(SET_RR0_TO_RR4)) + : "=r"(ia64_clobber0), + "=r"(ia64_clobber1), + "=r"(ia64_clobber2), + "=r"(ia64_clobber3), + "=r"(ia64_clobber4) + : PARAVIRT_OP(set_rr0_to_rr4), + "0"(__val0), "1"(__val1), "2"(__val2), + "3"(__val3), "4"(__val4) + : PARAVIRT_CLOBBERS5); +} + +/* unsigned long paravirt_getreg(int reg) */ +#define __paravirt_getreg(reg) \ + ({ \ + register unsigned long ia64_intri_res asm ("r8"); \ + register unsigned long __reg asm ("r8") = (reg); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(reg)); \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + PARAVIRT_TYPE(GETREG) \ + + (reg)) \ + : "=r"(ia64_intri_res) \ + : PARAVIRT_OP(getreg), "0"(__reg) \ + : PARAVIRT_CLOBBERS1); \ + \ + 
ia64_intri_res; \ + }) + +/* void paravirt_setreg(int reg, unsigned long val) */ +#define paravirt_setreg(reg, val) \ + do { \ + register unsigned long __val asm ("r8") = val; \ + register unsigned long __reg asm ("r9") = reg; \ + register unsigned long ia64_clobber1 asm ("r8"); \ + register unsigned long ia64_clobber2 asm ("r9"); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(reg)); \ + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ + PARAVIRT_TYPE(SETREG) \ + + (reg)) \ + : "=r"(ia64_clobber1), \ + "=r"(ia64_clobber2) \ + : PARAVIRT_OP(setreg), \ + "1"(__reg), "0"(__val) \ + : PARAVIRT_CLOBBERS2); \ + } while (0) + +#endif /* ASM_SUPPORTED */ +#endif /* CONFIG_PARAVIRT && ASM_SUPPOTED */ + #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */ diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h index 21c402365d0e..598408336251 100644 --- a/arch/ia64/include/asm/smp.h +++ b/arch/ia64/include/asm/smp.h @@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *); extern int is_multithreading_enabled(void); extern void arch_send_call_function_single_ipi(int cpu); -extern void arch_send_call_function_ipi(cpumask_t mask); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask #else /* CONFIG_SMP */ diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h index 4e03cfe74a0c..86c7db861180 100644 --- a/arch/ia64/include/asm/timex.h +++ b/arch/ia64/include/asm/timex.h @@ -40,5 +40,6 @@ get_cycles (void) } extern void ia64_cpu_local_tick (void); +extern unsigned long long ia64_native_sched_clock (void); #endif /* _ASM_IA64_TIMEX_H */ diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index 3193f4417e16..7b4c8c70b2d1 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -44,11 +44,6 @@ #define parent_node(nid) (nid) /* - * Returns the number of the first CPU on Node 'node'. - */ -#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) - -/* * Determines the node for a given pci bus */ #define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node @@ -117,11 +112,6 @@ void build_cpu_to_node_map(void); extern void arch_fix_phys_package_id(int num, u32 slot); -#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ - CPU_MASK_ALL : \ - node_to_cpumask(pcibus_to_node(bus)) \ - ) - #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? 
\ cpu_all_mask : \ cpumask_of_node(pcibus_to_node(bus))) diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h index 7a804e80fc67..e425227a418e 100644 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ b/arch/ia64/include/asm/xen/hypervisor.h @@ -33,9 +33,6 @@ #ifndef _ASM_IA64_XEN_HYPERVISOR_H #define _ASM_IA64_XEN_HYPERVISOR_H -#ifdef CONFIG_XEN - -#include <linux/init.h> #include <xen/interface/xen.h> #include <xen/interface/version.h> /* to compile feature.c */ #include <xen/features.h> /* to comiple xen-netfront.c */ @@ -43,22 +40,32 @@ /* xen_domain_type is set before executing any C code by early_xen_setup */ enum xen_domain_type { - XEN_NATIVE, - XEN_PV_DOMAIN, - XEN_HVM_DOMAIN, + XEN_NATIVE, /* running on bare hardware */ + XEN_PV_DOMAIN, /* running in a PV domain */ + XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/ }; +#ifdef CONFIG_XEN extern enum xen_domain_type xen_domain_type; +#else +#define xen_domain_type XEN_NATIVE +#endif #define xen_domain() (xen_domain_type != XEN_NATIVE) -#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN) -#define xen_initial_domain() (xen_pv_domain() && \ +#define xen_pv_domain() (xen_domain() && \ + xen_domain_type == XEN_PV_DOMAIN) +#define xen_hvm_domain() (xen_domain() && \ + xen_domain_type == XEN_HVM_DOMAIN) + +#ifdef CONFIG_XEN_DOM0 +#define xen_initial_domain() (xen_pv_domain() && \ (xen_start_info->flags & SIF_INITDOMAIN)) -#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) +#else +#define xen_initial_domain() (0) +#endif -/* deprecated. remove this */ -#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN) +#ifdef CONFIG_XEN extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; @@ -74,16 +81,6 @@ void force_evtchn_callback(void); /* For setup_arch() in arch/ia64/kernel/setup.c */ void xen_ia64_enable_opt_feature(void); - -#else /* CONFIG_XEN */ - -#define xen_domain() (0) -#define xen_pv_domain() (0) -#define xen_initial_domain() (0) -#define xen_hvm_domain() (0) -#define is_running_on_xen() (0) /* deprecated. remove this */ #endif -#define is_initial_xendomain() (0) /* deprecated. remove this */ - #endif /* _ASM_IA64_XEN_HYPERVISOR_H */ diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h index 19c2ae1d878a..c53a47611208 100644 --- a/arch/ia64/include/asm/xen/inst.h +++ b/arch/ia64/include/asm/xen/inst.h @@ -33,6 +33,9 @@ #define __paravirt_work_processed_syscall_target \ xen_work_processed_syscall +#define paravirt_fsyscall_table xen_fsyscall_table +#define paravirt_fsys_bubble_down xen_fsys_bubble_down + #define MOV_FROM_IFA(reg) \ movl reg = XSI_IFA; \ ;; \ @@ -110,6 +113,27 @@ .endm #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob +/* assuming ar.itc is read with interrupt disabled. 
*/ +#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ +(pred) movl clob = XSI_ITC_OFFSET; \ + ;; \ +(pred) ld8 clob = [clob]; \ +(pred) mov reg = ar.itc; \ + ;; \ +(pred) add reg = reg, clob; \ + ;; \ +(pred) movl clob = XSI_ITC_LAST; \ + ;; \ +(pred) ld8 clob = [clob]; \ + ;; \ +(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ + ;; \ +(pred_clob) add reg = 1, clob; \ + ;; \ +(pred) movl clob = XSI_ITC_LAST; \ + ;; \ +(pred) st8 [clob] = reg + #define MOV_TO_IFA(reg, clob) \ movl clob = XSI_IFA; \ @@ -362,6 +386,10 @@ #define RSM_PSR_DT \ XEN_HYPER_RSM_PSR_DT +#define RSM_PSR_BE_I(clob0, clob1) \ + RSM_PSR_I(p0, clob0, clob1); \ + rum psr.be + #define SSM_PSR_DT_AND_SRLZ_I \ XEN_HYPER_SSM_PSR_DT diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h index f00fab40854d..e951e740bdf2 100644 --- a/arch/ia64/include/asm/xen/interface.h +++ b/arch/ia64/include/asm/xen/interface.h @@ -209,6 +209,15 @@ struct mapped_regs { unsigned long krs[8]; /* kernel registers */ unsigned long tmp[16]; /* temp registers (e.g. for hyperprivops) */ + + /* itc paravirtualization + * vAR.ITC = mAR.ITC + itc_offset + * itc_last is one which was lastly passed to + * the guest OS in order to prevent it from + * going backwords. + */ + unsigned long itc_offset; + unsigned long itc_last; }; }; }; diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h index 4d92d9bbda7b..c57fa910f2c9 100644 --- a/arch/ia64/include/asm/xen/minstate.h +++ b/arch/ia64/include/asm/xen/minstate.h @@ -1,3 +1,12 @@ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +/* read ar.itc in advance, and use it before leaving bank 0 */ +#define XEN_ACCOUNT_GET_STAMP \ + MOV_FROM_ITC(pUStk, p6, r20, r2); +#else +#define XEN_ACCOUNT_GET_STAMP +#endif + /* * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves * the minimum state necessary that allows us to turn psr.ic back @@ -123,7 +132,7 @@ ;; \ .mem.offset 0,0; st8.spill [r16]=r2,16; \ .mem.offset 8,0; st8.spill [r17]=r3,16; \ - ACCOUNT_GET_STAMP \ + XEN_ACCOUNT_GET_STAMP \ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ ;; \ EXTRA; \ diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h new file mode 100644 index 000000000000..eae944e88846 --- /dev/null +++ b/arch/ia64/include/asm/xen/patchlist.h @@ -0,0 +1,38 @@ +/****************************************************************************** + * arch/ia64/include/asm/xen/patchlist.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
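/*
 * [Illustrative sketch, not part of the patch] The Xen MOV_FROM_ITC
 * sequence above computes vAR.ITC = mAR.ITC + itc_offset and then clamps
 * the result against itc_last, so the virtual cycle counter handed to the
 * guest never appears to go backwards.  The same logic in C; read_hw_itc()
 * is a stand-in for the "mov reg = ar.itc" read, and the struct mirrors
 * the itc_offset/itc_last fields added to mapped_regs.
 */
#include <stdint.h>

struct vitc_state {
	uint64_t itc_offset;	/* XSI_ITC_OFFSET: added to the machine ITC */
	uint64_t itc_last;	/* XSI_ITC_LAST: last value given to the guest */
};

extern uint64_t read_hw_itc(void);	/* stand-in for reading ar.itc */

/* Assumes the caller runs with interrupts disabled, as the comment notes. */
static uint64_t read_virtual_itc(struct vitc_state *s)
{
	uint64_t v = read_hw_itc() + s->itc_offset;	/* add reg = reg, clob */

	if (s->itc_last >= v)		/* cmp.geu.unc pred_clob, p0 = clob, reg */
		v = s->itc_last + 1;	/* (pred_clob) add reg = 1, clob */
	s->itc_last = v;		/* st8 [XSI_ITC_LAST] = reg */
	return v;
}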
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define __paravirt_start_gate_fsyscall_patchlist \ + __xen_start_gate_fsyscall_patchlist +#define __paravirt_end_gate_fsyscall_patchlist \ + __xen_end_gate_fsyscall_patchlist +#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ + __xen_start_gate_brl_fsys_bubble_down_patchlist +#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ + __xen_end_gate_brl_fsys_bubble_down_patchlist +#define __paravirt_start_gate_vtop_patchlist \ + __xen_start_gate_vtop_patchlist +#define __paravirt_end_gate_vtop_patchlist \ + __xen_end_gate_vtop_patchlist +#define __paravirt_start_gate_mckinley_e9_patchlist \ + __xen_start_gate_mckinley_e9_patchlist +#define __paravirt_end_gate_mckinley_e9_patchlist \ + __xen_end_gate_mckinley_e9_patchlist diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h index 71ec7546e100..fb4ec5e0b066 100644 --- a/arch/ia64/include/asm/xen/privop.h +++ b/arch/ia64/include/asm/xen/privop.h @@ -55,6 +55,8 @@ #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) +#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) +#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) #endif #ifndef __ASSEMBLY__ @@ -67,7 +69,7 @@ * may have different semantics depending on whether they are executed * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't * be allowed to execute directly, lest incorrect semantics result. */ -extern void xen_fc(unsigned long addr); +extern void xen_fc(void *addr); extern unsigned long xen_thash(unsigned long addr); /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" @@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr); extern unsigned long xen_get_cpuid(int index); extern unsigned long xen_get_pmd(int index); +#ifndef ASM_SUPPORTED extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ +#endif /************************************************/ /* Instructions paravirtualized for performance */ @@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ #define xen_get_virtual_pend() \ (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) +#ifndef ASM_SUPPORTED /* Although all privileged operations can be left to trap and will * be properly handled by Xen, some are frequent enough that we use * hyperprivops for performance. 
*/ @@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, unsigned long val4); extern void xen_set_kr(unsigned long index, unsigned long val); extern void xen_ptcga(unsigned long addr, unsigned long size); +#endif /* !ASM_SUPPORTED */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index f2778f2c4fd9..5628e9a990a6 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -5,7 +5,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ - irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ + irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ unwind.o mca.o mca_asm.o topology.o dma-mapping.o @@ -36,7 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o mca_recovery-y += mca_drv.o mca_drv_asm.o obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o -obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o +obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ + paravirt_patch.o obj-$(CONFIG_IA64_ESI) += esi.o ifneq ($(CONFIG_IA64_ESI),) @@ -45,35 +46,13 @@ endif obj-$(CONFIG_DMAR) += pci-dma.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o -# The gate DSO image is built using a special linker script. -targets += gate.so gate-syms.o - -extra-y += gate.so gate-syms.o gate.lds gate.o - # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 -CPPFLAGS_gate.lds := -P -C -U$(ARCH) - -quiet_cmd_gate = GATE $@ - cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ - -GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ - $(call ld-option, -Wl$(comma)--hash-style=sysv) -$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE - $(call if_changed,gate) - -$(obj)/built-in.o: $(obj)/gate-syms.o -$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o - -GATECFLAGS_gate-syms.o = -r -$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE - $(call if_changed,gate) - -# gate-data.o contains the gate DSO image as data in section .data.gate. -# We must build gate.so before we can assemble it. -# Note: kbuild does not track this dependency due to usage of .incbin -$(obj)/gate-data.o: $(obj)/gate.so +# The gate DSO image is built using a special linker script. +include $(srctree)/arch/ia64/kernel/Makefile.gate +# tell compiled for native +CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config define sed-y @@ -109,9 +88,9 @@ include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s clean-files += $(objtree)/include/asm-ia64/nr-irqs.h # -# native ivt.S and entry.S +# native ivt.S, entry.S and fsys.S # -ASM_PARAVIRT_OBJS = ivt.o entry.o +ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o define paravirtualized_native AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate new file mode 100644 index 000000000000..1d87f84069b3 --- /dev/null +++ b/arch/ia64/kernel/Makefile.gate @@ -0,0 +1,27 @@ +# The gate DSO image is built using a special linker script. 
+ +targets += gate.so gate-syms.o + +extra-y += gate.so gate-syms.o gate.lds gate.o + +CPPFLAGS_gate.lds := -P -C -U$(ARCH) + +quiet_cmd_gate = GATE $@ + cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ + +GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ + $(call ld-option, -Wl$(comma)--hash-style=sysv) +$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE + $(call if_changed,gate) + +$(obj)/built-in.o: $(obj)/gate-syms.o +$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o + +GATECFLAGS_gate-syms.o = -r +$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE + $(call if_changed,gate) + +# gate-data.o contains the gate DSO image as data in section .data.gate. +# We must build gate.so before we can assemble it. +# Note: kbuild does not track this dependency due to usage of .incbin +$(obj)/gate-data.o: $(obj)/gate.so diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index bdef2ce38c8b..5510317db37b 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -890,7 +890,7 @@ __init void prefill_possible_map(void) possible, max((possible - available_cpus), 0)); for (i = 0; i < possible; i++) - cpu_set(i, cpu_possible_map); + set_cpu_possible(i, true); } int acpi_map_lsapic(acpi_handle handle, int *pcpu) @@ -928,9 +928,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu) buffer.length = ACPI_ALLOCATE_BUFFER; buffer.pointer = NULL; - cpus_complement(tmp_map, cpu_present_map); - cpu = first_cpu(tmp_map); - if (cpu >= NR_CPUS) + cpumask_complement(&tmp_map, cpu_present_mask); + cpu = cpumask_first(&tmp_map); + if (cpu >= nr_cpu_ids) return -EINVAL; acpi_map_cpu2node(handle, cpu, physid); diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 742dbb1d5a4f..af5650169043 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -316,5 +316,7 @@ void foo(void) DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); + DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); + DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); #endif /* CONFIG_XEN */ } diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index efaff15d8cf1..7ef80e8161ce 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -456,6 +456,7 @@ efi_map_pal_code (void) GRANULEROUNDDOWN((unsigned long) pal_vaddr), pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT); + paravirt_dv_serialize_data(); ia64_set_psr(psr); /* restore psr */ } diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index e5341e2c1175..ccfdeee9d89f 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -735,7 +735,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall) __paravirt_work_processed_syscall: #ifdef CONFIG_VIRT_CPU_ACCOUNTING adds r2=PT(LOADRS)+16,r12 -(pUStk) mov.m r22=ar.itc // fetch time at leave + MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 ;; (p6) ld4 r31=[r18] // load current_thread_info()->flags @@ -984,7 +984,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel) #ifdef CONFIG_VIRT_CPU_ACCOUNTING .pred.rel.mutex pUStk,pKStk MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled -(pUStk) mov.m r22=ar.itc // M fetch time at leave + MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave nop.i 0 ;; #else diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index c1625c7e1779..3567d54f8cee 100644 --- 
a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -25,6 +25,7 @@ #include <asm/unistd.h> #include "entry.h" +#include "paravirt_inst.h" /* * See Documentation/ia64/fsys.txt for details on fsyscalls. @@ -279,7 +280,7 @@ ENTRY(fsys_gettimeofday) (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control ;; .pred.rel.mutex p8,p9 -(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! + MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!! (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. (p13) ld8 r25 = [r19] // get itc_lastcycle value ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec @@ -418,7 +419,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1)) ;; - rsm psr.i // mask interrupt delivery + RSM_PSR_I(p0, r18, r19) // mask interrupt delivery mov ar.ccv=0 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP @@ -491,7 +492,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set #ifdef CONFIG_SMP st4.rel [r31]=r0 // release the lock #endif - ssm psr.i + SSM_PSR_I(p0, p9, r31) ;; srlz.d // ensure psr.i is set again @@ -513,7 +514,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3) #ifdef CONFIG_SMP st4.rel [r31]=r0 // release the lock #endif - ssm psr.i + SSM_PSR_I(p0, p9, r17) ;; srlz.d br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall @@ -521,7 +522,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3) #ifdef CONFIG_SMP .lock_contention: /* Rather than spinning here, fall back on doing a heavy-weight syscall. */ - ssm psr.i + SSM_PSR_I(p0, p9, r17) ;; srlz.d br.sptk.many fsys_fallback_syscall @@ -592,17 +593,17 @@ ENTRY(fsys_fallback_syscall) adds r17=-1024,r15 movl r14=sys_call_table ;; - rsm psr.i + RSM_PSR_I(p0, r26, r27) shladd r18=r17,3,r14 ;; ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point - mov r29=psr // read psr (12 cyc load latency) + MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency) mov r27=ar.rsc mov r21=ar.fpsr mov r26=ar.pfs END(fsys_fallback_syscall) /* FALL THROUGH */ -GLOBAL_ENTRY(fsys_bubble_down) +GLOBAL_ENTRY(paravirt_fsys_bubble_down) .prologue .altrp b6 .body @@ -640,7 +641,7 @@ GLOBAL_ENTRY(fsys_bubble_down) * * PSR.BE : already is turned off in __kernel_syscall_via_epc() * PSR.AC : don't care (kernel normally turns PSR.AC on) - * PSR.I : already turned off by the time fsys_bubble_down gets + * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets * invoked * PSR.DFL: always 0 (kernel never turns it on) * PSR.DFH: don't care --- kernel never touches f32-f127 on its own @@ -650,7 +651,7 @@ GLOBAL_ENTRY(fsys_bubble_down) * PSR.DB : don't care --- kernel never enables kernel-level * breakpoints * PSR.TB : must be 0 already; if it wasn't zero on entry to - * __kernel_syscall_via_epc, the branch to fsys_bubble_down + * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down * will trigger a taken branch; the taken-trap-handler then * converts the syscall into a break-based system-call. 
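/*
 * [Illustrative sketch, not part of the patch] fsys.S above now emits
 * paravirt_fsyscall_table and paravirt_fsys_bubble_down; the native and
 * Xen inst.h headers #define those names to ia64_native_* or xen_*
 * respectively, and the C side reaches them through the pv_fsys_data
 * structure declared earlier in this patch.  A schematic of that
 * indirection; the static initialisation here stands in for the real
 * boot-time setup.
 */
struct pv_fsys_data {
	unsigned long *fsyscall_table;
	void *fsys_bubble_down;
};

extern unsigned long ia64_native_fsyscall_table[];	/* emitted by fsys.S built for native */
extern char ia64_native_fsys_bubble_down[];

static struct pv_fsys_data pv_fsys_data = {
	.fsyscall_table   = ia64_native_fsyscall_table,
	.fsys_bubble_down = ia64_native_fsys_bubble_down,
	/* a Xen guest would instead install xen_fsyscall_table and
	 * xen_fsys_bubble_down before the gate page is patched */
};

unsigned long *paravirt_get_fsyscall_table(void)
{
	return pv_fsys_data.fsyscall_table;
}

char *paravirt_get_fsys_bubble_down(void)
{
	return pv_fsys_data.fsys_bubble_down;
}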
*/ @@ -683,7 +684,7 @@ GLOBAL_ENTRY(fsys_bubble_down) ;; mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 #ifdef CONFIG_VIRT_CPU_ACCOUNTING - mov.m r30=ar.itc // M get cycle for accounting + MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting #else nop.m 0 #endif @@ -734,21 +735,21 @@ GLOBAL_ENTRY(fsys_bubble_down) mov rp=r14 // I0 set the real return addr and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A ;; - ssm psr.i // M2 we're on kernel stacks now, reenable irqs + SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs cmp.eq p8,p0=r3,r0 // A (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT nop.m 0 (p8) br.call.sptk.many b6=b6 // B (ignore return address) br.cond.spnt ia64_trace_syscall // B -END(fsys_bubble_down) +END(paravirt_fsys_bubble_down) .rodata .align 8 - .globl fsyscall_table + .globl paravirt_fsyscall_table - data8 fsys_bubble_down -fsyscall_table: + data8 paravirt_fsys_bubble_down +paravirt_fsyscall_table: data8 fsys_ni_syscall data8 0 // exit // 1025 data8 0 // read @@ -1033,4 +1034,4 @@ fsyscall_table: // fill in zeros for the remaining entries .zero: - .space fsyscall_table + 8*NR_syscalls - .zero, 0 + .space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0 diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 74b1ccce4e84..cf5e0a105e16 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S @@ -13,6 +13,7 @@ #include <asm/sigcontext.h> #include <asm/system.h> #include <asm/unistd.h> +#include "paravirt_inst.h" /* * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, @@ -48,87 +49,6 @@ GLOBAL_ENTRY(__kernel_syscall_via_break) } END(__kernel_syscall_via_break) -/* - * On entry: - * r11 = saved ar.pfs - * r15 = system call # - * b0 = saved return address - * b6 = return address - * On exit: - * r11 = saved ar.pfs - * r15 = system call # - * b0 = saved return address - * all other "scratch" registers: undefined - * all "preserved" registers: same as on entry - */ - -GLOBAL_ENTRY(__kernel_syscall_via_epc) - .prologue - .altrp b6 - .body -{ - /* - * Note: the kernel cannot assume that the first two instructions in this - * bundle get executed. The remaining code must be safe even if - * they do not get executed. - */ - adds r17=-1024,r15 // A - mov r10=0 // A default to successful syscall execution - epc // B causes split-issue -} - ;; - rsm psr.be | psr.i // M2 (5 cyc to srlz.d) - LOAD_FSYSCALL_TABLE(r14) // X - ;; - mov r16=IA64_KR(CURRENT) // M2 (12 cyc) - shladd r18=r17,3,r14 // A - mov r19=NR_syscalls-1 // A - ;; - lfetch [r18] // M0|1 - mov r29=psr // M2 (12 cyc) - // If r17 is a NaT, p6 will be zero - cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? - ;; - mov r21=ar.fpsr // M2 (12 cyc) - tnat.nz p10,p9=r15 // I0 - mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) - ;; - srlz.d // M0 (forces split-issue) ensure PSR.BE==0 -(p6) ld8 r18=[r18] // M0|1 - nop.i 0 - ;; - nop.m 0 -(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) - nop.i 0 - ;; -(p8) ssm psr.i -(p6) mov b7=r18 // I0 -(p8) br.dptk.many b7 // B - - mov r27=ar.rsc // M2 (12 cyc) -/* - * brl.cond doesn't work as intended because the linker would convert this branch - * into a branch to a PLT. Perhaps there will be a way to avoid this with some - * future version of the linker. In the meantime, we just use an indirect branch - * instead. 
- */ -#ifdef CONFIG_ITANIUM -(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry - ;; -(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down - ;; -(p6) mov b7=r14 -(p6) br.sptk.many b7 -#else - BRL_COND_FSYS_BUBBLE_DOWN(p6) -#endif - ssm psr.i - mov r10=-1 -(p10) mov r8=EINVAL -(p9) mov r8=ENOSYS - FSYS_RETURN -END(__kernel_syscall_via_epc) - # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) @@ -374,3 +294,92 @@ restore_rbs: // invala not necessary as that will happen when returning to user-mode br.cond.sptk back_from_restore_rbs END(__kernel_sigtramp) + +/* + * On entry: + * r11 = saved ar.pfs + * r15 = system call # + * b0 = saved return address + * b6 = return address + * On exit: + * r11 = saved ar.pfs + * r15 = system call # + * b0 = saved return address + * all other "scratch" registers: undefined + * all "preserved" registers: same as on entry + */ + +GLOBAL_ENTRY(__kernel_syscall_via_epc) + .prologue + .altrp b6 + .body +{ + /* + * Note: the kernel cannot assume that the first two instructions in this + * bundle get executed. The remaining code must be safe even if + * they do not get executed. + */ + adds r17=-1024,r15 // A + mov r10=0 // A default to successful syscall execution + epc // B causes split-issue +} + ;; + RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d) + LOAD_FSYSCALL_TABLE(r14) // X + ;; + mov r16=IA64_KR(CURRENT) // M2 (12 cyc) + shladd r18=r17,3,r14 // A + mov r19=NR_syscalls-1 // A + ;; + lfetch [r18] // M0|1 + MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc) + // If r17 is a NaT, p6 will be zero + cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? + ;; + mov r21=ar.fpsr // M2 (12 cyc) + tnat.nz p10,p9=r15 // I0 + mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) + ;; + srlz.d // M0 (forces split-issue) ensure PSR.BE==0 +(p6) ld8 r18=[r18] // M0|1 + nop.i 0 + ;; + nop.m 0 +(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) + nop.i 0 + ;; + SSM_PSR_I(p8, p14, r25) +(p6) mov b7=r18 // I0 +(p8) br.dptk.many b7 // B + + mov r27=ar.rsc // M2 (12 cyc) +/* + * brl.cond doesn't work as intended because the linker would convert this branch + * into a branch to a PLT. Perhaps there will be a way to avoid this with some + * future version of the linker. In the meantime, we just use an indirect branch + * instead. + */ +#ifdef CONFIG_ITANIUM +(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry + ;; +(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down + ;; +(p6) mov b7=r14 +(p6) br.sptk.many b7 +#else + BRL_COND_FSYS_BUBBLE_DOWN(p6) +#endif + SSM_PSR_I(p0, p14, r10) + mov r10=-1 +(p10) mov r8=EINVAL +(p9) mov r8=ENOSYS + FSYS_RETURN + +#ifdef CONFIG_PARAVIRT + /* + * padd to make the size of this symbol constant + * independent of paravirtualization. + */ + .align PAGE_SIZE / 8 +#endif +END(__kernel_syscall_via_epc) diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S index 3cb1abc00e24..88c64ed47c36 100644 --- a/arch/ia64/kernel/gate.lds.S +++ b/arch/ia64/kernel/gate.lds.S @@ -7,6 +7,7 @@ #include <asm/system.h> +#include "paravirt_patchlist.h" SECTIONS { @@ -33,21 +34,21 @@ SECTIONS . 
= GATE_ADDR + 0x600; .data.patch : { - __start_gate_mckinley_e9_patchlist = .; + __paravirt_start_gate_mckinley_e9_patchlist = .; *(.data.patch.mckinley_e9) - __end_gate_mckinley_e9_patchlist = .; + __paravirt_end_gate_mckinley_e9_patchlist = .; - __start_gate_vtop_patchlist = .; + __paravirt_start_gate_vtop_patchlist = .; *(.data.patch.vtop) - __end_gate_vtop_patchlist = .; + __paravirt_end_gate_vtop_patchlist = .; - __start_gate_fsyscall_patchlist = .; + __paravirt_start_gate_fsyscall_patchlist = .; *(.data.patch.fsyscall_table) - __end_gate_fsyscall_patchlist = .; + __paravirt_end_gate_fsyscall_patchlist = .; - __start_gate_brl_fsys_bubble_down_patchlist = .; + __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; *(.data.patch.brl_fsys_bubble_down) - __end_gate_brl_fsys_bubble_down_patchlist = .; + __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; } :readable .IA_64.unwind_info : { *(.IA_64.unwind_info*) } diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 59301c472800..23f846de62d5 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -1050,7 +1050,7 @@ END(ia64_delay_loop) * except that the multiplication and the shift are done with 128-bit * intermediate precision so that we can produce a full 64-bit result. */ -GLOBAL_ENTRY(sched_clock) +GLOBAL_ENTRY(ia64_native_sched_clock) addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 mov.m r9=ar.itc // fetch cycle-counter (35 cyc) ;; @@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock) ;; shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT br.ret.sptk.many rp -END(sched_clock) +END(ia64_native_sched_clock) +#ifndef CONFIG_PARAVIRT + //unsigned long long + //sched_clock(void) __attribute__((alias("ia64_native_sched_clock"))); + .global sched_clock +sched_clock = ia64_native_sched_clock +#endif #ifdef CONFIG_VIRT_CPU_ACCOUNTING GLOBAL_ENTRY(cycle_to_cputime) diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index f675d8e33853..ec9a5fdfa1b9 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -804,7 +804,7 @@ ENTRY(break_fault) /////////////////////////////////////////////////////////////////////// st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag #ifdef CONFIG_VIRT_CPU_ACCOUNTING - mov.m r30=ar.itc // M get cycle for accounting + MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting #else mov b6=r30 // I0 setup syscall handler branch reg early #endif diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index bab1de2d2f6a..8f33a8840422 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg) ia64_mca_cmc_int_handler(cmc_irq, arg); - for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); + cpuid = cpumask_next(cpuid+1, cpu_online_mask); - if (cpuid < NR_CPUS) { + if (cpuid < nr_cpu_ids) { platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); } else { /* If no log record, switch out of polling mode */ @@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg) ia64_mca_cpe_int_handler(cpe_irq, arg); - for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); + cpuid = cpumask_next(cpuid+1, cpu_online_mask); if (cpuid < NR_CPUS) { platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index aaa7d901521f..da3b0cf495a3 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -446,6 +446,14 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, 
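/*
 * [Illustrative sketch, not part of the patch] ia64_native_sched_clock
 * above multiplies the cycle counter by a per-CPU nsec-per-cycle factor
 * with 128-bit intermediate precision and then shifts the product down,
 * so a full 64-bit nanosecond result can be produced; the shrp at the end
 * of the assembly extracts that 64-bit window from the 128-bit product.
 * The same arithmetic in C using GCC's unsigned __int128; the shift width
 * and the factor below are placeholders, the kernel computes the real
 * ones at boot.
 */
#include <stdint.h>

#define NSEC_PER_CYC_SHIFT	30	/* placeholder fixed-point shift */

static uint64_t nsec_per_cyc = 1ULL << NSEC_PER_CYC_SHIFT;	/* ~1 ns per cycle */

static uint64_t cycles_to_ns(uint64_t itc_cycles)
{
	/* widen to 128 bits, multiply, keep bits [SHIFT, SHIFT+63] */
	unsigned __int128 prod = (unsigned __int128)itc_cycles * nsec_per_cyc;

	return (uint64_t)(prod >> NSEC_PER_CYC_SHIFT);
}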
char *secstrings, mod->arch.opd = s; else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) mod->arch.unwind = s; +#ifdef CONFIG_PARAVIRT + else if (strcmp(".paravirt_bundles", + secstrings + s->sh_name) == 0) + mod->arch.paravirt_bundles = s; + else if (strcmp(".paravirt_insts", + secstrings + s->sh_name) == 0) + mod->arch.paravirt_insts = s; +#endif if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { printk(KERN_ERR "%s: sections missing\n", mod->name); @@ -525,8 +533,7 @@ get_ltoff (struct module *mod, uint64_t value, int *okp) goto found; /* Not enough GOT entries? */ - if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)) - BUG(); + BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)); e->val = value; ++mod->arch.next_got_entry; @@ -921,6 +928,30 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo DEBUGP("%s: init: entry=%p\n", __func__, mod->init); if (mod->arch.unwind) register_unwind_table(mod); +#ifdef CONFIG_PARAVIRT + if (mod->arch.paravirt_bundles) { + struct paravirt_patch_site_bundle *start = + (struct paravirt_patch_site_bundle *) + mod->arch.paravirt_bundles->sh_addr; + struct paravirt_patch_site_bundle *end = + (struct paravirt_patch_site_bundle *) + (mod->arch.paravirt_bundles->sh_addr + + mod->arch.paravirt_bundles->sh_size); + + paravirt_patch_apply_bundle(start, end); + } + if (mod->arch.paravirt_insts) { + struct paravirt_patch_site_inst *start = + (struct paravirt_patch_site_inst *) + mod->arch.paravirt_insts->sh_addr; + struct paravirt_patch_site_inst *end = + (struct paravirt_patch_site_inst *) + (mod->arch.paravirt_insts->sh_addr + + mod->arch.paravirt_insts->sh_size); + + paravirt_patch_apply_inst(start, end); + } +#endif return 0; } diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index e5c57f413ca2..a4f19c70aadd 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -1002,8 +1002,6 @@ create_palinfo_proc_entries(unsigned int cpu) *pdir = create_proc_read_entry( palinfo_entries[j].name, 0, cpu_dir, palinfo_read_entry, (void *)f.value); - if (*pdir) - (*pdir)->owner = THIS_MODULE; pdir++; } } diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index 9f14c16f6369..a21d7bb9c69c 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c @@ -46,13 +46,23 @@ struct pv_info pv_info = { * initialization hooks. */ -struct pv_init_ops pv_init_ops; +static void __init +ia64_native_patch_branch(unsigned long tag, unsigned long type); + +struct pv_init_ops pv_init_ops = +{ +#ifdef ASM_SUPPORTED + .patch_bundle = ia64_native_patch_bundle, +#endif + .patch_branch = ia64_native_patch_branch, +}; /*************************************************************************** * pv_cpu_ops * intrinsics hooks. 
*/ +#ifndef ASM_SUPPORTED /* ia64_native_xxx are macros so that we have to make them real functions */ #define DEFINE_VOID_FUNC1(name) \ @@ -60,7 +70,14 @@ struct pv_init_ops pv_init_ops; ia64_native_ ## name ## _func(unsigned long arg) \ { \ ia64_native_ ## name(arg); \ - } \ + } + +#define DEFINE_VOID_FUNC1_VOID(name) \ + static void \ + ia64_native_ ## name ## _func(void *arg) \ + { \ + ia64_native_ ## name(arg); \ + } #define DEFINE_VOID_FUNC2(name) \ static void \ @@ -68,7 +85,7 @@ struct pv_init_ops pv_init_ops; unsigned long arg1) \ { \ ia64_native_ ## name(arg0, arg1); \ - } \ + } #define DEFINE_FUNC0(name) \ static unsigned long \ @@ -84,7 +101,7 @@ struct pv_init_ops pv_init_ops; return ia64_native_ ## name(arg); \ } \ -DEFINE_VOID_FUNC1(fc); +DEFINE_VOID_FUNC1_VOID(fc); DEFINE_VOID_FUNC1(intrin_local_irq_restore); DEFINE_VOID_FUNC2(ptcga); @@ -274,6 +291,266 @@ ia64_native_setreg_func(int regnum, unsigned long val) break; } } +#else + +#define __DEFINE_FUNC(name, code) \ + extern const char ia64_native_ ## name ## _direct_start[]; \ + extern const char ia64_native_ ## name ## _direct_end[]; \ + asm (".align 32\n" \ + ".proc ia64_native_" #name "_func\n" \ + "ia64_native_" #name "_func:\n" \ + "ia64_native_" #name "_direct_start:\n" \ + code \ + "ia64_native_" #name "_direct_end:\n" \ + "br.cond.sptk.many b6\n" \ + ".endp ia64_native_" #name "_func\n") + +#define DEFINE_VOID_FUNC0(name, code) \ + extern void \ + ia64_native_ ## name ## _func(void); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_VOID_FUNC1(name, code) \ + extern void \ + ia64_native_ ## name ## _func(unsigned long arg); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_VOID_FUNC1_VOID(name, code) \ + extern void \ + ia64_native_ ## name ## _func(void *arg); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_VOID_FUNC2(name, code) \ + extern void \ + ia64_native_ ## name ## _func(unsigned long arg0, \ + unsigned long arg1); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_FUNC0(name, code) \ + extern unsigned long \ + ia64_native_ ## name ## _func(void); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_FUNC1(name, type, code) \ + extern unsigned long \ + ia64_native_ ## name ## _func(type arg); \ + __DEFINE_FUNC(name, code) + +DEFINE_VOID_FUNC1_VOID(fc, + "fc r8\n"); +DEFINE_VOID_FUNC1(intrin_local_irq_restore, + ";;\n" + " cmp.ne p6, p7 = r8, r0\n" + ";;\n" + "(p6) ssm psr.i\n" + "(p7) rsm psr.i\n" + ";;\n" + "(p6) srlz.d\n"); + +DEFINE_VOID_FUNC2(ptcga, + "ptc.ga r8, r9\n"); +DEFINE_VOID_FUNC2(set_rr, + "mov rr[r8] = r9\n"); + +/* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */ +DEFINE_FUNC0(get_psr_i, + "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n" + "mov r8 = psr\n" + ";;\n" + "and r8 = r2, r8\n"); + +DEFINE_FUNC1(thash, unsigned long, + "thash r8 = r8\n"); +DEFINE_FUNC1(get_cpuid, int, + "mov r8 = cpuid[r8]\n"); +DEFINE_FUNC1(get_pmd, int, + "mov r8 = pmd[r8]\n"); +DEFINE_FUNC1(get_rr, unsigned long, + "mov r8 = rr[r8]\n"); + +DEFINE_VOID_FUNC0(ssm_i, + "ssm psr.i\n"); +DEFINE_VOID_FUNC0(rsm_i, + "rsm psr.i\n"); + +extern void +ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1, + unsigned long val2, unsigned long val3, + unsigned long val4); +__DEFINE_FUNC(set_rr0_to_rr4, + "mov rr[r0] = r8\n" + "movl r2 = 0x2000000000000000\n" + ";;\n" + "mov rr[r2] = r9\n" + "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */ + ";;\n" + "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */ + "mov rr[r3] = r10\n" + ";;\n" + "mov rr[r2] = r11\n" + "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */ + 
";;\n" + "mov rr[r3] = r14\n"); + +extern unsigned long ia64_native_getreg_func(int regnum); +asm(".global ia64_native_getreg_func\n"); +#define __DEFINE_GET_REG(id, reg) \ + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ + ";;\n" \ + "cmp.eq p6, p0 = r2, r8\n" \ + ";;\n" \ + "(p6) mov r8 = " #reg "\n" \ + "(p6) br.cond.sptk.many b6\n" \ + ";;\n" +#define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg) +#define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg) + +__DEFINE_FUNC(getreg, + __DEFINE_GET_REG(GP, gp) + /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */ + __DEFINE_GET_REG(PSR, psr) + __DEFINE_GET_REG(TP, tp) + __DEFINE_GET_REG(SP, sp) + + __DEFINE_GET_REG(AR_KR0, ar0) + __DEFINE_GET_REG(AR_KR1, ar1) + __DEFINE_GET_REG(AR_KR2, ar2) + __DEFINE_GET_REG(AR_KR3, ar3) + __DEFINE_GET_REG(AR_KR4, ar4) + __DEFINE_GET_REG(AR_KR5, ar5) + __DEFINE_GET_REG(AR_KR6, ar6) + __DEFINE_GET_REG(AR_KR7, ar7) + __DEFINE_GET_AR(RSC, rsc) + __DEFINE_GET_AR(BSP, bsp) + __DEFINE_GET_AR(BSPSTORE, bspstore) + __DEFINE_GET_AR(RNAT, rnat) + __DEFINE_GET_AR(FCR, fcr) + __DEFINE_GET_AR(EFLAG, eflag) + __DEFINE_GET_AR(CSD, csd) + __DEFINE_GET_AR(SSD, ssd) + __DEFINE_GET_REG(AR_CFLAG, ar27) + __DEFINE_GET_AR(FSR, fsr) + __DEFINE_GET_AR(FIR, fir) + __DEFINE_GET_AR(FDR, fdr) + __DEFINE_GET_AR(CCV, ccv) + __DEFINE_GET_AR(UNAT, unat) + __DEFINE_GET_AR(FPSR, fpsr) + __DEFINE_GET_AR(ITC, itc) + __DEFINE_GET_AR(PFS, pfs) + __DEFINE_GET_AR(LC, lc) + __DEFINE_GET_AR(EC, ec) + + __DEFINE_GET_CR(DCR, dcr) + __DEFINE_GET_CR(ITM, itm) + __DEFINE_GET_CR(IVA, iva) + __DEFINE_GET_CR(PTA, pta) + __DEFINE_GET_CR(IPSR, ipsr) + __DEFINE_GET_CR(ISR, isr) + __DEFINE_GET_CR(IIP, iip) + __DEFINE_GET_CR(IFA, ifa) + __DEFINE_GET_CR(ITIR, itir) + __DEFINE_GET_CR(IIPA, iipa) + __DEFINE_GET_CR(IFS, ifs) + __DEFINE_GET_CR(IIM, iim) + __DEFINE_GET_CR(IHA, iha) + __DEFINE_GET_CR(LID, lid) + __DEFINE_GET_CR(IVR, ivr) + __DEFINE_GET_CR(TPR, tpr) + __DEFINE_GET_CR(EOI, eoi) + __DEFINE_GET_CR(IRR0, irr0) + __DEFINE_GET_CR(IRR1, irr1) + __DEFINE_GET_CR(IRR2, irr2) + __DEFINE_GET_CR(IRR3, irr3) + __DEFINE_GET_CR(ITV, itv) + __DEFINE_GET_CR(PMV, pmv) + __DEFINE_GET_CR(CMCV, cmcv) + __DEFINE_GET_CR(LRR0, lrr0) + __DEFINE_GET_CR(LRR1, lrr1) + + "mov r8 = -1\n" /* unsupported case */ + ); + +extern void ia64_native_setreg_func(int regnum, unsigned long val); +asm(".global ia64_native_setreg_func\n"); +#define __DEFINE_SET_REG(id, reg) \ + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ + ";;\n" \ + "cmp.eq p6, p0 = r2, r9\n" \ + ";;\n" \ + "(p6) mov " #reg " = r8\n" \ + "(p6) br.cond.sptk.many b6\n" \ + ";;\n" +#define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg) +#define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg) +__DEFINE_FUNC(setreg, + "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n" + ";;\n" + "cmp.eq p6, p0 = r2, r9\n" + ";;\n" + "(p6) mov psr.l = r8\n" +#ifdef HAVE_SERIALIZE_DIRECTIVE + ".serialize.data\n" +#endif + "(p6) br.cond.sptk.many b6\n" + __DEFINE_SET_REG(GP, gp) + __DEFINE_SET_REG(SP, sp) + + __DEFINE_SET_REG(AR_KR0, ar0) + __DEFINE_SET_REG(AR_KR1, ar1) + __DEFINE_SET_REG(AR_KR2, ar2) + __DEFINE_SET_REG(AR_KR3, ar3) + __DEFINE_SET_REG(AR_KR4, ar4) + __DEFINE_SET_REG(AR_KR5, ar5) + __DEFINE_SET_REG(AR_KR6, ar6) + __DEFINE_SET_REG(AR_KR7, ar7) + __DEFINE_SET_AR(RSC, rsc) + __DEFINE_SET_AR(BSP, bsp) + __DEFINE_SET_AR(BSPSTORE, bspstore) + __DEFINE_SET_AR(RNAT, rnat) + __DEFINE_SET_AR(FCR, fcr) + __DEFINE_SET_AR(EFLAG, eflag) + __DEFINE_SET_AR(CSD, csd) + 
__DEFINE_SET_AR(SSD, ssd) + __DEFINE_SET_REG(AR_CFLAG, ar27) + __DEFINE_SET_AR(FSR, fsr) + __DEFINE_SET_AR(FIR, fir) + __DEFINE_SET_AR(FDR, fdr) + __DEFINE_SET_AR(CCV, ccv) + __DEFINE_SET_AR(UNAT, unat) + __DEFINE_SET_AR(FPSR, fpsr) + __DEFINE_SET_AR(ITC, itc) + __DEFINE_SET_AR(PFS, pfs) + __DEFINE_SET_AR(LC, lc) + __DEFINE_SET_AR(EC, ec) + + __DEFINE_SET_CR(DCR, dcr) + __DEFINE_SET_CR(ITM, itm) + __DEFINE_SET_CR(IVA, iva) + __DEFINE_SET_CR(PTA, pta) + __DEFINE_SET_CR(IPSR, ipsr) + __DEFINE_SET_CR(ISR, isr) + __DEFINE_SET_CR(IIP, iip) + __DEFINE_SET_CR(IFA, ifa) + __DEFINE_SET_CR(ITIR, itir) + __DEFINE_SET_CR(IIPA, iipa) + __DEFINE_SET_CR(IFS, ifs) + __DEFINE_SET_CR(IIM, iim) + __DEFINE_SET_CR(IHA, iha) + __DEFINE_SET_CR(LID, lid) + __DEFINE_SET_CR(IVR, ivr) + __DEFINE_SET_CR(TPR, tpr) + __DEFINE_SET_CR(EOI, eoi) + __DEFINE_SET_CR(IRR0, irr0) + __DEFINE_SET_CR(IRR1, irr1) + __DEFINE_SET_CR(IRR2, irr2) + __DEFINE_SET_CR(IRR3, irr3) + __DEFINE_SET_CR(ITV, itv) + __DEFINE_SET_CR(PMV, pmv) + __DEFINE_SET_CR(CMCV, cmcv) + __DEFINE_SET_CR(LRR0, lrr0) + __DEFINE_SET_CR(LRR1, lrr1) + ); +#endif struct pv_cpu_ops pv_cpu_ops = { .fc = ia64_native_fc_func, @@ -366,4 +643,258 @@ ia64_native_do_steal_accounting(unsigned long *new_itm) struct pv_time_ops pv_time_ops = { .do_steal_accounting = ia64_native_do_steal_accounting, + .sched_clock = ia64_native_sched_clock, +}; + +/*************************************************************************** + * binary pacthing + * pv_init_ops.patch_bundle + */ + +#ifdef ASM_SUPPORTED +#define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \ + __DEFINE_FUNC(get_ ## name, \ + ";;\n" \ + "mov r8 = " #reg "\n" \ + ";;\n") + +#define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ + __DEFINE_FUNC(set_ ## name, \ + ";;\n" \ + "mov " #reg " = r8\n" \ + ";;\n") + +#define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \ + IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \ + IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ + +#define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \ + IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg) + +#define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \ + IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg) + + +IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr); +IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp); + +/* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */ +__DEFINE_FUNC(set_psr_l, + ";;\n" + "mov psr.l = r8\n" +#ifdef HAVE_SERIALIZE_DIRECTIVE + ".serialize.data\n" +#endif + ";;\n"); + +IA64_NATIVE_PATCH_DEFINE_REG(gp, gp); +IA64_NATIVE_PATCH_DEFINE_REG(sp, sp); + +IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0); +IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1); +IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2); +IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3); +IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4); +IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5); +IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6); +IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7); + +IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc); +IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp); +IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore); +IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat); +IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr); +IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag); +IA64_NATIVE_PATCH_DEFINE_AR(csd, csd); +IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd); +IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27); +IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr); +IA64_NATIVE_PATCH_DEFINE_AR(fir, fir); +IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr); +IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv); +IA64_NATIVE_PATCH_DEFINE_AR(unat, unat); +IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr); +IA64_NATIVE_PATCH_DEFINE_AR(itc, itc); +IA64_NATIVE_PATCH_DEFINE_AR(pfs, 
pfs); +IA64_NATIVE_PATCH_DEFINE_AR(lc, lc); +IA64_NATIVE_PATCH_DEFINE_AR(ec, ec); + +IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr); +IA64_NATIVE_PATCH_DEFINE_CR(itm, itm); +IA64_NATIVE_PATCH_DEFINE_CR(iva, iva); +IA64_NATIVE_PATCH_DEFINE_CR(pta, pta); +IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr); +IA64_NATIVE_PATCH_DEFINE_CR(isr, isr); +IA64_NATIVE_PATCH_DEFINE_CR(iip, iip); +IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa); +IA64_NATIVE_PATCH_DEFINE_CR(itir, itir); +IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa); +IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs); +IA64_NATIVE_PATCH_DEFINE_CR(iim, iim); +IA64_NATIVE_PATCH_DEFINE_CR(iha, iha); +IA64_NATIVE_PATCH_DEFINE_CR(lid, lid); +IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr); +IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr); +IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi); +IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0); +IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1); +IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2); +IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3); +IA64_NATIVE_PATCH_DEFINE_CR(itv, itv); +IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv); +IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv); +IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0); +IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1); + +static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[] +__initdata_or_module = +{ +#define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \ + { \ + (void*)ia64_native_ ## name ## _direct_start, \ + (void*)ia64_native_ ## name ## _direct_end, \ + PARAVIRT_PATCH_TYPE_ ## type, \ + } + + IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC), + IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH), + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), + IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA), + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR), + IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR), + IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), + IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), + IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), + IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore, + INTRIN_LOCAL_IRQ_RESTORE), + +#define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ + { \ + (void*)ia64_native_get_ ## name ## _direct_start, \ + (void*)ia64_native_get_ ## name ## _direct_end, \ + PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ + } + +#define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ + { \ + (void*)ia64_native_set_ ## name ## _direct_start, \ + (void*)ia64_native_set_ ## name ## _direct_end, \ + PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ + } + +#define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \ + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \ + IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ + +#define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \ + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg) + +#define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \ + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg) + + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP), + + IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L), + + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP), + + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6), + 
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7), + + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD), + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC), + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC), + + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0), + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1), }; + +unsigned long __init_or_module +ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type) +{ + const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) / + sizeof(ia64_native_patch_bundle_elems[0]); + + return __paravirt_patch_apply_bundle(sbundle, ebundle, type, + ia64_native_patch_bundle_elems, + nelems, NULL); +} +#endif /* ASM_SUPPOTED */ + +extern const char ia64_native_switch_to[]; +extern const char ia64_native_leave_syscall[]; +extern const char ia64_native_work_processed_syscall[]; +extern const char ia64_native_leave_kernel[]; + +const struct paravirt_patch_branch_target ia64_native_branch_target[] +__initconst = { +#define PARAVIRT_BR_TARGET(name, type) \ + { \ + ia64_native_ ## name, \ + PARAVIRT_PATCH_TYPE_BR_ ## type, \ + } + PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), + PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), + PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), + PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), +}; + +static void __init +ia64_native_patch_branch(unsigned long tag, unsigned long type) +{ + const unsigned long nelem = + sizeof(ia64_native_branch_target) / + sizeof(ia64_native_branch_target[0]); + __paravirt_patch_apply_branch(tag, type, + ia64_native_branch_target, nelem); +} diff --git a/arch/ia64/kernel/paravirt_patch.c b/arch/ia64/kernel/paravirt_patch.c new file mode 100644 
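(Illustrative note: the element table above is what drives pv_init_ops.patch_bundle. Each patch site recorded in a .paravirt_bundles section carries a type; that type is looked up in ia64_native_patch_bundle_elems, the matching native bundle sequence is copied over the site, and the remainder of the site is nop-filled before the instruction cache is flushed. Below is a simplified, self-contained C sketch of just the selection-and-copy step, with plain C types standing in for the kernel's bundle_t and struct paravirt_patch_bundle_elem; the authoritative version is the __paravirt_patch_apply_bundle() helper in the new paravirt_patch.c further down in this diff.)

	/* Illustrative sketch only -- not part of the patch; types simplified. */
	#include <string.h>

	struct patch_bundle_elem {
		const void *sbundle;	/* start of native replacement bundles */
		const void *ebundle;	/* end of native replacement bundles */
		unsigned long type;	/* PARAVIRT_PATCH_TYPE_* value */
	};

	/*
	 * Copy the native bundles whose type matches the patch site into the
	 * site and report how many bytes were used; the caller nop-fills the
	 * rest of the site and flushes the i-cache.
	 */
	static unsigned long
	apply_bundle(void *site_start, void *site_end, unsigned long type,
		     const struct patch_bundle_elem *elems, unsigned long nelems)
	{
		unsigned long i;

		for (i = 0; i < nelems; i++) {
			const struct patch_bundle_elem *p = &elems[i];
			unsigned long need = (char *)p->ebundle - (char *)p->sbundle;
			unsigned long room = (char *)site_end - (char *)site_start;

			if (p->type != type)
				continue;
			if (room < need)
				return 0;	/* site too small: leave it untouched */

			memcpy(site_start, p->sbundle, need);
			return need;
		}
		return 0;			/* unknown type: nothing patched */
	}

(At boot, paravirt_patch_apply() runs this over the __start_paravirt_bundles..__stop_paravirt_bundles range added to vmlinux.lds.S later in this diff; module_finalize(), patched earlier, walks a module's .paravirt_bundles section the same way.)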
index 000000000000..bfdfef1b1ffd --- /dev/null +++ b/arch/ia64/kernel/paravirt_patch.c @@ -0,0 +1,514 @@ +/****************************************************************************** + * linux/arch/ia64/xen/paravirt_patch.c + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/init.h> +#include <asm/intrinsics.h> +#include <asm/kprobes.h> +#include <asm/paravirt.h> +#include <asm/paravirt_patch.h> + +typedef union ia64_inst { + struct { + unsigned long long qp : 6; + unsigned long long : 31; + unsigned long long opcode : 4; + unsigned long long reserved : 23; + } generic; + unsigned long long l; +} ia64_inst_t; + +/* + * flush_icache_range() can't be used here. + * we are here before cpu_init() which initializes + * ia64_i_cache_stride_shift. flush_icache_range() uses it. + */ +void __init_or_module +paravirt_flush_i_cache_range(const void *instr, unsigned long size) +{ + extern void paravirt_fc_i(const void *addr); + unsigned long i; + + for (i = 0; i < size; i += sizeof(bundle_t)) + paravirt_fc_i(instr + i); +} + +bundle_t* __init_or_module +paravirt_get_bundle(unsigned long tag) +{ + return (bundle_t *)(tag & ~3UL); +} + +unsigned long __init_or_module +paravirt_get_slot(unsigned long tag) +{ + return tag & 3UL; +} + +unsigned long __init_or_module +paravirt_get_num_inst(unsigned long stag, unsigned long etag) +{ + bundle_t *sbundle = paravirt_get_bundle(stag); + unsigned long sslot = paravirt_get_slot(stag); + bundle_t *ebundle = paravirt_get_bundle(etag); + unsigned long eslot = paravirt_get_slot(etag); + + return (ebundle - sbundle) * 3 + eslot - sslot + 1; +} + +unsigned long __init_or_module +paravirt_get_next_tag(unsigned long tag) +{ + unsigned long slot = paravirt_get_slot(tag); + + switch (slot) { + case 0: + case 1: + return tag + 1; + case 2: { + bundle_t *bundle = paravirt_get_bundle(tag); + return (unsigned long)(bundle + 1); + } + default: + BUG(); + } + /* NOTREACHED */ +} + +ia64_inst_t __init_or_module +paravirt_read_slot0(const bundle_t *bundle) +{ + ia64_inst_t inst; + inst.l = bundle->quad0.slot0; + return inst; +} + +ia64_inst_t __init_or_module +paravirt_read_slot1(const bundle_t *bundle) +{ + ia64_inst_t inst; + inst.l = bundle->quad0.slot1_p0 | + ((unsigned long long)bundle->quad1.slot1_p1 << 18UL); + return inst; +} + +ia64_inst_t __init_or_module +paravirt_read_slot2(const bundle_t *bundle) +{ + ia64_inst_t inst; + inst.l = bundle->quad1.slot2; + return inst; +} + +ia64_inst_t __init_or_module +paravirt_read_inst(unsigned long tag) +{ + bundle_t *bundle = paravirt_get_bundle(tag); + unsigned long slot = paravirt_get_slot(tag); + + switch (slot) { + case 0: + return paravirt_read_slot0(bundle); + case 1: + return paravirt_read_slot1(bundle); + case 2: + return 
paravirt_read_slot2(bundle); + default: + BUG(); + } + /* NOTREACHED */ +} + +void __init_or_module +paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst) +{ + bundle->quad0.slot0 = inst.l; +} + +void __init_or_module +paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst) +{ + bundle->quad0.slot1_p0 = inst.l; + bundle->quad1.slot1_p1 = inst.l >> 18UL; +} + +void __init_or_module +paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst) +{ + bundle->quad1.slot2 = inst.l; +} + +void __init_or_module +paravirt_write_inst(unsigned long tag, ia64_inst_t inst) +{ + bundle_t *bundle = paravirt_get_bundle(tag); + unsigned long slot = paravirt_get_slot(tag); + + switch (slot) { + case 0: + paravirt_write_slot0(bundle, inst); + break; + case 1: + paravirt_write_slot1(bundle, inst); + break; + case 2: + paravirt_write_slot2(bundle, inst); + break; + default: + BUG(); + break; + } + paravirt_flush_i_cache_range(bundle, sizeof(*bundle)); +} + +/* for debug */ +void +paravirt_print_bundle(const bundle_t *bundle) +{ + const unsigned long *quad = (const unsigned long *)bundle; + ia64_inst_t slot0 = paravirt_read_slot0(bundle); + ia64_inst_t slot1 = paravirt_read_slot1(bundle); + ia64_inst_t slot2 = paravirt_read_slot2(bundle); + + printk(KERN_DEBUG + "bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]); + printk(KERN_DEBUG + "bundle template 0x%x\n", + bundle->quad0.template); + printk(KERN_DEBUG + "slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n", + (unsigned long)bundle->quad0.slot0, + (unsigned long)bundle->quad0.slot1_p0, + (unsigned long)bundle->quad1.slot1_p1, + (unsigned long)bundle->quad1.slot2); + printk(KERN_DEBUG + "slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n", + slot0.l, slot1.l, slot2.l); +} + +static int noreplace_paravirt __init_or_module = 0; + +static int __init setup_noreplace_paravirt(char *str) +{ + noreplace_paravirt = 1; + return 1; +} +__setup("noreplace-paravirt", setup_noreplace_paravirt); + +#ifdef ASM_SUPPORTED +static void __init_or_module +fill_nop_bundle(void *sbundle, void *ebundle) +{ + extern const char paravirt_nop_bundle[]; + extern const unsigned long paravirt_nop_bundle_size; + + void *bundle = sbundle; + + BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); + BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); + + while (bundle < ebundle) { + memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size); + + bundle += paravirt_nop_bundle_size; + } +} + +/* helper function */ +unsigned long __init_or_module +__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type, + const struct paravirt_patch_bundle_elem *elems, + unsigned long nelems, + const struct paravirt_patch_bundle_elem **found) +{ + unsigned long used = 0; + unsigned long i; + + BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); + BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); + + found = NULL; + for (i = 0; i < nelems; i++) { + const struct paravirt_patch_bundle_elem *p = &elems[i]; + if (p->type == type) { + unsigned long need = p->ebundle - p->sbundle; + unsigned long room = ebundle - sbundle; + + if (found != NULL) + *found = p; + + if (room < need) { + /* no room to replace. skip it */ + printk(KERN_DEBUG + "the space is too small to put " + "bundles. 
type %ld need %ld room %ld\n", + type, need, room); + break; + } + + used = need; + memcpy(sbundle, p->sbundle, used); + break; + } + } + + return used; +} + +void __init_or_module +paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start, + const struct paravirt_patch_site_bundle *end) +{ + const struct paravirt_patch_site_bundle *p; + + if (noreplace_paravirt) + return; + if (pv_init_ops.patch_bundle == NULL) + return; + + for (p = start; p < end; p++) { + unsigned long used; + + used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle, + p->type); + if (used == 0) + continue; + + fill_nop_bundle(p->sbundle + used, p->ebundle); + paravirt_flush_i_cache_range(p->sbundle, + p->ebundle - p->sbundle); + } + ia64_sync_i(); + ia64_srlz_i(); +} + +/* + * nop.i, nop.m, nop.f instruction are same format. + * but nop.b has differennt format. + * This doesn't support nop.b for now. + */ +static void __init_or_module +fill_nop_inst(unsigned long stag, unsigned long etag) +{ + extern const bundle_t paravirt_nop_mfi_inst_bundle[]; + unsigned long tag; + const ia64_inst_t nop_inst = + paravirt_read_slot0(paravirt_nop_mfi_inst_bundle); + + for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag)) + paravirt_write_inst(tag, nop_inst); +} + +void __init_or_module +paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start, + const struct paravirt_patch_site_inst *end) +{ + const struct paravirt_patch_site_inst *p; + + if (noreplace_paravirt) + return; + if (pv_init_ops.patch_inst == NULL) + return; + + for (p = start; p < end; p++) { + unsigned long tag; + bundle_t *sbundle; + bundle_t *ebundle; + + tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type); + if (tag == p->stag) + continue; + + fill_nop_inst(tag, p->etag); + sbundle = paravirt_get_bundle(p->stag); + ebundle = paravirt_get_bundle(p->etag) + 1; + paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) * + sizeof(bundle_t)); + } + ia64_sync_i(); + ia64_srlz_i(); +} +#endif /* ASM_SUPPOTED */ + +/* brl.cond.sptk.many <target64> X3 */ +typedef union inst_x3_op { + ia64_inst_t inst; + struct { + unsigned long qp: 6; + unsigned long btyp: 3; + unsigned long unused: 3; + unsigned long p: 1; + unsigned long imm20b: 20; + unsigned long wh: 2; + unsigned long d: 1; + unsigned long i: 1; + unsigned long opcode: 4; + }; + unsigned long l; +} inst_x3_op_t; + +typedef union inst_x3_imm { + ia64_inst_t inst; + struct { + unsigned long unused: 2; + unsigned long imm39: 39; + }; + unsigned long l; +} inst_x3_imm_t; + +void __init_or_module +paravirt_patch_reloc_brl(unsigned long tag, const void *target) +{ + unsigned long tag_op = paravirt_get_next_tag(tag); + unsigned long tag_imm = tag; + bundle_t *bundle = paravirt_get_bundle(tag); + + ia64_inst_t inst_op = paravirt_read_inst(tag_op); + ia64_inst_t inst_imm = paravirt_read_inst(tag_imm); + + inst_x3_op_t inst_x3_op = { .l = inst_op.l }; + inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l }; + + unsigned long imm60 = + ((unsigned long)target - (unsigned long)bundle) >> 4; + + BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */ + BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); + + /* imm60[59] 1bit */ + inst_x3_op.i = (imm60 >> 59) & 1; + /* imm60[19:0] 20bit */ + inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1); + /* imm60[58:20] 39bit */ + inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1); + + inst_op.l = inst_x3_op.l; + inst_imm.l = inst_x3_imm.l; + + paravirt_write_inst(tag_op, inst_op); + paravirt_write_inst(tag_imm, inst_imm); +} + +/* br.cond.sptk.many 
<target25> B1 */ +typedef union inst_b1 { + ia64_inst_t inst; + struct { + unsigned long qp: 6; + unsigned long btype: 3; + unsigned long unused: 3; + unsigned long p: 1; + unsigned long imm20b: 20; + unsigned long wh: 2; + unsigned long d: 1; + unsigned long s: 1; + unsigned long opcode: 4; + }; + unsigned long l; +} inst_b1_t; + +void __init +paravirt_patch_reloc_br(unsigned long tag, const void *target) +{ + bundle_t *bundle = paravirt_get_bundle(tag); + ia64_inst_t inst = paravirt_read_inst(tag); + unsigned long target25 = (unsigned long)target - (unsigned long)bundle; + inst_b1_t inst_b1; + + BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); + + inst_b1.l = inst.l; + if (target25 & (1UL << 63)) + inst_b1.s = 1; + else + inst_b1.s = 0; + + inst_b1.imm20b = target25 >> 4; + inst.l = inst_b1.l; + + paravirt_write_inst(tag, inst); +} + +void __init +__paravirt_patch_apply_branch( + unsigned long tag, unsigned long type, + const struct paravirt_patch_branch_target *entries, + unsigned int nr_entries) +{ + unsigned int i; + for (i = 0; i < nr_entries; i++) { + if (entries[i].type == type) { + paravirt_patch_reloc_br(tag, entries[i].entry); + break; + } + } +} + +static void __init +paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start, + const struct paravirt_patch_site_branch *end) +{ + const struct paravirt_patch_site_branch *p; + + if (noreplace_paravirt) + return; + if (pv_init_ops.patch_branch == NULL) + return; + + for (p = start; p < end; p++) + (*pv_init_ops.patch_branch)(p->tag, p->type); + + ia64_sync_i(); + ia64_srlz_i(); +} + +void __init +paravirt_patch_apply(void) +{ + extern const char __start_paravirt_bundles[]; + extern const char __stop_paravirt_bundles[]; + extern const char __start_paravirt_insts[]; + extern const char __stop_paravirt_insts[]; + extern const char __start_paravirt_branches[]; + extern const char __stop_paravirt_branches[]; + + paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *) + __start_paravirt_bundles, + (const struct paravirt_patch_site_bundle *) + __stop_paravirt_bundles); + paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *) + __start_paravirt_insts, + (const struct paravirt_patch_site_inst *) + __stop_paravirt_insts); + paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *) + __start_paravirt_branches, + (const struct paravirt_patch_site_branch *) + __stop_paravirt_branches); +} + +/* + * Local variables: + * mode: C + * c-set-style: "linux" + * c-basic-offset: 8 + * tab-width: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c new file mode 100644 index 000000000000..b28082a95d45 --- /dev/null +++ b/arch/ia64/kernel/paravirt_patchlist.c @@ -0,0 +1,79 @@ +/****************************************************************************** + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/bug.h> +#include <asm/paravirt.h> + +#define DECLARE(name) \ + extern unsigned long \ + __ia64_native_start_gate_##name##_patchlist[]; \ + extern unsigned long \ + __ia64_native_end_gate_##name##_patchlist[] + +DECLARE(fsyscall); +DECLARE(brl_fsys_bubble_down); +DECLARE(vtop); +DECLARE(mckinley_e9); + +extern unsigned long __start_gate_section[]; + +#define ASSIGN(name) \ + .start_##name##_patchlist = \ + (unsigned long)__ia64_native_start_gate_##name##_patchlist, \ + .end_##name##_patchlist = \ + (unsigned long)__ia64_native_end_gate_##name##_patchlist + +struct pv_patchdata pv_patchdata __initdata = { + ASSIGN(fsyscall), + ASSIGN(brl_fsys_bubble_down), + ASSIGN(vtop), + ASSIGN(mckinley_e9), + + .gate_section = (void*)__start_gate_section, +}; + + +unsigned long __init +paravirt_get_gate_patchlist(enum pv_gate_patchlist type) +{ + +#define CASE(NAME, name) \ + case PV_GATE_START_##NAME: \ + return pv_patchdata.start_##name##_patchlist; \ + case PV_GATE_END_##NAME: \ + return pv_patchdata.end_##name##_patchlist; \ + + switch (type) { + CASE(FSYSCALL, fsyscall); + CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down); + CASE(VTOP, vtop); + CASE(MCKINLEY_E9, mckinley_e9); + default: + BUG(); + break; + } + return 0; +} + +void * __init +paravirt_get_gate_section(void) +{ + return pv_patchdata.gate_section; +} diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h new file mode 100644 index 000000000000..0684aa6c6507 --- /dev/null +++ b/arch/ia64/kernel/paravirt_patchlist.h @@ -0,0 +1,28 @@ +/****************************************************************************** + * linux/arch/ia64/xen/paravirt_patchlist.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) +#include <asm/xen/patchlist.h> +#else +#include <asm/native/patchlist.h> +#endif + diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S index 2f42fcb9776a..6158560d7f17 100644 --- a/arch/ia64/kernel/paravirtentry.S +++ b/arch/ia64/kernel/paravirtentry.S @@ -20,8 +20,11 @@ * */ +#include <linux/init.h> #include <asm/asmmacro.h> #include <asm/asm-offsets.h> +#include <asm/paravirt_privop.h> +#include <asm/paravirt_patch.h> #include "entry.h" #define DATA8(sym, init_value) \ @@ -32,29 +35,87 @@ data8 init_value ; \ .popsection -#define BRANCH(targ, reg, breg) \ - movl reg=targ ; \ - ;; \ - ld8 reg=[reg] ; \ - ;; \ - mov breg=reg ; \ +#define BRANCH(targ, reg, breg, type) \ + PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \ + ;; \ + movl reg=targ ; \ + ;; \ + ld8 reg=[reg] ; \ + ;; \ + mov breg=reg ; \ br.cond.sptk.many breg -#define BRANCH_PROC(sym, reg, breg) \ - DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ - GLOBAL_ENTRY(paravirt_ ## sym) ; \ - BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ +#define BRANCH_PROC(sym, reg, breg, type) \ + DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ + GLOBAL_ENTRY(paravirt_ ## sym) ; \ + BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ END(paravirt_ ## sym) -#define BRANCH_PROC_UNWINFO(sym, reg, breg) \ - DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ - GLOBAL_ENTRY(paravirt_ ## sym) ; \ - PT_REGS_UNWIND_INFO(0) ; \ - BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ +#define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \ + DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ + GLOBAL_ENTRY(paravirt_ ## sym) ; \ + PT_REGS_UNWIND_INFO(0) ; \ + BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ END(paravirt_ ## sym) -BRANCH_PROC(switch_to, r22, b7) -BRANCH_PROC_UNWINFO(leave_syscall, r22, b7) -BRANCH_PROC(work_processed_syscall, r2, b7) -BRANCH_PROC_UNWINFO(leave_kernel, r22, b7) +BRANCH_PROC(switch_to, r22, b7, SWITCH_TO) +BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL) +BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL) +BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL) + + +#ifdef CONFIG_MODULES +#define __INIT_OR_MODULE .text +#define __INITDATA_OR_MODULE .data +#else +#define __INIT_OR_MODULE __INIT +#define __INITDATA_OR_MODULE __INITDATA +#endif /* CONFIG_MODULES */ + + __INIT_OR_MODULE + GLOBAL_ENTRY(paravirt_fc_i) + fc.i r32 + br.ret.sptk.many rp + END(paravirt_fc_i) + __FINIT + + __INIT_OR_MODULE + .align 32 + GLOBAL_ENTRY(paravirt_nop_b_inst_bundle) + { + nop.b 0 + nop.b 0 + nop.b 0 + } + END(paravirt_nop_b_inst_bundle) + __FINIT + + /* NOTE: nop.[mfi] has same format */ + __INIT_OR_MODULE + GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle) + { + nop.m 0 + nop.f 0 + nop.i 0 + } + END(paravirt_nop_mfi_inst_bundle) + __FINIT + + __INIT_OR_MODULE + GLOBAL_ENTRY(paravirt_nop_bundle) +paravirt_nop_bundle_start: + { + nop 0 + nop 0 + nop 0 + } +paravirt_nop_bundle_end: + END(paravirt_nop_bundle) + __FINIT + + __INITDATA_OR_MODULE + .align 8 + .global paravirt_nop_bundle_size +paravirt_nop_bundle_size: + data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index b83b2c516008..68a1311db806 100644 --- 
a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/string.h> +#include <asm/paravirt.h> #include <asm/patch.h> #include <asm/processor.h> #include <asm/sections.h> @@ -169,16 +170,35 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) ia64_srlz_i(); } +extern unsigned long ia64_native_fsyscall_table[NR_syscalls]; +extern char ia64_native_fsys_bubble_down[]; +struct pv_fsys_data pv_fsys_data __initdata = { + .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table, + .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down, +}; + +unsigned long * __init +paravirt_get_fsyscall_table(void) +{ + return pv_fsys_data.fsyscall_table; +} + +char * __init +paravirt_get_fsys_bubble_down(void) +{ + return pv_fsys_data.fsys_bubble_down; +} + static void __init patch_fsyscall_table (unsigned long start, unsigned long end) { - extern unsigned long fsyscall_table[NR_syscalls]; + u64 fsyscall_table = (u64)paravirt_get_fsyscall_table(); s32 *offp = (s32 *) start; u64 ip; while (offp < (s32 *) end) { ip = (u64) ia64_imva((char *) offp + *offp); - ia64_patch_imm64(ip, (u64) fsyscall_table); + ia64_patch_imm64(ip, fsyscall_table); ia64_fc((void *) ip); ++offp; } @@ -189,7 +209,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end) static void __init patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) { - extern char fsys_bubble_down[]; + u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down(); s32 *offp = (s32 *) start; u64 ip; @@ -207,13 +227,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) void __init ia64_patch_gate (void) { -# define START(name) ((unsigned long) __start_gate_##name##_patchlist) -# define END(name) ((unsigned long)__end_gate_##name##_patchlist) +# define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name) +# define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name) - patch_fsyscall_table(START(fsyscall), END(fsyscall)); - patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); - ia64_patch_vtop(START(vtop), END(vtop)); - ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); + patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL)); + patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN)); + ia64_patch_vtop(START(VTOP), END(VTOP)); + ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9)); } void ia64_patch_phys_stack_reg(unsigned long val) @@ -229,7 +249,7 @@ void ia64_patch_phys_stack_reg(unsigned long val) while (offp < end) { ip = (u64) offp + *offp; ia64_patch(ip, mask, imm); - ia64_fc(ip); + ia64_fc((void *)ip); ++offp; } ia64_sync_i(); diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 5c0f408cfd71..8a06dc480594 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg) * /proc/perfmon interface, for debug only */ -#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) +#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1) static void * pfm_proc_start(struct seq_file *m, loff_t *pos) @@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos) return PFM_PROC_SHOW_HEADER; } - while (*pos <= NR_CPUS) { + while (*pos <= nr_cpu_ids) { if (cpu_online(*pos - 1)) { return (void *)*pos; } diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index ecb9eb78d687..7053c55b7649 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -317,7 
+317,7 @@ retry: } n = data->cpu_check; - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) { if (cpu_isset(n, data->cpu_event)) { if (!cpu_online(n)) { cpu_clear(n, data->cpu_event); @@ -326,7 +326,7 @@ retry: cpu = n; break; } - if (++n == NR_CPUS) + if (++n == nr_cpu_ids) n = 0; } @@ -337,7 +337,7 @@ retry: /* for next read, start checking at next CPU */ data->cpu_check = cpu; - if (++data->cpu_check == NR_CPUS) + if (++data->cpu_check == nr_cpu_ids) data->cpu_check = 0; snprintf(cmd, sizeof(cmd), "read %d\n", cpu); diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 865af27c7737..714066aeda7f 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -52,6 +52,7 @@ #include <asm/meminit.h> #include <asm/page.h> #include <asm/paravirt.h> +#include <asm/paravirt_patch.h> #include <asm/patch.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -537,6 +538,7 @@ setup_arch (char **cmdline_p) paravirt_arch_setup_early(); ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); + paravirt_patch_apply(); *cmdline_p = __va(ia64_boot_param->command_line); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); @@ -730,10 +732,10 @@ static void * c_start (struct seq_file *m, loff_t *pos) { #ifdef CONFIG_SMP - while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) + while (*pos < nr_cpu_ids && !cpu_online(*pos)) ++*pos; #endif - return *pos < NR_CPUS ? cpu_data(*pos) : NULL; + return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; } static void * @@ -1016,8 +1018,7 @@ cpu_init (void) | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; - if (current->mm) - BUG(); + BUG_ON(current->mm); ia64_mmu_init(ia64_imva(cpu_data)); ia64_mca_cpu_init(ia64_imva(cpu_data)); diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index da8f020d82c1..2ea4199d9c57 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -166,11 +166,11 @@ send_IPI_allbutself (int op) * Called with preemption disabled. 
*/ static inline void -send_IPI_mask(cpumask_t mask, int op) +send_IPI_mask(const struct cpumask *mask, int op) { unsigned int cpu; - for_each_cpu_mask(cpu, mask) { + for_each_cpu(cpu, mask) { send_IPI_single(cpu, op); } } @@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu) send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); } -void arch_send_call_function_ipi(cpumask_t mask) +void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_IPI_mask(mask, IPI_CALL_FUNC); } diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 52290547c85b..7700e23034bb 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -581,14 +581,14 @@ smp_build_cpu_map (void) ia64_cpu_to_sapicid[0] = boot_cpu_id; cpus_clear(cpu_present_map); - cpu_set(0, cpu_present_map); - cpu_set(0, cpu_possible_map); + set_cpu_present(0, true); + set_cpu_possible(0, true); for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { sapicid = smp_boot_data.cpu_phys_id[i]; if (sapicid == boot_cpu_id) continue; - cpu_set(cpu, cpu_present_map); - cpu_set(cpu, cpu_possible_map); + set_cpu_present(cpu, true); + set_cpu_possible(cpu, true); ia64_cpu_to_sapicid[cpu] = sapicid; cpu++; } @@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus) */ if (!max_cpus) { printk(KERN_INFO "SMP mode deactivated.\n"); - cpus_clear(cpu_online_map); - cpus_clear(cpu_present_map); - cpus_clear(cpu_possible_map); - cpu_set(0, cpu_online_map); - cpu_set(0, cpu_present_map); - cpu_set(0, cpu_possible_map); + init_cpu_online(cpumask_of(0)); + init_cpu_present(cpumask_of(0)); + init_cpu_possible(cpumask_of(0)); return; } } diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index f0ebb342409d..641c8b61c4f1 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -20,6 +20,7 @@ #include <linux/efi.h> #include <linux/timex.h> #include <linux/clocksource.h> +#include <linux/platform_device.h> #include <asm/machvec.h> #include <asm/delay.h> @@ -50,6 +51,15 @@ EXPORT_SYMBOL(last_cli_ip); #endif #ifdef CONFIG_PARAVIRT +/* We need to define a real function for sched_clock, to override the + weak default version */ +unsigned long long sched_clock(void) +{ + return paravirt_sched_clock(); +} +#endif + +#ifdef CONFIG_PARAVIRT static void paravirt_clocksource_resume(void) { @@ -405,6 +415,21 @@ static struct irqaction timer_irqaction = { .name = "timer" }; +static struct platform_device rtc_efi_dev = { + .name = "rtc-efi", + .id = -1, +}; + +static int __init rtc_init(void) +{ + if (platform_device_register(&rtc_efi_dev) < 0) + printk(KERN_ERR "unable to register rtc device...\n"); + + /* not necessarily an error */ + return 0; +} +module_init(rtc_init); + void __init time_init (void) { diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 3765efc5f963..4a95e86b9ac2 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -169,6 +169,30 @@ SECTIONS __end___mckinley_e9_bundles = .; } +#if defined(CONFIG_PARAVIRT) + . = ALIGN(16); + .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) + { + __start_paravirt_bundles = .; + *(.paravirt_bundles) + __stop_paravirt_bundles = .; + } + . = ALIGN(16); + .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) + { + __start_paravirt_insts = .; + *(.paravirt_insts) + __stop_paravirt_insts = .; + } + . 
= ALIGN(16); + .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) + { + __start_paravirt_branches = .; + *(.paravirt_branches) + __stop_paravirt_branches = .; + } +#endif + #if defined(CONFIG_IA64_GENERIC) /* Machine Vector */ . = ALIGN(16); @@ -201,6 +225,12 @@ SECTIONS __start_gate_section = .; *(.data.gate) __stop_gate_section = .; +#ifdef CONFIG_XEN + . = ALIGN(PAGE_SIZE); + __xen_start_gate_section = .; + *(.data.gate.xen) + __xen_stop_gate_section = .; +#endif } . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose * kernel data diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 076b00d1dbff..28af6a731bb8 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len) int l; for (l = 0; l < (len + 32); l += 32) - ia64_fc(start + l); + ia64_fc((void *)(start + l)); ia64_sync_i(); ia64_srlz_i(); diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index d4d280505878..a18ee17b9192 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c @@ -386,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, else *rnat_addr = (*rnat_addr) & (~nat_mask); - ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore); + ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); ia64_setreg(_IA64_REG_AR_RNAT, rnat); } local_irq_restore(psr); diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 38232b37668b..2c2501f13159 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) phy_pte &= ~PAGE_FLAGS_RV_MASK; psr = ia64_clear_ic(); ia64_itc(type, va, phy_pte, itir_ps(itir)); + paravirt_dv_serialize_data(); ia64_set_psr(psr); } @@ -456,6 +457,7 @@ void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, phy_pte &= ~PAGE_FLAGS_RV_MASK; psr = ia64_clear_ic(); ia64_itc(type, ifa, phy_pte, ps); + paravirt_dv_serialize_data(); ia64_set_psr(psr); } if (!(pte&VTLB_PTE_IO)) diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 56e12903973c..c0f3bee69042 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -35,6 +35,7 @@ #include <asm/uaccess.h> #include <asm/unistd.h> #include <asm/mca.h> +#include <asm/paravirt.h> DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); @@ -259,6 +260,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) static void __init setup_gate (void) { + void *gate_section; struct page *page; /* @@ -266,10 +268,11 @@ setup_gate (void) * headers etc. and once execute-only page to enable * privilege-promotion via "epc": */ - page = virt_to_page(ia64_imva(__start_gate_section)); + gate_section = paravirt_get_gate_section(); + page = virt_to_page(ia64_imva(gate_section)); put_kernel_page(page, GATE_ADDR, PAGE_READONLY); #ifdef HAVE_BUGGY_SEGREL - page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); + page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE)); put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); #else put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); @@ -633,8 +636,7 @@ mem_init (void) #endif #ifdef CONFIG_FLATMEM - if (!mem_map) - BUG(); + BUG_ON(!mem_map); max_mapnr = max_low_pfn; #endif @@ -667,8 +669,8 @@ mem_init (void) * code can tell them apart. 
*/ for (i = 0; i < NR_syscalls; ++i) { - extern unsigned long fsyscall_table[NR_syscalls]; extern unsigned long sys_call_table[NR_syscalls]; + unsigned long *fsyscall_table = paravirt_get_fsyscall_table(); if (!fsyscall_table[i] || nolwsys) fsyscall_table[i] = sys_call_table[i] | 1; diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index bd9818a36b47..b9f3d7bbb338 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, preempt_disable(); #ifdef CONFIG_SMP - if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { + if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) { platform_global_tlb_purge(mm, start, end, nbits); preempt_enable(); return; diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed index ba66ac2e4c60..e59809a3fc01 100644 --- a/arch/ia64/scripts/pvcheck.sed +++ b/arch/ia64/scripts/pvcheck.sed @@ -17,6 +17,7 @@ s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g +s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not used directly\"/g s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 0d4ffa4da1da..57f280dd9def 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -135,8 +135,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, } war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); - if (!war_list) - BUG(); + BUG_ON(!war_list); SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, nasid, widget, __pa(war_list), 0, 0, 0 ,0); @@ -180,23 +179,20 @@ sn_common_hubdev_init(struct hubdev_info *hubdev) sizeof(struct sn_flush_device_kernel *); hubdev->hdi_flush_nasid_list.widget_p = kzalloc(size, GFP_KERNEL); - if (!hubdev->hdi_flush_nasid_list.widget_p) - BUG(); + BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p); for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { size = DEV_PER_WIDGET * sizeof(struct sn_flush_device_kernel); sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); - if (!sn_flush_device_kernel) - BUG(); + BUG_ON(!sn_flush_device_kernel); dev_entry = sn_flush_device_kernel; for (device = 0; device < DEV_PER_WIDGET; device++, dev_entry++) { size = sizeof(struct sn_flush_device_common); dev_entry->common = kzalloc(size, GFP_KERNEL); - if (!dev_entry->common) - BUG(); + BUG_ON(!dev_entry->common); if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) status = sal_get_device_dmaflush_list( hubdev->hdi_nasid, widget, device, @@ -326,8 +322,7 @@ sn_common_bus_fixup(struct pci_bus *bus, */ controller->platform_data = kzalloc(sizeof(struct sn_platform_data), GFP_KERNEL); - if (controller->platform_data == NULL) - BUG(); + BUG_ON(controller->platform_data == NULL); sn_platform_data = (struct sn_platform_data *) controller->platform_data; sn_platform_data->provider_soft = provider_soft; diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index e2eb2da60f96..ee774c366a06 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -128,8 +128,7 @@ sn_legacy_pci_window_fixup(struct 
pci_controller *controller, { controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL); - if (controller->window == NULL) - BUG(); + BUG_ON(controller->window == NULL); controller->window[0].offset = legacy_io; controller->window[0].resource.name = "legacy_io"; controller->window[0].resource.flags = IORESOURCE_IO; @@ -168,8 +167,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, idx = controller->windows; new_count = controller->windows + count; new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); - if (new_window == NULL) - BUG(); + BUG_ON(new_window == NULL); if (controller->window) { memcpy(new_window, controller->window, sizeof(struct pci_window) * controller->windows); @@ -222,8 +220,7 @@ sn_io_slot_fixup(struct pci_dev *dev) (u64) __pa(pcidev_info), (u64) __pa(sn_irq_info)); - if (status) - BUG(); /* Cannot get platform pci device information */ + BUG_ON(status); /* Cannot get platform pci device information */ /* Copy over PIO Mapped Addresses */ @@ -307,8 +304,7 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) prom_bussoft_ptr = __va(prom_bussoft_ptr); controller = kzalloc(sizeof(*controller), GFP_KERNEL); - if (!controller) - BUG(); + BUG_ON(!controller); controller->segment = segment; /* diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 02c5b8a9fb60..e456f062f241 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -732,8 +732,7 @@ void __init build_cnode_tables(void) kl_config_hdr_t *klgraph_header; nasid = cnodeid_to_nasid(node); klgraph_header = ia64_sn_get_klconfig_addr(nasid); - if (klgraph_header == NULL) - BUG(); + BUG_ON(klgraph_header == NULL); brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); while (brd) { if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { @@ -750,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice) { long cpu; - for (cpu = 0; cpu < NR_CPUS; cpu++) + for (cpu = 0; cpu < nr_cpu_ids; cpu++) if (cpuid_to_nasid(cpu) == nasid && cpuid_to_slice(cpu) == slice) return cpu; diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c index 4dcce3d0e04c..e63328818643 100644 --- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c +++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c @@ -225,7 +225,6 @@ static struct proc_dir_entry *sgi_prominfo_entry; int __init prominfo_init(void) { struct proc_dir_entry **entp; - struct proc_dir_entry *p; cnodeid_t cnodeid; unsigned long nasid; int size; @@ -246,14 +245,10 @@ int __init prominfo_init(void) sprintf(name, "node%d", cnodeid); *entp = proc_mkdir(name, sgi_prominfo_entry); nasid = cnodeid_to_nasid(cnodeid); - p = create_proc_read_entry("fit", 0, *entp, read_fit_entry, + create_proc_read_entry("fit", 0, *entp, read_fit_entry, (void *)nasid); - if (p) - p->owner = THIS_MODULE; - p = create_proc_read_entry("version", 0, *entp, + create_proc_read_entry("version", 0, *entp, read_version_entry, (void *)nasid); - if (p) - p->owner = THIS_MODULE; entp++; } diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index e585f9a2afb9..1176506b2bae 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm) unsigned long itc; itc = ia64_get_itc(); - smp_flush_tlb_cpumask(mm->cpu_vm_mask); + smp_flush_tlb_cpumask(*mm_cpumask(mm)); itc = ia64_get_itc() - itc; __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; 
__get_cpu_var(ptcstats).shub_ipi_flushes++; @@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, nodes_clear(nodes_flushed); i = 0; - for_each_cpu_mask(cpu, mm->cpu_vm_mask) { + for_each_cpu(cpu, mm_cpumask(mm)) { cnode = cpu_to_node(cpu); node_set(cnode, nodes_flushed); lcpu = cpu; @@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu) static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) { - if (*offset < NR_CPUS) + if (*offset < nr_cpu_ids) return offset; return NULL; } @@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) { (*offset)++; - if (*offset < NR_CPUS) + if (*offset < nr_cpu_ids) return offset; return NULL; } @@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); } - if (cpu < NR_CPUS && cpu_online(cpu)) { + if (cpu < nr_cpu_ids && cpu_online(cpu)) { stat = &per_cpu(ptcstats, cpu); seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, @@ -554,7 +554,7 @@ static int __init sn2_ptc_init(void) proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, NULL, &proc_sn2_ptc_operations); - if (!&proc_sn2_ptc_operations) { + if (!proc_sn2_ptc) { printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); return -EINVAL; } diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index be339477f906..9e6491cf72bd 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -275,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb /* get it's interconnect topology */ sz = op->ports * sizeof(struct sn_hwperf_port_info); - if (sz > sizeof(ptdata)) - BUG(); + BUG_ON(sz > sizeof(ptdata)); e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, (u64)&ptdata, 0, 0, NULL); @@ -310,8 +309,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb if (router && (!found_cpu || !found_mem)) { /* search for a node connected to the same router */ sz = router->ports * sizeof(struct sn_hwperf_port_info); - if (sz > sizeof(ptdata)) - BUG(); + BUG_ON(sz > sizeof(ptdata)); e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, router->id, sz, (u64)&ptdata, 0, 0, NULL); @@ -612,7 +610,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; if (cpu != SN_HWPERF_ARG_ANY_CPU) { - if (cpu >= NR_CPUS || !cpu_online(cpu)) { + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { r = -EINVAL; goto out; } diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 060df4aa9916..c659ad5613a0 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c @@ -256,9 +256,7 @@ void sn_dma_flush(u64 addr) hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; - if (!hubinfo) { - BUG(); - } + BUG_ON(!hubinfo); flush_nasid_list = &hubinfo->hdi_flush_nasid_list; if (flush_nasid_list->widget_p == NULL) diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile index 0ad0224693d9..e6f4a0a74228 100644 --- a/arch/ia64/xen/Makefile +++ b/arch/ia64/xen/Makefile @@ -3,14 +3,29 @@ # obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ - hypervisor.o xencomm.o xcom_hcall.o 
grant-table.o time.o suspend.o + hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ + gate-data.o obj-$(CONFIG_IA64_GENERIC) += machvec.o +# The gate DSO image is built using a special linker script. +include $(srctree)/arch/ia64/kernel/Makefile.gate + +# tell compiled for xen +CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN +AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN + +# use same file of native. +$(obj)/gate.o: $(src)/../kernel/gate.S FORCE + $(call if_changed_dep,as_o_S) +$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE + $(call if_changed_dep,cpp_lds_S) + + AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN # xen multi compile -ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S +ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) obj-y += $(ASM_PARAVIRT_OBJS) define paravirtualized_xen diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S new file mode 100644 index 000000000000..7d4830afc91d --- /dev/null +++ b/arch/ia64/xen/gate-data.S @@ -0,0 +1,3 @@ + .section .data.gate.xen, "aw" + + .incbin "arch/ia64/xen/gate.so" diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S index 45e02bb64a92..e32dae444dd6 100644 --- a/arch/ia64/xen/hypercall.S +++ b/arch/ia64/xen/hypercall.S @@ -9,6 +9,7 @@ #include <asm/intrinsics.h> #include <asm/xen/privop.h> +#ifdef __INTEL_COMPILER /* * Hypercalls without parameter. */ @@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4) br.ret.sptk.many rp ;; END(xen_set_rr0_to_rr4) +#endif GLOBAL_ENTRY(xen_send_ipi) mov r14=r32 diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index 68d6204c3f16..fb8332690179 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c @@ -175,10 +175,58 @@ static void xen_itc_jitter_data_reset(void) } while (unlikely(ret != lcycle)); } +/* based on xen_sched_clock() in arch/x86/xen/time.c. */ +/* + * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, + * something similar logic should be implemented here. + */ +/* + * Xen sched_clock implementation. Returns the number of unstolen + * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED + * states. + */ +static unsigned long long xen_sched_clock(void) +{ + struct vcpu_runstate_info runstate; + + unsigned long long now; + unsigned long long offset; + unsigned long long ret; + + /* + * Ideally sched_clock should be called on a per-cpu basis + * anyway, so preempt should already be disabled, but that's + * not current practice at the moment. + */ + preempt_disable(); + + /* + * both ia64_native_sched_clock() and xen's runstate are + * based on mAR.ITC. So difference of them makes sense. + */ + now = ia64_native_sched_clock(); + + get_runstate_snapshot(&runstate); + + WARN_ON(runstate.state != RUNSTATE_running); + + offset = 0; + if (now > runstate.state_entry_time) + offset = now - runstate.state_entry_time; + ret = runstate.time[RUNSTATE_blocked] + + runstate.time[RUNSTATE_running] + + offset; + + preempt_enable(); + + return ret; +} + struct pv_time_ops xen_time_ops __initdata = { .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, .do_steal_accounting = xen_do_steal_accounting, .clocksource_resume = xen_itc_jitter_data_reset, + .sched_clock = xen_sched_clock, }; /* Called after suspend, to resume time. 
*/ diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c index 936cff3c96e0..5e2270a999fa 100644 --- a/arch/ia64/xen/xen_pv_ops.c +++ b/arch/ia64/xen/xen_pv_ops.c @@ -24,6 +24,7 @@ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/pm.h> +#include <linux/unistd.h> #include <asm/xen/hypervisor.h> #include <asm/xen/xencomm.h> @@ -153,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void) xen_setup_vcpu_info_placement(); } +#ifdef ASM_SUPPORTED +static unsigned long __init_or_module +xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); +#endif +static void __init +xen_patch_branch(unsigned long tag, unsigned long type); + static const struct pv_init_ops xen_init_ops __initconst = { .banner = xen_banner, @@ -163,6 +171,53 @@ static const struct pv_init_ops xen_init_ops __initconst = { .arch_setup_nomca = xen_arch_setup_nomca, .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, +#ifdef ASM_SUPPORTED + .patch_bundle = xen_patch_bundle, +#endif + .patch_branch = xen_patch_branch, +}; + +/*************************************************************************** + * pv_fsys_data + * addresses for fsys + */ + +extern unsigned long xen_fsyscall_table[NR_syscalls]; +extern char xen_fsys_bubble_down[]; +struct pv_fsys_data xen_fsys_data __initdata = { + .fsyscall_table = (unsigned long *)xen_fsyscall_table, + .fsys_bubble_down = (void *)xen_fsys_bubble_down, +}; + +/*************************************************************************** + * pv_patchdata + * patchdata addresses + */ + +#define DECLARE(name) \ + extern unsigned long __xen_start_gate_##name##_patchlist[]; \ + extern unsigned long __xen_end_gate_##name##_patchlist[] + +DECLARE(fsyscall); +DECLARE(brl_fsys_bubble_down); +DECLARE(vtop); +DECLARE(mckinley_e9); + +extern unsigned long __xen_start_gate_section[]; + +#define ASSIGN(name) \ + .start_##name##_patchlist = \ + (unsigned long)__xen_start_gate_##name##_patchlist, \ + .end_##name##_patchlist = \ + (unsigned long)__xen_end_gate_##name##_patchlist + +static struct pv_patchdata xen_patchdata __initdata = { + ASSIGN(fsyscall), + ASSIGN(brl_fsys_bubble_down), + ASSIGN(vtop), + ASSIGN(mckinley_e9), + + .gate_section = (void*)__xen_start_gate_section, }; /*************************************************************************** @@ -170,6 +225,76 @@ static const struct pv_init_ops xen_init_ops __initconst = { * intrinsics hooks. */ +#ifndef ASM_SUPPORTED +static void +xen_set_itm_with_offset(unsigned long val) +{ + /* ia64_cpu_local_tick() calls this with interrupt enabled. */ + /* WARN_ON(!irqs_disabled()); */ + xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); +} + +static unsigned long +xen_get_itm_with_offset(void) +{ + /* unused at this moment */ + printk(KERN_DEBUG "%s is called.\n", __func__); + + WARN_ON(!irqs_disabled()); + return ia64_native_getreg(_IA64_REG_CR_ITM) + + XEN_MAPPEDREGS->itc_offset; +} + +/* ia64_set_itc() is only called by + * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). + * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant. 
+ */ +static void +xen_set_itc(unsigned long val) +{ + unsigned long mitc; + + WARN_ON(!irqs_disabled()); + mitc = ia64_native_getreg(_IA64_REG_AR_ITC); + XEN_MAPPEDREGS->itc_offset = val - mitc; + XEN_MAPPEDREGS->itc_last = val; +} + +static unsigned long +xen_get_itc(void) +{ + unsigned long res; + unsigned long itc_offset; + unsigned long itc_last; + unsigned long ret_itc_last; + + itc_offset = XEN_MAPPEDREGS->itc_offset; + do { + itc_last = XEN_MAPPEDREGS->itc_last; + res = ia64_native_getreg(_IA64_REG_AR_ITC); + res += itc_offset; + if (itc_last >= res) + res = itc_last + 1; + ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, + itc_last, res); + } while (unlikely(ret_itc_last != itc_last)); + return res; + +#if 0 + /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. + Should it be paravirtualized instead? */ + WARN_ON(!irqs_disabled()); + itc_offset = XEN_MAPPEDREGS->itc_offset; + itc_last = XEN_MAPPEDREGS->itc_last; + res = ia64_native_getreg(_IA64_REG_AR_ITC); + res += itc_offset; + if (itc_last >= res) + res = itc_last + 1; + XEN_MAPPEDREGS->itc_last = res; + return res; +#endif +} + static void xen_setreg(int regnum, unsigned long val) { switch (regnum) { @@ -181,11 +306,14 @@ static void xen_setreg(int regnum, unsigned long val) xen_set_eflag(val); break; #endif + case _IA64_REG_AR_ITC: + xen_set_itc(val); + break; case _IA64_REG_CR_TPR: xen_set_tpr(val); break; case _IA64_REG_CR_ITM: - xen_set_itm(val); + xen_set_itm_with_offset(val); break; case _IA64_REG_CR_EOI: xen_eoi(val); @@ -209,6 +337,12 @@ static unsigned long xen_getreg(int regnum) res = xen_get_eflag(); break; #endif + case _IA64_REG_AR_ITC: + res = xen_get_itc(); + break; + case _IA64_REG_CR_ITM: + res = xen_get_itm_with_offset(); + break; case _IA64_REG_CR_IVR: res = xen_get_ivr(); break; @@ -259,8 +393,417 @@ xen_intrin_local_irq_restore(unsigned long mask) else xen_rsm_i(); } +#else +#define __DEFINE_FUNC(name, code) \ + extern const char xen_ ## name ## _direct_start[]; \ + extern const char xen_ ## name ## _direct_end[]; \ + asm (".align 32\n" \ + ".proc xen_" #name "\n" \ + "xen_" #name ":\n" \ + "xen_" #name "_direct_start:\n" \ + code \ + "xen_" #name "_direct_end:\n" \ + "br.cond.sptk.many b6\n" \ + ".endp xen_" #name "\n") + +#define DEFINE_VOID_FUNC0(name, code) \ + extern void \ + xen_ ## name (void); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_VOID_FUNC1(name, code) \ + extern void \ + xen_ ## name (unsigned long arg); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_VOID_FUNC1_VOID(name, code) \ + extern void \ + xen_ ## name (void *arg); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_VOID_FUNC2(name, code) \ + extern void \ + xen_ ## name (unsigned long arg0, \ + unsigned long arg1); \ + __DEFINE_FUNC(name, code) -static const struct pv_cpu_ops xen_cpu_ops __initdata = { +#define DEFINE_FUNC0(name, code) \ + extern unsigned long \ + xen_ ## name (void); \ + __DEFINE_FUNC(name, code) + +#define DEFINE_FUNC1(name, type, code) \ + extern unsigned long \ + xen_ ## name (type arg); \ + __DEFINE_FUNC(name, code) + +#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) + +/* + * static void xen_set_itm_with_offset(unsigned long val) + * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); + */ +/* 2 bundles */ +DEFINE_VOID_FUNC1(set_itm_with_offset, + "mov r2 = " __stringify(XSI_BASE) " + " + __stringify(XSI_ITC_OFFSET_OFS) "\n" + ";;\n" + "ld8 r3 = [r2]\n" + ";;\n" + "sub r8 = r8, r3\n" + "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); + +/* + * static unsigned long 
xen_get_itm_with_offset(void) + * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; + */ +/* 2 bundles */ +DEFINE_FUNC0(get_itm_with_offset, + "mov r2 = " __stringify(XSI_BASE) " + " + __stringify(XSI_ITC_OFFSET_OFS) "\n" + ";;\n" + "ld8 r3 = [r2]\n" + "mov r8 = cr.itm\n" + ";;\n" + "add r8 = r8, r2\n"); + +/* + * static void xen_set_itc(unsigned long val) + * unsigned long mitc; + * + * WARN_ON(!irqs_disabled()); + * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); + * XEN_MAPPEDREGS->itc_offset = val - mitc; + * XEN_MAPPEDREGS->itc_last = val; + */ +/* 2 bundles */ +DEFINE_VOID_FUNC1(set_itc, + "mov r2 = " __stringify(XSI_BASE) " + " + __stringify(XSI_ITC_LAST_OFS) "\n" + "mov r3 = ar.itc\n" + ";;\n" + "sub r3 = r8, r3\n" + "st8 [r2] = r8, " + __stringify(XSI_ITC_LAST_OFS) " - " + __stringify(XSI_ITC_OFFSET_OFS) "\n" + ";;\n" + "st8 [r2] = r3\n"); + +/* + * static unsigned long xen_get_itc(void) + * unsigned long res; + * unsigned long itc_offset; + * unsigned long itc_last; + * unsigned long ret_itc_last; + * + * itc_offset = XEN_MAPPEDREGS->itc_offset; + * do { + * itc_last = XEN_MAPPEDREGS->itc_last; + * res = ia64_native_getreg(_IA64_REG_AR_ITC); + * res += itc_offset; + * if (itc_last >= res) + * res = itc_last + 1; + * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, + * itc_last, res); + * } while (unlikely(ret_itc_last != itc_last)); + * return res; + */ +/* 5 bundles */ +DEFINE_FUNC0(get_itc, + "mov r2 = " __stringify(XSI_BASE) " + " + __stringify(XSI_ITC_OFFSET_OFS) "\n" + ";;\n" + "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " + __stringify(XSI_ITC_OFFSET_OFS) "\n" + /* r9 = itc_offset */ + /* r2 = XSI_ITC_OFFSET */ + "888:\n" + "mov r8 = ar.itc\n" /* res = ar.itc */ + ";;\n" + "ld8 r3 = [r2]\n" /* r3 = itc_last */ + "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ + ";;\n" + "cmp.gtu p6, p0 = r3, r8\n" + ";;\n" + "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ + ";;\n" + "mov ar.ccv = r8\n" + ";;\n" + "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" + ";;\n" + "cmp.ne p6, p0 = r10, r3\n" + "(p6) hint @pause\n" + "(p6) br.cond.spnt 888b\n"); + +DEFINE_VOID_FUNC1_VOID(fc, + "break " __stringify(HYPERPRIVOP_FC) "\n"); + +/* + * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR + * masked_addr = *psr_i_addr_addr + * pending_intr_addr = masked_addr - 1 + * if (val & IA64_PSR_I) { + * masked = *masked_addr + * *masked_addr = 0:xen_set_virtual_psr_i(1) + * compiler barrier + * if (masked) { + * uint8_t pending = *pending_intr_addr; + * if (pending) + * XEN_HYPER_SSM_I + * } + * } else { + * *masked_addr = 1:xen_set_virtual_psr_i(0) + * } + */ +/* 6 bundles */ +DEFINE_VOID_FUNC1(intrin_local_irq_restore, + /* r8 = input value: 0 or IA64_PSR_I + * p6 = (flags & IA64_PSR_I) + * = if clause + * p7 = !(flags & IA64_PSR_I) + * = else clause + */ + "cmp.ne p6, p7 = r8, r0\n" + "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" + ";;\n" + /* r9 = XEN_PSR_I_ADDR */ + "ld8 r9 = [r9]\n" + ";;\n" + + /* r10 = masked previous value */ + "(p6) ld1.acq r10 = [r9]\n" + ";;\n" + + /* p8 = !masked interrupt masked previously? */ + "(p6) cmp.ne.unc p8, p0 = r10, r0\n" + + /* p7 = else clause */ + "(p7) mov r11 = 1\n" + ";;\n" + /* masked = 1 */ + "(p7) st1.rel [r9] = r11\n" + + /* p6 = if clause */ + /* masked = 0 + * r9 = masked_addr - 1 + * = pending_intr_addr + */ + "(p8) st1.rel [r9] = r0, -1\n" + ";;\n" + /* r8 = pending_intr */ + "(p8) ld1.acq r11 = [r9]\n" + ";;\n" + /* p9 = interrupt pending? 
*/ + "(p8) cmp.ne.unc p9, p10 = r11, r0\n" + ";;\n" + "(p10) mf\n" + /* issue hypercall to trigger interrupt */ + "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); + +DEFINE_VOID_FUNC2(ptcga, + "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); +DEFINE_VOID_FUNC2(set_rr, + "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); + +/* + * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; + * tmp = *tmp + * tmp = *tmp; + * psr_i = tmp? 0: IA64_PSR_I; + */ +/* 4 bundles */ +DEFINE_FUNC0(get_psr_i, + "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" + ";;\n" + "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ + "mov r8 = 0\n" /* psr_i = 0 */ + ";;\n" + "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ + ";;\n" + "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ + ";;\n" + "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); + +DEFINE_FUNC1(thash, unsigned long, + "break " __stringify(HYPERPRIVOP_THASH) "\n"); +DEFINE_FUNC1(get_cpuid, int, + "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); +DEFINE_FUNC1(get_pmd, int, + "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); +DEFINE_FUNC1(get_rr, unsigned long, + "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); + +/* + * void xen_privop_ssm_i(void) + * + * int masked = !xen_get_virtual_psr_i(); + * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) + * xen_set_virtual_psr_i(1) + * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 + * // compiler barrier + * if (masked) { + * uint8_t* pend_int_addr = + * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; + * uint8_t pending = *pend_int_addr; + * if (pending) + * XEN_HYPER_SSM_I + * } + */ +/* 4 bundles */ +DEFINE_VOID_FUNC0(ssm_i, + "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" + ";;\n" + "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ + ";;\n" + "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ + ";;\n" + "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt + * r8 = XEN_PSR_I_ADDR - 1 + * = pend_int_addr + */ + "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I + * previously interrupt + * masked? 
+ */ + ";;\n" + "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ + ";;\n" + "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ + ";;\n" + /* issue hypercall to get interrupt */ + "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" + ";;\n"); + +/* + * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr + * = XEN_PSR_I_ADDR_ADDR; + * psr_i_addr = *psr_i_addr_addr; + * *psr_i_addr = 1; + */ +/* 2 bundles */ +DEFINE_VOID_FUNC0(rsm_i, + "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" + /* r8 = XEN_PSR_I_ADDR */ + "mov r9 = 1\n" + ";;\n" + "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ + ";;\n" + "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ + +extern void +xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, + unsigned long val2, unsigned long val3, + unsigned long val4); +__DEFINE_FUNC(set_rr0_to_rr4, + "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); + + +extern unsigned long xen_getreg(int regnum); +#define __DEFINE_GET_REG(id, privop) \ + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ + ";;\n" \ + "cmp.eq p6, p0 = r2, r8\n" \ + ";;\n" \ + "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ + "(p6) br.cond.sptk.many b6\n" \ + ";;\n" + +__DEFINE_FUNC(getreg, + __DEFINE_GET_REG(PSR, PSR) +#ifdef CONFIG_IA32_SUPPORT + __DEFINE_GET_REG(AR_EFLAG, EFLAG) +#endif + + /* get_itc */ + "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" + ";;\n" + "cmp.eq p6, p0 = r2, r8\n" + ";;\n" + "(p6) br.cond.spnt xen_get_itc\n" + ";;\n" + + /* get itm */ + "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" + ";;\n" + "cmp.eq p6, p0 = r2, r8\n" + ";;\n" + "(p6) br.cond.spnt xen_get_itm_with_offset\n" + ";;\n" + + __DEFINE_GET_REG(CR_IVR, IVR) + __DEFINE_GET_REG(CR_TPR, TPR) + + /* fall back */ + "movl r2 = ia64_native_getreg_func\n" + ";;\n" + "mov b7 = r2\n" + ";;\n" + "br.cond.sptk.many b7\n"); + +extern void xen_setreg(int regnum, unsigned long val); +#define __DEFINE_SET_REG(id, privop) \ + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ + ";;\n" \ + "cmp.eq p6, p0 = r2, r9\n" \ + ";;\n" \ + "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \ + "(p6) br.cond.sptk.many b6\n" \ + ";;\n" + +__DEFINE_FUNC(setreg, + /* kr0 .. 
kr 7*/ + /* + * if (_IA64_REG_AR_KR0 <= regnum && + * regnum <= _IA64_REG_AR_KR7) { + * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 + * register __val asm ("r9") = val + * "break HYPERPRIVOP_SET_KR" + * } + */ + "mov r17 = r9\n" + "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" + ";;\n" + "cmp.ge p6, p0 = r9, r2\n" + "sub r17 = r17, r2\n" + ";;\n" + "(p6) cmp.ge.unc p7, p0 = " + __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) + ", r17\n" + ";;\n" + "(p7) mov r9 = r8\n" + ";;\n" + "(p7) mov r8 = r17\n" + "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" + + /* set itm */ + "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" + ";;\n" + "cmp.eq p6, p0 = r2, r8\n" + ";;\n" + "(p6) br.cond.spnt xen_set_itm_with_offset\n" + + /* set itc */ + "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" + ";;\n" + "cmp.eq p6, p0 = r2, r8\n" + ";;\n" + "(p6) br.cond.spnt xen_set_itc\n" + +#ifdef CONFIG_IA32_SUPPORT + __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG) +#endif + __DEFINE_SET_REG(CR_TPR, SET_TPR) + __DEFINE_SET_REG(CR_EOI, EOI) + + /* fall back */ + "movl r2 = ia64_native_setreg_func\n" + ";;\n" + "mov b7 = r2\n" + ";;\n" + "br.cond.sptk.many b7\n"); +#endif + +static const struct pv_cpu_ops xen_cpu_ops __initconst = { .fc = xen_fc, .thash = xen_thash, .get_cpuid = xen_get_cpuid, @@ -337,7 +880,7 @@ xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); } -static const struct pv_iosapic_ops xen_iosapic_ops __initconst = { +static struct pv_iosapic_ops xen_iosapic_ops __initdata = { .pcat_compat_init = xen_pcat_compat_init, .__get_irq_chip = xen_iosapic_get_irq_chip, @@ -355,6 +898,8 @@ xen_setup_pv_ops(void) xen_info_init(); pv_info = xen_info; pv_init_ops = xen_init_ops; + pv_fsys_data = xen_fsys_data; + pv_patchdata = xen_patchdata; pv_cpu_ops = xen_cpu_ops; pv_iosapic_ops = xen_iosapic_ops; pv_irq_ops = xen_irq_ops; @@ -362,3 +907,252 @@ xen_setup_pv_ops(void) paravirt_cpu_asm_init(&xen_cpu_asm_switch); } + +#ifdef ASM_SUPPORTED +/*************************************************************************** + * binary pacthing + * pv_init_ops.patch_bundle + */ + +#define DEFINE_FUNC_GETREG(name, privop) \ + DEFINE_FUNC0(get_ ## name, \ + "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") + +DEFINE_FUNC_GETREG(psr, PSR); +DEFINE_FUNC_GETREG(eflag, EFLAG); +DEFINE_FUNC_GETREG(ivr, IVR); +DEFINE_FUNC_GETREG(tpr, TPR); + +#define DEFINE_FUNC_SET_KR(n) \ + DEFINE_VOID_FUNC0(set_kr ## n, \ + ";;\n" \ + "mov r9 = r8\n" \ + "mov r8 = " #n "\n" \ + "break " __stringify(HYPERPRIVOP_SET_KR) "\n") + +DEFINE_FUNC_SET_KR(0); +DEFINE_FUNC_SET_KR(1); +DEFINE_FUNC_SET_KR(2); +DEFINE_FUNC_SET_KR(3); +DEFINE_FUNC_SET_KR(4); +DEFINE_FUNC_SET_KR(5); +DEFINE_FUNC_SET_KR(6); +DEFINE_FUNC_SET_KR(7); + +#define __DEFINE_FUNC_SETREG(name, privop) \ + DEFINE_VOID_FUNC0(name, \ + "break "__stringify(HYPERPRIVOP_ ## privop) "\n") + +#define DEFINE_FUNC_SETREG(name, privop) \ + __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) + +DEFINE_FUNC_SETREG(eflag, EFLAG); +DEFINE_FUNC_SETREG(tpr, TPR); +__DEFINE_FUNC_SETREG(eoi, EOI); + +extern const char xen_check_events[]; +extern const char __xen_intrin_local_irq_restore_direct_start[]; +extern const char __xen_intrin_local_irq_restore_direct_end[]; +extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; + +asm ( + ".align 32\n" + ".proc xen_check_events\n" + "xen_check_events:\n" + /* masked = 0 + * r9 = masked_addr - 1 + * = pending_intr_addr + */ + "st1.rel [r9] = r0, -1\n" + 
";;\n" + /* r8 = pending_intr */ + "ld1.acq r11 = [r9]\n" + ";;\n" + /* p9 = interrupt pending? */ + "cmp.ne p9, p10 = r11, r0\n" + ";;\n" + "(p10) mf\n" + /* issue hypercall to trigger interrupt */ + "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" + "br.cond.sptk.many b6\n" + ".endp xen_check_events\n" + "\n" + ".align 32\n" + ".proc __xen_intrin_local_irq_restore_direct\n" + "__xen_intrin_local_irq_restore_direct:\n" + "__xen_intrin_local_irq_restore_direct_start:\n" + "1:\n" + "{\n" + "cmp.ne p6, p7 = r8, r0\n" + "mov r17 = ip\n" /* get ip to calc return address */ + "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" + ";;\n" + "}\n" + "{\n" + /* r9 = XEN_PSR_I_ADDR */ + "ld8 r9 = [r9]\n" + ";;\n" + /* r10 = masked previous value */ + "(p6) ld1.acq r10 = [r9]\n" + "adds r17 = 1f - 1b, r17\n" /* calculate return address */ + ";;\n" + "}\n" + "{\n" + /* p8 = !masked interrupt masked previously? */ + "(p6) cmp.ne.unc p8, p0 = r10, r0\n" + "\n" + /* p7 = else clause */ + "(p7) mov r11 = 1\n" + ";;\n" + "(p8) mov b6 = r17\n" /* set return address */ + "}\n" + "{\n" + /* masked = 1 */ + "(p7) st1.rel [r9] = r11\n" + "\n" + "[99:]\n" + "(p8) brl.cond.dptk.few xen_check_events\n" + "}\n" + /* pv calling stub is 5 bundles. fill nop to adjust return address */ + "{\n" + "nop 0\n" + "nop 0\n" + "nop 0\n" + "}\n" + "1:\n" + "__xen_intrin_local_irq_restore_direct_end:\n" + ".endp __xen_intrin_local_irq_restore_direct\n" + "\n" + ".align 8\n" + "__xen_intrin_local_irq_restore_direct_reloc:\n" + "data8 99b\n" +); + +static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] +__initdata_or_module = +{ +#define XEN_PATCH_BUNDLE_ELEM(name, type) \ + { \ + (void*)xen_ ## name ## _direct_start, \ + (void*)xen_ ## name ## _direct_end, \ + PARAVIRT_PATCH_TYPE_ ## type, \ + } + + XEN_PATCH_BUNDLE_ELEM(fc, FC), + XEN_PATCH_BUNDLE_ELEM(thash, THASH), + XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), + XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), + XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), + XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), + XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), + XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), + XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), + XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), + XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), + { + (void*)__xen_intrin_local_irq_restore_direct_start, + (void*)__xen_intrin_local_irq_restore_direct_end, + PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, + }, + +#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ + { \ + xen_get_ ## name ## _direct_start, \ + xen_get_ ## name ## _direct_end, \ + PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ + } + + XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), + XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), + + XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), + XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), + + XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), + XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), + + +#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ + { \ + xen_ ## name ## _direct_start, \ + xen_ ## name ## _direct_end, \ + PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ + } + +#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ + __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) + + XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), + XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), + XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), + XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), + XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), + XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), + XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), + XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, 
AR_KR7), + + XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), + XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR), + __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), + + XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), + XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), +}; + +static unsigned long __init_or_module +xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) +{ + const unsigned long nelems = sizeof(xen_patch_bundle_elems) / + sizeof(xen_patch_bundle_elems[0]); + unsigned long used; + const struct paravirt_patch_bundle_elem *found; + + used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, + xen_patch_bundle_elems, nelems, + &found); + + if (found == NULL) + /* fallback */ + return ia64_native_patch_bundle(sbundle, ebundle, type); + if (used == 0) + return used; + + /* relocation */ + switch (type) { + case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { + unsigned long reloc = + __xen_intrin_local_irq_restore_direct_reloc; + unsigned long reloc_offset = reloc - (unsigned long) + __xen_intrin_local_irq_restore_direct_start; + unsigned long tag = (unsigned long)sbundle + reloc_offset; + paravirt_patch_reloc_brl(tag, xen_check_events); + break; + } + default: + /* nothing */ + break; + } + return used; +} +#endif /* ASM_SUPPOTED */ + +const struct paravirt_patch_branch_target xen_branch_target[] +__initconst = { +#define PARAVIRT_BR_TARGET(name, type) \ + { \ + &xen_ ## name, \ + PARAVIRT_PATCH_TYPE_BR_ ## type, \ + } + PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), + PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), + PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), + PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), +}; + +static void __init +xen_patch_branch(unsigned long tag, unsigned long type) +{ + const unsigned long nelem = + sizeof(xen_branch_target) / sizeof(xen_branch_target[0]); + __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem); +} diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index 6ea017727cce..cada3ba4b990 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -230,7 +230,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "MFT2", }; diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c index ac4fb912649d..cb9bf820fe53 100644 --- a/arch/mips/cobalt/irq.c +++ b/arch/mips/cobalt/irq.c @@ -47,7 +47,6 @@ asmlinkage void plat_irq_dispatch(void) static struct irqaction cascade = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c index 2bbc41a1623c..43828ae796ec 100644 --- a/arch/mips/emma/markeins/irq.c +++ b/arch/mips/emma/markeins/irq.c @@ -186,7 +186,6 @@ void emma2rh_gpio_irq_init(void) static struct irqaction irq_cascade = { .handler = no_action, .flags = 0, - .mask = CPU_MASK_NONE, .name = "cascade", .dev_id = NULL, .next = NULL, diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h index d8ff4cd89ab5..1784fde2e28f 100644 --- a/arch/mips/include/asm/mach-bcm47xx/gpio.h +++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h @@ -31,24 +31,28 @@ static inline void gpio_set_value(unsigned gpio, int value) static inline int gpio_direction_input(unsigned gpio) { - return ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 0); + ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 0); + return 0; } static inline int gpio_direction_output(unsigned gpio, int value) { - 
return ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 1 << gpio); + ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 1 << gpio); + return 0; } -static int gpio_intmask(unsigned gpio, int value) +static inline int gpio_intmask(unsigned gpio, int value) { - return ssb_gpio_intmask(&ssb_bcm47xx, 1 << gpio, - value ? 1 << gpio : 0); + ssb_gpio_intmask(&ssb_bcm47xx, 1 << gpio, + value ? 1 << gpio : 0); + return 0; } -static int gpio_polarity(unsigned gpio, int value) +static inline int gpio_polarity(unsigned gpio, int value) { - return ssb_gpio_polarity(&ssb_bcm47xx, 1 << gpio, - value ? 1 << gpio : 0); + ssb_gpio_polarity(&ssb_bcm47xx, 1 << gpio, + value ? 1 << gpio : 0); + return 0; } diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h index 55d481569a1f..07547231e078 100644 --- a/arch/mips/include/asm/mach-ip27/topology.h +++ b/arch/mips/include/asm/mach-ip27/topology.h @@ -26,7 +26,6 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS]; #define parent_node(node) (node) #define node_to_cpumask(node) (hub_data(node)->h_cpus) #define cpumask_of_node(node) (&hub_data(node)->h_cpus) -#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) struct pci_bus; extern int pcibus_to_node(struct pci_bus *); diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c index 03965cb1b252..d9b6a5b5399d 100644 --- a/arch/mips/jazz/irq.c +++ b/arch/mips/jazz/irq.c @@ -134,7 +134,6 @@ static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id) static struct irqaction r4030_timer_irqaction = { .handler = r4030_timer_interrupt, .flags = IRQF_DISABLED, - .mask = CPU_MASK_CPU0, .name = "R4030 timer", }; diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index b820661678b0..a5182a207696 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -144,7 +144,6 @@ void __cpuinit sb1480_clockevent_init(void) action->handler = sibyte_counter_handler; action->flags = IRQF_DISABLED | IRQF_PERCPU; - action->mask = cpumask_of_cpu(cpu); action->name = name; action->dev_id = cd; diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index a2eebaafda52..340f53e5c6b1 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c @@ -143,7 +143,6 @@ void __cpuinit sb1250_clockevent_init(void) action->handler = sibyte_counter_handler; action->flags = IRQF_DISABLED | IRQF_PERCPU; - action->mask = cpumask_of_cpu(cpu); action->name = name; action->dev_id = cd; diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index f4d187825f96..689719e34f08 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -98,7 +98,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_NOBALANCING, - .mask = CPU_MASK_NONE, .name = "timer" }; @@ -121,7 +120,6 @@ void __init setup_pit_timer(void) cd->min_delta_ns = clockevent_delta2ns(0xF, cd); clockevents_register_device(cd); - irq0.mask = cpumask_of_cpu(cpu); setup_irq(0, &irq0); } diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 413bd1d37f54..01c0885a8061 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -306,7 +306,6 @@ static void init_8259A(int auto_eoi) */ static struct irqaction irq2 = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c index d1ac7a25c856..1353fb135ed3 100644 --- 
a/arch/mips/lasat/interrupt.c +++ b/arch/mips/lasat/interrupt.c @@ -104,7 +104,6 @@ asmlinkage void plat_irq_dispatch(void) static struct irqaction cascade = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/mips/lemote/lm2e/irq.c b/arch/mips/lemote/lm2e/irq.c index 3e0b7beb1009..1d0a09f3b832 100644 --- a/arch/mips/lemote/lm2e/irq.c +++ b/arch/mips/lemote/lm2e/irq.c @@ -92,7 +92,6 @@ asmlinkage void plat_irq_dispatch(void) static struct irqaction cascade_irqaction = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c index 060d28dca8a8..4481656d1065 100644 --- a/arch/mips/mm/highmem.c +++ b/arch/mips/mm/highmem.c @@ -42,6 +42,7 @@ void *__kmap_atomic(struct page *page, enum km_type type) if (!PageHighMem(page)) return page_address(page); + debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); #ifdef CONFIG_DEBUG_HIGHMEM @@ -88,6 +89,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) pagefault_disable(); + debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c index a1f21d9421e8..6c5a630566f9 100644 --- a/arch/mips/sgi-ip27/ip27-nmi.c +++ b/arch/mips/sgi-ip27/ip27-nmi.c @@ -219,7 +219,7 @@ cont_nmi_dump(void) if (i == 1000) { for_each_online_node(node) if (NODEPDA(node)->dump_count == 0) { - cpu = node_to_first_cpu(node); + cpu = cpumask_first(cpumask_of_node(node)); for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) { CPUMASK_SETB(nmied_cpus, cpu); /* diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index 0aefc5319a03..83a0b3c359da 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c @@ -115,14 +115,12 @@ extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id); struct irqaction memerr_irq = { .handler = crime_memerr_intr, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "CRIME memory error", }; struct irqaction cpuerr_irq = { .handler = crime_cpuerr_intr, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "CRIME CPU error", }; diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index b4352a0c8151..5e687819cbc2 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c @@ -359,7 +359,8 @@ void sni_rm200_init_8259A(void) * IRQ2 is cascade interrupt to second interrupt controller */ static struct irqaction sni_rm200_irq2 = { - no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL + .handler = no_action, + .name = "cascade", }; static struct resource sni_rm200_pic1_resource = { diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c index 92dd1a0ca352..9cc389109b19 100644 --- a/arch/mips/vr41xx/common/irq.c +++ b/arch/mips/vr41xx/common/irq.c @@ -32,7 +32,6 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned; static struct irqaction cascade_irqaction = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index e4606586f94c..395caf01b909 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c @@ -37,7 +37,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id); static struct irqaction timer_irq = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER, - .mask = CPU_MASK_NONE, .name = 
"timer", }; diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 9d46c43a4152..e75cae6072c5 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -216,17 +216,14 @@ void __init start_cpu_itimer(void) per_cpu(cpu_data, cpu).it_value = next_tick; } -struct platform_device rtc_parisc_dev = { +static struct platform_device rtc_parisc_dev = { .name = "rtc-parisc", .id = -1, }; static int __init rtc_init(void) { - int ret; - - ret = platform_device_register(&rtc_parisc_dev); - if (ret < 0) + if (platform_device_register(&rtc_parisc_dev) < 0) printk(KERN_ERR "unable to register rtc device...\n"); /* not necessarily an error */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index ad6b1c084fe3..45192dce65c4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -228,6 +228,9 @@ config PPC_OF_PLATFORM_PCI depends on PPC64 # not supported on 32 bits yet default n +config ARCH_SUPPORTS_DEBUG_PAGEALLOC + def_bool y + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 22091bbfdc9b..6aa0b5e087cd 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -30,6 +30,7 @@ config DEBUG_STACK_USAGE config DEBUG_PAGEALLOC bool "Debug page memory allocations" depends on DEBUG_KERNEL && !HIBERNATION + depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC help Unmap pages from the kernel linear mapping after free_pages(). This results in a large slowdown, but helps to find certain types diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts index dea30910c136..4319bd70a580 100644 --- a/arch/powerpc/boot/dts/mpc832x_rdb.dts +++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts @@ -152,10 +152,21 @@ }; par_io@1400 { + #address-cells = <1>; + #size-cells = <1>; reg = <0x1400 0x100>; + ranges = <3 0x1448 0x18>; + compatible = "fsl,mpc8323-qe-pario"; device_type = "par_io"; num-ports = <7>; + qe_pio_d: gpio-controller@1448 { + #gpio-cells = <2>; + compatible = "fsl,mpc8323-qe-pario-bank"; + reg = <3 0x18>; + gpio-controller; + }; + ucc2pio:ucc_pin@02 { pio-map = < /* port pin dir open_drain assignment has_irq */ @@ -225,12 +236,25 @@ }; spi@4c0 { + #address-cells = <1>; + #size-cells = <0>; cell-index = <0>; compatible = "fsl,spi"; reg = <0x4c0 0x40>; interrupts = <2>; interrupt-parent = <&qeic>; + gpios = <&qe_pio_d 13 0>; mode = "cpu-qe"; + + mmc-slot@0 { + compatible = "fsl,mpc8323rdb-mmc-slot", + "mmc-spi-slot"; + reg = <0>; + gpios = <&qe_pio_d 14 1 + &qe_pio_d 15 0>; + voltage-ranges = <3300 3300>; + spi-max-frequency = <50000000>; + }; }; spi@500 { diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 545028f86488..684a73f4324f 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/highmem.h> #include <asm/kmap_types.h> #include <asm/tlbflush.h> #include <asm/page.h> @@ -94,6 +95,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro if (!PageHighMem(page)) return page_address(page); + debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); #ifdef CONFIG_DEBUG_HIGHMEM diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 3548159a1beb..ba17d5d90a49 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -114,6 +114,10 @@ extern int 
pci_domain_nr(struct pci_bus *bus); /* Decide whether to display the domain number in /proc */ extern int pci_proc_domain(struct pci_bus *bus); +/* MSI arch hooks */ +#define arch_setup_msi_irqs arch_setup_msi_irqs +#define arch_teardown_msi_irqs arch_teardown_msi_irqs +#define arch_msi_check_device arch_msi_check_device struct vm_area_struct; /* Map a range of PCI memory or I/O space for a device into user space */ diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h index cbf2c9404c37..c6efc3466aa6 100644 --- a/arch/powerpc/include/asm/suspend.h +++ b/arch/powerpc/include/asm/suspend.h @@ -3,7 +3,4 @@ static inline int arch_prepare_suspend(void) { return 0; } -void save_processor_state(void); -void restore_processor_state(void); - #endif /* __ASM_POWERPC_SUSPEND_H */ diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 375258559ae6..054a16d68082 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -24,11 +24,6 @@ static inline cpumask_t node_to_cpumask(int node) #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) -static inline int node_to_first_cpu(int node) -{ - return cpumask_first(cpumask_of_node(node)); -} - int of_node_to_nid(struct device_node *device); struct pci_bus; diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c index 3bb7d3dd28be..8bbc12d20f5c 100644 --- a/arch/powerpc/kernel/msi.c +++ b/arch/powerpc/kernel/msi.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/msi.h> +#include <linux/pci.h> #include <asm/machdep.h> @@ -19,6 +20,10 @@ int arch_msi_check_device(struct pci_dev* dev, int nvec, int type) return -ENOSYS; } + /* PowerPC doesn't support multiple MSI yet */ + if (type == PCI_CAP_ID_MSI && nvec > 1) + return 1; + if (ppc_md.msi_check_device) { pr_debug("msi: Using platform check routine.\n"); return ppc_md.msi_check_device(dev, nvec, type); diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 149cb112cd1a..13011a96a977 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -669,7 +669,6 @@ static void remove_flash_pde(struct proc_dir_entry *dp) { if (dp) { kfree(dp->data); - dp->owner = NULL; remove_proc_entry(dp->name, dp->parent); } } diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index 2a1295f19832..567ded7c3b9b 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -20,6 +20,7 @@ #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <linux/of_platform.h> +#include <linux/fsl_devices.h> #include <asm/time.h> #include <asm/ipic.h> @@ -39,16 +40,116 @@ #endif #ifdef CONFIG_QUICC_ENGINE -static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity) +static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, + struct spi_board_info *board_infos, + unsigned int num_board_infos, + void (*cs_control)(struct spi_device *dev, + bool on)) { - pr_debug("%s %d %d\n", __func__, cs, polarity); - par_io_data_set(3, 13, polarity); + struct device_node *np; + unsigned int i = 0; + + for_each_compatible_node(np, type, compatible) { + int ret; + unsigned int j; + const void *prop; + struct resource res[2]; + struct platform_device *pdev; + struct fsl_spi_platform_data pdata = { + .cs_control = cs_control, + }; + + memset(res, 0, sizeof(res)); + + pdata.sysclk = sysclk; + + prop = of_get_property(np, "reg", NULL); + if (!prop) + goto err; + 
pdata.bus_num = *(u32 *)prop; + + prop = of_get_property(np, "cell-index", NULL); + if (prop) + i = *(u32 *)prop; + + prop = of_get_property(np, "mode", NULL); + if (prop && !strcmp(prop, "cpu-qe")) + pdata.qe_mode = 1; + + for (j = 0; j < num_board_infos; j++) { + if (board_infos[j].bus_num == pdata.bus_num) + pdata.max_chipselect++; + } + + if (!pdata.max_chipselect) + continue; + + ret = of_address_to_resource(np, 0, &res[0]); + if (ret) + goto err; + + ret = of_irq_to_resource(np, 0, &res[1]); + if (ret == NO_IRQ) + goto err; + + pdev = platform_device_alloc("mpc83xx_spi", i); + if (!pdev) + goto err; + + ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); + if (ret) + goto unreg; + + ret = platform_device_add_resources(pdev, res, + ARRAY_SIZE(res)); + if (ret) + goto unreg; + + ret = platform_device_add(pdev); + if (ret) + goto unreg; + + goto next; +unreg: + platform_device_del(pdev); +err: + pr_err("%s: registration failed\n", np->full_name); +next: + i++; + } + + return i; } -static void mpc83xx_spi_deactivate_cs(u8 cs, u8 polarity) +static int __init fsl_spi_init(struct spi_board_info *board_infos, + unsigned int num_board_infos, + void (*cs_control)(struct spi_device *spi, + bool on)) { - pr_debug("%s %d %d\n", __func__, cs, polarity); - par_io_data_set(3, 13, !polarity); + u32 sysclk = -1; + int ret; + + /* SPI controller is either clocked from QE or SoC clock */ + sysclk = get_brgfreq(); + if (sysclk == -1) { + sysclk = fsl_get_sys_freq(); + if (sysclk == -1) + return -ENODEV; + } + + ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos, + num_board_infos, cs_control); + if (!ret) + of_fsl_spi_probe("spi", "fsl_spi", sysclk, board_infos, + num_board_infos, cs_control); + + return spi_register_board_info(board_infos, num_board_infos); +} + +static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) +{ + pr_debug("%s %d %d\n", __func__, spi->chip_select, on); + par_io_data_set(3, 13, on); } static struct mmc_spi_platform_data mpc832x_mmc_pdata = { @@ -74,9 +175,13 @@ static int __init mpc832x_spi_init(void) par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */ par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */ - return fsl_spi_init(&mpc832x_spi_boardinfo, 1, - mpc83xx_spi_activate_cs, - mpc83xx_spi_deactivate_cs); + /* + * Don't bother with legacy stuff when device tree contains + * mmc-spi-slot node. 
+ */ + if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot")) + return 0; + return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); #endif /* CONFIG_QUICC_ENGINE */ diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 0a9e49104bdc..458d91fba91d 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -179,7 +179,6 @@ static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id) static struct irqaction mpc85xxcds_8259_irqaction = { .handler = mpc85xx_8259_cascade_action, .flags = IRQF_SHARED, - .mask = CPU_MASK_NONE, .name = "8259 cascade", }; #endif /* PPC_I8259 */ diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 0d9f75c74f8c..385acfc48397 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -44,7 +44,6 @@ static irqreturn_t timebase_interrupt(int irq, void *dev) static struct irqaction tbint_irqaction = { .handler = timebase_interrupt, - .mask = CPU_MASK_NONE, .name = "tbint", }; diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 272d79a8d289..cd4ad9aea760 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -472,7 +472,6 @@ static void __init chrp_find_openpic(void) #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_XMON) static struct irqaction xmon_irqaction = { .handler = xmon_irq, - .mask = CPU_MASK_NONE, .name = "XMON break", }; #endif diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 6d149ae8ffa7..7039d8f1d3ba 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -266,7 +266,6 @@ static unsigned int pmac_pic_get_irq(void) static struct irqaction xmon_action = { .handler = xmon_irq, .flags = 0, - .mask = CPU_MASK_NONE, .name = "NMI - XMON" }; #endif @@ -274,7 +273,6 @@ static struct irqaction xmon_action = { static struct irqaction gatwick_cascade_action = { .handler = gatwick_action, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index bd8817b00fa4..cf1dbe758890 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -385,7 +385,6 @@ static void __init psurge_dual_sync_tb(int cpu_nr) static struct irqaction psurge_irqaction = { .handler = psurge_primary_intr, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "primary IPI", }; diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 490473ce8103..82424cd7e128 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -119,7 +119,6 @@ static irqreturn_t cpm_error_interrupt(int irq, void *dev) static struct irqaction cpm_error_irqaction = { .handler = cpm_error_interrupt, - .mask = CPU_MASK_NONE, .name = "error", }; diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index a01c89d3f9bd..afe8dbc964aa 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -417,115 +417,6 @@ err: arch_initcall(fsl_usb_of_init); -static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, - struct spi_board_info *board_infos, - unsigned int num_board_infos, - void (*activate_cs)(u8 cs, u8 polarity), - void 
(*deactivate_cs)(u8 cs, u8 polarity)) -{ - struct device_node *np; - unsigned int i = 0; - - for_each_compatible_node(np, type, compatible) { - int ret; - unsigned int j; - const void *prop; - struct resource res[2]; - struct platform_device *pdev; - struct fsl_spi_platform_data pdata = { - .activate_cs = activate_cs, - .deactivate_cs = deactivate_cs, - }; - - memset(res, 0, sizeof(res)); - - pdata.sysclk = sysclk; - - prop = of_get_property(np, "reg", NULL); - if (!prop) - goto err; - pdata.bus_num = *(u32 *)prop; - - prop = of_get_property(np, "cell-index", NULL); - if (prop) - i = *(u32 *)prop; - - prop = of_get_property(np, "mode", NULL); - if (prop && !strcmp(prop, "cpu-qe")) - pdata.qe_mode = 1; - - for (j = 0; j < num_board_infos; j++) { - if (board_infos[j].bus_num == pdata.bus_num) - pdata.max_chipselect++; - } - - if (!pdata.max_chipselect) - continue; - - ret = of_address_to_resource(np, 0, &res[0]); - if (ret) - goto err; - - ret = of_irq_to_resource(np, 0, &res[1]); - if (ret == NO_IRQ) - goto err; - - pdev = platform_device_alloc("mpc83xx_spi", i); - if (!pdev) - goto err; - - ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (ret) - goto unreg; - - ret = platform_device_add_resources(pdev, res, - ARRAY_SIZE(res)); - if (ret) - goto unreg; - - ret = platform_device_add(pdev); - if (ret) - goto unreg; - - goto next; -unreg: - platform_device_del(pdev); -err: - pr_err("%s: registration failed\n", np->full_name); -next: - i++; - } - - return i; -} - -int __init fsl_spi_init(struct spi_board_info *board_infos, - unsigned int num_board_infos, - void (*activate_cs)(u8 cs, u8 polarity), - void (*deactivate_cs)(u8 cs, u8 polarity)) -{ - u32 sysclk = -1; - int ret; - -#ifdef CONFIG_QUICC_ENGINE - /* SPI controller is either clocked from QE or SoC clock */ - sysclk = get_brgfreq(); -#endif - if (sysclk == -1) { - sysclk = fsl_get_sys_freq(); - if (sysclk == -1) - return -ENODEV; - } - - ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos, - num_board_infos, activate_cs, deactivate_cs); - if (!ret) - of_fsl_spi_probe("spi", "fsl_spi", sysclk, board_infos, - num_board_infos, activate_cs, deactivate_cs); - - return spi_register_board_info(board_infos, num_board_infos); -} - #if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx) static __be32 __iomem *rstcr; diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h index 9c744e4285a0..42381bb6cd51 100644 --- a/arch/powerpc/sysdev/fsl_soc.h +++ b/arch/powerpc/sysdev/fsl_soc.h @@ -4,6 +4,8 @@ #include <asm/mmu.h> +struct spi_device; + extern phys_addr_t get_immrbase(void); #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) extern u32 get_brgfreq(void); @@ -17,11 +19,6 @@ extern u32 fsl_get_sys_freq(void); struct spi_board_info; struct device_node; -extern int fsl_spi_init(struct spi_board_info *board_infos, - unsigned int num_board_infos, - void (*activate_cs)(u8 cs, u8 polarity), - void (*deactivate_cs)(u8 cs, u8 polarity)); - extern void fsl_rstcr_restart(char *cmd); #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2a8af5e16345..dcb667c4375a 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -72,6 +72,9 @@ config PGSTE config VIRT_CPU_ACCOUNTING def_bool y +config ARCH_SUPPORTS_DEBUG_PAGEALLOC + def_bool y + mainmenu "Linux Kernel Configuration" config S390 diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 4599fa06bd82..7e297a3cde34 100644 --- a/arch/s390/Kconfig.debug 
+++ b/arch/s390/Kconfig.debug @@ -9,6 +9,7 @@ source "lib/Kconfig.debug" config DEBUG_PAGEALLOC bool "Debug page memory allocations" depends on DEBUG_KERNEL + depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC help Unmap pages from the kernel linear mapping after free_pages(). This results in a slowdown, but helps to find certain types of diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index b1e892a43816..704dd396257b 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -12,6 +12,8 @@ #include <linux/types.h> #include <linux/errno.h> +#include <linux/gfp.h> +#include <linux/slab.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <asm/ebcdic.h> diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 6dccb071aec3..619bf94b11f1 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -456,6 +456,8 @@ struct ciw { #define CIO_OPER 0x0004 /* Sick revalidation of device. */ #define CIO_REVALIDATE 0x0008 +/* Device did not respond in time. */ +#define CIO_BOXED 0x0010 /** * struct ccw_dev_id - unique identifier for ccw devices diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 066f0fba590e..a3f239545897 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h @@ -33,7 +33,6 @@ #define node_to_cpumask(node) ((void)node, cpu_online_map) #define cpumask_of_node(node) ((void)node, cpu_online_mask) -#define node_to_first_cpu(node) ((void)(node),0) #define pcibus_to_node(bus) ((void)(bus), -1) #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c index 59d2a03e8b3c..988c77c37231 100644 --- a/arch/sh/kernel/time_64.c +++ b/arch/sh/kernel/time_64.c @@ -284,7 +284,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, .name = "timer", }; diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c index c127293271e1..9aa348658ae3 100644 --- a/arch/sh/kernel/timers/timer-cmt.c +++ b/arch/sh/kernel/timers/timer-cmt.c @@ -109,7 +109,6 @@ static struct irqaction cmt_irq = { .name = "timer", .handler = cmt_timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, - .mask = CPU_MASK_NONE, }; static void cmt_clk_init(struct clk *clk) diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c index 9a77ae86b403..9b0ef0126479 100644 --- a/arch/sh/kernel/timers/timer-mtu2.c +++ b/arch/sh/kernel/timers/timer-mtu2.c @@ -115,7 +115,6 @@ static struct irqaction mtu2_irq = { .name = "timer", .handler = mtu2_timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, - .mask = CPU_MASK_NONE, }; static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 }; diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 10b5a6f17cc0..c5d3396f5960 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -162,7 +162,6 @@ static struct irqaction tmu0_irq = { .name = "periodic/oneshot timer", .handler = tmu_timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, - .mask = CPU_MASK_NONE, }; static void __init tmu_clk_init(struct clk *clk) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index c3ea215334f6..cc12cd48bbc5 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -124,6 +124,9 @@ config ARCH_NO_VIRT_TO_BUS config OF def_bool y +config 
ARCH_SUPPORTS_DEBUG_PAGEALLOC + def_bool y if SPARC64 + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug index b8a15e271bfa..d001b42041a5 100644 --- a/arch/sparc/Kconfig.debug +++ b/arch/sparc/Kconfig.debug @@ -24,7 +24,8 @@ config STACK_DEBUG config DEBUG_PAGEALLOC bool "Debug page memory allocations" - depends on SPARC64 && DEBUG_KERNEL && !HIBERNATION + depends on DEBUG_KERNEL && !HIBERNATION + depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC help Unmap pages from the kernel linear mapping after free_pages(). This results in a large slowdown, but helps to find certain types diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 5693ab482606..666a73fef28d 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -121,8 +121,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str * local TLB. */ cpu = smp_processor_id(); - if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { - cpu_set(cpu, mm->cpu_vm_mask); + if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { + cpumask_set_cpu(cpu, mm_cpumask(mm)); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); } @@ -141,8 +141,8 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm if (!CTX_VALID(mm->context)) get_new_mmu_context(mm); cpu = smp_processor_id(); - if (!cpu_isset(cpu, mm->cpu_vm_mask)) - cpu_set(cpu, mm->cpu_vm_mask); + if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) + cpumask_set_cpu(cpu, mm_cpumask(mm)); load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h index 57224dd37b3a..becb6bf353a9 100644 --- a/arch/sparc/include/asm/smp_64.h +++ b/arch/sparc/include/asm/smp_64.h @@ -35,7 +35,8 @@ extern cpumask_t cpu_core_map[NR_CPUS]; extern int sparc64_multi_core; extern void arch_send_call_function_single_ipi(int cpu); -extern void arch_send_call_function_ipi(cpumask_t mask); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask /* * General functions that each host system must provide. 
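The sparc hunks above and the ones that follow convert direct manipulation of mm->cpu_vm_mask and the global cpu_possible_map/cpu_present_map bitmaps to the accessor-based cpumask API. Below is a minimal sketch of the pattern being adopted, not tied to any one architecture (the helper names are illustrative; the accessors themselves are the ones the hunks use: mm_cpumask(), cpumask_test_cpu(), cpumask_set_cpu(), set_cpu_possible(), set_cpu_present()):

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>

/*
 * The mm's CPU mask is only reached through mm_cpumask(); per-CPU bits are
 * tested and set with cpumask_test_cpu()/cpumask_set_cpu() rather than the
 * old cpu_isset()/cpu_set() on mm->cpu_vm_mask.  The caller is assumed to
 * have preemption disabled, as in the switch_mm() paths above.
 */
static void note_mm_used_on_this_cpu(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_set_cpu(cpu, mm_cpumask(mm));
}

/* Registering a discovered CPU now goes through set_cpu_*() as well. */
static void __init mark_cpu_found(int cpuid)
{
	set_cpu_possible(cpuid, true);
	set_cpu_present(cpuid, true);
}

The same motivation drives the NR_CPUS to nr_cpu_ids conversions elsewhere in the patch: loops and bounds checks stay within the number of CPU ids the kernel actually set up at boot rather than the compile-time NR_CPUS maximum.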
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h index 79c1ae2b42a3..751c8c17f5a0 100644 --- a/arch/sparc/include/asm/system_32.h +++ b/arch/sparc/include/asm/system_32.h @@ -126,7 +126,7 @@ extern void flushw_all(void); #define switch_to(prev, next, last) do { \ SWITCH_ENTER(prev); \ SWITCH_DO_LAZY_FPU(next); \ - cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \ + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \ __asm__ __volatile__( \ "sethi %%hi(here - 0x8), %%o7\n\t" \ "mov %%g6, %%g3\n\t" \ diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index 5bc0b8fd6374..e5ea8d332421 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h @@ -28,11 +28,6 @@ static inline cpumask_t node_to_cpumask(int node) #define node_to_cpumask_ptr_next(v, node) \ v = &(numa_cpumask_lookup_table[node]) -static inline int node_to_first_cpu(int node) -{ - return cpumask_first(cpumask_of_node(node)); -} - struct pci_bus; #ifdef CONFIG_PCI extern int pcibus_to_node(struct pci_bus *pbus); @@ -43,13 +38,9 @@ static inline int pcibus_to_node(struct pci_bus *pbus) } #endif -#define pcibus_to_cpumask(bus) \ - (pcibus_to_node(bus) == -1 ? \ - CPU_MASK_ALL : \ - node_to_cpumask(pcibus_to_node(bus))) #define cpumask_of_pcibus(bus) \ (pcibus_to_node(bus) == -1 ? \ - CPU_MASK_ALL_PTR : \ + cpu_all_mask : \ cpumask_of_node(pcibus_to_node(bus))) #define SD_NODE_INIT (struct sched_domain) { \ @@ -89,7 +80,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus) #define smt_capable() (sparc64_multi_core) #endif /* CONFIG_SMP */ -#define cpu_coregroup_map(cpu) (cpu_core_map[cpu]) #define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu]) #endif /* _ASM_SPARC64_TOPOLOGY_H */ diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 57c39843fb2a..90350f838f05 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -653,7 +653,7 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp, if (cpu_list[i] == CPU_SENTINEL) continue; - if (cpu_list[i] < NR_CPUS) + if (cpu_list[i] < nr_cpu_ids) cpu_set(cpu_list[i], mask); } diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index 44dd5ee64339..ad800b80c718 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -439,7 +439,6 @@ static int request_fast_irq(unsigned int irq, flush_cache_all(); action->flags = irqflags; - cpus_clear(action->mask); action->name = devname; action->dev_id = NULL; action->next = NULL; @@ -574,7 +573,6 @@ int request_irq(unsigned int irq, action->handler = handler; action->flags = irqflags; - cpus_clear(action->mask); action->name = devname; action->next = NULL; action->dev_id = dev_id; diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index d0d6a515499a..5deabe921a47 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -266,12 +266,12 @@ static int irq_choose_cpu(unsigned int virt_irq) spin_lock_irqsave(&irq_rover_lock, flags); while (!cpu_online(irq_rover)) { - if (++irq_rover >= NR_CPUS) + if (++irq_rover >= nr_cpu_ids) irq_rover = 0; } cpuid = irq_rover; do { - if (++irq_rover >= NR_CPUS) + if (++irq_rover >= nr_cpu_ids) irq_rover = 0; } while (!cpu_online(irq_rover)); diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c index adaaed4ea2fb..00d034ea2164 100644 --- a/arch/sparc/kernel/led.c +++ b/arch/sparc/kernel/led.c @@ -126,7 +126,6 @@ static int __init led_init(void) led = proc_create("led", 0, NULL, 
&led_proc_fops); if (!led) return -ENOMEM; - led->owner = THIS_MODULE; printk(KERN_INFO "led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n", diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 3f79f0c23a08..f0e6ed23a468 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -567,7 +567,7 @@ static void __init report_platform_properties(void) max_cpu = NR_CPUS; } for (i = 0; i < max_cpu; i++) - cpu_set(i, cpu_possible_map); + set_cpu_possible(i, true); } #endif diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index f3577223c863..2c0cc72d295b 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/kprobes.h> #include <linux/kernel_stat.h> +#include <linux/reboot.h> #include <linux/slab.h> #include <linux/kdebug.h> #include <linux/delay.h> @@ -206,13 +207,33 @@ void nmi_adjust_hz(unsigned int new_hz) } EXPORT_SYMBOL_GPL(nmi_adjust_hz); +static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) +{ + on_each_cpu(stop_watchdog, NULL, 1); + return 0; +} + +static struct notifier_block nmi_reboot_notifier = { + .notifier_call = nmi_shutdown, +}; + int __init nmi_init(void) { + int err; + nmi_usable = 1; on_each_cpu(start_watchdog, NULL, 1); - return check_nmi_watchdog(); + err = check_nmi_watchdog(); + if (!err) { + err = register_reboot_notifier(&nmi_reboot_notifier); + if (err) { + nmi_usable = 0; + on_each_cpu(stop_watchdog, NULL, 1); + } + } + return err; } static int __init setup_nmi_watchdog(char *str) diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index edecca7b8116..ca55c7012f77 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -518,8 +518,8 @@ void __init of_fill_in_cpu_data(void) } #ifdef CONFIG_SMP - cpu_set(cpuid, cpu_present_map); - cpu_set(cpuid, cpu_possible_map); + set_cpu_present(cpuid, true); + set_cpu_possible(cpuid, true); #endif } diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 1e5ac4e282e1..132d81fb2616 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -70,13 +70,12 @@ void __init smp_cpus_done(unsigned int max_cpus) extern void smp4m_smp_done(void); extern void smp4d_smp_done(void); unsigned long bogosum = 0; - int cpu, num; + int cpu, num = 0; - for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++) - if (cpu_online(cpu)) { - num++; - bogosum += cpu_data(cpu).udelay_val; - } + for_each_online_cpu(cpu) { + num++; + bogosum += cpu_data(cpu).udelay_val; + } printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n", num, bogosum/(500000/HZ), @@ -144,7 +143,7 @@ void smp_flush_tlb_all(void) void smp_flush_cache_mm(struct mm_struct *mm) { if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = mm->cpu_vm_mask; + cpumask_t cpu_mask = *mm_cpumask(mm); cpu_clear(smp_processor_id(), cpu_mask); if (!cpus_empty(cpu_mask)) xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm); @@ -155,12 +154,13 @@ void smp_flush_cache_mm(struct mm_struct *mm) void smp_flush_tlb_mm(struct mm_struct *mm) { if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = mm->cpu_vm_mask; + cpumask_t cpu_mask = *mm_cpumask(mm); cpu_clear(smp_processor_id(), cpu_mask); if (!cpus_empty(cpu_mask)) { xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm); if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) - mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id()); + cpumask_copy(mm_cpumask(mm), + 
cpumask_of(smp_processor_id())); } local_flush_tlb_mm(mm); } @@ -172,7 +172,7 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start, struct mm_struct *mm = vma->vm_mm; if (mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = mm->cpu_vm_mask; + cpumask_t cpu_mask = *mm_cpumask(mm); cpu_clear(smp_processor_id(), cpu_mask); if (!cpus_empty(cpu_mask)) xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end); @@ -186,7 +186,7 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, struct mm_struct *mm = vma->vm_mm; if (mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = mm->cpu_vm_mask; + cpumask_t cpu_mask = *mm_cpumask(mm); cpu_clear(smp_processor_id(), cpu_mask); if (!cpus_empty(cpu_mask)) xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end); @@ -199,7 +199,7 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page) struct mm_struct *mm = vma->vm_mm; if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = mm->cpu_vm_mask; + cpumask_t cpu_mask = *mm_cpumask(mm); cpu_clear(smp_processor_id(), cpu_mask); if (!cpus_empty(cpu_mask)) xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page); @@ -212,7 +212,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) struct mm_struct *mm = vma->vm_mm; if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = mm->cpu_vm_mask; + cpumask_t cpu_mask = *mm_cpumask(mm); cpu_clear(smp_processor_id(), cpu_mask); if (!cpus_empty(cpu_mask)) xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page); @@ -241,7 +241,7 @@ void smp_flush_page_to_ram(unsigned long page) void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) { - cpumask_t cpu_mask = mm->cpu_vm_mask; + cpumask_t cpu_mask = *mm_cpumask(mm); cpu_clear(smp_processor_id(), cpu_mask); if (!cpus_empty(cpu_mask)) xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr); @@ -332,8 +332,8 @@ void __init smp_setup_cpu_possible_map(void) instance = 0; while (!cpu_find_by_instance(instance, NULL, &mid)) { if (mid < NR_CPUS) { - cpu_set(mid, cpu_possible_map); - cpu_set(mid, cpu_present_map); + set_cpu_possible(mid, true); + set_cpu_present(mid, true); } instance++; } @@ -351,8 +351,8 @@ void __init smp_prepare_boot_cpu(void) printk("boot cpu id != 0, this could work but is untested\n"); current_thread_info()->cpu = cpuid; - cpu_set(cpuid, cpu_online_map); - cpu_set(cpuid, cpu_possible_map); + set_cpu_online(cpuid, true); + set_cpu_possible(cpuid, true); } int __cpuinit __cpu_up(unsigned int cpu) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 79457f682b5a..708e12a26b05 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -808,9 +808,9 @@ static void smp_start_sync_tick_client(int cpu) extern unsigned long xcall_call_function; -void arch_send_call_function_ipi(cpumask_t mask) +void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - xcall_deliver((u64) &xcall_call_function, 0, 0, &mask); + xcall_deliver((u64) &xcall_call_function, 0, 0, mask); } extern unsigned long xcall_call_function_single; @@ -850,7 +850,7 @@ static void tsb_sync(void *info) void smp_tsb_sync(struct mm_struct *mm) { - smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1); + smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); } extern unsigned long xcall_flush_tlb_mm; @@ -1055,13 +1055,13 @@ void smp_flush_tlb_mm(struct mm_struct *mm) int cpu = 
get_cpu(); if (atomic_read(&mm->mm_users) == 1) { - mm->cpu_vm_mask = cpumask_of_cpu(cpu); + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); goto local_flush_and_out; } smp_cross_call_masked(&xcall_flush_tlb_mm, ctx, 0, 0, - &mm->cpu_vm_mask); + mm_cpumask(mm)); local_flush_and_out: __flush_tlb_mm(ctx, SECONDARY_CONTEXT); @@ -1075,11 +1075,11 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long int cpu = get_cpu(); if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - mm->cpu_vm_mask = cpumask_of_cpu(cpu); + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else smp_cross_call_masked(&xcall_flush_tlb_pending, ctx, nr, (unsigned long) vaddrs, - &mm->cpu_vm_mask); + mm_cpumask(mm)); __flush_tlb_pending(ctx, nr, vaddrs); diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c index 3369fef5b4b3..ab036a72de5a 100644 --- a/arch/sparc/kernel/sun4d_irq.c +++ b/arch/sparc/kernel/sun4d_irq.c @@ -326,7 +326,6 @@ int sun4d_request_irq(unsigned int irq, action->handler = handler; action->flags = irqflags; - cpus_clear(action->mask); action->name = devname; action->next = NULL; action->dev_id = dev_id; diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 50afaed99c8a..54fb02468f0d 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -150,7 +150,7 @@ void __cpuinit smp4d_callin(void) spin_lock_irqsave(&sun4d_imsk_lock, flags); cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ spin_unlock_irqrestore(&sun4d_imsk_lock, flags); - cpu_set(cpuid, cpu_online_map); + set_cpu_online(cpuid, true); } @@ -228,11 +228,10 @@ void __init smp4d_smp_done(void) /* setup cpu list for irq rotation */ first = 0; prev = &first; - for (i = 0; i < NR_CPUS; i++) - if (cpu_online(i)) { - *prev = i; - prev = &cpu_data(i).next; - } + for_each_online_cpu(i) { + *prev = i; + prev = &cpu_data(i).next; + } *prev = first; local_flush_cache_all(); diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 8040376c4890..960b113d0006 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -113,7 +113,7 @@ void __cpuinit smp4m_callin(void) local_irq_enable(); - cpu_set(cpuid, cpu_online_map); + set_cpu_online(cpuid, true); } /* @@ -186,11 +186,9 @@ void __init smp4m_smp_done(void) /* setup cpu list for irq rotation */ first = 0; prev = &first; - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i)) { - *prev = i; - prev = &cpu_data(i).next; - } + for_each_online_cpu(i) { + *prev = i; + prev = &cpu_data(i).next; } *prev = first; local_flush_cache_all(); diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c index 752d0c9fb544..7916feba6e4a 100644 --- a/arch/sparc/mm/highmem.c +++ b/arch/sparc/mm/highmem.c @@ -39,6 +39,7 @@ void *kmap_atomic(struct page *page, enum km_type type) if (!PageHighMem(page)) return page_address(page); + debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 00373ce2d8fb..2c8dfeb7ab04 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1092,7 +1092,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, if (strcmp(name, "cpu")) continue; id = mdesc_get_property(md, target, "id", NULL); - if (*id < NR_CPUS) + if (*id < nr_cpu_ids) cpu_set(*id, *mask); } } diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index fe7ed08390bb..06c9a7d98206 100644 --- 
a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1425,7 +1425,7 @@ static void __init init_vac_layout(void) min_line_size = vac_line_size; //FIXME: cpus not contiguous!! cpu++; - if (cpu >= NR_CPUS || !cpu_online(cpu)) + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) break; #else break; diff --git a/arch/um/drivers/pcap_user.h b/arch/um/drivers/pcap_user.h index 96b80b565eeb..d8ba6153f912 100644 --- a/arch/um/drivers/pcap_user.h +++ b/arch/um/drivers/pcap_user.h @@ -19,13 +19,3 @@ extern const struct net_user_info pcap_user_info; extern int pcap_user_read(int fd, void *buf, int len, struct pcap_data *pri); -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/drivers/port.h b/arch/um/drivers/port.h index 9117609a575d..372a80c0556a 100644 --- a/arch/um/drivers/port.h +++ b/arch/um/drivers/port.h @@ -18,13 +18,3 @@ extern void port_remove_dev(void *d); #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/drivers/ssl.h b/arch/um/drivers/ssl.h index 98412aa66607..314d17725ce6 100644 --- a/arch/um/drivers/ssl.h +++ b/arch/um/drivers/ssl.h @@ -11,13 +11,3 @@ extern void ssl_receive_char(int line, char ch); #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/drivers/stdio_console.h b/arch/um/drivers/stdio_console.h index 505a3d5bea5e..6d8275f71fd4 100644 --- a/arch/um/drivers/stdio_console.h +++ b/arch/um/drivers/stdio_console.h @@ -9,13 +9,3 @@ extern void save_console_flags(void); #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 0a868118cf06..d42f826a8ab9 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -17,7 +17,6 @@ * James McMechan */ -#define MAJOR_NR UBD_MAJOR #define UBD_SHIFT 4 #include "linux/kernel.h" @@ -115,7 +114,7 @@ static struct block_device_operations ubd_blops = { }; /* Protected by ubd_lock */ -static int fake_major = MAJOR_NR; +static int fake_major = UBD_MAJOR; static struct gendisk *ubd_gendisk[MAX_DEV]; static struct gendisk *fake_gendisk[MAX_DEV]; @@ -299,7 +298,7 @@ static int ubd_setup_common(char *str, int *index_out, char **error_out) } mutex_lock(&ubd_lock); - if(fake_major != MAJOR_NR){ + if (fake_major != UBD_MAJOR) { *error_out = "Can't assign a fake major twice"; goto out1; } @@ -818,13 +817,13 @@ static int ubd_disk_register(int major, u64 size, int unit, disk->first_minor = unit << UBD_SHIFT; disk->fops = &ubd_blops; set_capacity(disk, size / 512); - if(major == MAJOR_NR) + if (major == UBD_MAJOR) sprintf(disk->disk_name, "ubd%c", 'a' + unit); else sprintf(disk->disk_name, "ubd_fake%d", unit); /* sysfs register (not for ide fake devices) */ - if (major == MAJOR_NR) { + if (major == UBD_MAJOR) { ubd_devs[unit].pdev.id = unit; ubd_devs[unit].pdev.name = DRIVER_NAME; ubd_devs[unit].pdev.dev.release = ubd_device_release; @@ -871,13 +870,13 @@ static int ubd_add(int n, char **error_out) ubd_dev->queue->queuedata = ubd_dev; blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG); - err = ubd_disk_register(MAJOR_NR, ubd_dev->size, n, &ubd_gendisk[n]); + err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]); if(err){ *error_out = "Failed to register device"; goto out_cleanup; } - if(fake_major != MAJOR_NR) + if (fake_major != UBD_MAJOR) ubd_disk_register(fake_major, ubd_dev->size, n, &fake_gendisk[n]); @@ -1059,10 +1058,10 @@ static int __init ubd_init(void) char *error; int i, err; - if (register_blkdev(MAJOR_NR, "ubd")) + if (register_blkdev(UBD_MAJOR, "ubd")) return -1; - if (fake_major != MAJOR_NR) { + if (fake_major != UBD_MAJOR) { char name[sizeof("ubd_nnn\0")]; snprintf(name, sizeof(name), "ubd_%d", fake_major); diff --git a/arch/um/drivers/xterm.h b/arch/um/drivers/xterm.h index f33a6e77b186..56b9c4aba423 100644 --- a/arch/um/drivers/xterm.h +++ b/arch/um/drivers/xterm.h @@ -10,13 +10,3 @@ extern int xterm_fd(int socket, int *pid_out); #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/asm/irq_vectors.h b/arch/um/include/asm/irq_vectors.h index 62ddba6fc733..272a81e0ce14 100644 --- a/arch/um/include/asm/irq_vectors.h +++ b/arch/um/include/asm/irq_vectors.h @@ -8,13 +8,3 @@ #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h index 2cf35c21d694..cf259de51531 100644 --- a/arch/um/include/asm/mmu.h +++ b/arch/um/include/asm/mmu.h @@ -10,13 +10,3 @@ #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/asm/pda.h b/arch/um/include/asm/pda.h index 0d8bf33ffd42..ddcd774fc2a0 100644 --- a/arch/um/include/asm/pda.h +++ b/arch/um/include/asm/pda.h @@ -19,13 +19,3 @@ extern struct foo me; #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h index 9062a6e72241..718984359f8c 100644 --- a/arch/um/include/asm/pgalloc.h +++ b/arch/um/include/asm/pgalloc.h @@ -60,13 +60,3 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index 0446f456b428..084de4a9fc70 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -134,13 +134,3 @@ static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot) #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h index ce9514f57211..76078490c258 100644 --- a/arch/um/include/shared/frame_kern.h +++ b/arch/um/include/shared/frame_kern.h @@ -20,13 +20,3 @@ extern int setup_signal_stack_si(unsigned long stack_top, int sig, #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/shared/initrd.h b/arch/um/include/shared/initrd.h index 439b9a814985..22673bcc273d 100644 --- a/arch/um/include/shared/initrd.h +++ b/arch/um/include/shared/initrd.h @@ -10,13 +10,3 @@ extern int load_initrd(char *filename, void *buf, int size); #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/shared/irq_kern.h b/arch/um/include/shared/irq_kern.h index fba3895274f9..b05d22f3d84e 100644 --- a/arch/um/include/shared/irq_kern.h +++ b/arch/um/include/shared/irq_kern.h @@ -16,13 +16,3 @@ extern int um_request_irq(unsigned int irq, int fd, int type, #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/shared/mem_kern.h b/arch/um/include/shared/mem_kern.h index cb7e196d366b..69be0fd0ce4b 100644 --- a/arch/um/include/shared/mem_kern.h +++ b/arch/um/include/shared/mem_kern.h @@ -18,13 +18,3 @@ extern void register_remapper(struct remapper *info); #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/shared/ubd_user.h b/arch/um/include/shared/ubd_user.h index bb66517f0739..3845051f1b10 100644 --- a/arch/um/include/shared/ubd_user.h +++ b/arch/um/include/shared/ubd_user.h @@ -14,13 +14,3 @@ extern int kernel_fd; #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 499e5e95e609..388ec0a3ea9b 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -28,7 +28,7 @@ $(obj)/config.tmp: $(objtree)/.config FORCE $(call if_changed,quote1) quiet_cmd_quote1 = QUOTE $@ - cmd_quote1 = sed -e 's/"/\\"/g' -e 's/^/"/' -e 's/$$/\\n"/' \ + cmd_quote1 = sed -e 's/"/\\"/g' -e 's/^/"/' -e 's/$$/\\n",/' \ $< > $@ $(obj)/config.c: $(src)/config.c.in $(obj)/config.tmp FORCE @@ -36,9 +36,9 @@ $(obj)/config.c: $(src)/config.c.in $(obj)/config.tmp FORCE quiet_cmd_quote2 = QUOTE $@ cmd_quote2 = sed -e '/CONFIG/{' \ - -e 's/"CONFIG"\;/""/' \ + -e 's/"CONFIG"//' \ -e 'r $(obj)/config.tmp' \ -e 'a \' \ - -e '""\;' \ + -e '""' \ -e '}' \ $< > $@ diff --git a/arch/um/kernel/config.c.in b/arch/um/kernel/config.c.in index c062cbfe386e..b7a43feafde7 100644 --- a/arch/um/kernel/config.c.in +++ b/arch/um/kernel/config.c.in @@ -7,11 +7,15 @@ #include <stdlib.h> #include "init.h" -static __initdata char *config = "CONFIG"; +static __initdata const char *config[] = { +"CONFIG" +}; static int __init print_config(char *line, int *add) { - printf("%s", config); + int i; + for (i = 0; i < sizeof(config)/sizeof(config[0]); i++) + printf("%s", config[i]); exit(0); } @@ -20,13 +24,3 @@ __uml_setup("--showconfig", print_config, " Prints the config file that this UML binary was generated from.\n\n" ); -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index 183db26d01bf..02ee9adff54a 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -244,7 +244,7 @@ static void __init check_sysemu(void) if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *) PTRACE_O_TRACESYSGOOD) < 0)) - fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); + fatal_perror("check_sysemu: PTRACE_OLDSETOPTIONS failed"); while (1) { count++; @@ -252,12 +252,12 @@ static void __init check_sysemu(void) goto fail; CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) - fatal_perror("check_ptrace : wait failed"); + fatal_perror("check_sysemu: wait failed"); if (WIFSTOPPED(status) && (WSTOPSIG(status) == (SIGTRAP|0x80))) { if (!count) { - non_fatal("check_ptrace : SYSEMU_SINGLESTEP " + non_fatal("check_sysemu: SYSEMU_SINGLESTEP " "doesn't singlestep"); goto fail; } @@ -271,7 +271,7 @@ static void __init check_sysemu(void) else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) count++; else { - non_fatal("check_ptrace : expected SIGTRAP or " + non_fatal("check_sysemu: expected SIGTRAP or " "(SIGTRAP | 0x80), got status = %d\n", status); goto fail; diff --git a/arch/um/sys-i386/asm/archparam.h b/arch/um/sys-i386/asm/archparam.h index 93fd723344e5..2a18a884ca1b 100644 --- a/arch/um/sys-i386/asm/archparam.h +++ b/arch/um/sys-i386/asm/archparam.h @@ -14,13 +14,3 @@ #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. 
This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-i386/shared/sysdep/checksum.h b/arch/um/sys-i386/shared/sysdep/checksum.h index 0cb4645cbeb8..ed47445f3905 100644 --- a/arch/um/sys-i386/shared/sysdep/checksum.h +++ b/arch/um/sys-i386/shared/sysdep/checksum.h @@ -199,13 +199,3 @@ static __inline__ __wsum csum_and_copy_to_user(const void *src, #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ia64/sysdep/ptrace.h b/arch/um/sys-ia64/sysdep/ptrace.h index 42dd8fb6f2f9..0f0f4e6fd334 100644 --- a/arch/um/sys-ia64/sysdep/ptrace.h +++ b/arch/um/sys-ia64/sysdep/ptrace.h @@ -14,13 +14,3 @@ struct sys_pt_regs { #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ia64/sysdep/sigcontext.h b/arch/um/sys-ia64/sysdep/sigcontext.h index f15fb25260ba..76b43161e779 100644 --- a/arch/um/sys-ia64/sysdep/sigcontext.h +++ b/arch/um/sys-ia64/sysdep/sigcontext.h @@ -8,13 +8,3 @@ #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ia64/sysdep/syscalls.h b/arch/um/sys-ia64/sysdep/syscalls.h index 4a1f46ef1ebc..5f6700c41558 100644 --- a/arch/um/sys-ia64/sysdep/syscalls.h +++ b/arch/um/sys-ia64/sysdep/syscalls.h @@ -8,13 +8,3 @@ #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ppc/miscthings.c b/arch/um/sys-ppc/miscthings.c index 373061c50129..1c11aed9c719 100644 --- a/arch/um/sys-ppc/miscthings.c +++ b/arch/um/sys-ppc/miscthings.c @@ -40,14 +40,3 @@ void shove_aux_table(unsigned long sp) } /* END stuff taken from arch/ppc/kernel/process.c */ - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ppc/ptrace.c b/arch/um/sys-ppc/ptrace.c index 8e71b47f2b8e..66ef155248f1 100644 --- a/arch/um/sys-ppc/ptrace.c +++ b/arch/um/sys-ppc/ptrace.c @@ -56,13 +56,3 @@ int peek_user(struct task_struct *child, long addr, long data) return put_user(tmp, (unsigned long *) data); } -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ppc/ptrace_user.c b/arch/um/sys-ppc/ptrace_user.c index ff0b9c077a13..224d2403c37b 100644 --- a/arch/um/sys-ppc/ptrace_user.c +++ b/arch/um/sys-ppc/ptrace_user.c @@ -27,13 +27,3 @@ int ptrace_setregs(long pid, unsigned long *regs_in) } return 0; } -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ppc/shared/sysdep/ptrace.h b/arch/um/sys-ppc/shared/sysdep/ptrace.h index df2397dba3e5..0e3230e937e1 100644 --- a/arch/um/sys-ppc/shared/sysdep/ptrace.h +++ b/arch/um/sys-ppc/shared/sysdep/ptrace.h @@ -91,13 +91,3 @@ extern void shove_aux_table(unsigned long sp); #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ppc/shared/sysdep/sigcontext.h b/arch/um/sys-ppc/shared/sysdep/sigcontext.h index f20d965de9c7..b7286f0a1e00 100644 --- a/arch/um/sys-ppc/shared/sysdep/sigcontext.h +++ b/arch/um/sys-ppc/shared/sysdep/sigcontext.h @@ -50,13 +50,3 @@ #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ppc/shared/sysdep/syscalls.h b/arch/um/sys-ppc/shared/sysdep/syscalls.h index 679df351e19b..1ff81552251c 100644 --- a/arch/um/sys-ppc/shared/sysdep/syscalls.h +++ b/arch/um/sys-ppc/shared/sysdep/syscalls.h @@ -41,13 +41,3 @@ int old_mmap(unsigned long addr, unsigned long len, #define LAST_ARCH_SYSCALL __NR_fadvise64 -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-ppc/sigcontext.c b/arch/um/sys-ppc/sigcontext.c index 4bdc15c89edd..40694d0f3d15 100644 --- a/arch/um/sys-ppc/sigcontext.c +++ b/arch/um/sys-ppc/sigcontext.c @@ -2,13 +2,3 @@ #include "asm/sigcontext.h" #include "sysdep/ptrace.h" -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-x86_64/asm/archparam.h b/arch/um/sys-x86_64/asm/archparam.h index 270ed9586b68..6c083663b8d9 100644 --- a/arch/um/sys-x86_64/asm/archparam.h +++ b/arch/um/sys-x86_64/asm/archparam.h @@ -14,13 +14,3 @@ #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-x86_64/asm/module.h b/arch/um/sys-x86_64/asm/module.h index 35b5491d3e96..8eb79c2d07d5 100644 --- a/arch/um/sys-x86_64/asm/module.h +++ b/arch/um/sys-x86_64/asm/module.h @@ -18,13 +18,3 @@ struct mod_arch_specific #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-x86_64/mem.c b/arch/um/sys-x86_64/mem.c index 3f59a0a4f156..3f8df8abf347 100644 --- a/arch/um/sys-x86_64/mem.c +++ b/arch/um/sys-x86_64/mem.c @@ -14,12 +14,3 @@ unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS; unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS; unsigned long vm_force_exec32 = PROT_EXEC; -/* Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 45161b816313..748e50a1a152 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -165,6 +165,9 @@ config AUDIT_ARCH config ARCH_SUPPORTS_OPTIMIZED_INLINING def_bool y +config ARCH_SUPPORTS_DEBUG_PAGEALLOC + def_bool y + # Use the generic interrupt handling code in kernel/irq/: config GENERIC_HARDIRQS bool diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index fdb45df608b6..a345cb5447a8 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -75,6 +75,7 @@ config DEBUG_STACK_USAGE config DEBUG_PAGEALLOC bool "Debug page memory allocations" depends on DEBUG_KERNEL + depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC ---help--- Unmap pages from the kernel linear mapping after free_pages(). 
This results in a large slowdown, but helps to find certain types diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index 8c3c25f35578..5054c2ddd1a0 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c @@ -2,6 +2,7 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved + * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. @@ -16,24 +17,38 @@ #define SMAP 0x534d4150 /* ASCII "SMAP" */ +struct e820_ext_entry { + struct e820entry std; + u32 ext_flags; +} __attribute__((packed)); + static int detect_memory_e820(void) { int count = 0; u32 next = 0; - u32 size, id; + u32 size, id, edi; u8 err; struct e820entry *desc = boot_params.e820_map; + static struct e820_ext_entry buf; /* static so it is zeroed */ + + /* + * Set this here so that if the BIOS doesn't change this field + * but still doesn't change %ecx, we're still okay... + */ + buf.ext_flags = 1; do { - size = sizeof(struct e820entry); + size = sizeof buf; - /* Important: %edx is clobbered by some BIOSes, - so it must be either used for the error output - or explicitly marked clobbered. */ - asm("int $0x15; setc %0" + /* Important: %edx and %esi are clobbered by some BIOSes, + so they must be either used for the error output + or explicitly marked clobbered. Given that, assume there + is something out there clobbering %ebp and %edi, too. */ + asm("pushl %%ebp; int $0x15; popl %%ebp; setc %0" : "=d" (err), "+b" (next), "=a" (id), "+c" (size), - "=m" (*desc) - : "D" (desc), "d" (SMAP), "a" (0xe820)); + "=D" (edi), "+m" (buf) + : "D" (&buf), "d" (SMAP), "a" (0xe820) + : "esi"); /* BIOSes which terminate the chain with CF = 1 as opposed to %ebx = 0 don't always report the SMAP signature on @@ -51,8 +66,14 @@ static int detect_memory_e820(void) break; } + /* ACPI 3.0 added the extended flags support. If bit 0 + in the extended flags is zero, we're supposed to simply + ignore the entry -- a backwards incompatible change! */ + if (size > 20 && !(buf.ext_flags & 1)) + continue; + + *desc++ = buf.std; count++; - desc++; } while (next && count < ARRAY_SIZE(boot_params.e820_map)); return boot_params.e820_entries = count; diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index 43894428c3c2..0f4ee7148afe 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h @@ -26,36 +26,20 @@ #ifndef __ASSEMBLY__ #include <asm/hw_irq.h> +#include <asm/kvm_para.h> /*G:031 But first, how does our Guest contact the Host to ask for privileged * operations? There are two ways: the direct way is to make a "hypercall", * to make requests of the Host Itself. * - * Our hypercall mechanism uses the highest unused trap code (traps 32 and - * above are used by real hardware interrupts). Fifteen hypercalls are + * We use the KVM hypercall mechanism. Eighteen hypercalls are * available: the hypercall number is put in the %eax register, and the - * arguments (when required) are placed in %edx, %ebx and %ecx. If a return + * arguments (when required) are placed in %ebx, %ecx and %edx. If a return * value makes sense, it's returned in %eax. * * Grossly invalid calls result in Sudden Death at the hands of the vengeful * Host, rather than returning failure. This reflects Winston Churchill's * definition of a gentleman: "someone who is only rude intentionally". 
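The comment above spells out the register convention lguest now shares with KVM: the hypercall number travels in %eax and up to three arguments in %ebx, %ecx and %edx. A hedged sketch of a one-argument wrapper in that style, modelled on the kvm_hypercall*() helpers from <asm/kvm_para.h> that the hunks below switch to (the wrapper name here is illustrative; the opcode bytes are the same ones the assembly stub at the end of this patch emits):

/* Call number in %eax, first argument in %ebx, return value back in %eax.
 * The three opcode bytes encode the vmcall instruction. */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"

static inline long example_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1)
		     : "memory");
	return ret;
}

This is also why struct hcall_args below is reordered to arg0, arg1, arg2, arg3: its in-memory layout now matches the eax, ebx, ecx, edx order the guest registers actually use.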
*/ -static inline unsigned long -hcall(unsigned long call, - unsigned long arg1, unsigned long arg2, unsigned long arg3) -{ - /* "int" is the Intel instruction to trigger a trap. */ - asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) - /* The call in %eax (aka "a") might be overwritten */ - : "=a"(call) - /* The arguments are in %eax, %edx, %ebx & %ecx */ - : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) - /* "memory" means this might write somewhere in memory. - * This isn't true for all calls, but it's safe to tell - * gcc that it might happen so it doesn't get clever. */ - : "memory"); - return call; -} /*:*/ /* Can't use our min() macro here: needs to be a constant */ @@ -64,7 +48,7 @@ hcall(unsigned long call, #define LHCALL_RING_SIZE 64 struct hcall_args { /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ - unsigned long arg0, arg2, arg3, arg1; + unsigned long arg0, arg1, arg2, arg3; }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index a977de23cb4d..a0301bfeb954 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -86,6 +86,9 @@ static inline void early_quirks(void) { } extern void pci_iommu_alloc(void); +/* MSI arch hook */ +#define arch_setup_msi_irqs arch_setup_msi_irqs + #endif /* __KERNEL__ */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index a5074bd0f8be..48dcfa62ea07 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -24,28 +24,4 @@ struct saved_context { unsigned long return_address; } __attribute__((packed)); -#ifdef CONFIG_ACPI -extern unsigned long saved_eip; -extern unsigned long saved_esp; -extern unsigned long saved_ebp; -extern unsigned long saved_ebx; -extern unsigned long saved_esi; -extern unsigned long saved_edi; - -static inline void acpi_save_register_state(unsigned long return_point) -{ - saved_eip = return_point; - asm volatile("movl %%esp,%0" : "=m" (saved_esp)); - asm volatile("movl %%ebp,%0" : "=m" (saved_ebp)); - asm volatile("movl %%ebx,%0" : "=m" (saved_ebx)); - asm volatile("movl %%edi,%0" : "=m" (saved_edi)); - asm volatile("movl %%esi,%0" : "=m" (saved_esi)); -} - -#define acpi_restore_register_state() do {} while (0) - -/* routines for saving/restoring kernel state */ -extern int acpi_save_state_mem(void); -#endif - #endif /* _ASM_X86_SUSPEND_32_H */ diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 77cfb2cfb386..744299c0b774 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -217,10 +217,6 @@ static inline cpumask_t node_to_cpumask(int node) { return cpu_online_map; } -static inline int node_to_first_cpu(int node) -{ - return first_cpu(cpu_online_map); -} static inline void setup_node_to_cpumask_map(void) { } @@ -237,14 +233,6 @@ static inline void setup_node_to_cpumask_map(void) { } #include <asm-generic/topology.h> -#ifdef CONFIG_NUMA -/* Returns the number of the first CPU on Node 'node'. 
*/ -static inline int node_to_first_cpu(int node) -{ - return cpumask_first(cpumask_of_node(node)); -} -#endif - extern cpumask_t cpu_coregroup_map(int cpu); extern const struct cpumask *cpu_coregroup_mask(int cpu); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index da99ffcdfde6..1bb5c6cee3eb 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3468,6 +3468,10 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) struct intel_iommu *iommu = NULL; int index = 0; + /* x86 doesn't support multiple MSI yet */ + if (type == PCI_CAP_ID_MSI && nvec > 1) + return 1; + irq_want = nr_irqs_gsi; sub_handle = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index fbf2f33e3080..5a6aa1c1162f 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -18,6 +18,7 @@ #include <asm/thread_info.h> #include <asm/bootparam.h> #include <asm/elf.h> +#include <asm/suspend.h> #include <xen/interface/xen.h> diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 8793ab33e2c1..e72f062fb4b5 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -16,6 +16,7 @@ #include <asm/thread_info.h> #include <asm/ia32.h> #include <asm/bootparam.h> +#include <asm/suspend.h> #include <xen/interface/xen.h> diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 4c4214690dd1..fb73a52913a4 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -377,10 +377,6 @@ static const struct file_operations mtrr_fops = { .release = mtrr_close, }; - -static struct proc_dir_entry *proc_root_mtrr; - - static int mtrr_seq_show(struct seq_file *seq, void *offset) { char factor; @@ -423,11 +419,7 @@ static int __init mtrr_if_init(void) (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) return -ENODEV; - proc_root_mtrr = - proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); - - if (proc_root_mtrr) - proc_root_mtrr->owner = THIS_MODULE; + proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); return 0; } diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index bc1326105448..368b0a8836f9 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -50,7 +50,6 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) */ static struct irqaction fpu_irq = { .handler = math_error_irq, - .mask = CPU_MASK_NONE, .name = "fpu", }; @@ -83,7 +82,6 @@ void __init init_ISA_irqs(void) */ static struct irqaction irq2 = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index c7a49e0ffbfb..8cd10537fd46 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -45,7 +45,6 @@ static struct irqaction irq2 = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 8815f3c7fec7..846510b78a09 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c @@ -348,7 +348,6 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) static struct irqaction mfgptirq = { .handler = mfgpt_tick, .flags = IRQF_DISABLED | IRQF_NOBALANCING, - .mask = CPU_MASK_NONE, .name = "mfgpt-timer" }; diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index c7c4776ff630..90f5b9ef5def 
100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -300,8 +300,7 @@ fs_initcall(pci_iommu_init); static __devinit void via_no_dac(struct pci_dev *dev) { if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { - printk(KERN_INFO - "PCI: VIA PCI bridge detected. Disabling DAC.\n"); + dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n"); forbid_dac = 1; } } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index a0d26237d7cf..b4158439bf63 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1049,7 +1049,6 @@ void __init x86_quirk_trap_init(void) static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, - .mask = CPU_MASK_NONE, .name = "timer" }; diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c index 241ec3923f61..5ba343e61844 100644 --- a/arch/x86/kernel/time_64.c +++ b/arch/x86/kernel/time_64.c @@ -116,7 +116,6 @@ unsigned long __init calibrate_cpu(void) static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER, - .mask = CPU_MASK_NONE, .name = "timer" }; @@ -125,7 +124,6 @@ void __init hpet_time_init(void) if (!hpet_enable()) setup_pit_timer(); - irq0.mask = cpumask_of_cpu(0); setup_irq(0, &irq0); } diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 33a788d5879c..d303369a7bad 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -202,7 +202,6 @@ static struct irqaction vmi_clock_action = { .name = "vmi-timer", .handler = vmi_timer_interrupt, .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, - .mask = CPU_MASK_ALL, }; static void __devinit vmi_time_init_clockevent(void) diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 90e44a10e68a..e94a11e42f98 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -107,7 +107,7 @@ static void async_hcall(unsigned long call, unsigned long arg1, local_irq_save(flags); if (lguest_data.hcall_status[next_call] != 0xFF) { /* Table full, so do normal hcall which will flush table. 
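Before the lguest conversion continues below, one recurring cleanup in this patch is worth illustrating: the sh timers, sparc sun4d_irq, and the x86 irqinit, mfgpt, setup and vmiclock hunks above all delete the .mask field from struct irqaction initializers, and the sparc request_irq() paths stop calling cpus_clear(action->mask), because the field no longer exists. A timer action after the change carries only the remaining members; mirroring the x86 timer hunk (timer_interrupt being whatever handler the platform already provides):

static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags	 = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
	.name	 = "timer",
};

/* Installed exactly as before, e.g. setup_irq(0, &irq0) at time init. */

For the same reason the x86-64 hpet_time_init() hunk stops assigning irq0.mask = cpumask_of_cpu(0) before calling setup_irq().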
*/ - hcall(call, arg1, arg2, arg3); + kvm_hypercall3(call, arg1, arg2, arg3); } else { lguest_data.hcalls[next_call].arg0 = call; lguest_data.hcalls[next_call].arg1 = arg1; @@ -134,13 +134,32 @@ static void async_hcall(unsigned long call, unsigned long arg1, * * So, when we're in lazy mode, we call async_hcall() to store the call for * future processing: */ -static void lazy_hcall(unsigned long call, +static void lazy_hcall1(unsigned long call, + unsigned long arg1) +{ + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) + kvm_hypercall1(call, arg1); + else + async_hcall(call, arg1, 0, 0); +} + +static void lazy_hcall2(unsigned long call, + unsigned long arg1, + unsigned long arg2) +{ + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) + kvm_hypercall2(call, arg1, arg2); + else + async_hcall(call, arg1, arg2, 0); +} + +static void lazy_hcall3(unsigned long call, unsigned long arg1, unsigned long arg2, unsigned long arg3) { if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) - hcall(call, arg1, arg2, arg3); + kvm_hypercall3(call, arg1, arg2, arg3); else async_hcall(call, arg1, arg2, arg3); } @@ -150,7 +169,7 @@ static void lazy_hcall(unsigned long call, static void lguest_leave_lazy_mode(void) { paravirt_leave_lazy(paravirt_get_lazy_mode()); - hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0); + kvm_hypercall0(LHCALL_FLUSH_ASYNC); } /*G:033 @@ -229,7 +248,7 @@ static void lguest_write_idt_entry(gate_desc *dt, /* Keep the local copy up to date. */ native_write_idt_entry(dt, entrynum, g); /* Tell Host about this new entry. */ - hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); + kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); } /* Changing to a different IDT is very rare: we keep the IDT up-to-date every @@ -241,7 +260,7 @@ static void lguest_load_idt(const struct desc_ptr *desc) struct desc_struct *idt = (void *)desc->address; for (i = 0; i < (desc->size+1)/8; i++) - hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); + kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); } /* @@ -261,8 +280,8 @@ static void lguest_load_idt(const struct desc_ptr *desc) */ static void lguest_load_gdt(const struct desc_ptr *desc) { - BUG_ON((desc->size+1)/8 != GDT_ENTRIES); - hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0); + BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES); + kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES); } /* For a single GDT entry which changes, we do the lazy thing: alter our GDT, @@ -272,7 +291,7 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, const void *desc, int type) { native_write_gdt_entry(dt, entrynum, desc, type); - hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0); + kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES); } /* OK, I lied. There are three "thread local storage" GDT entries which change @@ -284,7 +303,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) * can't handle us removing entries we're currently using. So we clear * the GS register here: if it's needed it'll be reloaded anyway. 
*/ lazy_load_gs(0); - lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0); + lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu); } /*G:038 That's enough excitement for now, back to ploughing through each of @@ -382,7 +401,7 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, static unsigned long current_cr0; static void lguest_write_cr0(unsigned long val) { - lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); + lazy_hcall1(LHCALL_TS, val & X86_CR0_TS); current_cr0 = val; } @@ -396,7 +415,7 @@ static unsigned long lguest_read_cr0(void) * the vowels have been optimized out. */ static void lguest_clts(void) { - lazy_hcall(LHCALL_TS, 0, 0, 0); + lazy_hcall1(LHCALL_TS, 0); current_cr0 &= ~X86_CR0_TS; } @@ -418,7 +437,7 @@ static bool cr3_changed = false; static void lguest_write_cr3(unsigned long cr3) { lguest_data.pgdir = cr3; - lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0); + lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); cr3_changed = true; } @@ -490,11 +509,17 @@ static void lguest_write_cr4(unsigned long val) * into a process' address space. We set the entry then tell the Host the * toplevel and address this corresponds to. The Guest uses one pagetable per * process, so we need to tell the Host which one we're changing (mm->pgd). */ +static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low); +} + static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { *ptep = pteval; - lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low); + lguest_pte_update(mm, addr, ptep); } /* The Guest calls this to set a top-level entry. Again, we set the entry then @@ -503,8 +528,8 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) { *pmdp = pmdval; - lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK, - (__pa(pmdp)&(PAGE_SIZE-1))/4, 0); + lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK, + (__pa(pmdp) & (PAGE_SIZE - 1)) / 4); } /* There are a couple of legacy places where the kernel sets a PTE, but we @@ -520,7 +545,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) { *ptep = pteval; if (cr3_changed) - lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); + lazy_hcall1(LHCALL_FLUSH_TLB, 1); } /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on @@ -536,7 +561,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) static void lguest_flush_tlb_single(unsigned long addr) { /* Simply set it to zero: if it was not, it will fault back in. */ - lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); + lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); } /* This is what happens after the Guest has removed a large number of entries. @@ -544,7 +569,7 @@ static void lguest_flush_tlb_single(unsigned long addr) * have changed, ie. virtual addresses below PAGE_OFFSET. */ static void lguest_flush_tlb_user(void) { - lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0); + lazy_hcall1(LHCALL_FLUSH_TLB, 0); } /* This is called when the kernel page tables have changed. That's not very @@ -552,7 +577,7 @@ static void lguest_flush_tlb_user(void) * slow), so it's worth separating this from the user flushing above. */ static void lguest_flush_tlb_kernel(void) { - lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); + lazy_hcall1(LHCALL_FLUSH_TLB, 1); } /* @@ -689,7 +714,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, } /* Please wake us this far in the future. 
*/ - hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0); + kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); return 0; } @@ -700,7 +725,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode, case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: /* A 0 argument shuts the clock down. */ - hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0); + kvm_hypercall0(LHCALL_SET_CLOCKEVENT); break; case CLOCK_EVT_MODE_ONESHOT: /* This is what we expect. */ @@ -775,8 +800,8 @@ static void lguest_time_init(void) static void lguest_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0, - THREAD_SIZE/PAGE_SIZE); + lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0, + THREAD_SIZE / PAGE_SIZE); } /* Let's just say, I wouldn't do debugging under a Guest. */ @@ -849,7 +874,7 @@ static void set_lguest_basic_apic_ops(void) /* STOP! Until an interrupt comes in. */ static void lguest_safe_halt(void) { - hcall(LHCALL_HALT, 0, 0, 0); + kvm_hypercall0(LHCALL_HALT); } /* The SHUTDOWN hypercall takes a string to describe what's happening, and @@ -859,7 +884,8 @@ static void lguest_safe_halt(void) * rather than virtual addresses, so we use __pa() here. */ static void lguest_power_off(void) { - hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0); + kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), + LGUEST_SHUTDOWN_POWEROFF); } /* @@ -869,7 +895,7 @@ static void lguest_power_off(void) */ static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) { - hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0); + kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); /* The hcall won't return, but to keep gcc happy, we're "done". */ return NOTIFY_DONE; } @@ -910,7 +936,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) len = sizeof(scratch) - 1; scratch[len] = '\0'; memcpy(scratch, buf, len); - hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0); + kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); /* This routine returns the number of bytes actually written. */ return len; @@ -920,7 +946,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) * Launcher to reboot us. */ static void lguest_restart(char *reason) { - hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0); + kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); } /*G:050 @@ -1040,6 +1066,8 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode; + pv_mmu_ops.pte_update = lguest_pte_update; + pv_mmu_ops.pte_update_defer = lguest_pte_update; #ifdef CONFIG_X86_LOCAL_APIC /* apic read/write intercepts */ diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 10b9bd35a8ff..f79541989471 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S @@ -27,8 +27,8 @@ ENTRY(lguest_entry) /* We make the "initialization" hypercall now to tell the Host about * us, and also find out where it put our page tables. */ movl $LHCALL_LGUEST_INIT, %eax - movl $lguest_data - __PAGE_OFFSET, %edx - int $LGUEST_TRAP_ENTRY + movl $lguest_data - __PAGE_OFFSET, %ebx + .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ /* Set up the initial stack so we can run C code. 
*/ movl $(init_thread_union+THREAD_SIZE),%esp diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 522db5e3d0bf..5bc5d1688c1c 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -19,49 +19,6 @@ void kunmap(struct page *page) kunmap_high(page); } -static void debug_kmap_atomic_prot(enum km_type type) -{ -#ifdef CONFIG_DEBUG_HIGHMEM - static unsigned warn_count = 10; - - if (unlikely(warn_count == 0)) - return; - - if (unlikely(in_interrupt())) { - if (in_irq()) { - if (type != KM_IRQ0 && type != KM_IRQ1 && - type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ && - type != KM_BOUNCE_READ) { - WARN_ON(1); - warn_count--; - } - } else if (!irqs_disabled()) { /* softirq */ - if (type != KM_IRQ0 && type != KM_IRQ1 && - type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 && - type != KM_SKB_SUNRPC_DATA && - type != KM_SKB_DATA_SOFTIRQ && - type != KM_BOUNCE_READ) { - WARN_ON(1); - warn_count--; - } - } - } - - if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ || - type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) { - if (!irqs_disabled()) { - WARN_ON(1); - warn_count--; - } - } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) { - if (irq_count() == 0 && !irqs_disabled()) { - WARN_ON(1); - warn_count--; - } - } -#endif -} - /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * no global lock is needed and because the kmap code must perform a global TLB @@ -81,8 +38,9 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) if (!PageHighMem(page)) return page_address(page); - debug_kmap_atomic_prot(type); + debug_kmap_atomic(type); + debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 699c9b2895ae..bff0c9032f8c 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -19,6 +19,7 @@ #include <asm/iomap.h> #include <asm/pat.h> #include <linux/module.h> +#include <linux/highmem.h> int is_io_mapping_possible(resource_size_t base, unsigned long size) { @@ -71,6 +72,7 @@ iounmap_atomic(void *kvaddr, enum km_type type) unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); + debug_kmap_atomic(type); /* * Force other mappings to Oops if they'll try to access this pte * without first remap it. Keeping stale mappings around is a bad idea diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c index f6adf2c6d751..aaf26ae58cd5 100644 --- a/arch/x86/pci/early.c +++ b/arch/x86/pci/early.c @@ -69,11 +69,12 @@ void early_dump_pci_device(u8 bus, u8 slot, u8 func) int j; u32 val; - printk(KERN_INFO "PCI: %02x:%02x:%02x", bus, slot, func); + printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:", + bus, slot, func); for (i = 0; i < 256; i += 4) { if (!(i & 0x0f)) - printk("\n%04x:",i); + printk("\n %02x:",i); val = read_pci_config(bus, slot, func, i); for (j = 0; j < 4; j++) { @@ -96,20 +97,22 @@ void early_dump_pci_devices(void) for (func = 0; func < 8; func++) { u32 class; u8 type; + class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); if (class == 0xffffffff) - break; + continue; early_dump_pci_device(bus, slot, func); - /* No multi-function device? 
*/ - type = read_pci_config_byte(bus, slot, func, + if (func == 0) { + type = read_pci_config_byte(bus, slot, + func, PCI_HEADER_TYPE); - if (!(type & 0x80)) - break; + if (!(type & 0x80)) + break; + } } } } } - diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 9c49919e4d1c..6dd89555fbfa 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -495,26 +495,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, pci_siemens_interrupt_controller); /* - * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have - * 4096 bytes configuration space for each function of their processor - * configuration space. - */ -static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev) -{ - dev->cfg_size = pci_cfg_space_size_ext(dev); -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size); - -/* * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from * confusing the PCI engine: */ diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index f1065b129e9c..4061bb0f267d 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c @@ -50,8 +50,6 @@ static int __init pci_legacy_init(void) if (pci_root_bus) pci_bus_add_devices(pci_root_bus); - pcibios_fixup_peer_bridges(); - return 0; } @@ -67,6 +65,7 @@ int __init pci_subsys_init(void) pci_visws_init(); #endif pci_legacy_init(); + pcibios_fixup_peer_bridges(); pcibios_irq_init(); pcibios_init(); diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 89bf9242c80a..905bb526b133 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/acpi.h> #include <linux/bitmap.h> +#include <linux/sort.h> #include <asm/e820.h> #include <asm/pci_x86.h> @@ -24,24 +25,49 @@ /* Indicate if the mmcfg resources have been placed into the resource table. 
*/ static int __initdata pci_mmcfg_resources_inserted; +static __init int extend_mmcfg(int num) +{ + struct acpi_mcfg_allocation *new; + int new_num = pci_mmcfg_config_num + num; + + new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL); + if (!new) + return -1; + + if (pci_mmcfg_config) { + memcpy(new, pci_mmcfg_config, + sizeof(pci_mmcfg_config[0]) * new_num); + kfree(pci_mmcfg_config); + } + pci_mmcfg_config = new; + + return 0; +} + +static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end) +{ + int i = pci_mmcfg_config_num; + + pci_mmcfg_config_num++; + pci_mmcfg_config[i].address = addr; + pci_mmcfg_config[i].pci_segment = segment; + pci_mmcfg_config[i].start_bus_number = start; + pci_mmcfg_config[i].end_bus_number = end; +} + static const char __init *pci_mmcfg_e7520(void) { u32 win; raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); win = win & 0xf000; - if(win == 0x0000 || win == 0xf000) - pci_mmcfg_config_num = 0; - else { - pci_mmcfg_config_num = 1; - pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); - if (!pci_mmcfg_config) - return NULL; - pci_mmcfg_config[0].address = win << 16; - pci_mmcfg_config[0].pci_segment = 0; - pci_mmcfg_config[0].start_bus_number = 0; - pci_mmcfg_config[0].end_bus_number = 255; - } + if (win == 0x0000 || win == 0xf000) + return NULL; + + if (extend_mmcfg(1) == -1) + return NULL; + + fill_one_mmcfg(win << 16, 0, 0, 255); return "Intel Corporation E7520 Memory Controller Hub"; } @@ -50,13 +76,11 @@ static const char __init *pci_mmcfg_intel_945(void) { u32 pciexbar, mask = 0, len = 0; - pci_mmcfg_config_num = 1; - raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar); /* Enable bit */ if (!(pciexbar & 1)) - pci_mmcfg_config_num = 0; + return NULL; /* Size bits */ switch ((pciexbar >> 1) & 3) { @@ -73,28 +97,23 @@ static const char __init *pci_mmcfg_intel_945(void) len = 0x04000000U; break; default: - pci_mmcfg_config_num = 0; + return NULL; } /* Errata #2, things break when not aligned on a 256Mb boundary */ /* Can only happen in 64M/128M mode */ if ((pciexbar & mask) & 0x0fffffffU) - pci_mmcfg_config_num = 0; + return NULL; /* Don't hit the APIC registers and their friends */ if ((pciexbar & mask) >= 0xf0000000U) - pci_mmcfg_config_num = 0; - - if (pci_mmcfg_config_num) { - pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); - if (!pci_mmcfg_config) - return NULL; - pci_mmcfg_config[0].address = pciexbar & mask; - pci_mmcfg_config[0].pci_segment = 0; - pci_mmcfg_config[0].start_bus_number = 0; - pci_mmcfg_config[0].end_bus_number = (len >> 20) - 1; - } + return NULL; + + if (extend_mmcfg(1) == -1) + return NULL; + + fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1); return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; } @@ -138,22 +157,77 @@ static const char __init *pci_mmcfg_amd_fam10h(void) busnbits = 8; } - pci_mmcfg_config_num = (1 << segnbits); - pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]) * - pci_mmcfg_config_num, GFP_KERNEL); - if (!pci_mmcfg_config) + if (extend_mmcfg(1 << segnbits) == -1) return NULL; - for (i = 0; i < (1 << segnbits); i++) { - pci_mmcfg_config[i].address = base + (1<<28) * i; - pci_mmcfg_config[i].pci_segment = i; - pci_mmcfg_config[i].start_bus_number = 0; - pci_mmcfg_config[i].end_bus_number = (1 << busnbits) - 1; - } + for (i = 0; i < (1 << segnbits); i++) + fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1); return "AMD Family 10h NB"; } +static bool __initdata mcp55_checked; +static const char 
__init *pci_mmcfg_nvidia_mcp55(void) +{ + int bus; + int mcp55_mmconf_found = 0; + + static const u32 extcfg_regnum = 0x90; + static const u32 extcfg_regsize = 4; + static const u32 extcfg_enable_mask = 1<<31; + static const u32 extcfg_start_mask = 0xff<<16; + static const int extcfg_start_shift = 16; + static const u32 extcfg_size_mask = 0x3<<28; + static const int extcfg_size_shift = 28; + static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20}; + static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff}; + static const int extcfg_base_lshift = 25; + + /* + * do check if amd fam10h already took over + */ + if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked) + return NULL; + + mcp55_checked = true; + for (bus = 0; bus < 256; bus++) { + u64 base; + u32 l, extcfg; + u16 vendor, device; + int start, size_index, end; + + raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l); + vendor = l & 0xffff; + device = (l >> 16) & 0xffff; + + if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device) + continue; + + raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum, + extcfg_regsize, &extcfg); + + if (!(extcfg & extcfg_enable_mask)) + continue; + + if (extend_mmcfg(1) == -1) + continue; + + size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift; + base = extcfg & extcfg_base_mask[size_index]; + /* base could > 4G */ + base <<= extcfg_base_lshift; + start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; + end = start + extcfg_sizebus[size_index] - 1; + fill_one_mmcfg(base, 0, start, end); + mcp55_mmconf_found++; + } + + if (!mcp55_mmconf_found) + return NULL; + + return "nVidia MCP55"; +} + struct pci_mmcfg_hostbridge_probe { u32 bus; u32 devfn; @@ -171,8 +245,52 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { 0x1200, pci_mmcfg_amd_fam10h }, { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, 0x1200, pci_mmcfg_amd_fam10h }, + { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA, + 0x0369, pci_mmcfg_nvidia_mcp55 }, }; +static int __init cmp_mmcfg(const void *x1, const void *x2) +{ + const typeof(pci_mmcfg_config[0]) *m1 = x1; + const typeof(pci_mmcfg_config[0]) *m2 = x2; + int start1, start2; + + start1 = m1->start_bus_number; + start2 = m2->start_bus_number; + + return start1 - start2; +} + +static void __init pci_mmcfg_check_end_bus_number(void) +{ + int i; + typeof(pci_mmcfg_config[0]) *cfg, *cfgx; + + /* sort them at first */ + sort(pci_mmcfg_config, pci_mmcfg_config_num, + sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL); + + /* last one*/ + if (pci_mmcfg_config_num > 0) { + i = pci_mmcfg_config_num - 1; + cfg = &pci_mmcfg_config[i]; + if (cfg->end_bus_number < cfg->start_bus_number) + cfg->end_bus_number = 255; + } + + /* don't overlap please */ + for (i = 0; i < pci_mmcfg_config_num - 1; i++) { + cfg = &pci_mmcfg_config[i]; + cfgx = &pci_mmcfg_config[i+1]; + + if (cfg->end_bus_number < cfg->start_bus_number) + cfg->end_bus_number = 255; + + if (cfg->end_bus_number >= cfgx->start_bus_number) + cfg->end_bus_number = cfgx->start_bus_number - 1; + } +} + static int __init pci_mmcfg_check_hostbridge(void) { u32 l; @@ -186,31 +304,33 @@ static int __init pci_mmcfg_check_hostbridge(void) pci_mmcfg_config_num = 0; pci_mmcfg_config = NULL; - name = NULL; - for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) { + for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) { bus = pci_mmcfg_probes[i].bus; devfn = pci_mmcfg_probes[i].devfn; raw_pci_ops->read(0, bus, devfn, 0, 4, &l); vendor = l & 0xffff; device = (l >> 16) & 0xffff; + name = NULL; if 
(pci_mmcfg_probes[i].vendor == vendor && pci_mmcfg_probes[i].device == device) name = pci_mmcfg_probes[i].probe(); - } - if (name) { - printk(KERN_INFO "PCI: Found %s %s MMCONFIG support.\n", - name, pci_mmcfg_config_num ? "with" : "without"); + if (name) + printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n", + name); } - return name != NULL; + /* some end_bus_number is crazy, fix it */ + pci_mmcfg_check_end_bus_number(); + + return pci_mmcfg_config_num != 0; } static void __init pci_mmcfg_insert_resources(void) { -#define PCI_MMCFG_RESOURCE_NAME_LEN 19 +#define PCI_MMCFG_RESOURCE_NAME_LEN 24 int i; struct resource *res; char *names; @@ -228,9 +348,10 @@ static void __init pci_mmcfg_insert_resources(void) struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i]; num_buses = cfg->end_bus_number - cfg->start_bus_number + 1; res->name = names; - snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u", - cfg->pci_segment); - res->start = cfg->address; + snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, + "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment, + cfg->start_bus_number, cfg->end_bus_number); + res->start = cfg->address + (cfg->start_bus_number << 20); res->end = res->start + (num_buses << 20) - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; insert_resource(&iomem_resource, res); @@ -354,8 +475,6 @@ static void __init pci_mmcfg_reject_broken(int early) (pci_mmcfg_config[0].address == 0)) return; - cfg = &pci_mmcfg_config[0]; - for (i = 0; i < pci_mmcfg_config_num; i++) { int valid = 0; u64 addr, size; @@ -423,10 +542,10 @@ static void __init __pci_mmcfg_init(int early) known_bridge = 1; } - if (!known_bridge) { + if (!known_bridge) acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); - pci_mmcfg_reject_broken(early); - } + + pci_mmcfg_reject_broken(early); if ((pci_mmcfg_config_num == 0) || (pci_mmcfg_config == NULL) || diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index 30007ffc8e11..94349f8b2f96 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c @@ -112,13 +112,18 @@ static struct pci_raw_ops pci_mmcfg = { static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) { void __iomem *addr; - u32 size; - - size = (cfg->end_bus_number + 1) << 20; - addr = ioremap_nocache(cfg->address, size); + u64 start, size; + + start = cfg->start_bus_number; + start <<= 20; + start += cfg->address; + size = cfg->end_bus_number + 1 - cfg->start_bus_number; + size <<= 20; + addr = ioremap_nocache(start, size); if (addr) { printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n", - cfg->address, cfg->address + size - 1); + start, start + size - 1); + addr -= cfg->start_bus_number << 20; } return addr; } @@ -157,7 +162,7 @@ void __init pci_mmcfg_arch_free(void) for (i = 0; i < pci_mmcfg_config_num; ++i) { if (pci_mmcfg_virt[i].virt) { - iounmap(pci_mmcfg_virt[i].virt); + iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20)); pci_mmcfg_virt[i].virt = NULL; pci_mmcfg_virt[i].cfg = NULL; } diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index 274d06082f48..ce702c5b3a2c 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c @@ -12,6 +12,7 @@ #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/xcr.h> +#include <asm/suspend.h> static struct saved_context saved_context; diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index e3b6cf70d62c..5343540f2607 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c @@ -15,6 +15,7 @@ #include <asm/pgtable.h> #include 
<asm/mtrr.h> #include <asm/xcr.h> +#include <asm/suspend.h> static void fix_processor_context(void); diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 6dd000dd7933..65fdc86e923f 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -14,6 +14,7 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mtrr.h> +#include <asm/suspend.h> /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index 25d46c84eb08..4c559cf7da2d 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/major.h> #include <linux/param.h> +#include <linux/seq_file.h> #include <linux/serial.h> #include <linux/serialP.h> @@ -176,22 +177,24 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) /* Stub, once again.. */ } -static int rs_read_proc(char *page, char **start, off_t off, int count, - int *eof, void *data) +static int rs_proc_show(struct seq_file *m, void *v) { - int len = 0; - off_t begin = 0; - - len += sprintf(page, "serinfo:1.0 driver:%s\n", serial_version); - *eof = 1; - - if (off >= len + begin) - return 0; + seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version); + return 0; +} - *start = page + (off - begin); - return ((count < begin + len - off) ? count : begin + len - off); +static int rs_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, rs_proc_show, NULL); } +static const struct file_operations rs_proc_fops = { + .owner = THIS_MODULE, + .open = rs_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; static struct tty_operations serial_ops = { .open = rs_open, @@ -203,7 +206,7 @@ static struct tty_operations serial_ops = { .chars_in_buffer = rs_chars_in_buffer, .hangup = rs_hangup, .wait_until_sent = rs_wait_until_sent, - .read_proc = rs_read_proc + .proc_fops = &rs_proc_fops, }; int __init rs_init(void) |
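The lguest boot.c changes above replace direct hcall() traps with kvm_hypercall0..3() and split lazy_hcall() into arity-specific helpers, but the underlying batching idea is unchanged: outside lazy mode a call is issued immediately; inside lazy mode it is queued in lguest_data.hcalls[] and flushed later by LHCALL_FLUSH_ASYNC (or by a real call when the table is full). Below is a minimal userspace sketch of that pattern; the ring, issue_call() stub and lazy_mode flag are illustrative stand-ins, not the kernel's actual structures.

#include <stdio.h>
#include <string.h>

#define NR_ASYNC 64                     /* illustrative ring size */

struct pending {
        unsigned long call, arg1, arg2, arg3;
        unsigned char status;           /* 0xFF = slot free, 0 = queued */
};

static struct pending ring[NR_ASYNC];
static unsigned int next_call;
static int lazy_mode;                   /* stands in for paravirt_get_lazy_mode() */

/* Stand-in for the real hypercall instruction (the real code traps to the host). */
static void issue_call(unsigned long call, unsigned long a1,
                       unsigned long a2, unsigned long a3)
{
        printf("hypercall %lu(%lu, %lu, %lu)\n", call, a1, a2, a3);
}

/* Queue a call; if the ring is full, make a real call, which also
 * tells the host to drain everything already queued. */
static void async_call(unsigned long call, unsigned long a1,
                       unsigned long a2, unsigned long a3)
{
        if (ring[next_call].status != 0xFF) {
                issue_call(call, a1, a2, a3);
                return;
        }
        ring[next_call].call = call;
        ring[next_call].arg1 = a1;
        ring[next_call].arg2 = a2;
        ring[next_call].arg3 = a3;
        ring[next_call].status = 0;
        next_call = (next_call + 1) % NR_ASYNC;
}

/* The shape behind lazy_hcall1/2/3: immediate when not lazy, queued when lazy. */
static void lazy_call(unsigned long call, unsigned long a1,
                      unsigned long a2, unsigned long a3)
{
        if (!lazy_mode)
                issue_call(call, a1, a2, a3);
        else
                async_call(call, a1, a2, a3);
}

int main(void)
{
        memset(ring, 0xFF, sizeof(ring));       /* every slot starts out free */

        lazy_call(1, 10, 0, 0);                 /* not lazy: issued immediately */

        lazy_mode = 1;
        lazy_call(2, 20, 21, 0);                /* lazy: only queued */
        lazy_call(3, 30, 31, 32);

        lazy_mode = 0;
        issue_call(0 /* flush */, 0, 0, 0);     /* leave-lazy: force a drain */
        return 0;
}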
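The mmconfig-shared.c rework above replaces the per-probe kzalloc bookkeeping with extend_mmcfg()/fill_one_mmcfg() and, because several apertures can now coexist, adds pci_mmcfg_check_end_bus_number() to sort them by starting bus and sanitise the end-bus values: an end below its own start is treated as 255, and an end that runs into the next region is clamped to just before it. The following standalone sketch reproduces that fixup with qsort() over a plain array; the struct mmcfg type and the sample firmware values are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

struct mmcfg {
        unsigned long long address;
        int start_bus, end_bus;
};

static int cmp_start(const void *a, const void *b)
{
        return ((const struct mmcfg *)a)->start_bus -
               ((const struct mmcfg *)b)->start_bus;
}

static void fix_end_bus(struct mmcfg *cfg, int n)
{
        int i;

        qsort(cfg, n, sizeof(*cfg), cmp_start);

        /* Last region: an inverted range means "up to the last bus". */
        if (n > 0 && cfg[n - 1].end_bus < cfg[n - 1].start_bus)
                cfg[n - 1].end_bus = 255;

        /* Earlier regions must not overlap the region that follows. */
        for (i = 0; i < n - 1; i++) {
                if (cfg[i].end_bus < cfg[i].start_bus)
                        cfg[i].end_bus = 255;
                if (cfg[i].end_bus >= cfg[i + 1].start_bus)
                        cfg[i].end_bus = cfg[i + 1].start_bus - 1;
        }
}

int main(void)
{
        /* Two made-up apertures with the kind of bogus end-bus values firmware reports. */
        struct mmcfg cfg[] = {
                { 0xe0000000ULL, 0x40, 0x00 },
                { 0xe0000000ULL, 0x00, 0xff },
        };
        int i;

        fix_end_bus(cfg, 2);
        for (i = 0; i < 2; i++)
                printf("MMCONFIG at %llx buses %02x-%02x\n",
                       cfg[i].address, cfg[i].start_bus, cfg[i].end_bus);
        return 0;
}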
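In mmconfig_64.c, mcfg_ioremap() now maps only the bus range the aperture actually covers instead of always starting at bus 0, then subtracts start_bus_number << 20 from the returned pointer so the existing virt + (bus << 20) + (devfn << 12) + reg arithmetic still lands inside the mapping; pci_mmcfg_arch_free() has to add the same offset back before iounmap(). A rough userspace illustration of that bookkeeping, with malloc()/free() standing in for ioremap_nocache()/iounmap(), an invented bus range, and the usual 1 MiB-per-bus MMCONFIG layout assumed:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* One made-up aperture covering buses 0x40-0x43 (1 MiB per bus). */
        const int start_bus = 0x40, end_bus = 0x43;
        size_t size = (size_t)(end_bus + 1 - start_bus) << 20;
        char *map, *cfg;
        uintptr_t virt;

        map = malloc(size);     /* stands in for ioremap_nocache(addr + (start_bus << 20), size) */
        if (!map)
                return 1;

        /* Bias the base so virt + (bus << 20) works even though buses
         * below start_bus were never mapped. */
        virt = (uintptr_t)map - ((uintptr_t)start_bus << 20);

        /* Config space of bus 0x42, devfn 0, register 0: */
        cfg = (char *)(virt + ((uintptr_t)0x42 << 20) + (0 << 12) + 0);
        printf("offset into mapping: %#zx of %#zx bytes\n",
               (size_t)(cfg - map), size);

        /* Freeing must add the bias back, as the new pci_mmcfg_arch_free() does. */
        free((void *)(virt + ((uintptr_t)start_bus << 20)));
        return 0;
}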
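The xtensa ISS console hunk is one instance of the tree-wide move from read_proc callbacks to proc_fops backed by seq_file: the show routine is a plain seq_printf(), single_open() wires it to the file, and seq_read/seq_lseek/single_release replace the hand-rolled page/start/off arithmetic (for a tty driver the resulting fops is simply hung off tty_operations.proc_fops, as the hunk does). As a sketch, the same pattern written as an ordinary procfs module might look like the code below; the demo_* names and the proc path are invented, and the file_operations-based proc_create() matches the 2.6.30-era API this diff targets, not current kernels.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
        /* Everything the old read_proc hand-rolled is now one seq_printf(). */
        seq_printf(m, "serinfo:1.0 driver:%s\n", "demo");
        return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init demo_proc_init(void)
{
        if (!proc_create("driver/serial-demo", 0, NULL, &demo_proc_fops))
                return -ENOMEM;
        return 0;
}

static void __exit demo_proc_exit(void)
{
        remove_proc_entry("driver/serial-demo", NULL);
}

module_init(demo_proc_init);
module_exit(demo_proc_exit);
MODULE_LICENSE("GPL");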