author | Dmitry Torokhov <dtor_core@ameritech.net> | 2006-04-02 07:08:05 +0200
committer | Dmitry Torokhov <dtor_core@ameritech.net> | 2006-04-02 07:08:05 +0200
commit | 95d465fd750897ab32462a6702fbfe1b122cbbc0 (patch)
tree | 65c38b2f11c51bb6932e44dd6c92f15b0091abfe /include/asm-powerpc
parent | Input: gameport - fix memory leak (diff)
parent | Merge master.kernel.org:/home/rmk/linux-2.6-serial (diff)
Manual merge with Linus.
Conflicts:
arch/powerpc/kernel/setup-common.c
drivers/input/keyboard/hil_kbd.c
drivers/input/mouse/hil_ptr.c
Diffstat (limited to 'include/asm-powerpc')
40 files changed, 719 insertions, 473 deletions
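Of the changes below, the atomic.h hunk is the most self-contained: it replaces the generic cmpxchg-loop implementation of atomic_add_unless() with hand-coded PowerPC assembly. Both versions implement the same contract: add @a to @v unless @v currently equals @u, and return non-zero only if the add was performed. The following is a rough illustration of that contract as a standalone C11 sketch, not kernel code; the name add_unless and the small test harness are made up for this example.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative model of atomic_add_unless(): add 'a' to '*v' unless the
 * current value is 'u'.  Returns non-zero if the add was performed. */
static int add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* On failure the CAS reloads the current value into 'c'. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return 1;
	}
	return 0;
}

int main(void)
{
	atomic_int refcount = 1;

	/* atomic_inc_not_zero(v) in the patch is atomic_add_unless(v, 1, 0) */
	printf("grabbed: %d, count now %d\n",
	       add_unless(&refcount, 1, 0), atomic_load(&refcount));

	atomic_store(&refcount, 0);
	printf("grabbed: %d, count now %d\n",
	       add_unless(&refcount, 1, 0), atomic_load(&refcount));
	return 0;
}

The kernel version in the hunk below expresses the same retry loop directly with the PowerPC load-reserve/store-conditional pair (lwarx/stwcx.), adding LWSYNC/ISYNC barriers on SMP.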
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h index 147a38dcc766..bb3c0ab7e667 100644 --- a/include/asm-powerpc/atomic.h +++ b/include/asm-powerpc/atomic.h @@ -8,6 +8,7 @@ typedef struct { volatile int counter; } atomic_t; #ifdef __KERNEL__ +#include <linux/compiler.h> #include <asm/synch.h> #include <asm/asm-compat.h> @@ -176,20 +177,29 @@ static __inline__ int atomic_dec_return(atomic_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int t; + + __asm__ __volatile__ ( + LWSYNC_ON_SMP +"1: lwarx %0,0,%1 # atomic_add_unless\n\ + cmpw 0,%0,%3 \n\ + beq- 2f \n\ + add %0,%2,%0 \n" + PPC405_ERR77(0,%2) +" stwcx. %0,0,%1 \n\ + bne- 1b \n" + ISYNC_ON_SMP +" subf %0,%2,%0 \n\ +2:" + : "=&r" (t) + : "r" (&v->counter), "r" (a), "r" (u) + : "cc", "memory"); + + return t != u; +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index bf6941a810b8..d1c2a4405660 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h @@ -184,72 +184,7 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr) : "cc"); } -/* Non-atomic versions */ -static __inline__ int test_bit(unsigned long nr, - __const__ volatile unsigned long *addr) -{ - return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} - -static __inline__ void __set_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p |= mask; -} - -static __inline__ void __clear_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p &= ~mask; -} - -static __inline__ void __change_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p ^= mask; -} - -static __inline__ int __test_and_set_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -static __inline__ int __test_and_clear_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -static __inline__ int __test_and_change_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} +#include <asm-generic/bitops/non-atomic.h> /* * Return the zero-based bit position (LE, not IBM bit numbering) of @@ -310,16 +245,9 @@ static __inline__ int fls(unsigned int x) asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); return 32 - lz; } -#define fls64(x) 
generic_fls64(x) +#include <asm-generic/bitops/fls64.h> -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/hweight.h> #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) unsigned long find_next_zero_bit(const unsigned long *addr, @@ -397,32 +325,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr, #define minix_find_first_zero_bit(addr,size) \ find_first_zero_le_bit((unsigned long *)addr, size) -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(const unsigned long *b) -{ -#ifdef CONFIG_PPC64 - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 64; - return __ffs(b[2]) + 128; -#else - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -#endif -} +#include <asm-generic/bitops/sched.h> #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h index 99817a802ca4..f44b529e3298 100644 --- a/include/asm-powerpc/bug.h +++ b/include/asm-powerpc/bug.h @@ -30,34 +30,60 @@ struct bug_entry *find_bug(unsigned long bugaddr); #ifdef CONFIG_BUG +/* + * BUG_ON() and WARN_ON() do their best to cooperate with compile-time + * optimisations. However depending on the complexity of the condition + * some compiler versions may not produce optimal results. 
+ */ + #define BUG() do { \ __asm__ __volatile__( \ "1: twi 31,0,0\n" \ ".section __bug_table,\"a\"\n" \ - "\t"PPC_LONG" 1b,%0,%1,%2\n" \ + "\t"PPC_LONG" 1b,%0,%1,%2\n" \ ".previous" \ : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \ } while (0) #define BUG_ON(x) do { \ - __asm__ __volatile__( \ + if (__builtin_constant_p(x)) { \ + if (x) \ + BUG(); \ + } else { \ + __asm__ __volatile__( \ "1: "PPC_TLNEI" %0,0\n" \ ".section __bug_table,\"a\"\n" \ - "\t"PPC_LONG" 1b,%1,%2,%3\n" \ + "\t"PPC_LONG" 1b,%1,%2,%3\n" \ ".previous" \ : : "r" ((long)(x)), "i" (__LINE__), \ "i" (__FILE__), "i" (__FUNCTION__)); \ + } \ } while (0) -#define WARN_ON(x) do { \ +#define __WARN() do { \ __asm__ __volatile__( \ + "1: twi 31,0,0\n" \ + ".section __bug_table,\"a\"\n" \ + "\t"PPC_LONG" 1b,%0,%1,%2\n" \ + ".previous" \ + : : "i" (__LINE__ + BUG_WARNING_TRAP), \ + "i" (__FILE__), "i" (__FUNCTION__)); \ +} while (0) + +#define WARN_ON(x) do { \ + if (__builtin_constant_p(x)) { \ + if (x) \ + __WARN(); \ + } else { \ + __asm__ __volatile__( \ "1: "PPC_TLNEI" %0,0\n" \ ".section __bug_table,\"a\"\n" \ - "\t"PPC_LONG" 1b,%1,%2,%3\n" \ + "\t"PPC_LONG" 1b,%1,%2,%3\n" \ ".previous" \ : : "r" ((long)(x)), \ "i" (__LINE__ + BUG_WARNING_TRAP), \ "i" (__FILE__), "i" (__FUNCTION__)); \ + } \ } while (0) #define HAVE_ARCH_BUG diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 5638518968c3..4321483cce51 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -102,38 +102,40 @@ extern void do_cpu_ftr_fixups(unsigned long offset); #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) #define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000) -#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) +#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) #ifdef __powerpc64__ /* Add the 64b processor unique features in the top half of the word */ -#define CPU_FTR_SLB ASM_CONST(0x0000000100000000) -#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000) -#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000) -#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000) -#define CPU_FTR_IABR ASM_CONST(0x0000002000000000) -#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000) +#define CPU_FTR_SLB ASM_CONST(0x0000000100000000) +#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000) +#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000) +#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000) +#define CPU_FTR_IABR ASM_CONST(0x0000002000000000) +#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000) #define CPU_FTR_CTRL ASM_CONST(0x0000008000000000) -#define CPU_FTR_SMT ASM_CONST(0x0000010000000000) -#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000) +#define CPU_FTR_SMT ASM_CONST(0x0000010000000000) +#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000) #define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000) #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000) #define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0000100000000000) #define CPU_FTR_PAUSE_ZERO ASM_CONST(0x0000200000000000) +#define CPU_FTR_PURR ASM_CONST(0x0000400000000000) #else /* ensure on 32b processors the flags are available for compiling but * don't do anything */ -#define CPU_FTR_SLB ASM_CONST(0x0) -#define CPU_FTR_16M_PAGE ASM_CONST(0x0) -#define CPU_FTR_TLBIEL ASM_CONST(0x0) -#define CPU_FTR_NOEXECUTE ASM_CONST(0x0) -#define CPU_FTR_IABR ASM_CONST(0x0) -#define CPU_FTR_MMCRA ASM_CONST(0x0) +#define CPU_FTR_SLB ASM_CONST(0x0) 
+#define CPU_FTR_16M_PAGE ASM_CONST(0x0) +#define CPU_FTR_TLBIEL ASM_CONST(0x0) +#define CPU_FTR_NOEXECUTE ASM_CONST(0x0) +#define CPU_FTR_IABR ASM_CONST(0x0) +#define CPU_FTR_MMCRA ASM_CONST(0x0) #define CPU_FTR_CTRL ASM_CONST(0x0) -#define CPU_FTR_SMT ASM_CONST(0x0) -#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0) +#define CPU_FTR_SMT ASM_CONST(0x0) +#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0) #define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0) #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0) #define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0) +#define CPU_FTR_PURR ASM_CONST(0x0) #endif #ifndef __ASSEMBLY__ @@ -186,153 +188,154 @@ extern void do_cpu_ftr_fixups(unsigned long offset); !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \ !defined(CONFIG_BOOKE)) -enum { - CPU_FTRS_PPC601 = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE, - CPU_FTRS_603 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP, - CPU_FTRS_604 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE, - CPU_FTRS_740_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, - CPU_FTRS_740 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, - CPU_FTRS_750 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, - CPU_FTRS_750FX1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | - CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM, - CPU_FTRS_750FX2 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | - CPU_FTR_NO_DPM, - CPU_FTRS_750FX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | - CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS, - CPU_FTRS_750GX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | - CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | - CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | - CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS, - CPU_FTRS_7400_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | - CPU_FTR_MAYBE_CAN_NAP, - CPU_FTRS_7400 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | - CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | - CPU_FTR_MAYBE_CAN_NAP, - CPU_FTRS_7450_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | - CPU_FTR_NEED_COHERENT, - CPU_FTRS_7450_21 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | - CPU_FTR_NEED_COHERENT, - CPU_FTRS_7450_23 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | 
CPU_FTR_SPEC7450 | - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT, - CPU_FTRS_7455_1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | - CPU_FTR_NEED_COHERENT, - CPU_FTRS_7455_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | - CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS, - CPU_FTRS_7455 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | - CPU_FTR_NEED_COHERENT, - CPU_FTRS_7447_10 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | - CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC, - CPU_FTRS_7447 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | - CPU_FTR_NEED_COHERENT, - CPU_FTRS_7447A = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | - CPU_FTR_NEED_COHERENT, - CPU_FTRS_82XX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB, - CPU_FTRS_G2_LE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS, - CPU_FTRS_E300 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | - CPU_FTR_COMMON, - CPU_FTRS_CLASSIC32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE, - CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE, - CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_NODSISRALIGN, - CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN, - CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, - CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_NODSISRALIGN, - CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_NODSISRALIGN, - CPU_FTRS_E200 = CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN, - CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_NODSISRALIGN, - CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN, - CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN, +#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE) +#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP) +#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE) +#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | 
CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP) +#define CPU_FTRS_740 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP) +#define CPU_FTRS_750 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP) +#define CPU_FTRS_750FX1 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM) +#define CPU_FTRS_750FX2 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_NO_DPM) +#define CPU_FTRS_750FX (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_750GX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \ + CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ + CPU_FTR_MAYBE_CAN_NAP) +#define CPU_FTRS_7400 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ + CPU_FTR_MAYBE_CAN_NAP) +#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NEED_COHERENT) +#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ + CPU_FTR_NEED_COHERENT) +#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT) +#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_NEED_COHERENT) +#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_7455 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_NEED_COHERENT) +#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | 
CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC) +#define CPU_FTRS_7447 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_NEED_COHERENT) +#define CPU_FTRS_7447A (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_NEED_COHERENT) +#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB) +#define CPU_FTRS_G2_LE (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_E300 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_COMMON) +#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE) +#define CPU_FTRS_POWER3_32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE) +#define CPU_FTRS_POWER4_32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_970_32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_8XX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB) +#define CPU_FTRS_40X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_44X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_E500 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_E500_2 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) #ifdef __powerpc64__ - CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_HPTE_TABLE | CPU_FTR_IABR, - CPU_FTRS_RS64 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | - CPU_FTR_MMCRA | CPU_FTR_CTRL, - CPU_FTRS_POWER4 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA, - CPU_FTRS_PPC970 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | - CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA, - CPU_FTRS_POWER5 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | - CPU_FTR_MMCRA | CPU_FTR_SMT | - CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | - CPU_FTR_MMCRA_SIHV, - CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | - CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | - CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO, - CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2, +#define CPU_FTRS_POWER3 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_HPTE_TABLE | 
CPU_FTR_IABR) +#define CPU_FTRS_RS64 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ + CPU_FTR_MMCRA | CPU_FTR_CTRL) +#define CPU_FTRS_POWER4 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA) +#define CPU_FTRS_PPC970 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ + CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA) +#define CPU_FTRS_POWER5 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ + CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ + CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR) +#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ + CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO) +#define CPU_FTRS_COMPATIBLE (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ + CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2) #endif - CPU_FTRS_POSSIBLE = #ifdef __powerpc64__ - CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | - CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL | - CPU_FTR_CI_LARGE_PAGE | +#define CPU_FTRS_POSSIBLE \ + (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ + CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL | \ + CPU_FTR_CI_LARGE_PAGE) #else +enum { + CPU_FTRS_POSSIBLE = #if CLASSIC_PPC CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU | CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 | @@ -366,14 +369,18 @@ enum { #ifdef CONFIG_E500 CPU_FTRS_E500 | CPU_FTRS_E500_2 | #endif -#endif /* __powerpc64__ */ 0, +}; +#endif /* __powerpc64__ */ - CPU_FTRS_ALWAYS = #ifdef __powerpc64__ - CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & - CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL & +#define CPU_FTRS_ALWAYS \ + (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ + CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL & \ + CPU_FTRS_POSSIBLE) #else +enum { + CPU_FTRS_ALWAYS = #if CLASSIC_PPC CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU & CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 & @@ -407,9 +414,9 @@ enum { #ifdef CONFIG_E500 CPU_FTRS_E500 & CPU_FTRS_E500_2 & #endif -#endif /* __powerpc64__ */ CPU_FTRS_POSSIBLE, }; +#endif /* __powerpc64__ */ static inline int cpu_has_feature(unsigned long feature) { diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h index 6d68ad7e0ea3..a21185d47883 100644 --- a/include/asm-powerpc/cputime.h +++ b/include/asm-powerpc/cputime.h @@ -1 +1,203 @@ +/* + * Definitions for measuring cputime on powerpc machines. + * + * Copyright (C) 2006 Paul Mackerras, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in + * the same units as the timebase. Otherwise we measure cpu time + * in jiffies using the generic definitions. 
+ */ + +#ifndef __POWERPC_CPUTIME_H +#define __POWERPC_CPUTIME_H + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING #include <asm-generic/cputime.h> +#else + +#include <linux/types.h> +#include <linux/time.h> +#include <asm/div64.h> +#include <asm/time.h> +#include <asm/param.h> + +typedef u64 cputime_t; +typedef u64 cputime64_t; + +#define cputime_zero ((cputime_t)0) +#define cputime_max ((~((cputime_t)0) >> 1) - 1) +#define cputime_add(__a, __b) ((__a) + (__b)) +#define cputime_sub(__a, __b) ((__a) - (__b)) +#define cputime_div(__a, __n) ((__a) / (__n)) +#define cputime_halve(__a) ((__a) >> 1) +#define cputime_eq(__a, __b) ((__a) == (__b)) +#define cputime_gt(__a, __b) ((__a) > (__b)) +#define cputime_ge(__a, __b) ((__a) >= (__b)) +#define cputime_lt(__a, __b) ((__a) < (__b)) +#define cputime_le(__a, __b) ((__a) <= (__b)) + +#define cputime64_zero ((cputime64_t)0) +#define cputime64_add(__a, __b) ((__a) + (__b)) +#define cputime_to_cputime64(__ct) (__ct) + +#ifdef __KERNEL__ + +/* + * Convert cputime <-> jiffies + */ +extern u64 __cputime_jiffies_factor; + +static inline unsigned long cputime_to_jiffies(const cputime_t ct) +{ + return mulhdu(ct, __cputime_jiffies_factor); +} + +static inline cputime_t jiffies_to_cputime(const unsigned long jif) +{ + cputime_t ct; + unsigned long sec; + + /* have to be a little careful about overflow */ + ct = jif % HZ; + sec = jif / HZ; + if (ct) { + ct *= tb_ticks_per_sec; + do_div(ct, HZ); + } + if (sec) + ct += (cputime_t) sec * tb_ticks_per_sec; + return ct; +} + +static inline u64 cputime64_to_jiffies64(const cputime_t ct) +{ + return mulhdu(ct, __cputime_jiffies_factor); +} + +/* + * Convert cputime <-> milliseconds + */ +extern u64 __cputime_msec_factor; + +static inline unsigned long cputime_to_msecs(const cputime_t ct) +{ + return mulhdu(ct, __cputime_msec_factor); +} + +static inline cputime_t msecs_to_cputime(const unsigned long ms) +{ + cputime_t ct; + unsigned long sec; + + /* have to be a little careful about overflow */ + ct = ms % 1000; + sec = ms / 1000; + if (ct) { + ct *= tb_ticks_per_sec; + do_div(ct, 1000); + } + if (sec) + ct += (cputime_t) sec * tb_ticks_per_sec; + return ct; +} + +/* + * Convert cputime <-> seconds + */ +extern u64 __cputime_sec_factor; + +static inline unsigned long cputime_to_secs(const cputime_t ct) +{ + return mulhdu(ct, __cputime_sec_factor); +} + +static inline cputime_t secs_to_cputime(const unsigned long sec) +{ + return (cputime_t) sec * tb_ticks_per_sec; +} + +/* + * Convert cputime <-> timespec + */ +static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) +{ + u64 x = ct; + unsigned int frac; + + frac = do_div(x, tb_ticks_per_sec); + p->tv_sec = x; + x = (u64) frac * 1000000000; + do_div(x, tb_ticks_per_sec); + p->tv_nsec = x; +} + +static inline cputime_t timespec_to_cputime(const struct timespec *p) +{ + cputime_t ct; + + ct = (u64) p->tv_nsec * tb_ticks_per_sec; + do_div(ct, 1000000000); + return ct + (u64) p->tv_sec * tb_ticks_per_sec; +} + +/* + * Convert cputime <-> timeval + */ +static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) +{ + u64 x = ct; + unsigned int frac; + + frac = do_div(x, tb_ticks_per_sec); + p->tv_sec = x; + x = (u64) frac * 1000000; + do_div(x, tb_ticks_per_sec); + p->tv_usec = x; +} + +static inline cputime_t timeval_to_cputime(const struct timeval *p) +{ + cputime_t ct; + + ct = (u64) p->tv_usec * tb_ticks_per_sec; + do_div(ct, 1000000); + return ct + (u64) p->tv_sec * tb_ticks_per_sec; +} + +/* + * Convert cputime <-> clock_t (units of 
1/USER_HZ seconds) + */ +extern u64 __cputime_clockt_factor; + +static inline unsigned long cputime_to_clock_t(const cputime_t ct) +{ + return mulhdu(ct, __cputime_clockt_factor); +} + +static inline cputime_t clock_t_to_cputime(const unsigned long clk) +{ + cputime_t ct; + unsigned long sec; + + /* have to be a little careful about overflow */ + ct = clk % USER_HZ; + sec = clk / USER_HZ; + if (ct) { + ct *= tb_ticks_per_sec; + do_div(ct, USER_HZ); + } + if (sec) + ct += (cputime_t) sec * tb_ticks_per_sec; + return ct; +} + +#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) + +#endif /* __KERNEL__ */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* __POWERPC_CPUTIME_H */ diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h index f804b34cf06a..77069df92bf8 100644 --- a/include/asm-powerpc/firmware.h +++ b/include/asm-powerpc/firmware.h @@ -41,6 +41,7 @@ #define FW_FEATURE_MULTITCE (1UL<<19) #define FW_FEATURE_SPLPAR (1UL<<20) #define FW_FEATURE_ISERIES (1UL<<21) +#define FW_FEATURE_LPAR (1UL<<22) enum { #ifdef CONFIG_PPC64 @@ -51,10 +52,10 @@ enum { FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ | FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE | - FW_FEATURE_SPLPAR, + FW_FEATURE_SPLPAR | FW_FEATURE_LPAR, FW_FEATURE_PSERIES_ALWAYS = 0, - FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES, - FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES, + FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, + FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, FW_FEATURE_POSSIBLE = #ifdef CONFIG_PPC_PSERIES FW_FEATURE_PSERIES_POSSIBLE | @@ -81,22 +82,11 @@ enum { /* This is used to identify firmware features which are available * to the kernel. 
*/ -extern unsigned long ppc64_firmware_features; +extern unsigned long powerpc_firmware_features; -static inline unsigned long firmware_has_feature(unsigned long feature) -{ - return (FW_FEATURE_ALWAYS & feature) || - (FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature); -} - -#ifdef CONFIG_PPC_PSERIES -typedef struct { - unsigned long val; - char * name; -} firmware_feature_t; - -extern firmware_feature_t firmware_features_table[]; -#endif +#define firmware_has_feature(feature) \ + ((FW_FEATURE_ALWAYS & (feature)) || \ + (FW_FEATURE_POSSIBLE & powerpc_firmware_features & (feature))) extern void system_reset_fwnmi(void); extern void machine_check_fwnmi(void); diff --git a/include/asm-powerpc/floppy.h b/include/asm-powerpc/floppy.h index e258778ca429..608164c39efb 100644 --- a/include/asm-powerpc/floppy.h +++ b/include/asm-powerpc/floppy.h @@ -35,6 +35,7 @@ #ifdef CONFIG_PCI #include <linux/pci.h> +#include <asm/ppc-pci.h> /* for ppc64_isabridge_dev */ #define fd_dma_setup(addr,size,mode,io) powerpc_fd_dma_setup(addr,size,mode,io) @@ -52,12 +53,12 @@ static __inline__ int powerpc_fd_dma_setup(char *addr, unsigned long size, if (bus_addr && (addr != prev_addr || size != prev_size || dir != prev_dir)) { /* different from last time -- unmap prev */ - pci_unmap_single(NULL, bus_addr, prev_size, prev_dir); + pci_unmap_single(ppc64_isabridge_dev, bus_addr, prev_size, prev_dir); bus_addr = 0; } if (!bus_addr) /* need to map it */ - bus_addr = pci_map_single(NULL, addr, size, dir); + bus_addr = pci_map_single(ppc64_isabridge_dev, addr, size, dir); /* remember this one as prev */ prev_addr = addr; diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h index 39e85f320a76..f1b3c00bc1ce 100644 --- a/include/asm-powerpc/futex.h +++ b/include/asm-powerpc/futex.h @@ -81,5 +81,11 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + return -ENOSYS; +} + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_FUTEX_H */ diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h index 38ca9ad6110d..b72c04f3f551 100644 --- a/include/asm-powerpc/hvcall.h +++ b/include/asm-powerpc/hvcall.h @@ -9,6 +9,7 @@ #define H_Closed 2 /* Resource closed */ #define H_Constrained 4 /* Resource request constrained to max allowed */ #define H_InProgress 14 /* Kind of like busy */ +#define H_Pending 17 /* returned from H_POLL_PENDING */ #define H_Continue 18 /* Returned from H_Join on success */ #define H_LongBusyStartRange 9900 /* Start of long busy range */ #define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */ diff --git a/include/asm-powerpc/hvconsole.h b/include/asm-powerpc/hvconsole.h index 34daf7b9b62f..35ea69e8121f 100644 --- a/include/asm-powerpc/hvconsole.h +++ b/include/asm-powerpc/hvconsole.h @@ -24,28 +24,18 @@ #ifdef __KERNEL__ /* - * This is the max number of console adapters that can/will be found as - * console devices on first stage console init. Any number beyond this range - * can't be used as a console device but is still a valid tty device. + * PSeries firmware will only send/recv up to 16 bytes of character data per + * hcall. 
*/ -#define MAX_NR_HVC_CONSOLES 16 +#define MAX_VIO_PUT_CHARS 16 +#define SIZE_VIO_GET_CHARS 16 -/* implemented by a low level driver */ -struct hv_ops { - int (*get_chars)(uint32_t vtermno, char *buf, int count); - int (*put_chars)(uint32_t vtermno, const char *buf, int count); -}; +/* + * Vio firmware always attempts to fetch MAX_VIO_GET_CHARS chars. The 'count' + * parm is included to conform to put_chars() function pointer template + */ extern int hvc_get_chars(uint32_t vtermno, char *buf, int count); extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count); -struct hvc_struct; - -/* Register a vterm and a slot index for use as a console (console_init) */ -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); -/* register a vterm for hvc tty operation (module_init or hotplug add) */ -extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq, - struct hv_ops *ops); -/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ -extern int __devexit hvc_remove(struct hvc_struct *hp); #endif /* __KERNEL__ */ #endif /* _PPC64_HVCONSOLE_H */ diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index 8eb7e857ec4c..51f87d9993b6 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h @@ -479,6 +479,10 @@ extern int distribute_irqs; struct irqaction; struct pt_regs; +#define __ARCH_HAS_DO_SOFTIRQ + +extern void __do_softirq(void); + #ifdef CONFIG_IRQSTACKS /* * Per-cpu stacks for handling hard and soft interrupts. @@ -491,8 +495,6 @@ extern void call_do_softirq(struct thread_info *tp); extern int call___do_IRQ(int irq, struct pt_regs *regs, struct thread_info *tp); -#define __ARCH_HAS_DO_SOFTIRQ - #else #define irq_ctx_init() diff --git a/include/asm-powerpc/iseries/mf.h b/include/asm-powerpc/iseries/mf.h index 857e5202fc78..eb851a9c9e5c 100644 --- a/include/asm-powerpc/iseries/mf.h +++ b/include/asm-powerpc/iseries/mf.h @@ -41,16 +41,11 @@ extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type, unsigned count, MFCompleteHandler hdlr, void *userToken); extern void mf_power_off(void); -extern void mf_reboot(void); +extern void mf_reboot(char *cmd); extern void mf_display_src(u32 word); extern void mf_display_progress(u16 value); -extern void mf_clear_src(void); extern void mf_init(void); -extern int mf_get_rtc(struct rtc_time *tm); -extern int mf_get_boot_rtc(struct rtc_time *tm); -extern int mf_set_rtc(struct rtc_time *tm); - #endif /* _ASM_POWERPC_ISERIES_MF_H */ diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h index 7c16265568e0..c01786ab5fa6 100644 --- a/include/asm-powerpc/kdebug.h +++ b/include/asm-powerpc/kdebug.h @@ -16,13 +16,9 @@ struct die_args { int signr; }; -/* - Note - you should never unregister because that can race with NMIs. - If you really want to do it first unregister - then synchronize_sched - - then free. - */ -int register_die_notifier(struct notifier_block *nb); -extern struct notifier_block *powerpc_die_chain; +extern int register_die_notifier(struct notifier_block *); +extern int unregister_die_notifier(struct notifier_block *); +extern struct atomic_notifier_head powerpc_die_chain; /* Grossly misnamed. 
*/ enum die_val { @@ -37,7 +33,7 @@ enum die_val { static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) { struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; - return notifier_call_chain(&powerpc_die_chain, val, &args); + return atomic_notifier_call_chain(&powerpc_die_chain, val, &args); } #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h index d3546c4c9f46..0c5880f70225 100644 --- a/include/asm-powerpc/lmb.h +++ b/include/asm-powerpc/lmb.h @@ -19,8 +19,6 @@ #define MAX_LMB_REGIONS 128 -#define LMB_ALLOC_ANYWHERE 0 - struct lmb_property { unsigned long base; unsigned long size; @@ -43,20 +41,19 @@ extern struct lmb lmb; extern void __init lmb_init(void); extern void __init lmb_analyze(void); -extern long __init lmb_add(unsigned long, unsigned long); -extern long __init lmb_reserve(unsigned long, unsigned long); -extern unsigned long __init lmb_alloc(unsigned long, unsigned long); -extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long, - unsigned long); +extern long __init lmb_add(unsigned long base, unsigned long size); +extern long __init lmb_reserve(unsigned long base, unsigned long size); +extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align); +extern unsigned long __init lmb_alloc_base(unsigned long size, + unsigned long align, unsigned long max_addr); +extern unsigned long __init __lmb_alloc_base(unsigned long size, + unsigned long align, unsigned long max_addr); extern unsigned long __init lmb_phys_mem_size(void); extern unsigned long __init lmb_end_of_DRAM(void); -extern unsigned long __init lmb_abs_to_phys(unsigned long); -extern void __init lmb_enforce_memory_limit(unsigned long); +extern void __init lmb_enforce_memory_limit(unsigned long memory_limit); extern void lmb_dump_all(void); -extern unsigned long io_hole_start; - static inline unsigned long lmb_size_bytes(struct lmb_region *type, unsigned long region_nr) { diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index 5348b820788c..5ed847680754 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h @@ -47,6 +47,7 @@ struct smp_ops_t { #endif struct machdep_calls { + char *name; #ifdef CONFIG_PPC64 void (*hpte_invalidate)(unsigned long slot, unsigned long va, @@ -85,9 +86,9 @@ struct machdep_calls { void (*iommu_dev_setup)(struct pci_dev *dev); void (*iommu_bus_setup)(struct pci_bus *bus); void (*irq_bus_setup)(struct pci_bus *bus); -#endif +#endif /* CONFIG_PPC64 */ - int (*probe)(int platform); + int (*probe)(void); void (*setup_arch)(void); void (*init_early)(void); /* Optional, may be NULL. */ @@ -158,6 +159,12 @@ struct machdep_calls { /* Idle loop for this platform, leave empty for default idle loop */ void (*idle_loop)(void); + /* + * Function for waiting for work with reduced power in idle loop; + * called with interrupts disabled. + */ + void (*power_save)(void); + /* Function to enable performance monitor counters for this platform, called once per cpu. */ void (*enable_pmcs)(void); @@ -170,13 +177,6 @@ struct machdep_calls { May be NULL. 
*/ void (*init)(void); - void (*idle)(void); - void (*power_save)(void); - - void (*heartbeat)(void); - unsigned long heartbeat_reset; - unsigned long heartbeat_count; - void (*setup_io_mappings)(void); void (*early_serial_map)(void); @@ -208,8 +208,6 @@ struct machdep_calls { /* Called at then very end of pcibios_init() */ void (*pcibios_after_init)(void); - /* this is for modules, since _machine can be a define -- Cort */ - int ppc_machine; #endif /* CONFIG_PPC32 */ /* Called to shutdown machine specific hardware not already controlled @@ -242,10 +240,29 @@ struct machdep_calls { #endif /* CONFIG_KEXEC */ }; -extern void default_idle(void); -extern void native_idle(void); +extern void power4_idle(void); +extern void ppc6xx_idle(void); +/* + * ppc_md contains a copy of the machine description structure for the + * current platform. machine_id contains the initial address where the + * description was found during boot. + */ extern struct machdep_calls ppc_md; +extern struct machdep_calls *machine_id; + +#define __machine_desc __attribute__ ((__section__ (".machine.desc"))) + +#define define_machine(name) struct machdep_calls mach_##name __machine_desc = +#define machine_is(name) \ + ({ \ + extern struct machdep_calls mach_##name \ + __attribute__((weak)); \ + machine_id == &mach_##name; \ + }) + +extern void probe_machine(void); + extern char cmd_line[COMMAND_LINE_SIZE]; #ifdef CONFIG_PPC_PMAC diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h index b0b9a3f8cdc2..31f721994bd8 100644 --- a/include/asm-powerpc/mmu.h +++ b/include/asm-powerpc/mmu.h @@ -236,7 +236,6 @@ extern void htab_initialize_secondary(void); extern void hpte_init_native(void); extern void hpte_init_lpar(void); extern void hpte_init_iSeries(void); -extern void mm_init_ppc64(void); extern long pSeries_lpar_hpte_insert(unsigned long hpte_group, unsigned long va, unsigned long prpn, diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h index 338e6a7cff4a..5b33994cd488 100644 --- a/include/asm-powerpc/oprofile_impl.h +++ b/include/asm-powerpc/oprofile_impl.h @@ -17,9 +17,6 @@ /* Per-counter configuration as set via oprofilefs. 
*/ struct op_counter_config { -#ifdef __powerpc64__ - unsigned long valid; -#endif unsigned long enabled; unsigned long event; unsigned long count; @@ -38,9 +35,6 @@ struct op_system_config { #endif unsigned long enable_kernel; unsigned long enable_user; -#ifdef CONFIG_PPC64 - unsigned long backtrace_spinlocks; -#endif }; /* Per-arch configuration */ @@ -56,17 +50,12 @@ struct op_powerpc_model { int num_counters; }; -#ifdef CONFIG_FSL_BOOKE extern struct op_powerpc_model op_model_fsl_booke; -#else /* Otherwise, it's classic */ - -#ifdef CONFIG_PPC64 extern struct op_powerpc_model op_model_rs64; extern struct op_powerpc_model op_model_power4; - -#else /* Otherwise, CONFIG_PPC32 */ extern struct op_powerpc_model op_model_7450; -#endif + +#ifndef CONFIG_FSL_BOOKE /* All the classic PPC parts use these */ static inline unsigned int ctr_read(unsigned int i) @@ -134,5 +123,7 @@ static inline void ctr_write(unsigned int i, unsigned int val) } #endif /* !CONFIG_FSL_BOOKE */ +extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth); + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_OPROFILE_IMPL_H */ diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h index c9add8f1ad94..706325f99a84 100644 --- a/include/asm-powerpc/paca.h +++ b/include/asm-powerpc/paca.h @@ -54,7 +54,7 @@ struct paca_struct { #endif /* CONFIG_PPC_ISERIES */ /* - * MAGIC: the spinlock functions in arch/ppc64/lib/locks.c + * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c * load lock_token and paca_index with a single lwz * instruction. They must travel together and be properly * aligned. @@ -96,9 +96,16 @@ struct paca_struct { u64 saved_r1; /* r1 save for RTAS calls */ u64 saved_msr; /* MSR saved here by enter_rtas */ u8 proc_enabled; /* irq soft-enable flag */ + + /* Stuff for accurate time accounting */ + u64 user_time; /* accumulated usermode TB ticks */ + u64 system_time; /* accumulated system TB ticks */ + u64 startpurr; /* PURR/TB value snapshot */ }; extern struct paca_struct paca[]; +void setup_boot_paca(void); + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PACA_H */ diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h index 0b82df483f7f..2fbecebe1c92 100644 --- a/include/asm-powerpc/page.h +++ b/include/asm-powerpc/page.h @@ -69,8 +69,6 @@ #endif #ifdef CONFIG_FLATMEM -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif @@ -200,6 +198,7 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); extern int page_is_ram(unsigned long pfn); +#include <asm-generic/memory_model.h> #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index e31922c50e53..184a7a4d2fdf 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h @@ -27,10 +27,9 @@ #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ - for (__i = 0; __i < NR_CPUS; __i++) \ - if (cpu_possible(__i)) \ - memcpy((pcpudst)+__per_cpu_offset(__i), \ - (src), (size)); \ + for_each_possible_cpu(__i) \ + memcpy((pcpudst)+__per_cpu_offset(__i), \ + (src), (size)); \ } while (0) extern void setup_per_cpu_areas(void); diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h index 80a7832d2721..b2e18629932a 100644 --- a/include/asm-powerpc/pgtable-4k.h +++ b/include/asm-powerpc/pgtable-4k.h @@ -62,9 +62,14 @@ /* shift to put page number into pte */ 
#define PTE_RPN_SHIFT (17) -#define __real_pte(e,p) ((real_pte_t)(e)) -#define __rpte_to_pte(r) (r) -#define __rpte_to_hidx(r,index) (pte_val((r)) >> 12) +#ifdef STRICT_MM_TYPECHECKS +#define __real_pte(e,p) ((real_pte_t){(e)}) +#define __rpte_to_pte(r) ((r).pte) +#else +#define __real_pte(e,p) (e) +#define __rpte_to_pte(r) (__pte(r)) +#endif +#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12) #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ do { \ diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h index e38931379a72..e9f1f4627e6b 100644 --- a/include/asm-powerpc/pgtable.h +++ b/include/asm-powerpc/pgtable.h @@ -188,9 +188,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) #define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT))) #define pte_page(x) pfn_to_page(pte_pfn(x)) +#define PMD_BAD_BITS (PTE_TABLE_SIZE-1) +#define PUD_BAD_BITS (PMD_TABLE_SIZE-1) + #define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) #define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_bad(pmd) (pmd_val(pmd) == 0) +#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ + || (pmd_val(pmd) & PMD_BAD_BITS)) #define pmd_present(pmd) (pmd_val(pmd) != 0) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) #define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) @@ -198,7 +202,8 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) #define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) ((pud_val(pud)) == 0) +#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ + || (pud_val(pud) & PUD_BAD_BITS)) #define pud_present(pud) (pud_val(pud) != 0) #define pud_clear(pudp) (pud_val(*(pudp)) = 0) #define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS) @@ -468,11 +473,6 @@ extern pgd_t swapper_pg_dir[]; extern void paging_init(void); -#ifdef CONFIG_HUGETLB_PAGE -#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ - free_pgd_range(tlb, addr, end, floor, ceiling) -#endif - /* * This gets called at the end of handling a page fault, when * the kernel has put a new PTE into the page table for the process. 
diff --git a/include/asm-powerpc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h index 3221628130c4..d3599cc9aa74 100644 --- a/include/asm-powerpc/pmac_feature.h +++ b/include/asm-powerpc/pmac_feature.h @@ -305,7 +305,7 @@ extern void pmac_feature_init(void); extern void pmac_set_early_video_resume(void (*proc)(void *data), void *data); extern void pmac_call_early_video_resume(void); -#define PMAC_FTR_DEF(x) ((_MACH_Pmac << 16) | (x)) +#define PMAC_FTR_DEF(x) ((0x6660000) | (x)) /* The AGP driver registers itself here */ extern void pmac_register_agp_pm(struct pci_dev *bridge, diff --git a/include/asm-powerpc/poll.h b/include/asm-powerpc/poll.h index edd2054da86b..9c7d12631033 100644 --- a/include/asm-powerpc/poll.h +++ b/include/asm-powerpc/poll.h @@ -13,6 +13,7 @@ #define POLLWRBAND 0x0200 #define POLLMSG 0x0400 #define POLLREMOVE 0x1000 +#define POLLRDHUP 0x2000 struct pollfd { int fd; diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h index f80482c7231f..cf79bc7ebb55 100644 --- a/include/asm-powerpc/ppc-pci.h +++ b/include/asm-powerpc/ppc-pci.h @@ -38,6 +38,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, void pci_devs_phb_init(void); void pci_devs_phb_init_dynamic(struct pci_controller *phb); +int setup_phb(struct device_node *dev, struct pci_controller *phb); void __devinit scan_phb(struct pci_controller *hose); /* From rtas_pci.h */ diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h index ab8688d39024..dd1c0a913d5f 100644 --- a/include/asm-powerpc/ppc_asm.h +++ b/include/asm-powerpc/ppc_asm.h @@ -15,6 +15,48 @@ #define SZL (BITS_PER_LONG/8) /* + * Stuff for accurate CPU time accounting. + * These macros handle transitions between user and system state + * in exception entry and exit and accumulate time to the + * user_time and system_time fields in the paca. + */ + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#define ACCOUNT_CPU_USER_ENTRY(ra, rb) +#define ACCOUNT_CPU_USER_EXIT(ra, rb) +#else +#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \ + beq 2f; /* if from kernel mode */ \ +BEGIN_FTR_SECTION; \ + mfspr ra,SPRN_PURR; /* get processor util. reg */ \ +END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ +BEGIN_FTR_SECTION; \ + mftb ra; /* or get TB if no PURR */ \ +END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ + ld rb,PACA_STARTPURR(r13); \ + std ra,PACA_STARTPURR(r13); \ + subf rb,rb,ra; /* subtract start value */ \ + ld ra,PACA_USER_TIME(r13); \ + add ra,ra,rb; /* add on to user time */ \ + std ra,PACA_USER_TIME(r13); \ +2: + +#define ACCOUNT_CPU_USER_EXIT(ra, rb) \ +BEGIN_FTR_SECTION; \ + mfspr ra,SPRN_PURR; /* get processor util. reg */ \ +END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ +BEGIN_FTR_SECTION; \ + mftb ra; /* or get TB if no PURR */ \ +END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ + ld rb,PACA_STARTPURR(r13); \ + std ra,PACA_STARTPURR(r13); \ + subf rb,rb,ra; /* subtract start value */ \ + ld ra,PACA_SYSTEM_TIME(r13); \ + add ra,ra,rb; /* add on to user time */ \ + std ra,PACA_SYSTEM_TIME(r13); +#endif + +/* * Macros for storing registers into and loading registers from * exception frames. */ diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h index 415fa393b00c..93f83efeb310 100644 --- a/include/asm-powerpc/processor.h +++ b/include/asm-powerpc/processor.h @@ -22,22 +22,6 @@ * -- BenH. 
*/ -/* Platforms codes (to be obsoleted) */ -#define PLATFORM_PSERIES 0x0100 -#define PLATFORM_PSERIES_LPAR 0x0101 -#define PLATFORM_ISERIES_LPAR 0x0201 -#define PLATFORM_LPAR 0x0001 -#define PLATFORM_POWERMAC 0x0400 -#define PLATFORM_MAPLE 0x0500 -#define PLATFORM_PREP 0x0600 -#define PLATFORM_CHRP 0x0700 -#define PLATFORM_CELL 0x1000 - -/* Compat platform codes for 32 bits */ -#define _MACH_prep PLATFORM_PREP -#define _MACH_Pmac PLATFORM_POWERMAC -#define _MACH_chrp PLATFORM_CHRP - /* PREP sub-platform types see residual.h for these */ #define _PREP_Motorola 0x01 /* motorola prep */ #define _PREP_Firm 0x02 /* firmworks prep */ @@ -49,19 +33,14 @@ #define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */ #define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */ -#ifdef __KERNEL__ -#define platform_is_pseries() (_machine == PLATFORM_PSERIES || \ - _machine == PLATFORM_PSERIES_LPAR) -#define platform_is_lpar() (!!(_machine & PLATFORM_LPAR)) +#if defined(__KERNEL__) && defined(CONFIG_PPC32) -#if defined(CONFIG_PPC_MULTIPLATFORM) -extern int _machine; +extern int _chrp_type; -#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_PREP /* what kind of prep workstation we are */ extern int _prep_type; -extern int _chrp_type; /* * This is used to identify the board type from a given PReP board @@ -71,17 +50,14 @@ extern int _chrp_type; extern unsigned char ucBoardRev; extern unsigned char ucBoardRevMaj, ucBoardRevMin; -#endif /* CONFIG_PPC32 */ +#endif /* CONFIG_PPC_PREP */ -#elif defined(CONFIG_PPC_ISERIES) -/* - * iSeries is soon to become MULTIPLATFORM hopefully ... - */ -#define _machine PLATFORM_ISERIES_LPAR -#else +#ifndef CONFIG_PPC_MULTIPLATFORM #define _machine 0 #endif /* CONFIG_PPC_MULTIPLATFORM */ -#endif /* __KERNEL__ */ + +#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */ + /* * Default implementation of macro that returns current * instruction pointer ("program counter"). @@ -252,6 +228,10 @@ static inline unsigned long __pack_fe01(unsigned int fpmode) #define cpu_relax() barrier() #endif +/* Check that a certain kernel stack pointer is valid in task_struct p */ +int validate_sp(unsigned long sp, struct task_struct *p, + unsigned long nbytes); + /* * Prefetch macros. 
*/ diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index cbd297f44cce..97ef1cd71a4d 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -126,8 +126,14 @@ extern struct device_node *find_all_nodes(void); /* New style node lookup */ extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); +#define for_each_node_by_name(dn, name) \ + for (dn = of_find_node_by_name(NULL, name); dn; \ + dn = of_find_node_by_name(dn, name)) extern struct device_node *of_find_node_by_type(struct device_node *from, const char *type); +#define for_each_node_by_type(dn, type) \ + for (dn = of_find_node_by_type(NULL, type); dn; \ + dn = of_find_node_by_type(dn, type)) extern struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compat); extern struct device_node *of_find_node_by_path(const char *path); @@ -143,12 +149,14 @@ extern struct device_node *of_node_get(struct device_node *node); extern void of_node_put(struct device_node *node); /* For scanning the flat device-tree at boot time */ -int __init of_scan_flat_dt(int (*it)(unsigned long node, - const char *uname, int depth, - void *data), - void *data); -void* __init of_get_flat_dt_prop(unsigned long node, const char *name, - unsigned long *size); +extern int __init of_scan_flat_dt(int (*it)(unsigned long node, + const char *uname, int depth, + void *data), + void *data); +extern void* __init of_get_flat_dt_prop(unsigned long node, const char *name, + unsigned long *size); +extern int __init of_flat_dt_is_compatible(unsigned long node, const char *name); +extern unsigned long __init of_get_flat_dt_root(void); /* For updating the device tree at runtime */ extern void of_attach_node(struct device_node *); diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index 72bfe3af0460..bd467bf5cf5a 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h @@ -622,6 +622,10 @@ extern void ppc64_runlatch_off(void); extern unsigned long scom970_read(unsigned int address); extern void scom970_write(unsigned int address, unsigned long value); +#else +#define ppc64_runlatch_on() +#define ppc64_runlatch_off() + #endif /* CONFIG_PPC64 */ #define __get_SP() ({unsigned long sp; \ diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h index 79bae4933b73..2c2fe9647595 100644 --- a/include/asm-powerpc/rwsem.h +++ b/include/asm-powerpc/rwsem.h @@ -4,7 +4,7 @@ #ifdef __KERNEL__ /* - * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff + * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h * by Paul Mackerras <paulus@samba.org>. 
*/ diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h index 98581e5a8279..4a716f707cf6 100644 --- a/include/asm-powerpc/smp.h +++ b/include/asm-powerpc/smp.h @@ -29,7 +29,6 @@ #endif extern int boot_cpuid; -extern int boot_cpuid_phys; extern void cpu_die(void); @@ -99,6 +98,7 @@ extern void smp_release_cpus(void); #else /* 32-bit */ #ifndef CONFIG_SMP +extern int boot_cpuid_phys; #define get_hard_smp_processor_id(cpu) boot_cpuid_phys #define set_hard_smp_processor_id(cpu, phys) #endif diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h index 38bacf2f6e0c..f431d8b0b651 100644 --- a/include/asm-powerpc/spu.h +++ b/include/asm-powerpc/spu.h @@ -110,6 +110,7 @@ struct spu { char *name; unsigned long local_store_phys; u8 *local_store; + unsigned long problem_phys; struct spu_problem __iomem *problem; struct spu_priv1 __iomem *priv1; struct spu_priv2 __iomem *priv2; @@ -137,6 +138,7 @@ struct spu { void (* wbox_callback)(struct spu *spu); void (* ibox_callback)(struct spu *spu); void (* stop_callback)(struct spu *spu); + void (* mfc_callback)(struct spu *spu); char irq_c0[8]; char irq_c1[8]; @@ -149,6 +151,14 @@ int spu_irq_class_0_bottom(struct spu *spu); int spu_irq_class_1_bottom(struct spu *spu); void spu_irq_setaffinity(struct spu *spu, int cpu); +/* system callbacks from the SPU */ +struct spu_syscall_block { + u64 nr_ret; + u64 parm[6]; +}; +extern long spu_sys_callback(struct spu_syscall_block *s); + +/* syscalls implemented in spufs */ extern struct spufs_calls { asmlinkage long (*create_thread)(const char __user *name, unsigned int flags, mode_t mode); @@ -399,7 +409,6 @@ struct spu_priv1 { #define SPU_GET_REVISION_BITS(vr) (vr & SPU_REVISION_BITS) u8 pad_0x28_0x100[0x100 - 0x28]; /* 0x28 */ - /* Interrupt Area */ u64 int_mask_RW[3]; /* 0x100 */ #define CLASS0_ENABLE_DMA_ALIGNMENT_INTR 0x1L diff --git a/include/asm-powerpc/string.h b/include/asm-powerpc/string.h index 8606a696c088..faa407f33c6b 100644 --- a/include/asm-powerpc/string.h +++ b/include/asm-powerpc/string.h @@ -15,7 +15,7 @@ #define __HAVE_ARCH_MEMCHR extern int strcasecmp(const char *, const char *); -extern int strncasecmp(const char *, const char *, int); +extern int strncasecmp(const char *, const char *, __kernel_size_t); extern char * strcpy(char *,const char *); extern char * strncpy(char *,const char *, __kernel_size_t); extern __kernel_size_t strlen(const char *); diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h index c90d9d9aae72..2cda3c38a9fa 100644 --- a/include/asm-powerpc/synch.h +++ b/include/asm-powerpc/synch.h @@ -15,7 +15,7 @@ #endif #ifdef CONFIG_SMP -#define ISYNC_ON_SMP "\n\tisync" +#define ISYNC_ON_SMP "\n\tisync\n" #define LWSYNC_ON_SMP __stringify(LWSYNC) "\n" #else #define ISYNC_ON_SMP diff --git a/include/asm-powerpc/syscalls.h b/include/asm-powerpc/syscalls.h new file mode 100644 index 000000000000..c2fe79d4f90f --- /dev/null +++ b/include/asm-powerpc/syscalls.h @@ -0,0 +1,58 @@ +#ifndef __ASM_POWERPC_SYSCALLS_H +#define __ASM_POWERPC_SYSCALLS_H +#ifdef __KERNEL__ + +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <linux/types.h> +#include <asm/signal.h> + +struct new_utsname; +struct pt_regs; +struct rtas_args; +struct sigaction; + +asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, off_t offset); +asmlinkage unsigned long sys_mmap2(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); 
+asmlinkage int sys_execve(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, struct pt_regs *regs); +asmlinkage int sys_clone(unsigned long clone_flags, unsigned long usp, + int __user *parent_tidp, void __user *child_threadptr, + int __user *child_tidp, int p6, struct pt_regs *regs); +asmlinkage int sys_fork(unsigned long p1, unsigned long p2, + unsigned long p3, unsigned long p4, unsigned long p5, + unsigned long p6, struct pt_regs *regs); +asmlinkage int sys_vfork(unsigned long p1, unsigned long p2, + unsigned long p3, unsigned long p4, unsigned long p5, + unsigned long p6, struct pt_regs *regs); +asmlinkage int sys_pipe(int __user *fildes); +asmlinkage long sys_rt_sigaction(int sig, + const struct sigaction __user *act, + struct sigaction __user *oact, size_t sigsetsize); +asmlinkage int sys_ipc(uint call, int first, unsigned long second, + long third, void __user *ptr, long fifth); +asmlinkage long ppc64_personality(unsigned long personality); +asmlinkage int ppc_rtas(struct rtas_args __user *uargs); +asmlinkage time_t sys64_time(time_t __user * tloc); +asmlinkage long ppc_newuname(struct new_utsname __user * name); + +asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, + size_t sigsetsize); + +#ifndef __powerpc64__ +asmlinkage long sys_sigaltstack(const stack_t __user *uss, + stack_t __user *uoss, int r5, int r6, int r7, int r8, + struct pt_regs *regs); +#else /* __powerpc64__ */ +asmlinkage long sys_sigaltstack(const stack_t __user *uss, + stack_t __user *uoss, unsigned long r5, unsigned long r6, + unsigned long r7, unsigned long r8, struct pt_regs *regs); +#endif /* __powerpc64__ */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_POWERPC_SYSCALLS_H */ diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index d9bf53653b10..d075725bf444 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -171,6 +171,8 @@ extern u32 booke_wdt_period; /* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ extern unsigned char e2a(unsigned char); +extern unsigned char* strne2a(unsigned char *dest, + const unsigned char *src, size_t n); struct device_node; extern void note_scsi_host(struct device_node *, void *); @@ -363,8 +365,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, * powers of 2 writes until it reaches sufficient alignment). * * Based on this we disable the IP header alignment in network drivers. + * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining + * cacheline alignment of buffers. 
*/ -#define NET_IP_ALIGN 0 +#define NET_IP_ALIGN 0 +#define NET_SKB_PAD L1_CACHE_BYTES #endif #define arch_align_stack(x) (x) @@ -424,5 +429,9 @@ static inline void create_function_call(unsigned long addr, void * func) create_branch(addr, func_addr, BRANCH_SET_LINK); } +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +extern void account_system_vtime(struct task_struct *); +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_SYSTEM_H */ diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h index baddc9ab57ad..912118db13ae 100644 --- a/include/asm-powerpc/time.h +++ b/include/asm-powerpc/time.h @@ -41,6 +41,7 @@ extern time_t last_rtc_update; extern void generic_calibrate_decr(void); extern void wakeup_decrementer(void); +extern void snapshot_timebase(void); /* Some sane defaults: 125 MHz timebase, 1GHz processor */ extern unsigned long ppc_proc_freq; @@ -221,5 +222,19 @@ struct cpu_usage { DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +extern void account_process_vtime(struct task_struct *tsk); +#else +#define account_process_vtime(tsk) do { } while (0) +#endif + +#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) +extern void calculate_steal_time(void); +extern void snapshot_timebases(void); +#else +#define calculate_steal_time() do { } while (0) +#define snapshot_timebases() do { } while (0) +#endif + #endif /* __KERNEL__ */ #endif /* __PPC64_TIME_H */ diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h index ec3c2ee8bf86..baabba96e313 100644 --- a/include/asm-powerpc/types.h +++ b/include/asm-powerpc/types.h @@ -103,6 +103,11 @@ typedef u64 sector_t; #define HAVE_SECTOR_T #endif +#ifdef CONFIG_LSF +typedef u64 blkcnt_t; +#define HAVE_BLKCNT_T +#endif + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h index 35556993f066..536ba0873052 100644 --- a/include/asm-powerpc/unistd.h +++ b/include/asm-powerpc/unistd.h @@ -301,8 +301,9 @@ #define __NR_pselect6 280 #define __NR_ppoll 281 #define __NR_unshare 282 +#define __NR_splice 283 -#define __NR_syscalls 283 +#define __NR_syscalls 284 #ifdef __KERNEL__ #define __NR__exit __NR_exit @@ -425,6 +426,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6 #include <linux/types.h> #include <linux/compiler.h> #include <linux/linkage.h> +#include <asm/syscalls.h> #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR @@ -460,44 +462,10 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6 * System call prototypes. */ #ifdef __KERNEL_SYSCALLS__ -extern pid_t setsid(void); -extern int write(int fd, const char *buf, off_t count); -extern int read(int fd, char *buf, off_t count); -extern off_t lseek(int fd, off_t offset, int count); -extern int dup(int fd); extern int execve(const char *file, char **argv, char **envp); -extern int open(const char *file, int flag, int mode); -extern int close(int fd); -extern pid_t waitpid(pid_t pid, int *wait_stat, int options); #endif /* __KERNEL_SYSCALLS__ */ /* - * Functions that implement syscalls. 
- */ -unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot, - unsigned long flags, unsigned long fd, off_t offset); -unsigned long sys_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); -struct pt_regs; -int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, - unsigned long a3, unsigned long a4, unsigned long a5, - struct pt_regs *regs); -int sys_clone(unsigned long clone_flags, unsigned long usp, - int __user *parent_tidp, void __user *child_threadptr, - int __user *child_tidp, int p6, struct pt_regs *regs); -int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, - unsigned long p4, unsigned long p5, unsigned long p6, - struct pt_regs *regs); -int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, - unsigned long p4, unsigned long p5, unsigned long p6, - struct pt_regs *regs); -int sys_pipe(int __user *fildes); -struct sigaction; -long sys_rt_sigaction(int sig, const struct sigaction __user *act, - struct sigaction __user *oact, size_t sigsetsize); - -/* * "Conditional" syscalls * * What we want is __attribute__((weak,alias("sys_ni_syscall"))), diff --git a/include/asm-powerpc/vdso_datapage.h b/include/asm-powerpc/vdso_datapage.h index 7aa92086c3fb..8a94f0eba5e9 100644 --- a/include/asm-powerpc/vdso_datapage.h +++ b/include/asm-powerpc/vdso_datapage.h @@ -55,6 +55,9 @@ struct vdso_data { __u32 minor; /* Minor number 0x14 */ } version; + /* Note about the platform flags: it now only contains the lpar + * bit. The actual platform number is dead and burried + */ __u32 platform; /* Platform flags 0x18 */ __u32 processor; /* Processor type 0x1C */ __u64 processorCount; /* # of physical processors 0x20 */ |
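The prom.h hunk above adds for_each_node_by_name() and for_each_node_by_type() as thin iteration wrappers around of_find_node_by_name() and of_find_node_by_type(). A minimal usage sketch, assuming only the declarations shown in that hunk; the function name and the "cpu" device_type below are illustrative, not taken from the patch:

/*
 * Hypothetical example: walk every device-tree node whose device_type
 * is "cpu" using the for_each_node_by_type() helper introduced above.
 * The macro seeds the first lookup with a NULL 'from' node and passes
 * the previous node back in on each subsequent iteration.
 */
#include <asm/prom.h>

static int __init count_cpu_nodes(void)
{
	struct device_node *dn;
	int n = 0;

	for_each_node_by_type(dn, "cpu")
		n++;

	return n;
}

The macro hides the NULL-seeded initial call and the hand-over of the previous node to the next lookup; a caller that breaks out of the loop early keeps a reference to the node it stopped on and would normally release it with of_node_put().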