Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/bitops.h     | 50
-rw-r--r--  include/asm-ia64/gcc_intrin.h |  2
-rw-r--r--  include/asm-ia64/mca.h        |  6
-rw-r--r--  include/asm-ia64/mca_asm.h    |  3
-rw-r--r--  include/asm-ia64/processor.h  |  2
-rw-r--r--  include/asm-ia64/sal.h        | 14
6 files changed, 39 insertions, 38 deletions
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index a1b9719f5fbb..953d3df9dd22 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -122,38 +122,40 @@ clear_bit_unlock (int nr, volatile void *addr)
 }
 
 /**
- * __clear_bit_unlock - Non-atomically clear a bit with release
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
  *
- * This is like clear_bit_unlock, but the implementation uses a store
+ * Similarly to clear_bit_unlock, the implementation uses a store
  * with release semantics. See also __raw_spin_unlock().
  */
 static __inline__ void
-__clear_bit_unlock(int nr, volatile void *addr)
+__clear_bit_unlock(int nr, void *addr)
 {
-	__u32 mask, new;
-	volatile __u32 *m;
+	__u32 * const m = (__u32 *) addr + (nr >> 5);
+	__u32 const new = *m & ~(1 << (nr & 31));
 
-	m = (volatile __u32 *)addr + (nr >> 5);
-	mask = ~(1 << (nr & 31));
-	new = *m & mask;
-	barrier();
 	ia64_st4_rel_nta(m, new);
 }
 
 /**
  * __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
  */
 static __inline__ void
 __clear_bit (int nr, volatile void *addr)
 {
-	volatile __u32	*p = (__u32 *) addr + (nr >> 5);
-	__u32		m = 1 << (nr & 31);
-	*p &= ~m;
+	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
 
 /**
  * change_bit - Toggle a bit in memory
- * @nr: Bit to clear
+ * @nr: Bit to toggle
  * @addr: Address to start counting from
  *
  * change_bit() is atomic and may not be reordered.
@@ -178,7 +180,7 @@ change_bit (int nr, volatile void *addr)
 
 /**
  * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
+ * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
@@ -197,7 +199,7 @@ __change_bit (int nr, volatile void *addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_set_bit (int nr, volatile void *addr)
@@ -247,11 +249,11 @@ __test_and_set_bit (int nr, volatile void *addr)
 
 /**
  * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_clear_bit (int nr, volatile void *addr)
@@ -272,7 +274,7 @@ test_and_clear_bit (int nr, volatile void *addr)
 
 /**
  * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
@@ -292,11 +294,11 @@ __test_and_clear_bit(int nr, volatile void * addr)
 
 /**
  * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_change_bit (int nr, volatile void *addr)
@@ -315,8 +317,12 @@ test_and_change_bit (int nr, volatile void *addr)
 	return (old & bit) != 0;
 }
 
-/*
- * WARNING: non atomic version.
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
  */
 static __inline__ int
 __test_and_change_bit (int nr, void *addr)
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index 5b6665c754c9..de2ed2cbdd84 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -24,7 +24,9 @@
 extern void ia64_bad_param_for_setreg (void);
 extern void ia64_bad_param_for_getreg (void);
 
+#ifdef __KERNEL__
 register unsigned long ia64_r13 asm ("r13") __used;
+#endif
 
 #define ia64_setreg(regnum, val)		\
 ({						\
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 823553bf12e6..f1663aa94a52 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -3,9 +3,9 @@
  * Purpose: Machine check handling specific defines
  *
  * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- * Copyright (C) Russ Anderson (rja@sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
  */
 
 #ifndef _ASM_IA64_MCA_H
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 76203f9a8718..dd2a5b134390 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -1,8 +1,9 @@
 /*
  * File:    mca_asm.h
+ * Purpose: Machine check handling specific defines
  *
  * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
  * Copyright (C) 2000 Hewlett-Packard Co.
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 666385b68820..741f7ecb986a 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -473,7 +473,7 @@ ia64_set_psr (__u64 psr)
 {
 	ia64_stop();
 	ia64_setreg(_IA64_REG_PSR_L, psr);
-	ia64_srlz_d();
+	ia64_srlz_i();
 }
 
 /*
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 1f5412d6f9bb..2251118894ae 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -649,17 +649,6 @@ typedef struct err_rec {
  * Now define a couple of inline functions for improved type checking
  * and convenience.
  */
-static inline long
-ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
-		    unsigned long *drift_info)
-{
-	struct ia64_sal_retval isrv;
-
-	SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-	*ticks_per_second = isrv.v0;
-	*drift_info = isrv.v1;
-	return isrv.status;
-}
 
 extern s64 ia64_sal_cache_flush (u64 cache_type);
 extern void __init check_sal_cache_flush (void);
@@ -841,6 +830,9 @@ extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
 				   u64, u64, u64, u64, u64);
 extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
 				      u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+		    unsigned long *drift_info);
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * System Abstraction Layer Specification
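
For readers skimming the kerneldoc changes above: the practical difference between the atomic bitops and their double-underscore counterparts is whether the read-modify-write is safe against concurrent updates of the same word. The sketch below is a userspace analogue, not kernel code; the *_like() names and the use of GCC's __atomic_fetch_or() builtin are choices made for this sketch only, picked to mirror the "acquisition side of the memory barrier" wording the patch adds for the ia64 atomic variants.

/*
 * Userspace sketch, not kernel code: mimics the documented semantics of
 * test_and_set_bit() vs. __test_and_set_bit() using GCC atomic builtins.
 */
#include <stdint.h>
#include <stdio.h>

/* Atomic flavour: safe when other threads may update the same word. */
static int test_and_set_bit_like(int nr, volatile void *addr)
{
	volatile uint32_t *m = (volatile uint32_t *)addr + (nr >> 5);
	uint32_t bit = 1u << (nr & 31);
	uint32_t old = __atomic_fetch_or(m, bit, __ATOMIC_ACQUIRE);

	return (old & bit) != 0;
}

/* Non-atomic flavour: caller must guarantee exclusive access, e.g. by
 * holding a lock or because the bitmap is not yet visible to others. */
static int __test_and_set_bit_like(int nr, void *addr)
{
	uint32_t *m = (uint32_t *)addr + (nr >> 5);
	uint32_t bit = 1u << (nr & 31);
	uint32_t old = *m;

	*m = old | bit;
	return (old & bit) != 0;
}

int main(void)
{
	uint32_t bitmap[4] = { 0 };	/* 128 bits */

	printf("%d\n", test_and_set_bit_like(70, bitmap));	/* 0: was clear */
	printf("%d\n", __test_and_set_bit_like(70, bitmap));	/* 1: already set */
	return 0;
}

Built with any recent gcc or clang, the second call reports the bit as already set, which is the return-value contract the kerneldoc above describes for both variants.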
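The sal.h hunks turn ia64_sal_freq_base() from a static inline in the header into an extern declaration, so its body must now live in a C file that is outside this 'include'-limited diffstat. Purely as an illustration, and assuming the body is carried over unchanged from the inline removed above (the destination file and any symbol export are guesses, not shown in this diff), the out-of-line definition would look roughly like:

/* Hypothetical out-of-line home for the removed inline, somewhere under
 * arch/ia64/ -- the actual location is not part of this diff. */
#include <asm/sal.h>

long
ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
		    unsigned long *drift_info)
{
	struct ia64_sal_retval isrv;

	SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
	*ticks_per_second = isrv.v0;
	*drift_info = isrv.v1;
	return isrv.status;
}

One plausible benefit of un-inlining is that the SAL_CALL() machinery is no longer expanded in every translation unit that includes sal.h, though the motivation itself is not stated anywhere in this diff.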