Diffstat (limited to 'arch')
-rw-r--r--   arch/sparc/Kconfig                       |   3
-rw-r--r--   arch/sparc/include/asm/atomic_32.h       | 104
-rw-r--r--   arch/sparc/include/asm/page_32.h         |  10
-rw-r--r--   arch/sparc/include/asm/pgtsun4.h         | 171
-rw-r--r--   arch/sparc/include/asm/thread_info_32.h  |   2
-rw-r--r--   arch/sparc/lib/atomic_32.S               |  55
-rw-r--r--   arch/sparc/lib/ksyms.c                   |   6
7 files changed, 6 insertions, 345 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 70ae9d81870e..868ea08dff0b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -31,6 +31,7 @@ config SPARC

config SPARC32
	def_bool !64BIT
+	select GENERIC_ATOMIC64

config SPARC64
	def_bool 64BIT
@@ -383,9 +384,7 @@ config SCHED_MC
	  making when dealing with multi-core CPU chips at a cost of slightly
	  increased overhead in some places. If unsure say N here.

-if SPARC64
source "kernel/Kconfig.preempt"
-endif

config CMDLINE_BOOL
	bool "Default bootloader kernel arguments"
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 5c3c8b69884d..9dd0a769fa18 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -13,7 +13,7 @@

#include <linux/types.h>

-#ifdef __KERNEL__
+#include <asm-generic/atomic64.h>

#include <asm/system.h>

@@ -52,112 +52,10 @@ extern void atomic_set(atomic_t *, int);
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

-
-/* This is the old 24-bit implementation. It's still used internally
- * by some sparc-specific code, notably the semaphore implementation.
- */
-typedef struct { volatile int counter; } atomic24_t;
-
-#ifndef CONFIG_SMP
-
-#define ATOMIC24_INIT(i)   { (i) }
-#define atomic24_read(v)   ((v)->counter)
-#define atomic24_set(v, i) (((v)->counter) = i)
-
-#else
-/* We do the bulk of the actual work out of line in two common
- * routines in assembler, see arch/sparc/lib/atomic.S for the
- * "fun" details.
- *
- * For SMP the trick is you embed the spin lock byte within
- * the word, use the low byte so signedness is easily retained
- * via a quick arithmetic shift. It looks like this:
- *
- *	----------------------------------------
- *	| signed 24-bit counter value |  lock  |  atomic_t
- *	----------------------------------------
- *	 31                          8 7      0
- */
-
-#define ATOMIC24_INIT(i)	{ ((i) << 8) }
-
-static inline int atomic24_read(const atomic24_t *v)
-{
-	int ret = v->counter;
-
-	while(ret & 0xff)
-		ret = v->counter;
-
-	return ret >> 8;
-}
-
-#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
-#endif
-
-static inline int __atomic24_add(int i, atomic24_t *v)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g7");
-
-	ptr = &v->counter;
-	increment = i;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_add\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
-	: "0" (increment), "r" (ptr)
-	: "memory", "cc");
-
-	return increment;
-}
-
-static inline int __atomic24_sub(int i, atomic24_t *v)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g7");
-
-	ptr = &v->counter;
-	increment = i;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_sub\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
-	: "0" (increment), "r" (ptr)
-	: "memory", "cc");
-
-	return increment;
-}
-
-#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
-#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
-
-#define atomic24_dec_return(v) __atomic24_sub(1, (v))
-#define atomic24_inc_return(v) __atomic24_add(1, (v))
-
-#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
-#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
-
-#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
-#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
-
-#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
-
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

-#endif /* !(__KERNEL__) */
-
#endif /* !(__ARCH_SPARC_ATOMIC__) */
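The packing described in the removed header comment is worth a concrete illustration. The snippet below is illustration only (plain userspace C, not kernel code): the signed 24-bit counter lives in bits 31..8, the spin-lock byte in bits 7..0, an arithmetic right shift by 8 recovers the signed value, and storing "value << 8" both publishes the new counter and leaves the lock byte clear, which is why the assembly's final store also acts as the unlock.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int32_t counter = -5;
	/* Encode: counter in bits 31..8, lock byte (bits 7..0) clear. */
	int32_t word = (int32_t)((uint32_t)counter << 8);

	assert((uint32_t)word == 0xfffffb00u);
	assert((word & 0xff) == 0);	/* low byte zero: unlocked */
	assert((word >> 8) == -5);	/* arithmetic shift keeps the sign */

	word |= 0xff;			/* roughly what ldstub does to the lock byte */
	assert((word & 0xff) != 0);	/* readers spin until this clears */
	return 0;
}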
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index 156707b0f18d..bb5c2ac4055d 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -8,14 +8,10 @@
#ifndef _SPARC_PAGE_H
#define _SPARC_PAGE_H

-#define PAGE_SHIFT   12
+#include <linux/const.h>

-#ifndef __ASSEMBLY__
-/* I have my suspicions... -DaveM */
-#define PAGE_SIZE    (1UL << PAGE_SHIFT)
-#else
-#define PAGE_SIZE    (1 << PAGE_SHIFT)
-#endif
+#define PAGE_SHIFT   12
+#define PAGE_SIZE    (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE-1))

#include <asm/btfixup.h>
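The reason one PAGE_SIZE definition can now serve both C and the assembler is the _AC() macro from <linux/const.h>. Roughly (simplified from that header), it pastes the type suffix only when the file is not being assembled, so the old #ifdef __ASSEMBLY__ pair becomes unnecessary:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees a bare constant: 1 << 12 */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C sees a typed constant: (1UL) << 12 */
#endif

#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))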
diff --git a/arch/sparc/include/asm/pgtsun4.h b/arch/sparc/include/asm/pgtsun4.h
deleted file mode 100644
index 5a0d661fb82e..000000000000
--- a/arch/sparc/include/asm/pgtsun4.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * pgtsun4.h:  Sun4 specific pgtable.h defines and code.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-#ifndef _SPARC_PGTSUN4C_H
-#define _SPARC_PGTSUN4C_H
-
-#include <asm/contregs.h>
-
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define SUN4C_PMD_SHIFT       23
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define SUN4C_PGDIR_SHIFT       23
-#define SUN4C_PGDIR_SIZE        (1UL << SUN4C_PGDIR_SHIFT)
-#define SUN4C_PGDIR_MASK        (~(SUN4C_PGDIR_SIZE-1))
-#define SUN4C_PGDIR_ALIGN(addr) (((addr)+SUN4C_PGDIR_SIZE-1)&SUN4C_PGDIR_MASK)
-
-/* To represent how the sun4c mmu really lays things out. */
-#define SUN4C_REAL_PGDIR_SHIFT        18
-#define SUN4C_REAL_PGDIR_SIZE         (1UL << SUN4C_REAL_PGDIR_SHIFT)
-#define SUN4C_REAL_PGDIR_MASK         (~(SUN4C_REAL_PGDIR_SIZE-1))
-#define SUN4C_REAL_PGDIR_ALIGN(addr)  (((addr)+SUN4C_REAL_PGDIR_SIZE-1)&SUN4C_REAL_PGDIR_MASK)
-
-/* 19 bit PFN on sun4 */
-#define SUN4C_PFN_MASK 0x7ffff
-
-/* Don't increase these unless the structures in sun4c.c are fixed */
-#define SUN4C_MAX_SEGMAPS 256
-#define SUN4C_MAX_CONTEXTS 16
-
-/*
- * To be efficient, and not have to worry about allocating such
- * a huge pgd, we make the kernel sun4c tables each hold 1024
- * entries and the pgd similarly just like the i386 tables.
- */
-#define SUN4C_PTRS_PER_PTE      1024
-#define SUN4C_PTRS_PER_PMD      1
-#define SUN4C_PTRS_PER_PGD      1024
-
-/*
- * Sparc SUN4C pte fields.
- */
-#define _SUN4C_PAGE_VALID        0x80000000
-#define _SUN4C_PAGE_SILENT_READ  0x80000000   /* synonym */
-#define _SUN4C_PAGE_DIRTY        0x40000000
-#define _SUN4C_PAGE_SILENT_WRITE 0x40000000   /* synonym */
-#define _SUN4C_PAGE_PRIV         0x20000000   /* privileged page */
-#define _SUN4C_PAGE_NOCACHE      0x10000000   /* non-cacheable page */
-#define _SUN4C_PAGE_PRESENT      0x08000000   /* implemented in software */
-#define _SUN4C_PAGE_IO           0x04000000   /* I/O page */
-#define _SUN4C_PAGE_FILE         0x02000000   /* implemented in software */
-#define _SUN4C_PAGE_READ         0x00800000   /* implemented in software */
-#define _SUN4C_PAGE_WRITE        0x00400000   /* implemented in software */
-#define _SUN4C_PAGE_ACCESSED     0x00200000   /* implemented in software */
-#define _SUN4C_PAGE_MODIFIED     0x00100000   /* implemented in software */
-
-#define _SUN4C_READABLE		(_SUN4C_PAGE_READ|_SUN4C_PAGE_SILENT_READ|\
-				 _SUN4C_PAGE_ACCESSED)
-#define _SUN4C_WRITEABLE	(_SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE|\
-				 _SUN4C_PAGE_MODIFIED)
-
-#define _SUN4C_PAGE_CHG_MASK	(0xffff|_SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_MODIFIED)
-
-#define SUN4C_PAGE_NONE		__pgprot(_SUN4C_PAGE_PRESENT)
-#define SUN4C_PAGE_SHARED	__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE|\
-					 _SUN4C_PAGE_WRITE)
-#define SUN4C_PAGE_COPY		__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_READONLY	__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_KERNEL	__pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\
-					 _SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV)
-
-/* SUN4C swap entry encoding
- *
- * We use 5 bits for the type and 19 for the offset. This gives us
- * 32 swapfiles of 4GB each. Encoding looks like:
- *
- * RRRRRRRRooooooooooooooooooottttt
- * fedcba9876543210fedcba9876543210
- *
- * The top 8 bits are reserved for protection and status bits, especially
- * FILE and PRESENT.
- */
-#define SUN4C_SWP_TYPE_MASK	0x1f
-#define SUN4C_SWP_OFF_MASK	0x7ffff
-#define SUN4C_SWP_OFF_SHIFT	5
-
-#ifndef __ASSEMBLY__
-
-static inline unsigned long sun4c_get_synchronous_error(void)
-{
-	unsigned long sync_err;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (sync_err) :
-			     "r" (AC_SYNC_ERR), "i" (ASI_CONTROL));
-	return sync_err;
-}
-
-static inline unsigned long sun4c_get_synchronous_address(void)
-{
-	unsigned long sync_addr;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (sync_addr) :
-			     "r" (AC_SYNC_VA), "i" (ASI_CONTROL));
-	return sync_addr;
-}
-
-/* SUN4 pte, segmap, and context manipulation */
-static inline unsigned long sun4c_get_segmap(unsigned long addr)
-{
-	register unsigned long entry;
-
-	__asm__ __volatile__("\n\tlduha [%1] %2, %0\n\t" :
-			     "=r" (entry) :
-			     "r" (addr), "i" (ASI_SEGMAP));
-	return entry;
-}
-
-static inline void sun4c_put_segmap(unsigned long addr, unsigned long entry)
-{
-	__asm__ __volatile__("\n\tstha %1, [%0] %2; nop; nop; nop;\n\t" : :
-			     "r" (addr), "r" (entry),
-			     "i" (ASI_SEGMAP)
-			     : "memory");
-}
-
-static inline unsigned long sun4c_get_pte(unsigned long addr)
-{
-	register unsigned long entry;
-
-	__asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
-			     "=r" (entry) :
-			     "r" (addr), "i" (ASI_PTE));
-	return entry;
-}
-
-static inline void sun4c_put_pte(unsigned long addr, unsigned long entry)
-{
-	__asm__ __volatile__("\n\tsta %1, [%0] %2; nop; nop; nop;\n\t" : :
-			     "r" (addr),
-			     "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE)
-			     : "memory");
-}
-
-static inline int sun4c_get_context(void)
-{
-	register int ctx;
-
-	__asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
-			     "=r" (ctx) :
-			     "r" (AC_CONTEXT), "i" (ASI_CONTROL));
-
-	return ctx;
-}
-
-static inline int sun4c_set_context(int ctx)
-{
-	__asm__ __volatile__("\n\tstba %0, [%1] %2; nop; nop; nop;\n\t" : :
-			     "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL)
-			     : "memory");
-
-	return ctx;
-}
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC_PGTSUN4_H) */
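A small worked example of the swap-entry layout the deleted comment describes: 5 bits of swap type in bits 4..0, 19 bits of offset in bits 23..5, and the top 8 bits left free for pte status bits such as FILE and PRESENT. The helper here is hypothetical and for illustration only; it is not the kernel's swp_entry API.

#include <assert.h>

#define SUN4C_SWP_TYPE_MASK	0x1f
#define SUN4C_SWP_OFF_MASK	0x7ffff
#define SUN4C_SWP_OFF_SHIFT	5

/* Hypothetical encode helper, illustration only. */
static unsigned long swp_encode(unsigned long type, unsigned long off)
{
	return (type & SUN4C_SWP_TYPE_MASK) |
	       ((off & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT);
}

int main(void)
{
	unsigned long e = swp_encode(3, 0x12345);

	assert((e & SUN4C_SWP_TYPE_MASK) == 3);				/* type */
	assert(((e >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK) == 0x12345);	/* offset */
	assert((e >> 24) == 0);		/* top 8 bits stay free for pte flags */
	return 0;
}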
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 5cc5888ad5a3..c2a1080cdd3b 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -95,7 +95,7 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 * Observe the order of get_free_pages() in alloc_thread_info_node().
 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
 */
-#define THREAD_SIZE		8192
+#define THREAD_SIZE		(2 * PAGE_SIZE)

/*
 * Offsets in thread_info structure, used in assembly code
diff --git a/arch/sparc/lib/atomic_32.S b/arch/sparc/lib/atomic_32.S
index 178cbb8ae1b9..eb6c7359cbd1 100644
--- a/arch/sparc/lib/atomic_32.S
+++ b/arch/sparc/lib/atomic_32.S
@@ -40,60 +40,5 @@ ___xchg32_sun4md:
	 mov	%g4, %o7
#endif

-	/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
-	 * Really, some things here for SMP are overly clever, go read the header.
-	 */
-	.globl	___atomic24_add
-___atomic24_add:
-	rd	%psr, %g3		! Keep the code small, old way was stupid
-	nop; nop; nop;			! Let the bits set
-	or	%g3, PSR_PIL, %g7	! Disable interrupts
-	wr	%g7, 0x0, %psr		! Set %psr
-	nop; nop; nop;			! Let the bits set
-#ifdef CONFIG_SMP
-1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	1b			! Nope...
-	 ld	[%g1], %g7		! Load locked atomic24_t
-	sra	%g7, 8, %g7		! Get signed 24-bit integer
-	add	%g7, %g2, %g2		! Add in argument
-	sll	%g2, 8, %g7		! Transpose back to atomic24_t
-	st	%g7, [%g1]		! Clever: This releases the lock as well.
-#else
-	ld	[%g1], %g7		! Load locked atomic24_t
-	add	%g7, %g2, %g2		! Add in argument
-	st	%g2, [%g1]		! Store it back
-#endif
-	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
-	nop; nop; nop;			! Let the bits set
-	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
-	 mov	%g4, %o7		! Restore %o7
-
-	.globl	___atomic24_sub
-___atomic24_sub:
-	rd	%psr, %g3		! Keep the code small, old way was stupid
-	nop; nop; nop;			! Let the bits set
-	or	%g3, PSR_PIL, %g7	! Disable interrupts
-	wr	%g7, 0x0, %psr		! Set %psr
-	nop; nop; nop;			! Let the bits set
-#ifdef CONFIG_SMP
-1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	1b			! Nope...
-	 ld	[%g1], %g7		! Load locked atomic24_t
-	sra	%g7, 8, %g7		! Get signed 24-bit integer
-	sub	%g7, %g2, %g2		! Subtract argument
-	sll	%g2, 8, %g7		! Transpose back to atomic24_t
-	st	%g7, [%g1]		! Clever: This releases the lock as well
-#else
-	ld	[%g1], %g7		! Load locked atomic24_t
-	sub	%g7, %g2, %g2		! Subtract argument
-	st	%g2, [%g1]		! Store it back
-#endif
-	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
-	nop; nop; nop;			! Let the bits set
-	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
-	 mov	%g4, %o7		! Restore %o7
-
	.globl  __atomic_end
__atomic_end:
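For readers who do not speak SPARC assembly, the removed !CONFIG_SMP path boils down to an interrupt-masked read-modify-write. This is a conceptual C sketch only, using the kernel's local_irq_save()/local_irq_restore() rather than the stub's private register calling convention, and borrowing the atomic24_t type from the removed header:

#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */

typedef struct { volatile int counter; } atomic24_t;	/* as in the removed header */

static inline int atomic24_add_up_sketch(int i, atomic24_t *v)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);		/* assembly: or %g3, PSR_PIL, %g7; wr %g7, 0x0, %psr */
	ret = v->counter + i;		/* assembly: ld [%g1], %g7; add %g7, %g2, %g2 */
	v->counter = ret;		/* assembly: st %g2, [%g1] */
	local_irq_restore(flags);	/* assembly: wr %g3, 0x0, %psr */
	return ret;
}

On SMP the same update is additionally wrapped in the ldstub byte lock described in the header comment earlier; the final store of the shifted value clears that lock byte, which is what the "releases the lock as well" comments refer to.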
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1b30bb3bfdb1..f73c2240fe60 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -62,8 +62,6 @@ extern void ___rw_read_enter(void);
extern void ___rw_read_try(void);
extern void ___rw_read_exit(void);
extern void ___rw_write_enter(void);
-extern void ___atomic24_add(void);
-extern void ___atomic24_sub(void);

/* Alias functions whose names begin with "." and export the aliases.
 * The module references will be fixed up by module_frob_arch_sections.
@@ -97,10 +95,6 @@ EXPORT_SYMBOL(___rw_read_exit);
EXPORT_SYMBOL(___rw_write_enter);
#endif

-/* Atomic operations. */
-EXPORT_SYMBOL(___atomic24_add);
-EXPORT_SYMBOL(___atomic24_sub);
-
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
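With the atomic24 routines and their exports gone, 64-bit atomics on sparc32 come from the GENERIC_ATOMIC64 library selected in the Kconfig hunk above. Conceptually, such a fallback serializes every operation with a spinlock; the sketch below illustrates that idea (a small hash of locks protecting a plain 64-bit read-modify-write) and is not the exact lib/atomic64.c implementation. Names here are illustrative.

#include <linux/spinlock.h>

typedef struct { long long counter; } atomic64_sketch_t;	/* illustrative type */

#define NR_A64_LOCKS	16
static spinlock_t a64_lock[NR_A64_LOCKS];	/* spin_lock_init() these at boot */

static spinlock_t *a64_lock_for(atomic64_sketch_t *v)
{
	/* Hash the address so unrelated counters rarely share a lock. */
	return &a64_lock[((unsigned long)v / sizeof(*v)) % NR_A64_LOCKS];
}

static long long atomic64_add_return_sketch(long long a, atomic64_sketch_t *v)
{
	spinlock_t *lock = a64_lock_for(v);
	unsigned long flags;
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter + a;
	v->counter = val;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

Because 32-bit SPARC has no atomic 64-bit loads or stores either, even the read and set operations go through the lock in a generic implementation of this kind.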