author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-07 22:27:53 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-07 22:27:53 +0100 |
commit | dd1c3ed76f26504621b5ce08b894666aafa38e47 (patch) | |
tree | f879aefbbd7871733bc616cb7f73dd5ffca53f44 /arch | |
parent | Merge tag 'powerpc-5.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/po... (diff) | |
parent | xtensa: simplify trap_init (diff) | |
Merge tag 'xtensa-20190307' of git://github.com/jcmvbkbc/linux-xtensa
Pull xtensa updates from Max Filippov:
- use generic spinlock/rwlock implementations
- clean up IPI processing
- document boot parameters passing to the kernel
- fix get_wchan
- various cleanups in time.c, process.c, traps.c and thread_info.h
* tag 'xtensa-20190307' of git://github.com/jcmvbkbc/linux-xtensa:
xtensa: simplify trap_init
xtensa: drop unused definitions
xtensa: fix get_wchan
xtensa: use generic spinlock/rwlock implementation
xtensa: provide xchg for sizes 1 and 2
xtensa: clean up arch/xtensa/kernel/time.c
xtensa: SMP: rework IPI processing
xtensa: document boot parameter passing
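For context on the "xtensa: provide xchg for sizes 1 and 2" commit listed above: the cmpxchg.h hunk further down builds byte and halfword exchange out of the existing 32-bit compare-and-swap. It loads the aligned word containing the target, splices the new value in at the right bit offset, and retries until the whole word swaps cleanly. Below is a minimal user-space sketch of the same masking/shifting technique, assuming a little-endian layout and using GCC's __atomic builtins in place of the kernel's __cmpxchg_u32()/READ_ONCE(); the function and helper names here are illustrative only, not part of the patch.

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch only: emulate a 1- or 2-byte atomic exchange with a 32-bit
 * compare-and-swap on the aligned word that contains the target,
 * mirroring what xchg_small() in the patch does. Assumes little-endian. */
static uint32_t xchg_small_sketch(volatile void *ptr, uint32_t x, int size)
{
	uintptr_t off = (uintptr_t)ptr % sizeof(uint32_t);
	volatile uint32_t *p = (volatile uint32_t *)((uintptr_t)ptr - off);
	int bitoff = off * 8;				/* little-endian offset */
	uint32_t bitmask = ((1u << (size * 8)) - 1) << bitoff;
	uint32_t oldv, newv;

	do {
		oldv = __atomic_load_n(p, __ATOMIC_RELAXED);
		newv = (oldv & ~bitmask) | ((x << bitoff) & bitmask);
		/* Retry if any byte of the containing word changed meanwhile. */
	} while (!__atomic_compare_exchange_n(p, &oldv, newv, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return (oldv & bitmask) >> bitoff;	/* old value of the small field */
}

int main(void)
{
	/* A 4-byte aligned word holding two 16-bit halves. */
	_Alignas(4) volatile uint16_t v[2] = { 0x1234, 0x5678 };
	uint32_t old = xchg_small_sketch(&v[0], 0xbeef, sizeof(v[0]));

	/* Only the addressed half is replaced; the neighbour is untouched. */
	printf("old=0x%x new=0x%x untouched=0x%x\n", old, v[0], v[1]);
	return 0;
}
```

The kernel version additionally handles big-endian configurations by computing the bit offset from the other end of the word, which is why the real xchg_small() carries a __BIG_ENDIAN branch.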
Diffstat (limited to 'arch')
-rw-r--r-- | arch/xtensa/Kconfig | 2 |
-rw-r--r-- | arch/xtensa/include/asm/Kbuild | 2 |
-rw-r--r-- | arch/xtensa/include/asm/cmpxchg.h | 36 |
-rw-r--r-- | arch/xtensa/include/asm/spinlock.h | 185 |
-rw-r--r-- | arch/xtensa/include/asm/spinlock_types.h | 15 |
-rw-r--r-- | arch/xtensa/include/asm/thread_info.h | 9 |
-rw-r--r-- | arch/xtensa/kernel/process.c | 6 |
-rw-r--r-- | arch/xtensa/kernel/smp.c | 38 |
-rw-r--r-- | arch/xtensa/kernel/time.c | 53 |
-rw-r--r-- | arch/xtensa/kernel/traps.c | 5 |
10 files changed, 94 insertions, 257 deletions
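A note before the smp.c hunk below ("xtensa: SMP: rework IPI processing"): the reworked ipi_interrupt() no longer reads the cause register once and acknowledges bits one at a time. It loops, snapshotting and clearing all pending bits together and dispatching until a snapshot comes back empty, and it bumps a per-type counter for every IPI kind. The following is a rough, hardware-neutral sketch of that drain loop; the pending variable and the helper stand in for the Xtensa get_er()/set_er(MIPICAUSE(cpu)) accessors and for the real scheduler/call-function/stop handlers, so treat every name as illustrative rather than kernel API.

```c
#include <stdint.h>
#include <stdio.h>

enum { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_MAX };

/* Stand-in for the per-CPU MIPICAUSE register (read with get_er() and
 * acknowledged with set_er() in the real driver). */
static uint32_t pending;
static unsigned long ipi_count[IPI_MAX];

static uint32_t read_and_clear_pending(void)
{
	uint32_t msg = pending;		/* snapshot all pending bits ...      */
	pending = 0;			/* ... and acknowledge them at once   */
	return msg;
}

/* Drain-loop pattern of the reworked ipi_interrupt(): keep snapshotting
 * and clearing, dispatch each set bit, and stop only when a snapshot
 * comes back empty, so IPIs raised meanwhile are handled in the same
 * interrupt. */
static void ipi_interrupt_sketch(void)
{
	for (;;) {
		uint32_t msg = read_and_clear_pending();

		if (!msg)
			break;

		if (msg & (1u << IPI_CALL_FUNC))
			++ipi_count[IPI_CALL_FUNC];	/* + call-function handler */
		if (msg & (1u << IPI_RESCHEDULE))
			++ipi_count[IPI_RESCHEDULE];	/* + scheduler_ipi()       */
		if (msg & (1u << IPI_CPU_STOP))
			++ipi_count[IPI_CPU_STOP];	/* + stop this CPU         */
	}
}

int main(void)
{
	pending = (1u << IPI_RESCHEDULE) | (1u << IPI_CALL_FUNC);
	ipi_interrupt_sketch();
	printf("resched=%lu call_func=%lu stop=%lu\n",
	       ipi_count[IPI_RESCHEDULE], ipi_count[IPI_CALL_FUNC],
	       ipi_count[IPI_CPU_STOP]);
	return 0;
}
```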
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 963986a48c62..bacf87ee7975 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -5,6 +5,8 @@ config XTENSA
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_COHERENT_DMA_MMAP if !MMU
+	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 809f39ce08c0..d939e13e8d84 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -23,6 +23,8 @@ generic-y += mm-arch-hooks.h
 generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
 generic-y += rwsem.h
 generic-y += sections.h
 generic-y += socket.h
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 201e9009efd8..22a10c715c1f 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -13,6 +13,7 @@

 #ifndef __ASSEMBLY__

+#include <linux/bits.h>
 #include <linux/stringify.h>

 /*
@@ -138,6 +139,28 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 #define xchg(ptr,x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

+static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
 /*
  * This only works if the compiler isn't horribly bad at optimizing.
  * gcc-2.5.8 reportedly can't handle this, but I define that one to
@@ -150,11 +173,16 @@ static __inline__ unsigned long
 __xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
-		case 4:
-			return xchg_u32(ptr, x);
+	case 1:
+		return xchg_small(ptr, x, 1);
+	case 2:
+		return xchg_small(ptr, x, 2);
+	case 4:
+		return xchg_u32(ptr, x);
+	default:
+		__xchg_called_with_bad_pointer();
+		return x;
 	}
-	__xchg_called_with_bad_pointer();
-	return x;
 }

 #endif /* __ASSEMBLY__ */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index c6e1290dcbb7..584b0de6f2ca 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -12,188 +12,9 @@
 #define _XTENSA_SPINLOCK_H

 #include <asm/barrier.h>
-#include <asm/processor.h>
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>

-/*
- * spinlock
- *
- * There is at most one owner of a spinlock. There are not different
- * types of spinlock owners like there are for rwlocks (see below).
- *
- * When trying to obtain a spinlock, the function "spins" forever, or busy-
- * waits, until the lock is obtained. When spinning, presumably some other
- * owner will soon give up the spinlock making it available to others. Use
- * the trylock functions to avoid spinning forever.
- *
- * possible values:
- *
- *   0        nobody owns the spinlock
- *   1        somebody owns the spinlock
- */
-
-#define arch_spin_is_locked(x) ((x)->slock != 0)
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"1:     movi    %0, 1\n"
-			"       s32c1i  %0, %1, 0\n"
-			"       bnez    %0, 1b\n"
-			: "=&a" (tmp)
-			: "a" (&lock->slock)
-			: "memory");
-}
-
-/* Returns 1 if the lock is obtained, 0 otherwise. */
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"       movi    %0, 1\n"
-			"       s32c1i  %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&lock->slock)
-			: "memory");
-
-	return tmp == 0 ? 1 : 0;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       s32ri   %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&lock->slock)
-			: "memory");
-}
-
-/*
- * rwlock
- *
- * Read-write locks are really a more flexible spinlock. They allow
- * multiple readers but only one writer. Write ownership is exclusive
- * (i.e., all other readers and writers are blocked from ownership while
- * there is a write owner). These rwlocks are unfair to writers. Writers
- * can be starved for an indefinite time by readers.
- *
- * possible values:
- *
- *   0          nobody owns the rwlock
- *  >0          one or more readers own the rwlock
- *              (the positive value is the actual number of readers)
- *  0x80000000  one writer owns the rwlock, no other writers, no readers
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"1:     movi    %0, 1\n"
-			"       slli    %0, %0, 31\n"
-			"       s32c1i  %0, %1, 0\n"
-			"       bnez    %0, 1b\n"
-			: "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-}
-
-/* Returns 1 if the lock is obtained, 0 otherwise. */
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"       movi    %0, 1\n"
-			"       slli    %0, %0, 31\n"
-			"       s32c1i  %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-
-	return tmp == 0 ? 1 : 0;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       s32ri   %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-	unsigned long result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %2, 0\n"
-			"       bltz    %1, 1b\n"
-			"       wsr     %1, scompare1\n"
-			"       addi    %0, %1, 1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-}
-
-/* Returns 1 if the lock is obtained, 0 otherwise. */
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long result;
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       l32i    %1, %2, 0\n"
-			"       addi    %0, %1, 1\n"
-			"       bltz    %0, 1f\n"
-			"       wsr     %1, scompare1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       sub     %0, %0, %1\n"
-			"1:\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-
-	return result == 0;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %2, 0\n"
-			"       addi    %0, %1, -1\n"
-			"       wsr     %1, scompare1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp1), "=&a" (tmp2)
-			: "a" (&rw->lock)
-			: "memory");
-}
+#define smp_mb__after_spinlock() smp_mb()

 #endif /* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
index bb1fe6c1816e..64c9389254f1 100644
--- a/arch/xtensa/include/asm/spinlock_types.h
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -2,20 +2,11 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
+#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
 # error "please don't include this file directly"
 #endif

-typedef struct {
-	volatile unsigned int slock;
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
-
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>

 #endif
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index f333f10a7650..f092cc3f4e66 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -121,15 +121,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_WORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
 			_TIF_SYSCALL_TRACEPOINT)

-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
-
 #define THREAD_SIZE KERNEL_STACK_SIZE
 #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 74969a437a37..db278a9e80c7 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -52,8 +52,6 @@
 extern void ret_from_fork(void);
 extern void ret_from_kernel_thread(void);

-struct task_struct *current_set[NR_CPUS] = {&init_task, };
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);

@@ -321,8 +319,8 @@ unsigned long get_wchan(struct task_struct *p)

 		/* Stack layout: sp-4: ra, sp-3: sp' */

-		pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
-		sp = *(unsigned long *)sp - 3;
+		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+		sp = SPILL_SLOT(sp, 1);
 	} while (count++ < 16);
 	return 0;
 }
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index be1f280c322c..3699d6d3e479 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -372,8 +372,7 @@ static void send_ipi_message(const struct cpumask *callmask,
 	unsigned long mask = 0;

 	for_each_cpu(index, callmask)
-		if (index != smp_processor_id())
-			mask |= 1 << index;
+		mask |= 1 << index;

 	set_er(mask, MIPISET(msg_id));
 }
@@ -412,22 +411,31 @@ irqreturn_t ipi_interrupt(int irq, void *dev_id)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-	unsigned int msg;
-	unsigned i;

-	msg = get_er(MIPICAUSE(cpu));
-	for (i = 0; i < IPI_MAX; i++)
-		if (msg & (1 << i)) {
-			set_er(1 << i, MIPICAUSE(cpu));
-			++ipi->ipi_count[i];
+	for (;;) {
+		unsigned int msg;
+
+		msg = get_er(MIPICAUSE(cpu));
+		set_er(msg, MIPICAUSE(cpu));
+
+		if (!msg)
+			break;
+
+		if (msg & (1 << IPI_CALL_FUNC)) {
+			++ipi->ipi_count[IPI_CALL_FUNC];
+			generic_smp_call_function_interrupt();
 		}

-	if (msg & (1 << IPI_RESCHEDULE))
-		scheduler_ipi();
-	if (msg & (1 << IPI_CALL_FUNC))
-		generic_smp_call_function_interrupt();
-	if (msg & (1 << IPI_CPU_STOP))
-		ipi_cpu_stop(cpu);
+		if (msg & (1 << IPI_RESCHEDULE)) {
+			++ipi->ipi_count[IPI_RESCHEDULE];
+			scheduler_ipi();
+		}
+
+		if (msg & (1 << IPI_CPU_STOP)) {
+			++ipi->ipi_count[IPI_CPU_STOP];
+			ipi_cpu_stop(cpu);
+		}
+	}

 	return IRQ_HANDLED;
 }
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 378186b5eb40..69db8c93c1f9 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -52,14 +52,11 @@ static struct clocksource ccount_clocksource = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };

-static int ccount_timer_set_next_event(unsigned long delta,
-		struct clock_event_device *dev);
 struct ccount_timer {
 	struct clock_event_device evt;
 	int irq_enabled;
 	char name[24];
 };
-static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);

 static int ccount_timer_set_next_event(unsigned long delta,
 		struct clock_event_device *dev)
@@ -107,7 +104,30 @@ static int ccount_timer_set_oneshot(struct clock_event_device *evt)
 	return 0;
 }

-static irqreturn_t timer_interrupt(int irq, void *dev_id);
+static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = {
+	.evt = {
+		.features = CLOCK_EVT_FEAT_ONESHOT,
+		.rating = 300,
+		.set_next_event = ccount_timer_set_next_event,
+		.set_state_shutdown = ccount_timer_shutdown,
+		.set_state_oneshot = ccount_timer_set_oneshot,
+		.tick_resume = ccount_timer_set_oneshot,
+	},
+};
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
+
+	set_linux_timer(get_linux_timer());
+	evt->event_handler(evt);
+
+	/* Allow platform to do something useful (Wdog). */
+	platform_heartbeat();
+
+	return IRQ_HANDLED;
+}
+
 static struct irqaction timer_irqaction = {
 	.handler = timer_interrupt,
 	.flags = IRQF_TIMER,
@@ -120,14 +140,8 @@ void local_timer_setup(unsigned cpu)
 	struct clock_event_device *clockevent = &timer->evt;

 	timer->irq_enabled = 1;
-	clockevent->name = timer->name;
 	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
-	clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
-	clockevent->rating = 300;
-	clockevent->set_next_event = ccount_timer_set_next_event;
-	clockevent->set_state_shutdown = ccount_timer_shutdown;
-	clockevent->set_state_oneshot = ccount_timer_set_oneshot;
-	clockevent->tick_resume = ccount_timer_set_oneshot;
+	clockevent->name = timer->name;
 	clockevent->cpumask = cpumask_of(cpu);
 	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
 	if (WARN(!clockevent->irq, "error: can't map timer irq"))
@@ -190,23 +204,6 @@ void __init time_init(void)
 	timer_probe();
 }

-/*
- * The timer interrupt is called HZ times per second.
- */
-
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
-
-	set_linux_timer(get_linux_timer());
-	evt->event_handler(evt);
-
-	/* Allow platform to do something useful (Wdog). */
-	platform_heartbeat();
-
-	return IRQ_HANDLED;
-}
-
 #ifndef CONFIG_GENERIC_CALIBRATE_DELAY
 void calibrate_delay(void)
 {
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index e6fa55aa1ccb..454d53096bc9 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -420,16 +420,15 @@ void __init trap_init(void)

 	/* Setup specific handlers. */

 	for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
-
 		int fast = dispatch_init_table[i].fast;
 		int cause = dispatch_init_table[i].cause;
 		void *handler = dispatch_init_table[i].handler;

 		if (fast == 0)
 			set_handler(default_handler, cause, handler);
-		if (fast && fast & USER)
+		if ((fast & USER) != 0)
 			set_handler(fast_user_handler, cause, handler);
-		if (fast && fast & KRNL)
+		if ((fast & KRNL) != 0)
 			set_handler(fast_kernel_handler, cause, handler);
 	}
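A closing note on the get_wchan() change in the process.c hunk above: the comment in that function says the return address and the caller's stack pointer sit four and three words below sp, yet the old code dereferenced sp first and then subtracted from the loaded value, which is a different expression entirely; the fix reads the proper spill slots through the arch's SPILL_SLOT() accessor (its definition lives outside this diff). A tiny standalone illustration of that operator-precedence difference, with a made-up frame array standing in for the real stack:

```c
#include <stdio.h>

int main(void)
{
	/* A fake 8-word stack frame; &frame[4] plays the role of "sp". */
	unsigned long frame[8] = { 100, 200, 300, 400, 500, 600, 700, 800 };
	unsigned long sp = (unsigned long)&frame[4];

	/* What the old code computed: dereference sp, then subtract 4
	 * from the loaded VALUE (500 - 4 = 496). */
	unsigned long old_way = *(unsigned long *)sp - 4;

	/* What the layout comment describes: read the word four slots
	 * below sp, i.e. frame[0] (100). */
	unsigned long new_way = *((unsigned long *)sp - 4);

	printf("old_way=%lu new_way=%lu\n", old_way, new_way);
	return 0;
}
```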