Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/atomic.h         |   7
-rw-r--r--   include/asm-generic/barrier.h        |   8
-rw-r--r--   include/asm-generic/bitops.h         |   9
-rw-r--r--   include/asm-generic/bitops/atomic.h  |   2
-rw-r--r--   include/asm-generic/bitops/lock.h    |   2
-rw-r--r--   include/asm-generic/dma-coherent.h   |  13
-rw-r--r--   include/asm-generic/ioctl.h          |   5
-rw-r--r--   include/asm-generic/pgtable.h        |   8
-rw-r--r--   include/asm-generic/qrwlock.h        | 166
-rw-r--r--   include/asm-generic/qrwlock_types.h  |  21
-rw-r--r--   include/asm-generic/unaligned.h      |  21
-rw-r--r--   include/asm-generic/vmlinux.lds.h    |  60
12 files changed, 244 insertions, 78 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 33bd2de3bc1e..9c79e7603459 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -16,6 +16,7 @@
 #define __ASM_GENERIC_ATOMIC_H
 
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #ifdef CONFIG_SMP
 /* Force people to define core atomics */
@@ -182,11 +183,5 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 }
 #endif
 
-/* Assume that atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()     barrier()
-#define smp_mb__after_atomic_dec()      barrier()
-#define smp_mb__before_atomic_inc()     barrier()
-#define smp_mb__after_atomic_inc()      barrier()
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 6f692f8ac664..1402fa855388 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -62,6 +62,14 @@
 #define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
 #endif
 
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()  smp_mb()
+#endif
+
 #define smp_store_release(p, v)                                         \
 do {                                                                    \
         compiletime_assert_atomic_type(*p);                             \
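The smp_mb__before_atomic()/smp_mb__after_atomic() pair added to barrier.h replaces the operation-specific hooks being removed in this series (smp_mb__{before,after}_atomic_{dec,inc}() above and smp_mb__{before,after}_clear_bit() below): one generic pair now orders memory accesses around any non-value-returning atomic. A minimal userspace C11 analogy of the usage pattern; the names put_ref, data and ref are illustrative, not from the patch:

/* A non-value-returning atomic (like kernel atomic_dec()) implies no
 * barrier, so a full fence is issued before it -- the role that
 * smp_mb__before_atomic() plays in kernel code. */
#include <stdatomic.h>
#include <stdio.h>

static int data;                /* plain shared data */
static atomic_int ref = 2;      /* stand-in for a kernel atomic_t */

static void put_ref(void)
{
        data = 42;                                 /* plain store */
        atomic_thread_fence(memory_order_seq_cst); /* ~ smp_mb__before_atomic() */
        atomic_fetch_sub_explicit(&ref, 1, memory_order_relaxed); /* ~ atomic_dec() */
}

int main(void)
{
        put_ref();
        printf("ref=%d data=%d\n", atomic_load(&ref), data);
        return 0;
}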
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 280ca7a96f75..dcdcacf2fd2b 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -11,14 +11,7 @@
 
 #include <linux/irqflags.h>
 #include <linux/compiler.h>
-
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit()      smp_mb()
-#define smp_mb__after_clear_bit()       smp_mb()
-#endif
+#include <asm/barrier.h>
 
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/ffz.h>
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 9ae6c34dc191..49673510b484 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(int nr, volatile unsigned long *addr)
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 308a9e22c802..c30266e94806 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -20,7 +20,7 @@
  */
 #define clear_bit_unlock(nr, addr)      \
 do {                                    \
-        smp_mb__before_clear_bit();     \
+        smp_mb__before_atomic();        \
         clear_bit(nr, addr);            \
 } while (0)
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 2be8a2dbc868..0297e5875798 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -16,16 +16,13 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
  * Standard interface
  */
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-                            dma_addr_t device_addr, size_t size, int flags);
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+                                dma_addr_t device_addr, size_t size, int flags);
 
-extern void
-dma_release_declared_memory(struct device *dev);
+void dma_release_declared_memory(struct device *dev);
 
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-                                  dma_addr_t device_addr, size_t size);
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                        dma_addr_t device_addr, size_t size);
 #else
 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_coherent(dev, order, vaddr) (0)
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
index d17295b290fa..297fb0d7cd6c 100644
--- a/include/asm-generic/ioctl.h
+++ b/include/asm-generic/ioctl.h
@@ -3,10 +3,15 @@
 
 #include <uapi/asm-generic/ioctl.h>
 
+#ifdef __CHECKER__
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#else
 /* provoke compile error for invalid uses of size argument */
 extern unsigned int __invalid_size_argument_for_IOC;
 #define _IOC_TYPECHECK(t) \
         ((sizeof(t) == sizeof(t[1]) && \
           sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
           sizeof(t) : __invalid_size_argument_for_IOC)
+#endif
+
 #endif /* _ASM_GENERIC_IOCTL_H */
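_IOC_TYPECHECK() is used by the _IOR()/_IOW() encoders to fold a type's size into the ioctl number while making an invalid size argument a build failure: for a valid type the ternary collapses to a compile-time sizeof, while an invalid one references the undefined extern symbol and fails to link. sparse (__CHECKER__) cannot handle that trick, hence the plain sizeof bypass. A standalone sketch of the mechanism, assuming _IOC_SIZEBITS is 14 as in uapi/asm-generic/ioctl.h:

#include <stdio.h>

#define _IOC_SIZEBITS 14
extern unsigned int __invalid_size_argument_for_IOC; /* never defined */

#define _IOC_TYPECHECK(t) \
        ((sizeof(t) == sizeof(t[1]) && \
          sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
          sizeof(t) : __invalid_size_argument_for_IOC)

struct args { int fd; long len; };

int main(void)
{
        /* Valid type: the condition is constant-true, so the compiler
         * folds the ternary and never emits the extern reference. */
        printf("%zu\n", (size_t)_IOC_TYPECHECK(struct args));
        /* An invalid use (e.g. a type >= 1 << _IOC_SIZEBITS) would pull
         * in the undefined extern and break the link. */
        return 0;
}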
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index a8015a7a55bb..53b2acc38213 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -233,6 +233,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 # define pte_accessible(mm, pte)        ((void)(pte), 1)
 #endif
 
+#ifndef pte_present_nonuma
+#define pte_present_nonuma(pte) pte_present(pte)
+#endif
+
 #ifndef flush_tlb_fix_spurious_fault
 #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
 #endif
@@ -670,7 +674,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
 static inline int pte_numa(pte_t pte)
 {
         return (pte_flags(pte) &
-                (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+                (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
 }
 #endif
 
@@ -678,7 +682,7 @@ static inline int pte_numa(pte_t pte)
 static inline int pmd_numa(pmd_t pmd)
 {
         return (pmd_flags(pmd) &
-                (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+                (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
 }
 #endif
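Widening the pte_numa()/pmd_numa() mask with _PAGE_PROTNONE keeps a PROT_NONE mapping from being misread as a NUMA-hinting entry on architectures where the two encodings overlap. A toy model of the check; the bit values below are hypothetical, chosen only to make the two cases distinguishable:

#include <stdio.h>

#define _PAGE_PRESENT  0x001
#define _PAGE_PROTNONE 0x100
#define _PAGE_NUMA     0x200

static int pte_numa(unsigned long flags)
{
        /* True only when _PAGE_NUMA is set and both _PAGE_PROTNONE
         * and _PAGE_PRESENT are clear. */
        return (flags & (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT))
                == _PAGE_NUMA;
}

int main(void)
{
        /* Genuine NUMA-hinting entry. */
        printf("%d\n", pte_numa(_PAGE_NUMA));                  /* 1 */
        /* PROT_NONE mapping: matched the old two-bit mask, but is
         * correctly rejected once _PAGE_PROTNONE is in the mask. */
        printf("%d\n", pte_numa(_PAGE_NUMA | _PAGE_PROTNONE)); /* 0 */
        return 0;
}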
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
new file mode 100644
index 000000000000..6383d54bf983
--- /dev/null
+++ b/include/asm-generic/qrwlock.h
@@ -0,0 +1,166 @@
+/*
+ * Queue read/write lock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QRWLOCK_H
+#define __ASM_GENERIC_QRWLOCK_H
+
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+#include <asm-generic/qrwlock_types.h>
+
+/*
+ * Writer states & reader shift and bias
+ */
+#define _QW_WAITING     1               /* A writer is waiting     */
+#define _QW_LOCKED      0xff            /* A writer holds the lock */
+#define _QW_WMASK       0xff            /* Writer mask             */
+#define _QR_SHIFT       8               /* Reader count shift      */
+#define _QR_BIAS        (1U << _QR_SHIFT)
+
+/*
+ * External function declarations
+ */
+extern void queue_read_lock_slowpath(struct qrwlock *lock);
+extern void queue_write_lock_slowpath(struct qrwlock *lock);
+
+/**
+ * queue_read_can_lock- would read_trylock() succeed?
+ * @lock: Pointer to queue rwlock structure
+ */
+static inline int queue_read_can_lock(struct qrwlock *lock)
+{
+        return !(atomic_read(&lock->cnts) & _QW_WMASK);
+}
+
+/**
+ * queue_write_can_lock- would write_trylock() succeed?
+ * @lock: Pointer to queue rwlock structure
+ */
+static inline int queue_write_can_lock(struct qrwlock *lock)
+{
+        return !atomic_read(&lock->cnts);
+}
+
+/**
+ * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queue_read_trylock(struct qrwlock *lock)
+{
+        u32 cnts;
+
+        cnts = atomic_read(&lock->cnts);
+        if (likely(!(cnts & _QW_WMASK))) {
+                cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+                if (likely(!(cnts & _QW_WMASK)))
+                        return 1;
+                atomic_sub(_QR_BIAS, &lock->cnts);
+        }
+        return 0;
+}
+
+/**
+ * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queue_write_trylock(struct qrwlock *lock)
+{
+        u32 cnts;
+
+        cnts = atomic_read(&lock->cnts);
+        if (unlikely(cnts))
+                return 0;
+
+        return likely(atomic_cmpxchg(&lock->cnts,
+                                     cnts, cnts | _QW_LOCKED) == cnts);
+}
+/**
+ * queue_read_lock - acquire read lock of a queue rwlock
+ * @lock: Pointer to queue rwlock structure
+ */
+static inline void queue_read_lock(struct qrwlock *lock)
+{
+        u32 cnts;
+
+        cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+        if (likely(!(cnts & _QW_WMASK)))
+                return;
+
+        /* The slowpath will decrement the reader count, if necessary. */
+        queue_read_lock_slowpath(lock);
+}
+
+/**
+ * queue_write_lock - acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queue_write_lock(struct qrwlock *lock)
+{
+        /* Optimize for the unfair lock case where the fair flag is 0. */
+        if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+                return;
+
+        queue_write_lock_slowpath(lock);
+}
+
+/**
+ * queue_read_unlock - release read lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queue_read_unlock(struct qrwlock *lock)
+{
+        /*
+         * Atomically decrement the reader count
+         */
+        smp_mb__before_atomic();
+        atomic_sub(_QR_BIAS, &lock->cnts);
+}
+
+#ifndef queue_write_unlock
+/**
+ * queue_write_unlock - release write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queue_write_unlock(struct qrwlock *lock)
+{
+        /*
+         * If the writer field is atomic, it can be cleared directly.
+         * Otherwise, an atomic subtraction will be used to clear it.
+         */
+        smp_mb__before_atomic();
+        atomic_sub(_QW_LOCKED, &lock->cnts);
+}
+#endif
+
+/*
+ * Remapping rwlock architecture specific functions to the corresponding
+ * queue rwlock functions.
+ */
+#define arch_read_can_lock(l)   queue_read_can_lock(l)
+#define arch_write_can_lock(l)  queue_write_can_lock(l)
+#define arch_read_lock(l)       queue_read_lock(l)
+#define arch_write_lock(l)      queue_write_lock(l)
+#define arch_read_trylock(l)    queue_read_trylock(l)
+#define arch_write_trylock(l)   queue_write_trylock(l)
+#define arch_read_unlock(l)     queue_read_unlock(l)
+#define arch_write_unlock(l)    queue_write_unlock(l)
+
+#endif /* __ASM_GENERIC_QRWLOCK_H */
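On an architecture that adopts this header, the generic read_lock()/write_lock() API resolves to the queue_* functions through the arch_* defines above. A hypothetical kernel-context sketch (not a standalone program; stats_lock and stats are illustrative names, not part of the patch):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);
static unsigned long stats;

static unsigned long stats_read(void)
{
        unsigned long v;

        read_lock(&stats_lock);   /* -> arch_read_lock -> queue_read_lock */
        v = stats;
        read_unlock(&stats_lock); /* -> queue_read_unlock */
        return v;
}

static void stats_bump(void)
{
        write_lock(&stats_lock);   /* -> queue_write_lock */
        stats++;
        write_unlock(&stats_lock); /* -> queue_write_unlock */
}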
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
new file mode 100644
index 000000000000..4d76f24df518
--- /dev/null
+++ b/include/asm-generic/qrwlock_types.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H
+#define __ASM_GENERIC_QRWLOCK_TYPES_H
+
+#include <linux/types.h>
+#include <asm/spinlock_types.h>
+
+/*
+ * The queue read/write lock data structure
+ */
+
+typedef struct qrwlock {
+        atomic_t                cnts;
+        arch_spinlock_t         lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED {               \
+        .cnts = ATOMIC_INIT(0),                 \
+        .lock = __ARCH_SPIN_LOCK_UNLOCKED,      \
+}
+
+#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 03cf5936bad6..1ac097279db1 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -4,22 +4,27 @@
 /*
  * This is the most generic implementation of unaligned accesses
  * and should work almost anywhere.
- *
- * If an architecture can handle unaligned accesses in hardware,
- * it may want to use the linux/unaligned/access_ok.h implementation
- * instead.
  */
 #include <asm/byteorder.h>
 
+/* Set by the arch if it can handle unaligned accesses in hardware. */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/access_ok.h>
+#endif
+
 #if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#  include <linux/unaligned/le_struct.h>
+#  include <linux/unaligned/be_byteshift.h>
+# endif
 # include <linux/unaligned/generic.h>
 # define get_unaligned  __get_unaligned_le
 # define put_unaligned  __put_unaligned_le
 #elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#  include <linux/unaligned/be_struct.h>
+#  include <linux/unaligned/le_byteshift.h>
+# endif
 # include <linux/unaligned/generic.h>
 # define get_unaligned  __get_unaligned_be
 # define put_unaligned  __put_unaligned_be
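With CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the architecture takes the access_ok.h fast path (plain loads and stores); otherwise the struct/byteshift helpers keep get_unaligned()/put_unaligned() safe on strict-alignment CPUs. A userspace sketch of the portable fallback idiom, using memcpy in place of the packed-struct trick in linux/unaligned/le_struct.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_u32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* never dereferences p directly */
        return v;
}

int main(void)
{
        unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

        /* buf + 1 is misaligned for a uint32_t; a direct *(uint32_t *)
         * cast would be undefined behaviour and can trap on
         * strict-alignment architectures. Prints 0x12345678 on a
         * little-endian host. */
        printf("0x%08x\n", get_unaligned_u32(buf + 1));
        return 0;
}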
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8e0204a68c74..471ba48c7ae4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -148,52 +148,23 @@
 #define TRACE_SYSCALLS()
 #endif
 
-#ifdef CONFIG_CLKSRC_OF
-#define CLKSRC_OF_TABLES() . = ALIGN(8);                        \
-                           VMLINUX_SYMBOL(__clksrc_of_table) = .; \
-                           *(__clksrc_of_table)                 \
-                           *(__clksrc_of_table_end)
-#else
-#define CLKSRC_OF_TABLES()
-#endif
-#ifdef CONFIG_IRQCHIP
-#define IRQCHIP_OF_MATCH_TABLE()                                \
+#define ___OF_TABLE(cfg, name)  _OF_TABLE_##cfg(name)
+#define __OF_TABLE(cfg, name)   ___OF_TABLE(cfg, name)
+#define OF_TABLE(cfg, name)     __OF_TABLE(config_enabled(cfg), name)
+#define _OF_TABLE_0(name)
+#define _OF_TABLE_1(name)                                       \
        . = ALIGN(8);                                            \
-       VMLINUX_SYMBOL(__irqchip_begin) = .;                     \
-       *(__irqchip_of_table)                                    \
-       *(__irqchip_of_end)
-#else
-#define IRQCHIP_OF_MATCH_TABLE()
-#endif
-
-#ifdef CONFIG_COMMON_CLK
-#define CLK_OF_TABLES() . = ALIGN(8);                           \
-                       VMLINUX_SYMBOL(__clk_of_table) = .;      \
-                       *(__clk_of_table)                        \
-                       *(__clk_of_table_end)
-#else
-#define CLK_OF_TABLES()
-#endif
-
-#ifdef CONFIG_OF_RESERVED_MEM
-#define RESERVEDMEM_OF_TABLES()                                 \
-       . = ALIGN(8);                                            \
-       VMLINUX_SYMBOL(__reservedmem_of_table) = .;              \
-       *(__reservedmem_of_table)                                \
-       *(__reservedmem_of_table_end)
-#else
-#define RESERVEDMEM_OF_TABLES()
-#endif
+       VMLINUX_SYMBOL(__##name##_of_table) = .;                 \
+       *(__##name##_of_table)                                   \
+       *(__##name##_of_table_end)
 
-#ifdef CONFIG_SMP
-#define CPU_METHOD_OF_TABLES() . = ALIGN(8);                    \
-                          VMLINUX_SYMBOL(__cpu_method_of_table_begin) = .; \
-                          *(__cpu_method_of_table)              \
-                          VMLINUX_SYMBOL(__cpu_method_of_table_end) = .;
-#else
-#define CPU_METHOD_OF_TABLES()
-#endif
+#define CLKSRC_OF_TABLES()      OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
+#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
+#define CLK_OF_TABLES()         OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
+#define CPU_METHOD_OF_TABLES()  OF_TABLE(CONFIG_SMP, cpu_method)
+#define EARLYCON_OF_TABLES()    OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
 
 #define KERNEL_DTB()                                            \
         STRUCT_ALIGN();                                         \
@@ -523,7 +494,8 @@
        CLKSRC_OF_TABLES()                                       \
        CPU_METHOD_OF_TABLES()                                   \
        KERNEL_DTB()                                             \
-       IRQCHIP_OF_MATCH_TABLE()
+       IRQCHIP_OF_MATCH_TABLE()                                 \
+       EARLYCON_OF_TABLES()
 
 #define INIT_TEXT                                               \
        *(.init.text)                                            \
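OF_TABLE() works because config_enabled() expands to a literal 1 or 0, which is then token-pasted to select _OF_TABLE_1() (emit the section entries) or _OF_TABLE_0() (emit nothing). A standalone preprocessor demo of the trick; CONFIG_DEMO and CONFIG_MISSING are hypothetical, config_enabled() is modeled on include/linux/kconfig.h, and the _OF_TABLE_* bodies are instrumented to return strings so the selection is visible:

#include <stdio.h>

#define CONFIG_DEMO 1                   /* pretend Kconfig enabled it */

/* config_enabled(x) -> 1 if x is defined to 1, else 0 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __config_enabled(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
#define config_enabled(cfg) _config_enabled(cfg)

#define _OF_TABLE_0(name) "(" #name " table omitted)"
#define _OF_TABLE_1(name) "(" #name " table emitted)"
#define ___OF_TABLE(cfg, name)  _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)   ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)     __OF_TABLE(config_enabled(cfg), name)

int main(void)
{
        puts(OF_TABLE(CONFIG_DEMO, irqchip));    /* defined to 1 -> emitted */
        puts(OF_TABLE(CONFIG_MISSING, irqchip)); /* undefined   -> omitted */
        return 0;
}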