Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/io.h          |  4 ++++
-rw-r--r--  include/asm-generic/kvm_para.h    |  5 +++++
-rw-r--r--  include/asm-generic/mutex-dec.h   | 10 ++++------
-rw-r--r--  include/asm-generic/mutex-null.h  |  2 +-
-rw-r--r--  include/asm-generic/mutex-xchg.h  | 10 ++++------
-rw-r--r--  include/asm-generic/pgtable.h     |  4 ++++
-rw-r--r--  include/asm-generic/tlb.h         | 17 +----------------
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 20 --------------------
8 files changed, 23 insertions(+), 49 deletions(-)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index ac9da00e9f2c..d5afe96adba6 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -343,8 +343,12 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_GENERIC_IOMAP */
 #endif /* CONFIG_HAS_IOPORT */
 
+#ifndef xlate_dev_kmem_ptr
 #define xlate_dev_kmem_ptr(p)	p
+#endif
+#ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr(p)	__va(p)
+#endif
 
 #ifdef CONFIG_VIRT_TO_BUS
 #ifndef virt_to_bus
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 9d96605f160a..fa25becbdcaf 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -18,4 +18,9 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }
 
+static inline bool kvm_para_available(void)
+{
+	return false;
+}
+
 #endif
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index f104af7cf437..d4f9fb4e53df 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index e1bbbc72b6a2..61069ed334e2 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -11,7 +11,7 @@
 #define _ASM_GENERIC_MUTEX_NULL_H
 
 #define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
+#define __mutex_fastpath_lock_retval(count)		(-1)
 #define __mutex_fastpath_unlock(count, fail_fn)		fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()		1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index c04e0db8a2d6..f169ec064785 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
-			return fail_fn(count);
+			return -1;
 	return 0;
 }
 
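The common thread in the three mutex-*.h hunks above is that __mutex_fastpath_lock_retval() no longer takes a fail_fn: on contention it simply returns -1 and the caller picks a slowpath, so one fastpath can serve several lock variants (interruptible, killable, and so on). Below is a rough user-space sketch of the xchg variant's new contract, using C11 atomics in place of the kernel's atomic_t; lock_retval_sketch() and the demo in main() are hypothetical, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the mutex-xchg.h fastpath: 1 = unlocked, 0 = locked,
 * -1 = locked with possible waiters. */
static int lock_retval_sketch(atomic_int *count)
{
	if (atomic_exchange(count, 0) != 1) {
		/* Not free: re-mark the lock contended so the unlock
		 * side also takes its slowpath. */
		if (atomic_exchange(count, -1) != 1)
			return -1;	/* caller must run a slowpath */
	}
	return 0;			/* fastpath success */
}

int main(void)
{
	atomic_int count = 1;

	printf("first lock:  %d\n", lock_retval_sketch(&count));	/* 0 */
	printf("second lock: %d\n", lock_retval_sketch(&count));	/* -1 */
	return 0;
}

Note the same subtlety as the real header: if the lock becomes free between the two exchanges, the second xchg returns 1 and the function still reports fastpath success, leaving the count at -1.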
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index a59ff51b0166..b1836987d506 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -692,4 +692,8 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
 
 #endif /* !__ASSEMBLY__ */
 
+#ifndef io_remap_pfn_range
+#define io_remap_pfn_range remap_pfn_range
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
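The io.h and pgtable.h hunks use the same idiom: the generic definition is wrapped in #ifndef, so an architecture header that defines its own xlate_dev_mem_ptr() or io_remap_pfn_range() wins without any Kconfig plumbing. A self-contained sketch of the pattern follows; xlate_sketch() is a made-up name, not kernel API.

#include <stdio.h>

/* An arch header, included first, can provide its own definition... */
#define xlate_sketch(p)	((char *)(p) + 1)	/* hypothetical override */

/* ...and the generic header then only fills the gap, exactly like the
 * #ifndef guards added to io.h and pgtable.h above. */
#ifndef xlate_sketch
#define xlate_sketch(p)	(p)			/* generic fallback */
#endif

int main(void)
{
	char buf[2];

	/* Prints 1: the override won, the generic fallback was skipped. */
	printf("override active: %d\n", xlate_sketch(buf) == buf + 1);
	return 0;
}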
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b1b1fa6ffffe..13821c339a41 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -97,11 +97,9 @@ struct mmu_gather {
 	unsigned long		start;
 	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
-				fast_mode  : 1; /* No batching */
-
 	/* we are in the middle of an operation to clear
 	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
+				fullmm : 1,
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -114,19 +112,6 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
-	return tlb->fast_mode;
-#else
-	/*
-	 * For UP we don't need to worry about TLB flush
-	 * and page free order so much..
-	 */
-	return 1;
-#endif
-}
-
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index eb58d2d7d971..4f2737208c42 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -68,14 +68,6 @@
  * are handled as text/data or they can be discarded (which
  * often happens at runtime)
  */
-#ifdef CONFIG_HOTPLUG
-#define DEV_KEEP(sec)    *(.dev##sec)
-#define DEV_DISCARD(sec)
-#else
-#define DEV_KEEP(sec)
-#define DEV_DISCARD(sec) *(.dev##sec)
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 #define CPU_KEEP(sec)    *(.cpu##sec)
 #define CPU_DISCARD(sec)
@@ -182,8 +174,6 @@
 	*(.data)							\
 	*(.ref.data)							\
 	*(.data..shared_aligned) /* percpu related */			\
-	DEV_KEEP(init.data)						\
-	DEV_KEEP(exit.data)						\
 	CPU_KEEP(init.data)						\
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
@@ -372,8 +362,6 @@
 	/* __*init sections */						\
 	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
 		*(.ref.rodata)						\
-		DEV_KEEP(init.rodata)					\
-		DEV_KEEP(exit.rodata)					\
 		CPU_KEEP(init.rodata)					\
 		CPU_KEEP(exit.rodata)					\
 		MEM_KEEP(init.rodata)					\
@@ -416,8 +404,6 @@
 		*(.text.hot)						\
 		*(.text)						\
 		*(.ref.text)						\
-	DEV_KEEP(init.text)						\
-	DEV_KEEP(exit.text)						\
 	CPU_KEEP(init.text)						\
 	CPU_KEEP(exit.text)						\
 	MEM_KEEP(init.text)						\
@@ -503,7 +489,6 @@
 /* init and exit section handling */
 #define INIT_DATA							\
 	*(.init.data)							\
-	DEV_DISCARD(init.data)						\
 	CPU_DISCARD(init.data)						\
 	MEM_DISCARD(init.data)						\
 	KERNEL_CTORS()							\
@@ -511,7 +496,6 @@
 	*(.init.rodata)							\
 	FTRACE_EVENTS()							\
 	TRACE_SYSCALLS()						\
-	DEV_DISCARD(init.rodata)					\
 	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.rodata)					\
 	CLK_OF_TABLES()							\
@@ -521,14 +505,11 @@
 
 #define INIT_TEXT							\
 	*(.init.text)							\
-	DEV_DISCARD(init.text)						\
 	CPU_DISCARD(init.text)						\
 	MEM_DISCARD(init.text)
 
 #define EXIT_DATA							\
 	*(.exit.data)							\
-	DEV_DISCARD(exit.data)						\
-	DEV_DISCARD(exit.rodata)					\
 	CPU_DISCARD(exit.data)						\
 	CPU_DISCARD(exit.rodata)					\
 	MEM_DISCARD(exit.data)						\
@@ -536,7 +517,6 @@
 
 #define EXIT_TEXT							\
 	*(.exit.text)							\
-	DEV_DISCARD(exit.text)						\
 	CPU_DISCARD(exit.text)						\
 	MEM_DISCARD(exit.text)
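The tlb.h hunks above remove tlb_fast_mode(), which let uniprocessor builds skip mmu_gather batching and free pages immediately; batching is now unconditional, so pages are always queued and freed only after the TLB flush. Below is a user-space sketch of that queue-then-flush shape; all names are hypothetical stand-ins for tlb_remove_page(), tlb_flush_mmu() and tlb_finish_mmu(), and the batch size is arbitrary.

#include <stdio.h>

#define BATCH_SIZE 4	/* the real batches hold many pages per allocation */

struct gather_sketch {
	const char *pages[BATCH_SIZE];
	int nr;
};

/* Stand-in for tlb_flush_mmu(): flush the TLB first, then free the
 * batched pages, so no CPU can still hold a stale translation. */
static void flush_sketch(struct gather_sketch *g)
{
	printf("flush TLB\n");
	for (int i = 0; i < g->nr; i++)
		printf("  free %s\n", g->pages[i]);
	g->nr = 0;
}

/* Stand-in for tlb_remove_page(): queue a page, flushing when full. */
static void remove_page_sketch(struct gather_sketch *g, const char *page)
{
	g->pages[g->nr++] = page;
	if (g->nr == BATCH_SIZE)
		flush_sketch(g);
}

int main(void)
{
	struct gather_sketch g = { .nr = 0 };
	const char *names[] = { "p0", "p1", "p2", "p3", "p4", "p5" };

	for (int i = 0; i < 6; i++)
		remove_page_sketch(&g, names[i]);
	flush_sketch(&g);	/* final flush, as tlb_finish_mmu() does */
	return 0;
}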