author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-04 01:27:01 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-04 01:27:01 +0200
commit		c706c7eb0d08098f0d768aeef945d7cf1f8858b4 (patch)
tree		c629c9705514b70dd3e8f8dc67fa8d849871886c /arch/arm
parent		Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent		Merge branch 'devel-stable' into for-linus (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM development updates from Russell King:
"Included in this update:
- moving PSCI code from ARM64/ARM to drivers/
- removal of some architecture internals from global kernel view
- addition of software based "privileged no access" support using the
old domains register to turn off the ability for kernel
loads/stores to access userspace. Only the proper accessors will
be usable.
- addition of early fixmap support for early console
- re-addition (and reimplementation) of OMAP special interconnect
barrier
- removal of finish_arch_switch()
- only expose cpuX/online in sysfs if hotpluggable
- a number of code cleanups"
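
The "privileged no access" item in the list above is the core of this pull: by default the user domain in the Domain Access Control Register (DACR, CP15 c3 register 0) is set to "no access", and the user accessors temporarily flip it to "client" around each access, restoring the old value afterwards. Below is a minimal, self-contained C model of that save/enable/restore pattern; the shadow variable and values are illustrative stand-ins for the real CP15 register state, not kernel API.

#include <stdio.h>

#define DOMAIN_USER        1      /* domain number used for userspace  */
#define DOMAIN_NOACCESS    0      /* any access faults                 */
#define DOMAIN_CLIENT      1      /* accesses checked against the PTEs */

#define domain_mask(dom)       (3u << (2 * (dom)))
#define domain_val(dom, type)  ((unsigned int)(type) << (2 * (dom)))

/* Stand-in for the CP15 c3 domain register (illustrative only). */
static unsigned int dacr_shadow = domain_val(DOMAIN_USER, DOMAIN_NOACCESS);

/* Open a window for user accesses; return the previous state. */
static unsigned int uaccess_save_and_enable(void)
{
	unsigned int old = dacr_shadow;

	dacr_shadow = (old & ~domain_mask(DOMAIN_USER)) |
		      domain_val(DOMAIN_USER, DOMAIN_CLIENT);
	return old;
}

/* Close the window by restoring whatever was there before. */
static void uaccess_restore(unsigned int flags)
{
	dacr_shadow = flags;
}

int main(void)
{
	/* Every user accessor brackets the access like this; a stray
	 * kernel dereference of a user pointer outside the bracket would
	 * fault on real hardware, since the user domain is "no access". */
	unsigned int flags = uaccess_save_and_enable();
	printf("window open,   DACR shadow = %#x\n", dacr_shadow);
	uaccess_restore(flags);
	printf("window closed, DACR shadow = %#x\n", dacr_shadow);
	return 0;
}

The real helpers added by this series, uaccess_save_and_enable() and uaccess_restore() in arch/arm/include/asm/uaccess.h, appear in the diff further below.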
* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (41 commits)
ARM: software-based priviledged-no-access support
ARM: entry: provide uaccess assembly macro hooks
ARM: entry: get rid of multiple macro definitions
ARM: 8421/1: smp: Collapse arch_cpu_idle_dead() into cpu_die()
ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore()
ARM: mm: improve do_ldrd_abort macro
ARM: entry: ensure that IRQs are enabled when calling syscall_trace_exit()
ARM: entry: efficiency cleanups
ARM: entry: get rid of asm_trace_hardirqs_on_cond
ARM: uaccess: simplify user access assembly
ARM: domains: remove DOMAIN_TABLE
ARM: domains: keep vectors in separate domain
ARM: domains: get rid of manager mode for user domain
ARM: domains: move initial domain setting value to asm/domains.h
ARM: domains: provide domain_mask()
ARM: domains: switch to keeping domain value in register
ARM: 8419/1: dma-mapping: harmonize definition of DMA_ERROR_CODE
ARM: 8417/1: refactor bitops functions with BIT_MASK() and BIT_WORD()
ARM: 8416/1: Feroceon: use of_iomap() to map register base
ARM: 8415/1: early fixmap support for earlycon
...
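
Commit "ARM: 8417/1" in the shortlog above replaces the open-coded mask and word-index arithmetic in the atomic bitops helpers with BIT_MASK()/BIT_WORD(). The small stand-alone sketch below demonstrates the equivalence; the two macros are written out locally (assuming 32-bit longs, as on ARM) so the example compiles on its own rather than relying on the kernel headers.

#include <assert.h>

#define BITS_PER_LONG 32	/* ARM: unsigned long is 32 bits wide */
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

int main(void)
{
	for (unsigned int bit = 0; bit < 256; bit++) {
		/* the old open-coded form ...                     */
		unsigned long mask = 1UL << (bit & 31);
		unsigned int  word = bit >> 5;

		/* ... computes exactly what the generic helpers do */
		assert(mask == BIT_MASK(bit));
		assert(word == BIT_WORD(bit));
	}
	return 0;
}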
Diffstat (limited to 'arch/arm')
78 files changed, 906 insertions, 1737 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 41cbb4a53066..0d1b717e1eca 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64 config ARCH_HAS_BANDGAP bool +config FIX_EARLYCON_MEM + def_bool y if MMU + config GENERIC_HWEIGHT bool default y @@ -1496,6 +1499,7 @@ config HOTPLUG_CPU config ARM_PSCI bool "Support for the ARM Power State Coordination Interface (PSCI)" depends on CPU_V7 + select ARM_PSCI_FW help Say Y here if you want Linux to communicate with system firmware implementing the PSCI specification for CPU-centric power @@ -1700,13 +1704,24 @@ config HIGHPTE consumed by page tables. Setting this option will allow user-space 2nd level page tables to reside in high memory. -config HW_PERF_EVENTS - bool "Enable hardware performance counter support for perf events" - depends on PERF_EVENTS +config CPU_SW_DOMAIN_PAN + bool "Enable use of CPU domains to implement privileged no-access" + depends on MMU && !ARM_LPAE default y help - Enable hardware performance counter support for perf events. If - disabled, perf events will use software events only. + Increase kernel security by ensuring that normal kernel accesses + are unable to access userspace addresses. This can help prevent + use-after-free bugs becoming an exploitable privilege escalation + by ensuring that magic values (such as LIST_POISON) will always + fault when dereferenced. + + CPUs with low-vector mappings use a best-efforts implementation. + Their lower 1MB needs to remain accessible for the vectors, but + the remainder of userspace will become appropriately inaccessible. + +config HW_PERF_EVENTS + def_bool y + depends on ARM_PMU config SYS_SUPPORTS_HUGETLBFS def_bool y diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c index 92e54d7c6f46..2b25b6038f66 100644 --- a/arch/arm/common/mcpm_platsmp.c +++ b/arch/arm/common/mcpm_platsmp.c @@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu) return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster); } -static int mcpm_cpu_disable(unsigned int cpu) +static bool mcpm_cpu_can_disable(unsigned int cpu) { - /* - * We assume all CPUs may be shut down. - * This would be the hook to use for eventual Secure - * OS migration requests as described in the PSCI spec. - */ - return 0; + /* We assume all CPUs may be shut down. 
*/ + return true; } static void mcpm_cpu_die(unsigned int cpu) @@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = { .smp_secondary_init = mcpm_secondary_init, #ifdef CONFIG_HOTPLUG_CPU .cpu_kill = mcpm_cpu_kill, - .cpu_disable = mcpm_cpu_disable, + .cpu_can_disable = mcpm_cpu_can_disable, .cpu_die = mcpm_cpu_die, #endif }; diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 30b3bc1666d2..be648eb47cd9 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += irq_regs.h generic-y += kdebug.h generic-y += local.h generic-y += local64.h -generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += msgbuf.h generic-y += param.h diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 4abe57279c66..7bbf325a4f31 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -108,33 +108,37 @@ .endm #endif - .macro asm_trace_hardirqs_off + .macro asm_trace_hardirqs_off, save=1 #if defined(CONFIG_TRACE_IRQFLAGS) + .if \save stmdb sp!, {r0-r3, ip, lr} + .endif bl trace_hardirqs_off + .if \save ldmia sp!, {r0-r3, ip, lr} + .endif #endif .endm - .macro asm_trace_hardirqs_on_cond, cond + .macro asm_trace_hardirqs_on, cond=al, save=1 #if defined(CONFIG_TRACE_IRQFLAGS) /* * actually the registers should be pushed and pop'd conditionally, but * after bl the flags are certainly clobbered */ + .if \save stmdb sp!, {r0-r3, ip, lr} + .endif bl\cond trace_hardirqs_on + .if \save ldmia sp!, {r0-r3, ip, lr} + .endif #endif .endm - .macro asm_trace_hardirqs_on - asm_trace_hardirqs_on_cond al - .endm - - .macro disable_irq + .macro disable_irq, save=1 disable_irq_notrace - asm_trace_hardirqs_off + asm_trace_hardirqs_off \save .endm .macro enable_irq @@ -173,7 +177,7 @@ .macro restore_irqs, oldcpsr tst \oldcpsr, #PSR_I_BIT - asm_trace_hardirqs_on_cond eq + asm_trace_hardirqs_on cond=eq restore_irqs_notrace \oldcpsr .endm @@ -445,6 +449,53 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro uaccess_disable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_enable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. 
+ */ + mov \tmp, #DACR_UACCESS_ENABLE + mcr p15, 0, \tmp, c3, c0, 0 + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_save, tmp +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + mrc p15, 0, \tmp, c3, c0, 0 + str \tmp, [sp, #S_FRAME_SIZE] +#endif + .endm + + .macro uaccess_restore +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + ldr r0, [sp, #S_FRAME_SIZE] + mcr p15, 0, r0, c3, c0, 0 +#endif + .endm + + .macro uaccess_save_and_disable, tmp + uaccess_save \tmp + uaccess_disable \tmp + .endm + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 70393574e0fa..3ff5642d9788 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -2,7 +2,6 @@ #define __ASM_BARRIER_H #ifndef __ASSEMBLY__ -#include <asm/outercache.h> #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); @@ -37,12 +36,20 @@ #define dmb(x) __asm__ __volatile__ ("" : : : "memory") #endif +#ifdef CONFIG_ARM_HEAVY_MB +extern void (*soc_mb)(void); +extern void arm_heavy_mb(void); +#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0) +#else +#define __arm_heavy_mb(x...) dsb(x) +#endif + #ifdef CONFIG_ARCH_HAS_BARRIERS #include <mach/barriers.h> #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#define mb() do { dsb(); outer_sync(); } while (0) +#define mb() __arm_heavy_mb() #define rmb() dsb() -#define wmb() do { dsb(st); outer_sync(); } while (0) +#define wmb() __arm_heavy_mb(st) #define dma_rmb() dmb(osh) #define dma_wmb() dmb(oshst) #else diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 56380995f4c3..e943e6cee254 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -35,9 +35,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p |= mask; @@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long * static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p &= ~mask; @@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p ^= mask; @@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; @@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; @@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << 
(bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 4812cda8fd17..d5525bfc7e3e 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_map_area cpu_cache.dma_map_area -#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -extern void dmac_map_area(const void *, size_t, int); -extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); #endif @@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { } void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, void *kaddr, unsigned long len); +/** + * secure_flush_area - ensure coherency across the secure boundary + * @addr: virtual address + * @size: size of region + * + * Ensure that the specified area of memory is coherent across the secure + * boundary from the non-secure side. This is used when calling secure + * firmware where the secure firmware does not ensure coherency. + */ +static inline void secure_flush_area(const void *addr, size_t size) +{ + phys_addr_t phys = __pa(addr); + + __cpuc_flush_dcache_area((void *)addr, size); + outer_flush_range(phys, phys + size); +} + #endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index b52101d37ec7..a68b9d8a71fe 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -14,7 +14,7 @@ #include <xen/xen.h> #include <asm/xen/hypervisor.h> -#define DMA_ERROR_CODE (~0) +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) extern struct dma_map_ops arm_dma_ops; extern struct dma_map_ops arm_coherent_dma_ops; diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 6ddbe446425e..e878129f2fee 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -34,15 +34,14 @@ */ #ifndef CONFIG_IO_36 #define DOMAIN_KERNEL 0 -#define DOMAIN_TABLE 0 #define DOMAIN_USER 1 #define DOMAIN_IO 2 #else #define DOMAIN_KERNEL 2 -#define DOMAIN_TABLE 2 #define DOMAIN_USER 1 #define DOMAIN_IO 0 #endif +#define DOMAIN_VECTORS 3 /* * Domain types @@ -55,11 +54,46 @@ #define DOMAIN_MANAGER 1 #endif -#define domain_val(dom,type) ((type) << (2*(dom))) +#define domain_mask(dom) ((3) << (2 * (dom))) +#define domain_val(dom,type) ((type) << (2 * (dom))) + +#ifdef CONFIG_CPU_SW_DOMAIN_PAN +#define DACR_INIT \ + (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) +#else +#define DACR_INIT \ + (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) +#endif + +#define __DACR_DEFAULT \ + domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT) + +#define DACR_UACCESS_DISABLE \ + (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) +#define DACR_UACCESS_ENABLE \ + 
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT)) #ifndef __ASSEMBLY__ -#ifdef CONFIG_CPU_USE_DOMAINS +static inline unsigned int get_domain(void) +{ + unsigned int domain; + + asm( + "mrc p15, 0, %0, c3, c0 @ get domain" + : "=r" (domain)); + + return domain; +} + static inline void set_domain(unsigned val) { asm volatile( @@ -68,17 +102,16 @@ static inline void set_domain(unsigned val) isb(); } +#ifdef CONFIG_CPU_USE_DOMAINS #define modify_domain(dom,type) \ do { \ - struct thread_info *thread = current_thread_info(); \ - unsigned int domain = thread->cpu_domain; \ - domain &= ~domain_val(dom, DOMAIN_MANAGER); \ - thread->cpu_domain = domain | domain_val(dom, type); \ - set_domain(thread->cpu_domain); \ + unsigned int domain = get_domain(); \ + domain &= ~domain_mask(dom); \ + domain = domain | domain_val(dom, type); \ + set_domain(domain); \ } while (0) #else -static inline void set_domain(unsigned val) { } static inline void modify_domain(unsigned dom, unsigned type) { } #endif diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index 0415eae1df27..58cfe9f1a687 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -6,9 +6,13 @@ #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) #include <asm/kmap_types.h> +#include <asm/pgtable.h> enum fixed_addresses { - FIX_KMAP_BEGIN, + FIX_EARLYCON_MEM_BASE, + __end_of_permanent_fixed_addresses, + + FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, /* Support writing RO kernel text via kprobes, jump labels, etc. */ @@ -18,7 +22,16 @@ enum fixed_addresses { __end_of_fixed_addresses }; +#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY) + +#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK) + +/* Used by set_fixmap_(io|nocache), both meant for mapping a device */ +#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED) +#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); +void __init early_fixmap_init(void); #include <asm-generic/fixmap.h> diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 5eed82809d82..6795368ad023 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -22,8 +22,11 @@ #ifdef CONFIG_SMP #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ +({ \ + unsigned int __ua_flags; \ smp_mb(); \ prefetchw(uaddr); \ + __ua_flags = uaccess_save_and_enable(); \ __asm__ __volatile__( \ "1: ldrex %1, [%3]\n" \ " " insn "\n" \ @@ -34,12 +37,15 @@ __futex_atomic_ex_table("%5") \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "cc", "memory"); \ + uaccess_restore(__ua_flags); \ +}) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + unsigned int __ua_flags; int ret; u32 val; @@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, smp_mb(); /* Prefetching cannot fault */ prefetchw(uaddr); + __ua_flags = uaccess_save_and_enable(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: ldrex %1, [%4]\n" " teq %1, %2\n" @@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "=&r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); + uaccess_restore(__ua_flags); smp_mb(); *uval = val; @@ -73,6 +81,8 @@ 
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, #include <asm/domain.h> #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ +({ \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ __asm__ __volatile__( \ "1: " TUSER(ldr) " %1, [%3]\n" \ " " insn "\n" \ @@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __futex_atomic_ex_table("%5") \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "cc", "memory"); \ + uaccess_restore(__ua_flags); \ +}) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + unsigned int __ua_flags; int ret = 0; u32 val; @@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; preempt_disable(); + __ua_flags = uaccess_save_and_enable(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: " TUSER(ldr) " %1, [%4]\n" " teq %1, %2\n" @@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "+r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); + uaccess_restore(__ua_flags); *uval = val; preempt_enable(); diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index a3c24cd5b7c8..cab07f69382d 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } #define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) -#define dmac_map_area __glue(_CACHE,_dma_map_area) -#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) #endif diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index 563b92fc2f41..c2bf24f40177 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -129,21 +129,4 @@ static inline void outer_resume(void) { } #endif -#ifdef CONFIG_OUTER_CACHE_SYNC -/** - * outer_sync - perform a sync point for outer cache - * - * Ensure that all outer cache operations are complete and any store - * buffers are drained. - */ -static inline void outer_sync(void) -{ - if (outer_cache.sync) - outer_cache.sync(); -} -#else -static inline void outer_sync(void) -{ } -#endif - #endif /* __ASM_OUTERCACHE_H */ diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h index 5e68278e953e..d0131ee6f6af 100644 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -23,6 +23,7 @@ #define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) +#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f) #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ /* * - section diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h deleted file mode 100644 index 3fc87dfd77e6..000000000000 --- a/arch/arm/include/asm/pmu.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * linux/arch/arm/include/asm/pmu.h - * - * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#ifndef __ARM_PMU_H__ -#define __ARM_PMU_H__ - -#include <linux/interrupt.h> -#include <linux/perf_event.h> - -#include <asm/cputype.h> - -/* - * struct arm_pmu_platdata - ARM PMU platform data - * - * @handle_irq: an optional handler which will be called from the - * interrupt and passed the address of the low level handler, - * and can be used to implement any platform specific handling - * before or after calling it. - */ -struct arm_pmu_platdata { - irqreturn_t (*handle_irq)(int irq, void *dev, - irq_handler_t pmu_handler); -}; - -#ifdef CONFIG_HW_PERF_EVENTS - -/* - * The ARMv7 CPU PMU supports up to 32 event counters. - */ -#define ARMPMU_MAX_HWEVENTS 32 - -#define HW_OP_UNSUPPORTED 0xFFFF -#define C(_x) PERF_COUNT_HW_CACHE_##_x -#define CACHE_OP_UNSUPPORTED 0xFFFF - -#define PERF_MAP_ALL_UNSUPPORTED \ - [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED - -#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ -[0 ... C(MAX) - 1] = { \ - [0 ... C(OP_MAX) - 1] = { \ - [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \ - }, \ -} - -/* The events for a given PMU register set. */ -struct pmu_hw_events { - /* - * The events that are active on the PMU for the given index. - */ - struct perf_event *events[ARMPMU_MAX_HWEVENTS]; - - /* - * A 1 bit for an index indicates that the counter is being used for - * an event. A 0 means that the counter can be used. - */ - DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS); - - /* - * Hardware lock to serialize accesses to PMU registers. Needed for the - * read/modify/write sequences. - */ - raw_spinlock_t pmu_lock; - - /* - * When using percpu IRQs, we need a percpu dev_id. Place it here as we - * already have to allocate this struct per cpu. - */ - struct arm_pmu *percpu_pmu; -}; - -struct arm_pmu { - struct pmu pmu; - cpumask_t active_irqs; - cpumask_t supported_cpus; - int *irq_affinity; - char *name; - irqreturn_t (*handle_irq)(int irq_num, void *dev); - void (*enable)(struct perf_event *event); - void (*disable)(struct perf_event *event); - int (*get_event_idx)(struct pmu_hw_events *hw_events, - struct perf_event *event); - void (*clear_event_idx)(struct pmu_hw_events *hw_events, - struct perf_event *event); - int (*set_event_filter)(struct hw_perf_event *evt, - struct perf_event_attr *attr); - u32 (*read_counter)(struct perf_event *event); - void (*write_counter)(struct perf_event *event, u32 val); - void (*start)(struct arm_pmu *); - void (*stop)(struct arm_pmu *); - void (*reset)(void *); - int (*request_irq)(struct arm_pmu *, irq_handler_t handler); - void (*free_irq)(struct arm_pmu *); - int (*map_event)(struct perf_event *event); - int num_events; - atomic_t active_events; - struct mutex reserve_mutex; - u64 max_period; - struct platform_device *plat_device; - struct pmu_hw_events __percpu *hw_events; - struct notifier_block hotplug_nb; -}; - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -int armpmu_register(struct arm_pmu *armpmu, int type); - -u64 armpmu_event_update(struct perf_event *event); - -int armpmu_event_set_period(struct perf_event *event); - -int armpmu_map_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask); - -struct pmu_probe_info { - unsigned int cpuid; - unsigned int mask; - int (*init)(struct arm_pmu *); -}; - -#define PMU_PROBE(_cpuid, _mask, _fn) \ -{ \ - .cpuid = (_cpuid), \ - .mask = (_mask), \ - .init = (_fn), \ -} - -#define 
ARM_PMU_PROBE(_cpuid, _fn) \ - PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn) - -#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK) - -#define XSCALE_PMU_PROBE(_version, _fn) \ - PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn) - -int arm_pmu_device_probe(struct platform_device *pdev, - const struct of_device_id *of_table, - const struct pmu_probe_info *probe_table); - -#endif /* CONFIG_HW_PERF_EVENTS */ - -#endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h index c25ef3ec6d1f..68ee3ce17b82 100644 --- a/arch/arm/include/asm/psci.h +++ b/arch/arm/include/asm/psci.h @@ -14,34 +14,11 @@ #ifndef __ASM_ARM_PSCI_H #define __ASM_ARM_PSCI_H -#define PSCI_POWER_STATE_TYPE_STANDBY 0 -#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 - -struct psci_power_state { - u16 id; - u8 type; - u8 affinity_level; -}; - -struct psci_operations { - int (*cpu_suspend)(struct psci_power_state state, - unsigned long entry_point); - int (*cpu_off)(struct psci_power_state state); - int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); - int (*migrate)(unsigned long cpuid); - int (*affinity_info)(unsigned long target_affinity, - unsigned long lowest_affinity_level); - int (*migrate_info_type)(void); -}; - -extern struct psci_operations psci_ops; extern struct smp_operations psci_smp_ops; #ifdef CONFIG_ARM_PSCI -int psci_init(void); bool psci_smp_available(void); #else -static inline int psci_init(void) { return 0; } static inline bool psci_smp_available(void) { return false; } #endif diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2f3ac1ba6fb4..ef356659b4f4 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -74,7 +74,6 @@ extern void secondary_startup_arm(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); -extern void cpu_die(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -105,6 +104,7 @@ struct smp_operations { #ifdef CONFIG_HOTPLUG_CPU int (*cpu_kill)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); + bool (*cpu_can_disable)(unsigned int cpu); int (*cpu_disable)(unsigned int cpu); #endif #endif diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 993e5224d8f7..f9080717fc88 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void) extern int platform_can_secondary_boot(void); extern int platform_can_cpu_hotplug(void); +#ifdef CONFIG_HOTPLUG_CPU +extern int platform_can_hotplug_cpu(unsigned int cpu); +#else +static inline int platform_can_hotplug_cpu(unsigned int cpu) +{ + return 0; +} +#endif + #endif diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index bd32eded3e50..d0a1119dcaf3 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -74,9 +74,6 @@ struct thread_info { .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ } #define init_thread_info (init_thread_union.thread_info) @@ -136,22 +133,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, /* * thread information flags: - * TIF_SYSCALL_TRACE - syscall trace active - * TIF_SYSCAL_AUDIT - syscall auditing active - * 
TIF_SIGPENDING - signal pending - * TIF_NEED_RESCHED - rescheduling necessary - * TIF_NOTIFY_RESUME - callback before returning to user * TIF_USEDFPU - FPU was used by this task this quantum (SMP) * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_SIGPENDING 0 -#define TIF_NEED_RESCHED 1 +#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ -#define TIF_UPROBE 7 -#define TIF_SYSCALL_TRACE 8 -#define TIF_SYSCALL_AUDIT 9 -#define TIF_SYSCALL_TRACEPOINT 10 -#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ +#define TIF_UPROBE 3 /* breakpointed or singlestepping */ +#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ + #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 74b17d09ef7a..8cc85a4ebec2 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -50,6 +50,35 @@ struct exception_table_entry extern int fixup_exception(struct pt_regs *regs); /* + * These two functions allow hooking accesses to userspace to increase + * system integrity by ensuring that the kernel can not inadvertantly + * perform such accesses (eg, via list poison values) which could then + * be exploited for priviledge escalation. + */ +static inline unsigned int uaccess_save_and_enable(void) +{ +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + unsigned int old_domain = get_domain(); + + /* Set the current domain access to permit user accesses */ + set_domain((old_domain & ~domain_mask(DOMAIN_USER)) | + domain_val(DOMAIN_USER, DOMAIN_CLIENT)); + + return old_domain; +#else + return 0; +#endif +} + +static inline void uaccess_restore(unsigned int flags) +{ +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* Restore the user access mask */ + set_domain(flags); +#endif +} + +/* * These two are intentionally not defined anywhere - if the kernel * code generates any references to them, that's a bug. 
*/ @@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *); register typeof(x) __r2 asm("r2"); \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(__p))) { \ case 1: \ if (sizeof((x)) >= 8) \ @@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *); break; \ default: __e = __get_user_bad(); break; \ } \ + uaccess_restore(__ua_flags); \ x = (typeof(*(p))) __r2; \ __e; \ }) @@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long); register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(__p))) { \ case 1: \ __put_user_x(__r2, __p, __e, __l, 1); \ @@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long); break; \ default: __e = __put_user_bad(); break; \ } \ + uaccess_restore(__ua_flags); \ __e; \ }) @@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs) do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ + unsigned int __ua_flags; \ __chk_user_ptr(ptr); \ might_fault(); \ + __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(ptr))) { \ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ default: (__gu_val) = __get_user_bad(); \ } \ + uaccess_restore(__ua_flags); \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_asm_byte(x, addr, err) \ +#define __get_user_asm(x, addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(ldrb) " %1,[%2],#0\n" \ + "1: " TUSER(instr) " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -329,6 +365,9 @@ do { \ : "r" (addr), "i" (-EFAULT) \ : "cc") +#define __get_user_asm_byte(x, addr, err) \ + __get_user_asm(x, addr, err, ldrb) + #ifndef __ARMEB__ #define __get_user_asm_half(x, __gu_addr, err) \ ({ \ @@ -348,22 +387,7 @@ do { \ #endif #define __get_user_asm_word(x, addr, err) \ - __asm__ __volatile__( \ - "1: " TUSER(ldr) " %1,[%2],#0\n" \ - "2:\n" \ - " .pushsection .text.fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " mov %1, #0\n" \ - " b 2b\n" \ - " .popsection\n" \ - " .pushsection __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .popsection" \ - : "+r" (err), "=&r" (x) \ - : "r" (addr), "i" (-EFAULT) \ - : "cc") + __get_user_asm(x, addr, err, ldr) #define __put_user(x, ptr) \ ({ \ @@ -381,9 +405,11 @@ do { \ #define __put_user_err(x, ptr, err) \ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ + unsigned int __ua_flags; \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ might_fault(); \ + __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(ptr))) { \ case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ @@ -391,11 +417,12 @@ do { \ case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ default: __put_user_bad(); \ } \ + uaccess_restore(__ua_flags); \ } while (0) -#define __put_user_asm_byte(x, __pu_addr, err) \ +#define __put_user_asm(x, __pu_addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(strb) " %1,[%2],#0\n" \ + "1: " TUSER(instr) " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -410,6 +437,9 @@ do { \ : "r" (x), "r" (__pu_addr), "i" 
(-EFAULT) \ : "cc") +#define __put_user_asm_byte(x, __pu_addr, err) \ + __put_user_asm(x, __pu_addr, err, strb) + #ifndef __ARMEB__ #define __put_user_asm_half(x, __pu_addr, err) \ ({ \ @@ -427,21 +457,7 @@ do { \ #endif #define __put_user_asm_word(x, __pu_addr, err) \ - __asm__ __volatile__( \ - "1: " TUSER(str) " %1,[%2],#0\n" \ - "2:\n" \ - " .pushsection .text.fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " b 2b\n" \ - " .popsection\n" \ - " .pushsection __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .popsection" \ - : "+r" (err) \ - : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ - : "cc") + __put_user_asm(x, __pu_addr, err, str) #ifndef __ARMEB__ #define __reg_oper0 "%R2" @@ -474,11 +490,46 @@ do { \ #ifdef CONFIG_MMU -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); -extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); +extern unsigned long __must_check +arm_copy_from_user(void *to, const void __user *from, unsigned long n); + +static inline unsigned long __must_check +__copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_copy_from_user(to, from, n); + uaccess_restore(__ua_flags); + return n; +} + +extern unsigned long __must_check +arm_copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __must_check +__copy_to_user_std(void __user *to, const void *from, unsigned long n); + +static inline unsigned long __must_check +__copy_to_user(void __user *to, const void *from, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_copy_to_user(to, from, n); + uaccess_restore(__ua_flags); + return n; +} + +extern unsigned long __must_check +arm_clear_user(void __user *addr, unsigned long n); +extern unsigned long __must_check +__clear_user_std(void __user *addr, unsigned long n); + +static inline unsigned long __must_check +__clear_user(void __user *addr, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_clear_user(addr, n); + uaccess_restore(__ua_flags); + return n; +} + #else #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) @@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo return n; } +/* These are from lib/ code, and use __get_user() and friends */ extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index e69f7a19735d..af9e59bf3831 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -71,8 +71,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o -obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o \ - perf_event_xscale.o perf_event_v6.o \ +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \ perf_event_v7.o CFLAGS_pj4-cp0.o := -marm AFLAGS_iwmmxt.o := 
-Wa,-mcpu=iwmmxt @@ -89,7 +88,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o ifeq ($(CONFIG_ARM_PSCI),y) -obj-y += psci.o psci-call.o +obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 5e5a51a99e68..f89811fb9a55 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy); #ifdef CONFIG_MMU EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(__copy_from_user); -EXPORT_SYMBOL(__copy_to_user); -EXPORT_SYMBOL(__clear_user); +EXPORT_SYMBOL(arm_copy_from_user); +EXPORT_SYMBOL(arm_copy_to_user); +EXPORT_SYMBOL(arm_clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index cb4fb1e69778..3e1c26eb32b4 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -149,10 +149,10 @@ ENDPROC(__und_invalid) #define SPFIX(code...) #endif - .macro svc_entry, stack_hole=0, trace=1 + .macro svc_entry, stack_hole=0, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.save {r0 - pc} ) - sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) + sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) #ifdef CONFIG_THUMB2_KERNEL SPFIX( str r0, [sp] ) @ temporarily saved SPFIX( mov r0, sp ) @@ -167,7 +167,7 @@ ENDPROC(__und_invalid) ldmia r0, {r3 - r5} add r7, sp, #S_SP - 4 @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" - add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) + add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) SPFIX( addeq r2, r2, #4 ) str r3, [sp, #-4]! @ save the "real" r0 copied @ from the exception stack @@ -185,6 +185,11 @@ ENDPROC(__und_invalid) @ stmia r7, {r2 - r6} + uaccess_save r0 + .if \uaccess + uaccess_disable r0 + .endif + .if \trace #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off @@ -194,7 +199,7 @@ ENDPROC(__und_invalid) .align 5 __dabt_svc: - svc_entry + svc_entry uaccess=0 mov r2, sp dabt_helper THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR @@ -368,7 +373,7 @@ ENDPROC(__fiq_abt) #error "sizeof(struct pt_regs) must be a multiple of 8" #endif - .macro usr_entry, trace=1 + .macro usr_entry, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.cantunwind ) @ don't unwind the user space sub sp, sp, #S_FRAME_SIZE @@ -400,6 +405,10 @@ ENDPROC(__fiq_abt) ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) + .if \uaccess + uaccess_disable ip + .endif + @ Enable the alignment trap while in kernel mode ATRAP( teq r8, r7) ATRAP( mcrne p15, 0, r8, c1, c0, 0) @@ -435,7 +444,7 @@ ENDPROC(__fiq_abt) .align 5 __dabt_usr: - usr_entry + usr_entry uaccess=0 kuser_cmpxchg_check mov r2, sp dabt_helper @@ -458,7 +467,7 @@ ENDPROC(__irq_usr) .align 5 __und_usr: - usr_entry + usr_entry uaccess=0 mov r2, r4 mov r3, r5 @@ -484,6 +493,8 @@ __und_usr: 1: ldrt r0, [r4] ARM_BE8(rev r0, r0) @ little endian instruction + uaccess_disable ip + @ r0 = 32-bit ARM instruction which caused the exception @ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r4 = PC value for the faulting instruction @@ -518,9 +529,10 @@ __und_usr_thumb: 2: ldrht r5, [r4] ARM_BE8(rev16 r5, r5) @ little endian instruction cmp r5, #0xe800 @ 32bit instruction if xx != 0 - blo __und_usr_fault_16 @ 16bit undefined instruction + blo __und_usr_fault_16_pan @ 16bit undefined instruction 3: ldrht r0, [r2] ARM_BE8(rev16 r0, r0) @ little endian instruction + uaccess_disable ip add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 str r2, [sp, #S_PC] @ it's a 
2x16bit instr, update orr r0, r0, r5, lsl #16 @@ -715,6 +727,8 @@ ENDPROC(no_fp) __und_usr_fault_32: mov r1, #4 b 1f +__und_usr_fault_16_pan: + uaccess_disable ip __und_usr_fault_16: mov r1, #2 1: mov r0, sp @@ -770,6 +784,8 @@ ENTRY(__switch_to) ldr r4, [r2, #TI_TP_VALUE] ldr r5, [r2, #TI_TP_VALUE + 4] #ifdef CONFIG_CPU_USE_DOMAINS + mrc p15, 0, r6, c3, c0, 0 @ Get domain register + str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register ldr r6, [r2, #TI_CPU_DOMAIN] #endif switch_tls r1, r4, r5, r3, r7 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index b48dd4f37f80..30a7228eaceb 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -24,35 +24,55 @@ .align 5 +#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING)) /* - * This is the fast syscall return path. We do as little as - * possible here, and this includes saving r0 back into the SVC - * stack. + * This is the fast syscall return path. We do as little as possible here, + * such as avoiding writing r0 to the stack. We only use this path if we + * have tracing and context tracking disabled - the overheads from those + * features make this path too inefficient. */ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) - disable_irq @ disable interrupts + disable_irq_notrace @ disable interrupts ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK - bne __sys_trace_return - tst r1, #_TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK bne fast_work_pending - asm_trace_hardirqs_on /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr - ct_user_enter restore_user_regs fast = 1, offset = S_OFF UNWIND(.fnend ) +ENDPROC(ret_fast_syscall) -/* - * Ok, we need to do extra processing, enter the slow path. - */ + /* Ok, we need to do extra processing, enter the slow path. */ fast_work_pending: str r0, [sp, #S_R0+S_OFF]! @ returned r0 -work_pending: + /* fall through to work_pending */ +#else +/* + * The "replacement" ret_fast_syscall for when tracing or context tracking + * is enabled. As we will need to call out to some C functions, we save + * r0 first to avoid needing to save registers around each C function call. + */ +ret_fast_syscall: + UNWIND(.fnstart ) + UNWIND(.cantunwind ) + str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 + disable_irq_notrace @ disable interrupts + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + beq no_work_pending + UNWIND(.fnend ) +ENDPROC(ret_fast_syscall) + + /* Slower path - fall through to work_pending */ +#endif + + tst r1, #_TIF_SYSCALL_WORK + bne __sys_trace_return_nosave +slow_work_pending: mov r0, sp @ 'regs' mov r2, why @ 'syscall' bl do_work_pending @@ -65,16 +85,19 @@ ENDPROC(ret_fast_syscall) /* * "slow" syscall return path. "why" tells us if this was a real syscall. + * IRQs may be enabled here, so always disable them. Note that we use the + * "notrace" version to avoid calling into the tracing code unnecessarily. + * do_work_pending() will update this state if necessary. 
*/ ENTRY(ret_to_user) ret_slow_syscall: - disable_irq @ disable interrupts + disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK - bne work_pending + bne slow_work_pending no_work_pending: - asm_trace_hardirqs_on + asm_trace_hardirqs_on save = 0 /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr @@ -174,6 +197,8 @@ ENTRY(vector_swi) USER( ldr scno, [lr, #-4] ) @ get SWI instruction #endif + uaccess_disable tbl + adr tbl, sys_call_table @ load syscall table pointer #if defined(CONFIG_OABI_COMPAT) @@ -252,6 +277,12 @@ __sys_trace_return: bl syscall_trace_exit b ret_slow_syscall +__sys_trace_return_nosave: + enable_irq_notrace + mov r0, sp + bl syscall_trace_exit + b ret_slow_syscall + .align 5 #ifdef CONFIG_ALIGNMENT_TRAP .type __cr_alignment, #object diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 1a0045abead7..0d22ad206d52 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -196,7 +196,7 @@ msr cpsr_c, \rtemp @ switch back to the SVC mode .endm -#ifndef CONFIG_THUMB2_KERNEL + .macro svc_exit, rpsr, irq = 0 .if \irq != 0 @ IRQs already off @@ -215,6 +215,10 @@ blne trace_hardirqs_off #endif .endif + uaccess_restore + +#ifndef CONFIG_THUMB2_KERNEL + @ ARM mode SVC restore msr spsr_cxsf, \rpsr #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -222,6 +226,20 @@ strex r1, r2, [r0] @ clear the exclusive monitor #endif ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +#else + @ Thumb mode SVC restore + ldr lr, [sp, #S_SP] @ top of the stack + ldrd r0, r1, [sp, #S_LR] @ calling lr and pc + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor + + stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context + ldmia sp, {r0 - r12} + mov sp, lr + ldr lr, [sp], #4 + rfeia sp! +#endif .endm @ @@ -241,6 +259,9 @@ @ on the stack remains correct). @ .macro svc_exit_via_fiq + uaccess_restore +#ifndef CONFIG_THUMB2_KERNEL + @ ARM mode restore mov r0, sp ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will @ clobber state restored below) @@ -250,9 +271,27 @@ msr spsr_cxsf, r9 ldr r0, [r0, #S_R0] ldmia r8, {pc}^ +#else + @ Thumb mode restore + add r0, sp, #S_R2 + ldr lr, [sp, #S_LR] + ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will + @ clobber state restored below) + ldmia r0, {r2 - r12} + mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT + msr cpsr_c, r1 + sub r0, #S_R2 + add r8, r0, #S_PC + ldmia r0, {r0 - r1} + rfeia r8 +#endif .endm + .macro restore_user_regs, fast = 0, offset = 0 + uaccess_enable r1, isb=0 +#ifndef CONFIG_THUMB2_KERNEL + @ ARM mode restore mov r2, sp ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr ldr lr, [r2, #\offset + S_PC]! @ get pc @@ -270,72 +309,16 @@ @ after ldm {}^ add sp, sp, #\offset + S_FRAME_SIZE movs pc, lr @ return & move spsr_svc into cpsr - .endm - -#else /* CONFIG_THUMB2_KERNEL */ - .macro svc_exit, rpsr, irq = 0 - .if \irq != 0 - @ IRQs already off -#ifdef CONFIG_TRACE_IRQFLAGS - @ The parent context IRQs must have been enabled to get here in - @ the first place, so there's no point checking the PSR I bit. 
- bl trace_hardirqs_on -#endif - .else - @ IRQs off again before pulling preserved data off the stack - disable_irq_notrace -#ifdef CONFIG_TRACE_IRQFLAGS - tst \rpsr, #PSR_I_BIT - bleq trace_hardirqs_on - tst \rpsr, #PSR_I_BIT - blne trace_hardirqs_off -#endif - .endif - ldr lr, [sp, #S_SP] @ top of the stack - ldrd r0, r1, [sp, #S_LR] @ calling lr and pc - - @ We must avoid clrex due to Cortex-A15 erratum #830321 - strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor - - stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context - ldmia sp, {r0 - r12} - mov sp, lr - ldr lr, [sp], #4 - rfeia sp! - .endm - - @ - @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit - @ - @ For full details see non-Thumb implementation above. - @ - .macro svc_exit_via_fiq - add r0, sp, #S_R2 - ldr lr, [sp, #S_LR] - ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will - @ clobber state restored below) - ldmia r0, {r2 - r12} - mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT - msr cpsr_c, r1 - sub r0, #S_R2 - add r8, r0, #S_PC - ldmia r0, {r0 - r1} - rfeia r8 - .endm - -#ifdef CONFIG_CPU_V7M - /* - * Note we don't need to do clrex here as clearing the local monitor is - * part of each exception entry and exit sequence. - */ - .macro restore_user_regs, fast = 0, offset = 0 +#elif defined(CONFIG_CPU_V7M) + @ V7M restore. + @ Note that we don't need to do clrex here as clearing the local + @ monitor is part of the exception entry and exit sequence. .if \offset add sp, #\offset .endif v7m_exception_slow_exit ret_r0 = \fast - .endm -#else /* ifdef CONFIG_CPU_V7M */ - .macro restore_user_regs, fast = 0, offset = 0 +#else + @ Thumb mode restore mov r2, sp load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr @@ -353,9 +336,8 @@ .endif add sp, sp, #S_FRAME_SIZE - S_SP movs pc, lr @ return & move spsr_svc into cpsr - .endm -#endif /* ifdef CONFIG_CPU_V7M / else */ #endif /* !CONFIG_THUMB2_KERNEL */ + .endm /* * Context tracking subsystem. Used to instrument transitions diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 29e2991465cb..04286fd9e09c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -464,10 +464,7 @@ __enable_mmu: #ifdef CONFIG_ARM_LPAE mcrr p15, 0, r4, r5, c2 @ load TTBR0 #else - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) + mov r5, #DACR_INIT mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer #endif diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index baf8edebe26f..5ff4826cb154 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -39,6 +39,7 @@ #include <linux/export.h> #include <asm/hardware/cache-l2x0.h> +#include <asm/outercache.h> #include <asm/exception.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c deleted file mode 100644 index 54272e0be713..000000000000 --- a/arch/arm/kernel/perf_event.c +++ /dev/null @@ -1,896 +0,0 @@ -#undef DEBUG - -/* - * ARM performance counter support. - * - * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles - * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> - * - * This code is based on the sparc64 perf event code, which is in turn based - * on the x86 code. 
- */ -#define pr_fmt(fmt) "hw perfevents: " fmt - -#include <linux/bitmap.h> -#include <linux/cpumask.h> -#include <linux/export.h> -#include <linux/kernel.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/irq.h> -#include <linux/irqdesc.h> - -#include <asm/cputype.h> -#include <asm/irq_regs.h> -#include <asm/pmu.h> - -static int -armpmu_map_cache_event(const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u64 config) -{ - unsigned int cache_type, cache_op, cache_result, ret; - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return -EINVAL; - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return -EINVAL; - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return -EINVAL; - - ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; - - if (ret == CACHE_OP_UNSUPPORTED) - return -ENOENT; - - return ret; -} - -static int -armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) -{ - int mapping; - - if (config >= PERF_COUNT_HW_MAX) - return -EINVAL; - - mapping = (*event_map)[config]; - return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; -} - -static int -armpmu_map_raw_event(u32 raw_event_mask, u64 config) -{ - return (int)(config & raw_event_mask); -} - -int -armpmu_map_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask) -{ - u64 config = event->attr.config; - int type = event->attr.type; - - if (type == event->pmu->type) - return armpmu_map_raw_event(raw_event_mask, config); - - switch (type) { - case PERF_TYPE_HARDWARE: - return armpmu_map_hw_event(event_map, config); - case PERF_TYPE_HW_CACHE: - return armpmu_map_cache_event(cache_map, config); - case PERF_TYPE_RAW: - return armpmu_map_raw_event(raw_event_mask, config); - } - - return -ENOENT; -} - -int armpmu_event_set_period(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - s64 left = local64_read(&hwc->period_left); - s64 period = hwc->sample_period; - int ret = 0; - - if (unlikely(left <= -period)) { - left = period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - if (unlikely(left <= 0)) { - left += period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - /* - * Limit the maximum period to prevent the counter value - * from overtaking the one we are about to program. In - * effect we are reducing max_period to account for - * interrupt latency (and we are being very conservative). 
- */ - if (left > (armpmu->max_period >> 1)) - left = armpmu->max_period >> 1; - - local64_set(&hwc->prev_count, (u64)-left); - - armpmu->write_counter(event, (u64)(-left) & 0xffffffff); - - perf_event_update_userpage(event); - - return ret; -} - -u64 armpmu_event_update(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - u64 delta, prev_raw_count, new_raw_count; - -again: - prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = armpmu->read_counter(event); - - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; - - delta = (new_raw_count - prev_raw_count) & armpmu->max_period; - - local64_add(delta, &event->count); - local64_sub(delta, &hwc->period_left); - - return new_raw_count; -} - -static void -armpmu_read(struct perf_event *event) -{ - armpmu_event_update(event); -} - -static void -armpmu_stop(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to update the counter, so ignore - * PERF_EF_UPDATE, see comments in armpmu_start(). - */ - if (!(hwc->state & PERF_HES_STOPPED)) { - armpmu->disable(event); - armpmu_event_update(event); - hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; - } -} - -static void armpmu_start(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to reprogram the period, so ignore - * PERF_EF_RELOAD, see the comment below. - */ - if (flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); - - hwc->state = 0; - /* - * Set the period again. Some counters can't be stopped, so when we - * were stopped we simply disabled the IRQ source and the counter - * may have been left counting. If we don't do this step then we may - * get an interrupt too soon or *way* too late if the overflow has - * happened since disabling. - */ - armpmu_event_set_period(event); - armpmu->enable(event); -} - -static void -armpmu_del(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - armpmu_stop(event, PERF_EF_UPDATE); - hw_events->events[idx] = NULL; - clear_bit(idx, hw_events->used_mask); - if (armpmu->clear_event_idx) - armpmu->clear_event_idx(hw_events, event); - - perf_event_update_userpage(event); -} - -static int -armpmu_add(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - /* An event following a process won't be stopped earlier */ - if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) - return -ENOENT; - - perf_pmu_disable(event->pmu); - - /* If we don't have a space for the counter then finish early. */ - idx = armpmu->get_event_idx(hw_events, event); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then make - * sure it is disabled. 
- */ - event->hw.idx = idx; - armpmu->disable(event); - hw_events->events[idx] = event; - - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; - if (flags & PERF_EF_START) - armpmu_start(event, PERF_EF_RELOAD); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - perf_pmu_enable(event->pmu); - return err; -} - -static int -validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, - struct perf_event *event) -{ - struct arm_pmu *armpmu; - - if (is_software_event(event)) - return 1; - - /* - * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The - * core perf code won't check that the pmu->ctx == leader->ctx - * until after pmu->event_init(event). - */ - if (event->pmu != pmu) - return 0; - - if (event->state < PERF_EVENT_STATE_OFF) - return 1; - - if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) - return 1; - - armpmu = to_arm_pmu(event->pmu); - return armpmu->get_event_idx(hw_events, event) >= 0; -} - -static int -validate_group(struct perf_event *event) -{ - struct perf_event *sibling, *leader = event->group_leader; - struct pmu_hw_events fake_pmu; - - /* - * Initialise the fake PMU. We only need to populate the - * used_mask for the purposes of validation. - */ - memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask)); - - if (!validate_event(event->pmu, &fake_pmu, leader)) - return -EINVAL; - - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(event->pmu, &fake_pmu, sibling)) - return -EINVAL; - } - - if (!validate_event(event->pmu, &fake_pmu, event)) - return -EINVAL; - - return 0; -} - -static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) -{ - struct arm_pmu *armpmu; - struct platform_device *plat_device; - struct arm_pmu_platdata *plat; - int ret; - u64 start_clock, finish_clock; - - /* - * we request the IRQ with a (possibly percpu) struct arm_pmu**, but - * the handlers expect a struct arm_pmu*. The percpu_irq framework will - * do any necessary shifting, we just need to perform the first - * dereference. 
- */ - armpmu = *(void **)dev; - plat_device = armpmu->plat_device; - plat = dev_get_platdata(&plat_device->dev); - - start_clock = sched_clock(); - if (plat && plat->handle_irq) - ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); - else - ret = armpmu->handle_irq(irq, armpmu); - finish_clock = sched_clock(); - - perf_sample_event_took(finish_clock - start_clock); - return ret; -} - -static void -armpmu_release_hardware(struct arm_pmu *armpmu) -{ - armpmu->free_irq(armpmu); -} - -static int -armpmu_reserve_hardware(struct arm_pmu *armpmu) -{ - int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); - if (err) { - armpmu_release_hardware(armpmu); - return err; - } - - return 0; -} - -static void -hw_perf_event_destroy(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - atomic_t *active_events = &armpmu->active_events; - struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; - - if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { - armpmu_release_hardware(armpmu); - mutex_unlock(pmu_reserve_mutex); - } -} - -static int -event_requires_mode_exclusion(struct perf_event_attr *attr) -{ - return attr->exclude_idle || attr->exclude_user || - attr->exclude_kernel || attr->exclude_hv; -} - -static int -__hw_perf_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - int mapping; - - mapping = armpmu->map_event(event); - - if (mapping < 0) { - pr_debug("event %x:%llx not supported\n", event->attr.type, - event->attr.config); - return mapping; - } - - /* - * We don't assign an index until we actually place the event onto - * hardware. Use -1 to signify that we haven't decided where to put it - * yet. For SMP systems, each core has it's own PMU so we can't do any - * clever allocation or constraints checking at this point. - */ - hwc->idx = -1; - hwc->config_base = 0; - hwc->config = 0; - hwc->event_base = 0; - - /* - * Check whether we need to exclude the counter from certain modes. - */ - if ((!armpmu->set_event_filter || - armpmu->set_event_filter(hwc, &event->attr)) && - event_requires_mode_exclusion(&event->attr)) { - pr_debug("ARM performance counters do not support " - "mode exclusion\n"); - return -EOPNOTSUPP; - } - - /* - * Store the event encoding into the config_base field. - */ - hwc->config_base |= (unsigned long)mapping; - - if (!is_sampling_event(event)) { - /* - * For non-sampling runs, limit the sample_period to half - * of the counter width. That way, the new counter value - * is far less likely to overtake the previous one unless - * you have some serious IRQ latency issues. - */ - hwc->sample_period = armpmu->max_period >> 1; - hwc->last_period = hwc->sample_period; - local64_set(&hwc->period_left, hwc->sample_period); - } - - if (event->group_leader != event) { - if (validate_group(event) != 0) - return -EINVAL; - } - - return 0; -} - -static int armpmu_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - int err = 0; - atomic_t *active_events = &armpmu->active_events; - - /* - * Reject CPU-affine events for CPUs that are of a different class to - * that which this PMU handles. Process-following events (where - * event->cpu == -1) can be migrated between CPUs, and thus we have to - * reject them later (in armpmu_add) if they're scheduled on a - * different class of CPU. 
- */ - if (event->cpu != -1 && - !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus)) - return -ENOENT; - - /* does not support taken branch sampling */ - if (has_branch_stack(event)) - return -EOPNOTSUPP; - - if (armpmu->map_event(event) == -ENOENT) - return -ENOENT; - - event->destroy = hw_perf_event_destroy; - - if (!atomic_inc_not_zero(active_events)) { - mutex_lock(&armpmu->reserve_mutex); - if (atomic_read(active_events) == 0) - err = armpmu_reserve_hardware(armpmu); - - if (!err) - atomic_inc(active_events); - mutex_unlock(&armpmu->reserve_mutex); - } - - if (err) - return err; - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err; -} - -static void armpmu_enable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); - - /* For task-bound events we may be called on other CPUs */ - if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) - return; - - if (enabled) - armpmu->start(armpmu); -} - -static void armpmu_disable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - - /* For task-bound events we may be called on other CPUs */ - if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) - return; - - armpmu->stop(armpmu); -} - -/* - * In heterogeneous systems, events are specific to a particular - * microarchitecture, and aren't suitable for another. Thus, only match CPUs of - * the same microarchitecture. - */ -static int armpmu_filter_match(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - unsigned int cpu = smp_processor_id(); - return cpumask_test_cpu(cpu, &armpmu->supported_cpus); -} - -static void armpmu_init(struct arm_pmu *armpmu) -{ - atomic_set(&armpmu->active_events, 0); - mutex_init(&armpmu->reserve_mutex); - - armpmu->pmu = (struct pmu) { - .pmu_enable = armpmu_enable, - .pmu_disable = armpmu_disable, - .event_init = armpmu_event_init, - .add = armpmu_add, - .del = armpmu_del, - .start = armpmu_start, - .stop = armpmu_stop, - .read = armpmu_read, - .filter_match = armpmu_filter_match, - }; -} - -int armpmu_register(struct arm_pmu *armpmu, int type) -{ - armpmu_init(armpmu); - pr_info("enabled with %s PMU driver, %d counters available\n", - armpmu->name, armpmu->num_events); - return perf_pmu_register(&armpmu->pmu, armpmu->name, type); -} - -/* Set at runtime when we know what CPU type we are. */ -static struct arm_pmu *__oprofile_cpu_pmu; - -/* - * Despite the names, these two functions are CPU-specific and are used - * by the OProfile/perf code. 
- */ -const char *perf_pmu_name(void) -{ - if (!__oprofile_cpu_pmu) - return NULL; - - return __oprofile_cpu_pmu->name; -} -EXPORT_SYMBOL_GPL(perf_pmu_name); - -int perf_num_counters(void) -{ - int max_events = 0; - - if (__oprofile_cpu_pmu != NULL) - max_events = __oprofile_cpu_pmu->num_events; - - return max_events; -} -EXPORT_SYMBOL_GPL(perf_num_counters); - -static void cpu_pmu_enable_percpu_irq(void *data) -{ - int irq = *(int *)data; - - enable_percpu_irq(irq, IRQ_TYPE_NONE); -} - -static void cpu_pmu_disable_percpu_irq(void *data) -{ - int irq = *(int *)data; - - disable_percpu_irq(irq); -} - -static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) -{ - int i, irq, irqs; - struct platform_device *pmu_device = cpu_pmu->plat_device; - struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - - irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { - on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); - free_percpu_irq(irq, &hw_events->percpu_pmu); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - if (cpu_pmu->irq_affinity) - cpu = cpu_pmu->irq_affinity[i]; - - if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq >= 0) - free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); - } - } -} - -static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) -{ - int i, err, irq, irqs; - struct platform_device *pmu_device = cpu_pmu->plat_device; - struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; - - if (!pmu_device) - return -ENODEV; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (irqs < 1) { - pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n"); - return 0; - } - - irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { - err = request_percpu_irq(irq, handler, "arm-pmu", - &hw_events->percpu_pmu); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - return err; - } - on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq < 0) - continue; - - if (cpu_pmu->irq_affinity) - cpu = cpu_pmu->irq_affinity[i]; - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. - */ - if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { - pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); - continue; - } - - err = request_irq(irq, handler, - IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", - per_cpu_ptr(&hw_events->percpu_pmu, cpu)); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - return err; - } - - cpumask_set_cpu(cpu, &cpu_pmu->active_irqs); - } - } - - return 0; -} - -/* - * PMU hardware loses all context when a CPU goes offline. - * When a CPU is hotplugged back in, since some hardware registers are - * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading - * junk values out of them. 
- */ -static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, - void *hcpu) -{ - int cpu = (unsigned long)hcpu; - struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb); - - if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) - return NOTIFY_DONE; - - if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) - return NOTIFY_DONE; - - if (pmu->reset) - pmu->reset(pmu); - else - return NOTIFY_DONE; - - return NOTIFY_OK; -} - -static int cpu_pmu_init(struct arm_pmu *cpu_pmu) -{ - int err; - int cpu; - struct pmu_hw_events __percpu *cpu_hw_events; - - cpu_hw_events = alloc_percpu(struct pmu_hw_events); - if (!cpu_hw_events) - return -ENOMEM; - - cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify; - err = register_cpu_notifier(&cpu_pmu->hotplug_nb); - if (err) - goto out_hw_events; - - for_each_possible_cpu(cpu) { - struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); - raw_spin_lock_init(&events->pmu_lock); - events->percpu_pmu = cpu_pmu; - } - - cpu_pmu->hw_events = cpu_hw_events; - cpu_pmu->request_irq = cpu_pmu_request_irq; - cpu_pmu->free_irq = cpu_pmu_free_irq; - - /* Ensure the PMU has sane values out of reset. */ - if (cpu_pmu->reset) - on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset, - cpu_pmu, 1); - - /* If no interrupts available, set the corresponding capability flag */ - if (!platform_get_irq(cpu_pmu->plat_device, 0)) - cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; - - return 0; - -out_hw_events: - free_percpu(cpu_hw_events); - return err; -} - -static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) -{ - unregister_cpu_notifier(&cpu_pmu->hotplug_nb); - free_percpu(cpu_pmu->hw_events); -} - -/* - * CPU PMU identification and probing. - */ -static int probe_current_pmu(struct arm_pmu *pmu, - const struct pmu_probe_info *info) -{ - int cpu = get_cpu(); - unsigned int cpuid = read_cpuid_id(); - int ret = -ENODEV; - - pr_info("probing PMU on CPU %d\n", cpu); - - for (; info->init != NULL; info++) { - if ((cpuid & info->mask) != info->cpuid) - continue; - ret = info->init(pmu); - break; - } - - put_cpu(); - return ret; -} - -static int of_pmu_irq_cfg(struct arm_pmu *pmu) -{ - int i, irq, *irqs; - struct platform_device *pdev = pmu->plat_device; - - /* Don't bother with PPIs; they're already affine */ - irq = platform_get_irq(pdev, 0); - if (irq >= 0 && irq_is_percpu(irq)) - return 0; - - irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); - if (!irqs) - return -ENOMEM; - - for (i = 0; i < pdev->num_resources; ++i) { - struct device_node *dn; - int cpu; - - dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", - i); - if (!dn) { - pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", - of_node_full_name(pdev->dev.of_node), i); - break; - } - - for_each_possible_cpu(cpu) - if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) - break; - - if (cpu >= nr_cpu_ids) { - pr_warn("Failed to find logical CPU for %s\n", - dn->name); - of_node_put(dn); - break; - } - of_node_put(dn); - - irqs[i] = cpu; - cpumask_set_cpu(cpu, &pmu->supported_cpus); - } - - if (i == pdev->num_resources) { - pmu->irq_affinity = irqs; - } else { - kfree(irqs); - cpumask_setall(&pmu->supported_cpus); - } - - return 0; -} - -int arm_pmu_device_probe(struct platform_device *pdev, - const struct of_device_id *of_table, - const struct pmu_probe_info *probe_table) -{ - const struct of_device_id *of_id; - const int (*init_fn)(struct arm_pmu *); - struct device_node *node = pdev->dev.of_node; - struct arm_pmu *pmu; - int ret = -ENODEV; - - pmu = 
kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); - if (!pmu) { - pr_info("failed to allocate PMU device!\n"); - return -ENOMEM; - } - - if (!__oprofile_cpu_pmu) - __oprofile_cpu_pmu = pmu; - - pmu->plat_device = pdev; - - if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { - init_fn = of_id->data; - - ret = of_pmu_irq_cfg(pmu); - if (!ret) - ret = init_fn(pmu); - } else { - ret = probe_current_pmu(pmu, probe_table); - cpumask_setall(&pmu->supported_cpus); - } - - if (ret) { - pr_info("failed to probe PMU!\n"); - goto out_free; - } - - ret = cpu_pmu_init(pmu); - if (ret) - goto out_free; - - ret = armpmu_register(pmu, -1); - if (ret) - goto out_destroy; - - return 0; - -out_destroy: - cpu_pmu_destroy(pmu); -out_free: - pr_info("failed to register PMU devices!\n"); - kfree(pmu); - return ret; -} diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 09f83e414a72..09413e7b49aa 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -34,9 +34,9 @@ #include <asm/cputype.h> #include <asm/irq_regs.h> -#include <asm/pmu.h> #include <linux/of.h> +#include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> enum armv6_perf_types { diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index f9b37f876e20..126dc679b230 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -21,11 +21,11 @@ #include <asm/cp15.h> #include <asm/cputype.h> #include <asm/irq_regs.h> -#include <asm/pmu.h> #include <asm/vfp.h> #include "../vfp/vfpinstr.h" #include <linux/of.h> +#include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> /* diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 304d056d5b25..aa0499e2eef7 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -16,9 +16,9 @@ #include <asm/cputype.h> #include <asm/irq_regs.h> -#include <asm/pmu.h> #include <linux/of.h> +#include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> enum xscale_perf_types { diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index f192a2a41719..a3089bacb8d8 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void) ledtrig_cpu(CPU_LED_IDLE_END); } -#ifdef CONFIG_HOTPLUG_CPU -void arch_cpu_idle_dead(void) -{ - cpu_die(); -} -#endif - void __show_regs(struct pt_regs *regs) { unsigned long flags; @@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs) buf[4] = '\0'; #ifndef CONFIG_CPU_V7M - printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", - buf, interrupts_enabled(regs) ? "n" : "ff", - fast_interrupts_enabled(regs) ? "n" : "ff", - processor_modes[processor_mode(regs)], - isa_modes[isa_mode(regs)], - get_fs() == get_ds() ? "kernel" : "user"); + { + unsigned int domain = get_domain(); + const char *segment; + +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Get the domain register for the parent context. In user + * mode, we don't save the DACR, so lets use what it should + * be. For other modes, we place it after the pt_regs struct. 
+ */ + if (user_mode(regs)) + domain = DACR_UACCESS_ENABLE; + else + domain = *(unsigned int *)(regs + 1); +#endif + + if ((domain & domain_mask(DOMAIN_USER)) == + domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) + segment = "none"; + else if (get_fs() == get_ds()) + segment = "kernel"; + else + segment = "user"; + + printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", + buf, interrupts_enabled(regs) ? "n" : "ff", + fast_interrupts_enabled(regs) ? "n" : "ff", + processor_modes[processor_mode(regs)], + isa_modes[isa_mode(regs)], segment); + } #else printk("xPSR: %08lx\n", regs->ARM_cpsr); #endif @@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs) buf[0] = '\0'; #ifdef CONFIG_CPU_CP15_MMU { - unsigned int transbase, dac; + unsigned int transbase, dac = get_domain(); asm("mrc p15, 0, %0, c2, c0\n\t" - "mrc p15, 0, %1, c3, c0\n" - : "=r" (transbase), "=r" (dac)); + : "=r" (transbase)); snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", transbase, dac); } @@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); + /* + * Copy the initial value of the domain access control register + * from the current thread: thread->addr_limit will have been + * copied from the current thread via setup_thread_stack() in + * kernel/fork.c + */ + thread->cpu_domain = get_domain(); + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c deleted file mode 100644 index 2e6024334790..000000000000 --- a/arch/arm/kernel/psci.c +++ /dev/null @@ -1,299 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2012 ARM Limited - * - * Author: Will Deacon <will.deacon@arm.com> - */ - -#define pr_fmt(fmt) "psci: " fmt - -#include <linux/init.h> -#include <linux/of.h> -#include <linux/reboot.h> -#include <linux/pm.h> -#include <uapi/linux/psci.h> - -#include <asm/compiler.h> -#include <asm/errno.h> -#include <asm/psci.h> -#include <asm/system_misc.h> - -struct psci_operations psci_ops; - -static int (*invoke_psci_fn)(u32, u32, u32, u32); -typedef int (*psci_initcall_t)(const struct device_node *); - -asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32); -asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32); - -enum psci_function { - PSCI_FN_CPU_SUSPEND, - PSCI_FN_CPU_ON, - PSCI_FN_CPU_OFF, - PSCI_FN_MIGRATE, - PSCI_FN_AFFINITY_INFO, - PSCI_FN_MIGRATE_INFO_TYPE, - PSCI_FN_MAX, -}; - -static u32 psci_function_id[PSCI_FN_MAX]; - -static int psci_to_linux_errno(int errno) -{ - switch (errno) { - case PSCI_RET_SUCCESS: - return 0; - case PSCI_RET_NOT_SUPPORTED: - return -EOPNOTSUPP; - case PSCI_RET_INVALID_PARAMS: - return -EINVAL; - case PSCI_RET_DENIED: - return -EPERM; - }; - - return -EINVAL; -} - -static u32 psci_power_state_pack(struct psci_power_state state) -{ - return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT) - & PSCI_0_2_POWER_STATE_ID_MASK) | - ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) - & PSCI_0_2_POWER_STATE_TYPE_MASK) | - ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) - & PSCI_0_2_POWER_STATE_AFFL_MASK); -} - -static int psci_get_version(void) -{ - int err; - - err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); - return err; -} - -static int psci_cpu_suspend(struct psci_power_state state, - unsigned long entry_point) -{ - int err; - u32 fn, power_state; - - fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; - power_state = psci_power_state_pack(state); - err = invoke_psci_fn(fn, power_state, entry_point, 0); - return psci_to_linux_errno(err); -} - -static int psci_cpu_off(struct psci_power_state state) -{ - int err; - u32 fn, power_state; - - fn = psci_function_id[PSCI_FN_CPU_OFF]; - power_state = psci_power_state_pack(state); - err = invoke_psci_fn(fn, power_state, 0, 0); - return psci_to_linux_errno(err); -} - -static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_CPU_ON]; - err = invoke_psci_fn(fn, cpuid, entry_point, 0); - return psci_to_linux_errno(err); -} - -static int psci_migrate(unsigned long cpuid) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_MIGRATE]; - err = invoke_psci_fn(fn, cpuid, 0, 0); - return psci_to_linux_errno(err); -} - -static int psci_affinity_info(unsigned long target_affinity, - unsigned long lowest_affinity_level) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_AFFINITY_INFO]; - err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0); - return err; -} - -static int psci_migrate_info_type(void) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE]; - err = invoke_psci_fn(fn, 0, 0, 0); - return err; -} - -static int get_set_conduit_method(struct device_node *np) -{ - const char *method; - - pr_info("probing for conduit method from DT.\n"); - - if (of_property_read_string(np, "method", &method)) { - pr_warn("missing \"method\" property\n"); - return -ENXIO; - } - - if (!strcmp("hvc", method)) { - invoke_psci_fn = __invoke_psci_fn_hvc; - } else if (!strcmp("smc", method)) { - invoke_psci_fn = __invoke_psci_fn_smc; - } else { - pr_warn("invalid \"method\" property: %s\n", method); - 
return -EINVAL; - } - return 0; -} - -static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) -{ - invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); -} - -static void psci_sys_poweroff(void) -{ - invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); -} - -/* - * PSCI Function IDs for v0.2+ are well defined so use - * standard values. - */ -static int psci_0_2_init(struct device_node *np) -{ - int err, ver; - - err = get_set_conduit_method(np); - - if (err) - goto out_put_node; - - ver = psci_get_version(); - - if (ver == PSCI_RET_NOT_SUPPORTED) { - /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */ - pr_err("PSCI firmware does not comply with the v0.2 spec.\n"); - err = -EOPNOTSUPP; - goto out_put_node; - } else { - pr_info("PSCIv%d.%d detected in firmware.\n", - PSCI_VERSION_MAJOR(ver), - PSCI_VERSION_MINOR(ver)); - - if (PSCI_VERSION_MAJOR(ver) == 0 && - PSCI_VERSION_MINOR(ver) < 2) { - err = -EINVAL; - pr_err("Conflicting PSCI version detected.\n"); - goto out_put_node; - } - } - - pr_info("Using standard PSCI v0.2 function IDs\n"); - psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND; - psci_ops.cpu_suspend = psci_cpu_suspend; - - psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; - psci_ops.cpu_off = psci_cpu_off; - - psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON; - psci_ops.cpu_on = psci_cpu_on; - - psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE; - psci_ops.migrate = psci_migrate; - - psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO; - psci_ops.affinity_info = psci_affinity_info; - - psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] = - PSCI_0_2_FN_MIGRATE_INFO_TYPE; - psci_ops.migrate_info_type = psci_migrate_info_type; - - arm_pm_restart = psci_sys_reset; - - pm_power_off = psci_sys_poweroff; - -out_put_node: - of_node_put(np); - return err; -} - -/* - * PSCI < v0.2 get PSCI Function IDs via DT. 
- */ -static int psci_0_1_init(struct device_node *np) -{ - u32 id; - int err; - - err = get_set_conduit_method(np); - - if (err) - goto out_put_node; - - pr_info("Using PSCI v0.1 Function IDs from DT\n"); - - if (!of_property_read_u32(np, "cpu_suspend", &id)) { - psci_function_id[PSCI_FN_CPU_SUSPEND] = id; - psci_ops.cpu_suspend = psci_cpu_suspend; - } - - if (!of_property_read_u32(np, "cpu_off", &id)) { - psci_function_id[PSCI_FN_CPU_OFF] = id; - psci_ops.cpu_off = psci_cpu_off; - } - - if (!of_property_read_u32(np, "cpu_on", &id)) { - psci_function_id[PSCI_FN_CPU_ON] = id; - psci_ops.cpu_on = psci_cpu_on; - } - - if (!of_property_read_u32(np, "migrate", &id)) { - psci_function_id[PSCI_FN_MIGRATE] = id; - psci_ops.migrate = psci_migrate; - } - -out_put_node: - of_node_put(np); - return err; -} - -static const struct of_device_id const psci_of_match[] __initconst = { - { .compatible = "arm,psci", .data = psci_0_1_init}, - { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, - {}, -}; - -int __init psci_init(void) -{ - struct device_node *np; - const struct of_device_id *matched_np; - psci_initcall_t init_fn; - - np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); - if (!np) - return -ENODEV; - - init_fn = (psci_initcall_t)matched_np->data; - return init_fn(np); -} diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c index 28a1db4da704..61c04b02faeb 100644 --- a/arch/arm/kernel/psci_smp.c +++ b/arch/arm/kernel/psci_smp.c @@ -17,6 +17,8 @@ #include <linux/smp.h> #include <linux/of.h> #include <linux/delay.h> +#include <linux/psci.h> + #include <uapi/linux/psci.h> #include <asm/psci.h> @@ -51,22 +53,34 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) { if (psci_ops.cpu_on) return psci_ops.cpu_on(cpu_logical_map(cpu), - __pa(secondary_startup)); + virt_to_idmap(&secondary_startup)); return -ENODEV; } #ifdef CONFIG_HOTPLUG_CPU +int psci_cpu_disable(unsigned int cpu) +{ + /* Fail early if we don't have CPU_OFF support */ + if (!psci_ops.cpu_off) + return -EOPNOTSUPP; + + /* Trusted OS will deny CPU_OFF */ + if (psci_tos_resident_on(cpu)) + return -EPERM; + + return 0; +} + void __ref psci_cpu_die(unsigned int cpu) { - const struct psci_power_state ps = { - .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, - }; + u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << + PSCI_0_2_POWER_STATE_TYPE_SHIFT; - if (psci_ops.cpu_off) - psci_ops.cpu_off(ps); + if (psci_ops.cpu_off) + psci_ops.cpu_off(state); - /* We should never return */ - panic("psci: cpu %d failed to shutdown\n", cpu); + /* We should never return */ + panic("psci: cpu %d failed to shutdown\n", cpu); } int __ref psci_cpu_kill(unsigned int cpu) @@ -109,6 +123,7 @@ bool __init psci_smp_available(void) struct smp_operations __initdata psci_smp_ops = { .smp_boot_secondary = psci_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = psci_cpu_disable, .cpu_die = psci_cpu_die, .cpu_kill = psci_cpu_kill, #endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 36c18b73c1f4..20edd349d379 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -31,12 +31,14 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/sort.h> +#include <linux/psci.h> #include <asm/unified.h> #include <asm/cp15.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/elf.h> +#include <asm/fixmap.h> #include <asm/procinfo.h> #include <asm/psci.h> #include <asm/sections.h> @@ -954,6 +956,9 @@ void __init setup_arch(char **cmdline_p) strlcpy(cmd_line, 
boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = cmd_line; + if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM)) + early_fixmap_init(); + parse_early_param(); #ifdef CONFIG_MMU @@ -972,7 +977,7 @@ void __init setup_arch(char **cmdline_p) unflatten_device_tree(); arm_dt_init_cpu_maps(); - psci_init(); + psci_dt_init(); xen_early_init(); #ifdef CONFIG_SMP if (is_smp()) { @@ -1015,7 +1020,7 @@ static int __init topology_init(void) for_each_possible_cpu(cpu) { struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); - cpuinfo->cpu.hotpluggable = 1; + cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu); register_cpu(&cpuinfo->cpu, cpu); } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 423663e23791..b6cda06b455f 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall) asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { + /* + * The assembly code enters us with IRQs off, but it hasn't + * informed the tracing code of that for efficiency reasons. + * Update the trace code with the current status. + */ + trace_hardirqs_off(); do { if (likely(thread_flags & _TIF_NEED_RESCHED)) { schedule(); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3d6b7821cff8..ba0063c539c3 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu) if (smp_ops.cpu_disable) return smp_ops.cpu_disable(cpu); + return 0; +} + +int platform_can_hotplug_cpu(unsigned int cpu) +{ + /* cpu_die must be specified to support hotplug */ + if (!smp_ops.cpu_die) + return 0; + + if (smp_ops.cpu_can_disable) + return smp_ops.cpu_can_disable(cpu); + /* * By default, allow disabling all CPUs except the first one, * since this is special on a lot of platforms, e.g. because * of clock tick interrupts. */ - return cpu == 0 ? -EPERM : 0; + return cpu != 0; } + /* * __cpu_disable runs on the processor to be shutdown. */ @@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu) * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. 
*/ -void __ref cpu_die(void) +void arch_cpu_idle_dead(void) { unsigned int cpu = smp_processor_id(); diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 1361756782c7..5b26e7efa9ea 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data, while (1) { unsigned long temp; + unsigned int __ua_flags; + __ua_flags = uaccess_save_and_enable(); if (type == TYPE_SWPB) __user_swpb_asm(*data, address, res, temp); else __user_swp_asm(*data, address, res, temp); + uaccess_restore(__ua_flags); if (likely(res != -EAGAIN) || signal_pending(current)) break; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index d358226236f2..969f9d9e665f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base) kuser_init(vectors_base); flush_icache_range(vectors, vectors + PAGE_SIZE * 2); - modify_domain(DOMAIN_USER, DOMAIN_CLIENT); #else /* ifndef CONFIG_CPU_V7M */ /* * on V7-M there is no need to copy the vector table to a dedicated diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index 1710fd7db2d5..970d6c043774 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S @@ -12,14 +12,14 @@ .text -/* Prototype: int __clear_user(void *addr, size_t sz) +/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear * Returns : number of bytes NOT cleared */ ENTRY(__clear_user_std) -WEAK(__clear_user) +WEAK(arm_clear_user) stmfd sp!, {r1, lr} mov r2, #0 cmp r1, #4 @@ -44,7 +44,7 @@ WEAK(__clear_user) USER( strnebt r2, [r0]) mov r0, #0 ldmfd sp!, {r1, pc} -ENDPROC(__clear_user) +ENDPROC(arm_clear_user) ENDPROC(__clear_user_std) .pushsection .text.fixup,"ax" diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 7a235b9952be..1512bebfbf1b 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -17,7 +17,7 @@ /* * Prototype: * - * size_t __copy_from_user(void *to, const void *from, size_t n) + * size_t arm_copy_from_user(void *to, const void *from, size_t n) * * Purpose: * @@ -89,11 +89,11 @@ .text -ENTRY(__copy_from_user) +ENTRY(arm_copy_from_user) #include "copy_template.S" -ENDPROC(__copy_from_user) +ENDPROC(arm_copy_from_user) .pushsection .fixup,"ax" .align 0 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 9648b0675a3e..caf5019d8161 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -17,7 +17,7 @@ /* * Prototype: * - * size_t __copy_to_user(void *to, const void *from, size_t n) + * size_t arm_copy_to_user(void *to, const void *from, size_t n) * * Purpose: * @@ -93,11 +93,11 @@ .text ENTRY(__copy_to_user_std) -WEAK(__copy_to_user) +WEAK(arm_copy_to_user) #include "copy_template.S" -ENDPROC(__copy_to_user) +ENDPROC(arm_copy_to_user) ENDPROC(__copy_to_user_std) .pushsection .text.fixup,"ax" diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 1d0957e61f89..1712f132b80d 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -17,6 +17,19 @@ .text +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + .macro save_regs + mrc p15, 0, ip, c3, c0, 0 + stmfd sp!, {r1, r2, r4 - r8, ip, lr} + uaccess_enable ip + .endm + + .macro load_regs + ldmfd sp!, {r1, r2, r4 - r8, ip, lr} + mcr p15, 0, ip, c3, c0, 
0 + ret lr + .endm +#else .macro save_regs stmfd sp!, {r1, r2, r4 - r8, lr} .endm @@ -24,6 +37,7 @@ .macro load_regs ldmfd sp!, {r1, r2, r4 - r8, pc} .endm +#endif .macro load1b, reg1 ldrusr \reg1, r0, 1 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 4b39af2dfda9..d72b90905132 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -136,7 +136,7 @@ out: } unsigned long -__copy_to_user(void __user *to, const void *from, unsigned long n) +arm_copy_to_user(void __user *to, const void *from, unsigned long n) { /* * This test is stubbed out of the main function above to keep @@ -190,7 +190,7 @@ out: return n; } -unsigned long __clear_user(void __user *addr, unsigned long n) +unsigned long arm_clear_user(void __user *addr, unsigned long n) { /* See rational for this in __copy_to_user() above. */ if (n < 64) diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 231fba0d03e5..6050a14faee6 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -28,8 +28,8 @@ #include <linux/reboot.h> #include <linux/amba/bus.h> #include <linux/platform_device.h> +#include <linux/psci.h> -#include <asm/psci.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c index 7f2bd85eb935..400311695548 100644 --- a/arch/arm/mach-highbank/pm.c +++ b/arch/arm/mach-highbank/pm.c @@ -16,19 +16,21 @@ #include <linux/cpu_pm.h> #include <linux/init.h> +#include <linux/psci.h> #include <linux/suspend.h> #include <asm/suspend.h> -#include <asm/psci.h> + +#include <uapi/linux/psci.h> + +#define HIGHBANK_SUSPEND_PARAM \ + ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int highbank_suspend_finish(unsigned long val) { - const struct psci_power_state ps = { - .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, - .affinity_level = 1, - }; - - return psci_ops.cpu_suspend(ps, __pa(cpu_resume)); + return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume)); } static int highbank_pm_enter(suspend_state_t state) diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c index 04c9daf9f8d7..7db5870d127f 100644 --- a/arch/arm/mach-mmp/pm-pxa910.c +++ b/arch/arm/mach-mmp/pm-pxa910.c @@ -18,6 +18,7 @@ #include <linux/io.h> #include <linux/irq.h> #include <asm/mach-types.h> +#include <asm/outercache.h> #include <mach/hardware.h> #include <mach/cputype.h> #include <mach/addr-map.h> diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 9e2a68456b81..07d2e100caab 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -29,6 +29,7 @@ config ARCH_OMAP4 select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP select OMAP_INTERCONNECT + select OMAP_INTERCONNECT_BARRIER select PL310_ERRATA_588369 if CACHE_L2X0 select PL310_ERRATA_727915 if CACHE_L2X0 select PM_OPP if PM @@ -46,6 +47,7 @@ config SOC_OMAP5 select HAVE_ARM_TWD if SMP select HAVE_ARM_ARCH_TIMER select ARM_ERRATA_798181 if SMP + select OMAP_INTERCONNECT_BARRIER config SOC_AM33XX bool "TI AM33XX" @@ -71,6 +73,7 @@ config SOC_DRA7XX select HAVE_ARM_ARCH_TIMER select IRQ_CROSSBAR select ARM_ERRATA_798181 if SMP + select OMAP_INTERCONNECT_BARRIER config ARCH_OMAP2PLUS bool @@ -92,6 +95,10 @@ config ARCH_OMAP2PLUS help Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 +config 
OMAP_INTERCONNECT_BARRIER + bool + select ARM_HEAVY_MB + if ARCH_OMAP2PLUS diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c index eae6a0e87c90..484cdadfb187 100644 --- a/arch/arm/mach-omap2/common.c +++ b/arch/arm/mach-omap2/common.c @@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void) void __init omap_reserve(void) { omap_secure_ram_reserve_memblock(); + omap_barrier_reserve_memblock(); } diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 749d50bb4ca5..92e92cfc2775 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd) } #endif +#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER +void omap_barrier_reserve_memblock(void); +void omap_barriers_init(void); +#else +static inline void omap_barrier_reserve_memblock(void) +{ +} +#endif + /* This gets called from mach-omap2/io.c, do not call this */ void __init omap2_set_globals_tap(u32 class, void __iomem *tap); diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h deleted file mode 100644 index 1c582a8592b9..000000000000 --- a/arch/arm/mach-omap2/include/mach/barriers.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * OMAP memory barrier header. - * - * Copyright (C) 2011 Texas Instruments, Inc. - * Santosh Shilimkar <santosh.shilimkar@ti.com> - * Richard Woodruff <r-woodruff2@ti.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __MACH_BARRIERS_H -#define __MACH_BARRIERS_H - -#include <asm/outercache.h> - -extern void omap_bus_sync(void); - -#define rmb() dsb() -#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0) -#define mb() wmb() - -#endif /* __MACH_BARRIERS_H */ diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 6a4822dbb4ea..980c9372e6fd 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -352,6 +352,7 @@ void __init am33xx_map_io(void) void __init omap4_map_io(void) { iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); + omap_barriers_init(); } #endif @@ -359,6 +360,7 @@ void __init omap4_map_io(void) void __init omap5_map_io(void) { iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc)); + omap_barriers_init(); } #endif diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 16350eefa66c..949696b6f17b 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -51,6 +51,127 @@ static void __iomem *twd_base; #define IRQ_LOCALTIMER 29 +#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER + +/* Used to implement memory barrier on DRAM path */ +#define OMAP4_DRAM_BARRIER_VA 0xfe600000 + +static void __iomem *dram_sync, *sram_sync; +static phys_addr_t dram_sync_paddr; +static u32 dram_sync_size; + +/* + * The OMAP4 bus structure contains asynchrnous bridges which can buffer + * data writes from the MPU. These asynchronous bridges can be found on + * paths between the MPU to EMIF, and the MPU to L3 interconnects. + * + * We need to be careful about re-ordering which can happen as a result + * of different accesses being performed via different paths, and + * therefore different asynchronous bridges. + */ + +/* + * OMAP4 interconnect barrier which is called for each mb() and wmb(). + * This is to ensure that normal paths to DRAM (normal memory, cacheable + * accesses) are properly synchronised with writes to DMA coherent memory + * (normal memory, uncacheable) and device writes. + * + * The mb() and wmb() barriers only operate only on the MPU->MA->EMIF + * path, as we need to ensure that data is visible to other system + * masters prior to writes to those system masters being seen. + * + * Note: the SRAM path is not synchronised via mb() and wmb(). + */ +static void omap4_mb(void) +{ + if (dram_sync) + writel_relaxed(0, dram_sync); +} + +/* + * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI. + * + * If a data is stalled inside asynchronous bridge because of back + * pressure, it may be accepted multiple times, creating pointer + * misalignment that will corrupt next transfers on that data path until + * next reset of the system. No recovery procedure once the issue is hit, + * the path remains consistently broken. + * + * Async bridges can be found on paths between MPU to EMIF and MPU to L3 + * interconnects. + * + * This situation can happen only when the idle is initiated by a Master + * Request Disconnection (which is trigged by software when executing WFI + * on the CPU). + * + * The work-around for this errata needs all the initiators connected + * through an async bridge to ensure that data path is properly drained + * before issuing WFI. 
This condition will be met if one Strongly ordered + * access is performed to the target right before executing the WFI. + * + * In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained. + * IO barrier ensure that there is no synchronisation loss on initiators + * operating on both interconnect port simultaneously. + * + * This is a stronger version of the OMAP4 memory barrier below, and + * operates on both the MPU->MA->EMIF path but also the MPU->OCP path + * as well, and is necessary prior to executing a WFI. + */ +void omap_interconnect_sync(void) +{ + if (dram_sync && sram_sync) { + writel_relaxed(readl_relaxed(dram_sync), dram_sync); + writel_relaxed(readl_relaxed(sram_sync), sram_sync); + isb(); + } +} + +static int __init omap4_sram_init(void) +{ + struct device_node *np; + struct gen_pool *sram_pool; + + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); + if (!np) + pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", + __func__); + sram_pool = of_gen_pool_get(np, "sram", 0); + if (!sram_pool) + pr_warn("%s:Unable to get sram pool needed to handle errata I688\n", + __func__); + else + sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE); + + return 0; +} +omap_arch_initcall(omap4_sram_init); + +/* Steal one page physical memory for barrier implementation */ +void __init omap_barrier_reserve_memblock(void) +{ + dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M); + dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M); +} + +void __init omap_barriers_init(void) +{ + struct map_desc dram_io_desc[1]; + + dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA; + dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr); + dram_io_desc[0].length = dram_sync_size; + dram_io_desc[0].type = MT_MEMORY_RW_SO; + iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc)); + dram_sync = (void __iomem *) dram_io_desc[0].virtual; + + pr_info("OMAP4: Map %pa to %p for dram barrier\n", + &dram_sync_paddr, dram_sync); + + soc_mb = omap4_mb; +} + +#endif + void gic_dist_disable(void) { if (gic_dist_base_addr) diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index ad1bb9431e94..9b09d85d811a 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume) #endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ -ENTRY(omap_bus_sync) - ret lr -ENDPROC(omap_bus_sync) - ENTRY(omap_do_wfi) stmfd sp!, {lr} +#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER /* Drain interconnect write buffers. 
*/ - bl omap_bus_sync + bl omap_interconnect_sync +#endif /* * Execute an ISB instruction to ensure that all of the diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index d99d08eeb966..83e94c95e314 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c @@ -16,6 +16,7 @@ #include <linux/of_platform.h> #include <linux/io.h> #include <linux/rtc/sirfsoc_rtciobrg.h> +#include <asm/outercache.h> #include <asm/suspend.h> #include <asm/hardware/cache-l2x0.h> diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h index 476092b86c6e..8d27ec546a35 100644 --- a/arch/arm/mach-shmobile/common.h +++ b/arch/arm/mach-shmobile/common.h @@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void); extern void shmobile_smp_sleep(void); extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg); -extern int shmobile_smp_cpu_disable(unsigned int cpu); +extern bool shmobile_smp_cpu_can_disable(unsigned int cpu); extern void shmobile_boot_scu(void); extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus); extern void shmobile_smp_scu_cpu_die(unsigned int cpu); diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index 3923e09e966d..b23378f3d7e1 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c @@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg) } #ifdef CONFIG_HOTPLUG_CPU -int shmobile_smp_cpu_disable(unsigned int cpu) +bool shmobile_smp_cpu_can_disable(unsigned int cpu) { - return 0; /* Hotplug of any CPU is supported */ + return true; /* Hotplug of any CPU is supported */ } #endif diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c index 2ef0054ce934..4b33d432a364 100644 --- a/arch/arm/mach-shmobile/smp-r8a7790.c +++ b/arch/arm/mach-shmobile/smp-r8a7790.c @@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = { .smp_prepare_cpus = r8a7790_smp_prepare_cpus, .smp_boot_secondary = shmobile_smp_apmu_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = shmobile_smp_cpu_disable, + .cpu_can_disable = shmobile_smp_cpu_can_disable, .cpu_die = shmobile_smp_apmu_cpu_die, .cpu_kill = shmobile_smp_apmu_cpu_kill, #endif diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c index 5e2d1db79afa..b2508c0d276b 100644 --- a/arch/arm/mach-shmobile/smp-r8a7791.c +++ b/arch/arm/mach-shmobile/smp-r8a7791.c @@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = { .smp_prepare_cpus = r8a7791_smp_prepare_cpus, .smp_boot_secondary = r8a7791_smp_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = shmobile_smp_cpu_disable, + .cpu_can_disable = shmobile_smp_cpu_can_disable, .cpu_die = shmobile_smp_apmu_cpu_die, .cpu_kill = shmobile_smp_apmu_cpu_kill, #endif diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index d03aa11fb46d..bc2824a036e1 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c @@ -60,7 +60,7 @@ struct smp_operations sh73a0_smp_ops __initdata = { .smp_prepare_cpus = sh73a0_smp_prepare_cpus, .smp_boot_secondary = sh73a0_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = shmobile_smp_cpu_disable, + .cpu_can_disable = shmobile_smp_cpu_can_disable, .cpu_die = shmobile_smp_scu_cpu_die, .cpu_kill = shmobile_smp_scu_cpu_kill, #endif diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c index 7557bede7ae6..780bd13cd7e3 100644 
--- a/arch/arm/mach-ux500/cache-l2x0.c +++ b/arch/arm/mach-ux500/cache-l2x0.c @@ -8,6 +8,7 @@ #include <linux/of.h> #include <linux/of_address.h> +#include <asm/outercache.h> #include <asm/hardware/cache-l2x0.h> #include "db8500-regs.h" diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index ba708ce08616..f80560318c58 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -20,10 +20,10 @@ #include <linux/mfd/dbx500-prcmu.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/perf/arm_pmu.h> #include <linux/regulator/machine.h> #include <linux/random.h> -#include <asm/pmu.h> #include <asm/mach/map.h> #include "setup.h" diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 7c6b976ab8d3..df7537f12469 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -883,6 +883,7 @@ config OUTER_CACHE config OUTER_CACHE_SYNC bool + select ARM_HEAVY_MB help The outer cache has a outer_cache_fns.sync function pointer that can be used to drain the write buffer of the outer cache. @@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS This option allows the use of custom mandatory barriers included via the mach/barriers.h file. +config ARM_HEAVY_MB + bool + config ARCH_SUPPORTS_BIG_ENDIAN bool help diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 54473cd4aba9..b3b31e30cadd 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S @@ -19,6 +19,7 @@ ENTRY(v4_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR ldr r3, [r4] @ read aborted ARM instruction + uaccess_disable ip @ disable userspace access bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index a0908d4653a3..a6a381a6caa5 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -21,8 +21,10 @@ ENTRY(v5t_early_abort) mrc p15, 0, r0, c6, c0, 0 @ get FAR do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction + uaccess_disable ip @ disable user access bic r1, r1, #1 << 11 @ clear bits 11 of FSR - do_ldrd_abort tmp=ip, insn=r3 + teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? + beq do_DataAbort @ yes tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 b do_DataAbort diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index 4006b7a61264..00ab011bef58 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort) bne do_DataAbort do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction - do_ldrd_abort tmp=ip, insn=r3 + uaccess_disable ip @ disable userspace access + teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? + beq do_DataAbort @ yes tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. b do_DataAbort diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 8c48c5c22a33..8801a15aa105 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -26,16 +26,18 @@ ENTRY(v6_early_abort) ldr ip, =0x4107b36 mrc p15, 0, r3, c0, c0, 0 @ get processor id teq ip, r3, lsr #4 @ r0 ARM1136? - bne do_DataAbort + bne 1f tst r5, #PSR_J_BIT @ Java? tsteq r5, #PSR_T_BIT @ Thumb? - bne do_DataAbort + bne 1f bic r1, r1, #1 << 11 @ clear bit 11 of FSR ldr r3, [r4] @ read aborted ARM instruction ARM_BE8(rev r3, r3) - do_ldrd_abort tmp=ip, insn=r3 + teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? 
+	beq	1f			@ yes
 	tst	r3, #1 << 20		@ L = 0 -> write
 	orreq	r1, r1, #1 << 11	@ yes.
 #endif
+1:	uaccess_disable ip		@ disable userspace access
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@ ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0	@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0	@ get FAR
+	uaccess_disable ip		@ disable userspace access
 
 /*
  * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]		@ read arm instruction
+	uaccess_disable ip		@ disable userspace access
 	tst	r8, #1 << 20		@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11	@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 .data_thumb_abort:
 	ldrh	r8, [r4]		@ read instruction
+	uaccess_disable ip		@ disable userspace access
 	tst	r8, #1 << 11		@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8		@ yes
 	and	r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]		@ Read aborted Thumb instruction
+	uaccess_disable ip		@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00	@ Mask opcode field
 	cmp	\tmp, # 0x5600		@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11	@ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
 * [7:4] == 1101
 * [20] == 0
 */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000	@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e08c25..5c1b7a7b9af6 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
 	struct device_node *node;
 	void __iomem *base;
 	bool l2_wt_override = false;
-	struct resource res;
 
 #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
 	l2_wt_override = true;
@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
 
 	node = of_find_matching_node(NULL, feroceon_ids);
 	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
-		if (of_address_to_resource(node, 0, &res))
-			return -ENODEV;
-
-		base = ioremap(res.start, resource_size(&res));
+		base = of_iomap(node, 0);
 		if (!base)
 			return -ENOMEM;
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d3309024..493692d838c6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		}
 	}
 
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
 
 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3d3d6aa60c87..bf35abcc7d59 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);
 
 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
 
@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 000000000000..70ea6852f94e
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area		__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area		cpu_cache.dma_map_area
+#define dmac_unmap_area		cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af516ea..1ec8e7590fc6 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	if (outer_cache.sync)
+		outer_cache.sync();
+#endif
+	if (soc_mb)
+		soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa793989..9df5f09585ca 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
 
 	type = kmap_atomic_idx_push();
 
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
+		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a46d52..7cd15143a507 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings.  This means earlycon can be used to debug this function
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
 */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1237,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear page table except top pmd used by early fixmaps
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1489,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only i/o device mappings are supported ATM */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 	map_lowmem();
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (!new_pte)
 		goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+	/*
+	 * Modify the PTE pointer to have the correct domain.  This
+	 * needs to be the vectors domain to avoid the low vectors
+	 * being unmapped.
+	 */
+	pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+	pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 	init_pud = pud_offset(init_pgd, 0);
 	init_pmd = pmd_offset(init_pud, 0);
 	init_pte = pte_offset_map(init_pmd, 0);