From bd1c6ff74ce0bbd8cda6eb7763fa0e2625dfcc8b Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 4 Nov 2015 13:23:52 +0000 Subject: arm64/dma-mapping: Fix sizes in __iommu_{alloc,free}_attrs The iommu-dma layer does its own size-alignment for coherent DMA allocations based on IOMMU page sizes, but we still need to consider CPU page sizes for the cases where a non-cacheable CPU mapping is created. Whilst everything on the alloc/map path seems to implicitly align things enough to make it work, some functions used by the corresponding unmap/free path do not, which leads to problems freeing odd-sized allocations. Either way it's something we really should be handling explicitly, so do that to make both paths suitably robust. Reported-by: Yong Wu Signed-off-by: Robin Murphy Signed-off-by: Catalin Marinas --- arch/arm64/mm/dma-mapping.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 131a199114b4..97fd714035f9 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -552,10 +552,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, { bool coherent = is_device_dma_coherent(dev); int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); + size_t iosize = size; void *addr; if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n")) return NULL; + + size = PAGE_ALIGN(size); + /* * Some drivers rely on this, and we probably don't want the * possibility of stale kernel data being read by devices anyway. @@ -566,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, struct page **pages; pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); - pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle, + pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle, flush_page); if (!pages) return NULL; @@ -574,7 +578,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, __builtin_return_address(0)); if (!addr) - iommu_dma_free(dev, pages, size, handle); + iommu_dma_free(dev, pages, iosize, handle); } else { struct page *page; /* @@ -591,7 +595,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, if (!addr) return NULL; - *handle = iommu_dma_map_page(dev, page, 0, size, ioprot); + *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); if (iommu_dma_mapping_error(dev, *handle)) { if (coherent) __free_pages(page, get_order(size)); @@ -606,6 +610,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { + size_t iosize = size; + + size = PAGE_ALIGN(size); /* * @cpu_addr will be one of 3 things depending on how it was allocated: * - A remapped array of pages from iommu_dma_alloc(), for all @@ -617,17 +624,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, * Hence how dodgy the below logic looks... 
*/ if (__in_atomic_pool(cpu_addr, size)) { - iommu_dma_unmap_page(dev, handle, size, 0, NULL); + iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); __free_from_pool(cpu_addr, size); } else if (is_vmalloc_addr(cpu_addr)){ struct vm_struct *area = find_vm_area(cpu_addr); if (WARN_ON(!area || !area->pages)) return; - iommu_dma_free(dev, area->pages, size, &handle); + iommu_dma_free(dev, area->pages, iosize, &handle); dma_common_free_remap(cpu_addr, size, VM_USERMAP); } else { - iommu_dma_unmap_page(dev, handle, size, 0, NULL); + iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); __free_pages(virt_to_page(cpu_addr), get_order(size)); } } -- cgit v1.2.3 From 4fee9f364b9b99f76732f2a6fd6df679a237fa74 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 16 Nov 2015 11:18:14 +0100 Subject: arm64: mm: use correct mapping granularity under DEBUG_RODATA When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA and resides at an offset that is not a multiple of 512 MB, the rounding that occurs in __map_memblock() and fixup_executable() results in incorrect regions being mapped. The following snippet from /sys/kernel/debug/kernel_page_tables shows how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000, the first 2 MB of memory (which may be inaccessible from non-secure EL1 or just reserved by the firmware) is inadvertently mapped into the end of the module region. ---[ Modules start ]--- 0xfffffdffffe00000-0xfffffe0000000000 2M RW NX ... UXN MEM/NORMAL ---[ Modules end ]--- ---[ Kernel Mapping ]--- 0xfffffe0000000000-0xfffffe0000090000 576K RW NX ... UXN MEM/NORMAL 0xfffffe0000090000-0xfffffe0000200000 1472K ro x ... UXN MEM/NORMAL 0xfffffe0000200000-0xfffffe0000800000 6M ro x ... UXN MEM/NORMAL 0xfffffe0000800000-0xfffffe0000810000 64K ro x ... UXN MEM/NORMAL 0xfffffe0000810000-0xfffffe0000a00000 1984K RW NX ... UXN MEM/NORMAL 0xfffffe0000a00000-0xfffffe00ffe00000 4084M RW NX ... UXN MEM/NORMAL The same issue is likely to occur on 16k pages kernels whose load address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to SWAPPER_BLOCK_SIZE instead of SECTION_SIZE. Fixes: da141706aea5 ("arm64: add better page protections to arm64") Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Acked-by: Laura Abbott Cc: # 4.0+ Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e3f563c81c48..32ddd893da9a 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end) * for now. 
This will get more fine grained later once all memory * is mapped */ - unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); - unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); + unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE); + unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE); if (end < kernel_x_start) { create_mapping(start, __phys_to_virt(start), @@ -451,18 +451,18 @@ static void __init fixup_executable(void) { #ifdef CONFIG_DEBUG_RODATA /* now that we are actually fully mapped, make the start/end more fine grained */ - if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { + if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) { unsigned long aligned_start = round_down(__pa(_stext), - SECTION_SIZE); + SWAPPER_BLOCK_SIZE); create_mapping(aligned_start, __phys_to_virt(aligned_start), __pa(_stext) - aligned_start, PAGE_KERNEL); } - if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { + if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) { unsigned long aligned_end = round_up(__pa(__init_end), - SECTION_SIZE); + SWAPPER_BLOCK_SIZE); create_mapping(__pa(__init_end), (unsigned long)__init_end, aligned_end - __pa(__init_end), PAGE_KERNEL); -- cgit v1.2.3 From 1dccb598df549d892b6450c261da54cdd7af44b4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 16 Nov 2015 17:25:48 +0100 Subject: arm64: simplify dma_get_ops Including linux/acpi.h from asm/dma-mapping.h causes tons of compile-time warnings, e.g. drivers/isdn/mISDN/dsp_ecdis.h:43:0: warning: "FALSE" redefined drivers/isdn/mISDN/dsp_ecdis.h:44:0: warning: "TRUE" redefined drivers/net/fddi/skfp/h/targetos.h:62:0: warning: "TRUE" redefined drivers/net/fddi/skfp/h/targetos.h:63:0: warning: "FALSE" redefined However, it looks like the dependency should not even be there, as I do not see why __generic_dma_ops() cares about whether we have an ACPI based system or not. The current behavior is to fall back to the global dma_ops when a device has not set its own dma_ops, but only for DT based systems. This seems dangerous, as a random device might have different requirements regarding IOMMU or coherency, so we should really never have that fallback and just forbid DMA when we have not initialized DMA for a device. This removes the global dma_ops variable and the special-casing for ACPI, and just returns the dma ops that got set for the device, or the dummy_dma_ops if none were present. The original code has apparently been copied from arm32, where we rely on it for ISA devices such as the floppy controller, but we should have no such devices on ARM64.
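For a device that never gets arch_setup_dma_ops() called on it, the net effect is that mapping attempts now fail cleanly instead of silently going through the global ops. A minimal driver-side sketch (illustrative only, with a hypothetical dev/buf pair; dummy_dma_ops reports every mapping as failed):

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* always taken while dummy_dma_ops is installed */
		dev_err(dev, "DMA ops were never set up for this device\n");
		return -EIO;
	}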
Signed-off-by: Arnd Bergmann [catalin.marinas@arm.com: removed acpi_disabled check in arch_setup_dma_ops()] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/dma-mapping.h | 13 +++---------- arch/arm64/mm/dma-mapping.c | 16 ++++------------ 2 files changed, 7 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 54d0ead41afc..61e08f360e31 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -18,7 +18,6 @@ #ifdef __KERNEL__ -#include #include #include @@ -26,22 +25,16 @@ #include #define DMA_ERROR_CODE (~(dma_addr_t)0) -extern struct dma_map_ops *dma_ops; extern struct dma_map_ops dummy_dma_ops; static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) { - if (unlikely(!dev)) - return dma_ops; - else if (dev->archdata.dma_ops) + if (dev && dev->archdata.dma_ops) return dev->archdata.dma_ops; - else if (acpi_disabled) - return dma_ops; /* - * When ACPI is enabled, if arch_set_dma_ops is not called, - * we will disable device DMA capability by setting it - * to dummy_dma_ops. + * We expect no ISA devices, and all other DMA masters are expected to + * have someone call arch_setup_dma_ops at device creation time. */ return &dummy_dma_ops; } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 97fd714035f9..7963aa4b5d28 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -28,9 +29,6 @@ #include -struct dma_map_ops *dma_ops; -EXPORT_SYMBOL(dma_ops); - static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, bool coherent) { @@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops); static int __init arm64_dma_init(void) { - int ret; - - dma_ops = &swiotlb_dma_ops; - - ret = atomic_pool_init(); - - return ret; + return atomic_pool_init(); } arch_initcall(arm64_dma_init); @@ -991,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, struct iommu_ops *iommu, bool coherent) { - if (!acpi_disabled && !dev->archdata.dma_ops) - dev->archdata.dma_ops = dma_ops; + if (!dev->archdata.dma_ops) + dev->archdata.dma_ops = &swiotlb_dma_ops; dev->archdata.dma_coherent = coherent; __iommu_setup_dma_ops(dev, dma_base, size, iommu); -- cgit v1.2.3 From adc235aff6fcf5ae4d8dc00faf30a07632b9725b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 16 Nov 2015 17:32:15 +0100 Subject: arm64: do not include ptrace.h from compat.h Including ptrace.h brings a definition of BITS_PER_PAGE into device drivers and causes a build warning in allmodconfig builds: drivers/block/drbd/drbd_bitmap.c:482:0: warning: "BITS_PER_PAGE" redefined #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) This uses a slightly different way to express current_pt_regs() that avoids the use of the header and makes do with the already included asm/ptrace.h.
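For reference, the rewrite does not change behaviour: when an architecture does not override it, the generic fallback in linux/ptrace.h is essentially (paraphrased, not part of this patch)

	#define current_pt_regs()	task_pt_regs(current)

so spelling out task_pt_regs(current) in compat.h drops only the header dependency.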
Signed-off-by: Arnd Bergmann Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/compat.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 7fbed6919b54..eb8432bb82b8 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -23,7 +23,6 @@ */ #include #include -#include #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ @@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs())) +#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) static inline void __user *arch_compat_alloc_user_space(long len) { -- cgit v1.2.3 From de818bd4522c40ea02a81b387d2fa86f989c9623 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Tue, 17 Nov 2015 11:50:51 +0000 Subject: arm64: kernel: pause/unpause function graph tracer in cpu_suspend() The function graph tracer adds instrumentation that is required to trace both entry and exit of a function. In particular the function graph tracer updates the "return address" of a function in order to insert a trace callback on function exit. Kernel power management functions like cpu_suspend() are called upon power down entry with functions called "finishers" that are in turn called to trigger the power down sequence, but they may not return to the kernel through the normal return path. When the core resumes from a low-power state it returns to the cpu_suspend() function through the cpu_resume path, which leaves the trace stack frame set up by the function tracer in an inconsistent state upon return to the kernel when tracing is enabled. This patch fixes the issue by pausing/resuming the function graph tracer on the thread executing cpu_suspend() (i.e., the function call that subsequently triggers the "suspend finishers"), so that the function graph tracer state is kept consistent across functions that enter power down states and never return, by effectively disabling the graph tracer while they are executing. Fixes: 819e50e25d0c ("arm64: Add ftrace support") Signed-off-by: Lorenzo Pieralisi Reported-by: Catalin Marinas Reported-by: AKASHI Takahiro Suggested-by: Steven Rostedt Acked-by: Steven Rostedt Cc: Will Deacon Cc: # 3.16+ Signed-off-by: Catalin Marinas --- arch/arm64/kernel/suspend.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index fce95e17cf7f..1095aa483a1c 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -1,3 +1,4 @@ +#include #include #include #include @@ -70,6 +71,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) */ local_dbg_save(flags); + /* + * Function graph tracer state gets inconsistent when the kernel + * calls functions that never return (aka suspend finishers) hence + * disable graph tracing during their execution. + */ + pause_graph_tracing(); + /* * mm context saved on the stack, it will be restored when * the cpu comes out of reset through the identity mapped @@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) hw_breakpoint_restore(NULL); } + unpause_graph_tracing(); + /* * Restore pstate flags.
OS lock and mdscr have been already * restored, so from this point onwards, debugging is fully -- cgit v1.2.3 From 65da0a8e34a857f2ba9ccb91dc8f8f964cf938b7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Nov 2015 09:53:31 +0100 Subject: arm64: use non-global mappings for UEFI runtime regions As pointed out by Russell King in response to the proposed ARM version of this code, the sequence to switch between the UEFI runtime mapping and current's actual userland mapping (and vice versa) is potentially unsafe, since it leaves a time window between the switch to the new page tables and the TLB flush where speculative accesses may hit on stale global TLB entries. So instead, use non-global mappings, and perform the switch via the ordinary ASID-aware context switch routines. Signed-off-by: Ard Biesheuvel Acked-by: Will Deacon Reviewed-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/mmu_context.h | 2 +- arch/arm64/kernel/efi.c | 14 +++++--------- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index c0e87898ba96..24165784b803 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void) #define destroy_context(mm) do { } while(0) void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); -#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) /* * This is called when "tsk" is about to enter lazy TLB mode. diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index de46b50f4cdf..fc5508e0df57 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void) { efi_memory_desc_t *md; + init_new_context(NULL, &efi_mm); + for_each_efi_memory_desc(&memmap, md) { u64 paddr, npages, size; pgprot_t prot; @@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void) else prot = PAGE_KERNEL; - create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); + create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, + __pgprot(pgprot_val(prot) | PTE_NG)); } return true; } @@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init); static void efi_set_pgd(struct mm_struct *mm) { - if (mm == &init_mm) - cpu_set_reserved_ttbr0(); - else - cpu_switch_mm(mm->pgd, mm); - - local_flush_tlb_all(); - if (icache_is_aivivt()) - __local_flush_icache_all(); + switch_mm(NULL, mm, NULL); } void efi_virtmap_load(void) -- cgit v1.2.3 From 08c6781cfaa196d4b257ec043c17e8746d9284e3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 16 Nov 2015 13:12:48 +0100 Subject: arm64: crypto: reduce priority of core AES cipher The asynchronous, merged implementations of AES in CBC, CTR and XTS modes are preferred when available (i.e., when instantiating ablkciphers explicitly). However, the synchronous core AES cipher combined with the generic CBC mode implementation will produce a 'cbc(aes)' blkcipher that is callable asynchronously as well. To prevent this implementation from being used when the accelerated asynchronous implementation is also available, lower its priority to 250 (i.e., below the asynchronous module's priority of 300).
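For context, the crypto API resolves several registered implementations of the same cra_name by picking the one with the highest cra_priority, so callers are unaffected except for which provider wins the lookup (illustrative sketch, not part of the patch):

	/*
	 * Resolves to the highest-priority "cbc(aes)" provider: after this
	 * change that is the merged asynchronous driver (priority 300)
	 * rather than generic CBC layered over this core cipher (now 250).
	 */
	struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);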
Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/crypto/aes-ce-cipher.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c index ce47792a983d..f7bd9bf0bbb3 100644 --- a/arch/arm64/crypto/aes-ce-cipher.c +++ b/arch/arm64/crypto/aes-ce-cipher.c @@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey); static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-ce", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), -- cgit v1.2.3 From 0b2aa5b80bbf4d0fb5daa1fb83ff637daa12d552 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 12 Nov 2015 12:21:10 -0800 Subject: arm64: Fix R/O permissions in mark_rodata_ro The permissions in mark_rodata_ro trigger a build error with STRICT_MM_TYPECHECKS. Fix this by introducing PAGE_KERNEL_ROX for the same reasons as PAGE_KERNEL_RO. From Ard: "PAGE_KERNEL_EXEC has PTE_WRITE set as well, making the range writeable under the ARMv8.1 DBM feature, that manages the dirty bit in hardware (writing to a page with the PTE_RDONLY and PTE_WRITE bits both set will clear the PTE_RDONLY bit in that case)" Signed-off-by: Laura Abbott Acked-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable.h | 1 + arch/arm64/mm/mmu.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 9819a9426b69..7e074f93f383 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 32ddd893da9a..abb66f84d4ac 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -475,7 +475,7 @@ void mark_rodata_ro(void) { create_mapping_late(__pa(_stext), (unsigned long)_stext, (unsigned long)_etext - (unsigned long)_stext, - PAGE_KERNEL_EXEC | PTE_RDONLY); + PAGE_KERNEL_ROX); } #endif -- cgit v1.2.3 From c139aa60c1007429335131167a0ca181e38c5668 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 18 Nov 2015 10:13:08 +0000 Subject: arm64: barriers: fix smp_load_acquire to work with const arguments A newly introduced function in include/net/sock.h passes a const argument to smp_load_acquire: static inline int sk_state_load(const struct sock *sk) { return smp_load_acquire(&sk->sk_state); } This causes an allmodconfig build failure, since our underlying load-acquire implementation does not handle const types correctly: include/net/sock.h: In function 'sk_state_load': ./arch/arm64/include/asm/barrier.h:71:3: error: read-only variable '___p1' used as 'asm' output asm volatile ("ldarb %w0, %1" \ This patch fixes the problem by reusing the trick in READ_ONCE that loads via a non-const member of an anonymous union. This has the advantage of allowing us to use smp_load_acquire on packed structures (e.g.
arch_spinlock_t) as well as primitive types. Cc: Arnd Bergmann Cc: David Daney Cc: Eric Dumazet Reported-by: Arnd Bergmann Reported-by: David Daney Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/barrier.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 624f9679f4b0..9622eb48f894 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -64,27 +64,31 @@ do { \ #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1; \ + union { typeof(*p) __val; char __c[1]; } __u; \ compiletime_assert_atomic_type(*p); \ switch (sizeof(*p)) { \ case 1: \ asm volatile ("ldarb %w0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u8 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ case 2: \ asm volatile ("ldarh %w0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u16 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ case 4: \ asm volatile ("ldar %w0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u32 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ case 8: \ asm volatile ("ldar %0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u64 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ } \ - ___p1; \ + __u.__val; \ }) #define read_barrier_depends() do { } while(0) -- cgit v1.2.3 From 92e788b749862ebe9920360513a718e5dd4da7a9 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 18 Nov 2015 10:48:55 -0800 Subject: arm64: restore bogomips information in /proc/cpuinfo As previously reported, some userspace applications depend on the bogomips value shown by /proc/cpuinfo. Although there is much less legacy impact on aarch64 than arm, it does break libvirt. This patch reverts commit 326b16db9f69 ("arm64: delay: don't bother reporting bogomips in /proc/cpuinfo"), but with some tweaks due to context changes and without the pr_info(). Fixes: 326b16db9f69 ("arm64: delay: don't bother reporting bogomips in /proc/cpuinfo") Signed-off-by: Yang Shi Acked-by: Will Deacon Cc: # 3.12+ Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpuinfo.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 706679d0a0b4..212ae6361d8b 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -30,6 +30,7 @@ #include #include #include +#include /* * In case the boot CPU is hotpluggable, we record its initial state and @@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v) */ seq_printf(m, "processor\t: %d\n", i); + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000UL/HZ), + loops_per_jiffy / (5000UL/HZ) % 100); + /* * Dump out the common processor features in a single line. * Userspace should read the hwcaps with getauxval(AT_HWCAP) -- cgit v1.2.3
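For anyone checking the arithmetic in c_show() above: the two divisions print the integer and two-digit fractional parts of loops_per_jiffy * HZ / 500000. A standalone userspace sanity check (a sketch assuming HZ=100 and an illustrative loops_per_jiffy value):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hz = 100;		/* assumed CONFIG_HZ */
		unsigned long lpj = 4800000;	/* illustrative loops_per_jiffy */

		/* Mirrors the kernel's seq_printf; prints "BogoMIPS : 960.00" */
		printf("BogoMIPS\t: %lu.%02lu\n",
		       lpj / (500000UL / hz),
		       lpj / (5000UL / hz) % 100);
		return 0;
	}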