From 26e30c6489f4774492c168c7b953e575a16765f7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 27 Jan 2016 17:59:36 +0100 Subject: ARM: imx: enable big endian mode Enable ARM big-endian mode on mach-imx. This requires adding some byte swapping in the debug functions (which otherwise hang forever) and of course the secondary core bringup. Tested (on top of 4.4) on i.MX6 HummingBoard quad-core (IMX6Q). The patch is pretty much as suggested by Arnd Bergmann, thanks! Signed-off-by: Johannes Berg Signed-off-by: Shawn Guo --- arch/arm/include/debug/imx.S | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/debug/imx.S b/arch/arm/include/debug/imx.S index 619d8cc1ac12..92c44760d656 100644 --- a/arch/arm/include/debug/imx.S +++ b/arch/arm/include/debug/imx.S @@ -11,6 +11,7 @@ * */ +#include #include "imx-uart.h" /* @@ -34,6 +35,7 @@ .endm .macro senduart,rd,rx + ARM_BE8(rev \rd, \rd) str \rd, [\rx, #0x40] @ TXDATA .endm @@ -42,6 +44,7 @@ .macro busyuart,rd,rx 1002: ldr \rd, [\rx, #0x98] @ SR2 + ARM_BE8(rev \rd, \rd) tst \rd, #1 << 3 @ TXDC beq 1002b @ wait until transmit done .endm -- cgit v1.2.3 From 5bd28338d681dcbde2c4bee4ebea0c4e0dfcd9e4 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 5 Feb 2016 14:57:54 -0600 Subject: PCI: Remove includes of empty asm-generic/pci-bridge.h include/asm-generic/pci-bridge.h is now empty, so remove every #include of it. Signed-off-by: Bjorn Helgaas Acked-by: Will Deacon (arm64) --- arch/alpha/include/asm/pci.h | 1 - arch/arm/include/asm/pci.h | 3 --- arch/arm64/include/asm/pci.h | 1 - arch/mips/include/asm/pci.h | 1 - arch/powerpc/include/asm/pci-bridge.h | 1 - arch/unicore32/include/asm/pci.h | 2 -- arch/x86/pci/common.c | 1 - drivers/of/of_pci.c | 1 - drivers/pci/pci.c | 1 - drivers/pci/probe.c | 1 - drivers/pci/setup-bus.c | 1 - 11 files changed, 14 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index 98f2eeee8f68..75d8865d763d 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -7,7 +7,6 @@ #include #include #include -#include /* * The following structure is used to manage multiple PCI busses. 
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index a5635444ca41..d7de19a77d51 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -3,8 +3,6 @@ #ifdef __KERNEL__ #include -#include - #include /* for pci_sys_data */ extern unsigned long pcibios_min_io; @@ -41,5 +39,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) } #endif /* __KERNEL__ */ - #endif diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index b008a72f8bc0..f75b04e8d732 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -7,7 +7,6 @@ #include #include -#include #include #define PCIBIOS_MIN_IO 0x1000 diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 98c31e5d9579..108d19376bb1 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -102,7 +102,6 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, #include #include #include -#include struct pci_dev; diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 54843ca5fa2b..78968c1ff931 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -10,7 +10,6 @@ #include #include #include -#include struct device_node; diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h index 38b3f3785c3c..eb9dccecb338 100644 --- a/arch/unicore32/include/asm/pci.h +++ b/arch/unicore32/include/asm/pci.h @@ -14,7 +14,6 @@ #ifdef __KERNEL__ #include -#include #include #include /* for PCIBIOS_MIN_* */ @@ -23,5 +22,4 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); #endif /* __KERNEL__ */ - #endif diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 2879efc73a96..b4a9f23d77d9 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index b1449f71601c..13f4fed38048 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c @@ -5,7 +5,6 @@ #include #include #include -#include static inline int __of_pci_pci_compare(struct device_node *node, unsigned int data) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 602eb4223510..64c0a1215f84 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include "pci.h" diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ead1ac1dc1e3..5eb378fbe849 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -15,7 +15,6 @@ #include #include #include -#include #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 7796d0a5befa..55641a39a3e9 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -25,7 +25,6 @@ #include #include #include -#include #include "pci.h" unsigned int pci_flags; -- cgit v1.2.3 From 2841029393fad551b49b6de34d44bfa9ef256441 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 Jan 2016 17:15:58 +0000 Subject: ARM: make virt_to_idmap() return unsigned long Make virt_to_idmap() return an unsigned long rather than phys_addr_t. Returning phys_addr_t here makes no sense, because the definition of virt_to_idmap() is that it shall return a physical address which maps identically with the virtual address. 
Since virtual addresses are limited to 32-bit, identity mapped physical addresses are as well. Almost all users already had an implicit narrowing cast to unsigned long so let's make this official and part of this interface. Tested-by: Grygorii Strashko Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 6 +++--- arch/arm/kernel/reboot.c | 2 +- arch/arm/mach-keystone/keystone.c | 2 +- arch/arm/mm/idmap.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index c79b57bf71c4..49bf6b1e2177 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -273,14 +273,14 @@ static inline void *phys_to_virt(phys_addr_t x) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) -extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); +extern unsigned long (*arch_virt_to_idmap)(unsigned long x); /* * These are for systems that have a hardware interconnect supported alias of * physical memory for idmap purposes. Most cases should leave these - * untouched. + * untouched. Note: this can only return addresses less than 4GiB. */ -static inline phys_addr_t __virt_to_idmap(unsigned long x) +static inline unsigned long __virt_to_idmap(unsigned long x) { if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap) return arch_virt_to_idmap(x); diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index 38269358fd25..71a2ff9ec490 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -50,7 +50,7 @@ static void __soft_restart(void *addr) flush_cache_all(); /* Switch to the identity mapping. */ - phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset); + phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset); phys_reset((unsigned long)addr); /* Should never get here. */ diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index c279293f084c..d80879ce4963 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -63,7 +63,7 @@ static void __init keystone_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } -static phys_addr_t keystone_virt_to_idmap(unsigned long x) +static unsigned long keystone_virt_to_idmap(unsigned long x) { return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START; } diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index d65909697165..bd274a05b8ff 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -15,7 +15,7 @@ * page tables. */ pgd_t *idmap_pgd; -phys_addr_t (*arch_virt_to_idmap) (unsigned long x); +unsigned long (*arch_virt_to_idmap)(unsigned long x); #ifdef CONFIG_ARM_LPAE static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, -- cgit v1.2.3 From 52ba0746b3b44c86aee121babf3b2fd9b8f84090 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Mon, 8 Feb 2016 16:02:06 +0000 Subject: xen/arm: correctly handle DMA mapping of compound pages Currently xen_dma_map_page concludes that DMA to anything other than the head page of a compound page must be foreign, since the PFN of the page is that of the head. Fix the check to instead consider the whole of a compound page to be local if the PFN of the head passes the 1:1 check. We can never see a compound page which is a mixture of foreign and local sub-pages. The comment already correctly described the intention, but fixup the spelling and some grammar. 
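For reference, the containment test being introduced amounts to the following sketch (reconstructed from the hunk further below, which lost part of its text to markup stripping; compound_order() and XEN_PFN_PER_PAGE are assumed from context rather than quoted verbatim):

	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1 << compound_order(page)) * XEN_PFN_PER_PAGE;
	/* local iff dev_pfn falls inside the head page's 1:1 range */
	bool local = (page_pfn <= dev_pfn) &&
		     (dev_pfn - page_pfn < compound_pages);

That is, the whole compound page is treated as local when the device PFN lies anywhere within the identity-mapped range starting at the head page, instead of only when it equals the head PFN.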
This fixes the various SSH protocol errors which we have been seeing on the cubietrucks in our automated test infrastructure. This has been broken since commit 3567258d281b ("xen/arm: use hypercall to flush caches in map_page"), which was in v3.19-rc1. NB arch/arm64/.../xen/page-coherent.h also includes this file. Signed-off-by: Ian Campbell Reviewed-by: Stefano Stabellini Cc: xen-devel@lists.xenproject.org Cc: linux-arm-kernel@lists.infradead.org Cc: stable@vger.kernel.org # v3.19+ --- arch/arm/include/asm/xen/page-coherent.h | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index 0375c8caa061..9408a994cc91 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page); + unsigned long page_pfn = page_to_xen_pfn(page); + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); + unsigned long compound_pages = + (1<map_page(hwdev, page, offset, size, dir, attrs); -- cgit v1.2.3 From 0c5325466d5d4816c9bd13c56746aa26ed66231d Mon Sep 17 00:00:00 2001 From: Mans Rullgard Date: Fri, 5 Feb 2016 10:49:22 +0100 Subject: ARM: debug: add support for Palmchip BK-310x UART Some SoCs use a Palmchip BK-310x UART which is mostly 16550 compatible but with a different register layout. While this UART has previously only been supported in MIPS based chips (Alchemy, Ralink), the ARM based SMP87xx series from Sigma Designs also uses it. This patch allows the debug console to work with this type of UART. Signed-off-by: Mans Rullgard Signed-off-by: Marc Gonzalez Acked-by: Arnd Bergmann Acked-by: Kevin Hilman Signed-off-by: Olof Johansson --- arch/arm/Kconfig.debug | 9 +++++++++ arch/arm/include/debug/palmchip.S | 11 +++++++++++ 2 files changed, 20 insertions(+) create mode 100644 arch/arm/include/debug/palmchip.S (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index c6b6175d0203..1098e91d6d3f 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1368,6 +1368,7 @@ config DEBUG_SIRFSOC_UART config DEBUG_LL_INCLUDE string default "debug/sa1100.S" if DEBUG_SA1100 + default "debug/palmchip.S" if DEBUG_UART_8250_PALMCHIP default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250 default "debug/at91.S" if DEBUG_AT91_UART default "debug/asm9260.S" if DEBUG_ASM9260_UART @@ -1656,6 +1657,14 @@ config DEBUG_UART_8250_WORD DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \ DEBUG_BRCMSTB_UART +config DEBUG_UART_8250_PALMCHIP + bool "8250 UART is Palmchip BK-310x" + depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250 + help + Palmchip provides a UART implementation compatible with 16550 + except for having a different register layout. Say Y here if + the debug UART is of this type. 
+ config DEBUG_UART_8250_FLOW_CONTROL bool "Enable flow control for 8250 UART" depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250 diff --git a/arch/arm/include/debug/palmchip.S b/arch/arm/include/debug/palmchip.S new file mode 100644 index 000000000000..6824b2d1c38e --- /dev/null +++ b/arch/arm/include/debug/palmchip.S @@ -0,0 +1,11 @@ +#include + +#undef UART_TX +#undef UART_LSR +#undef UART_MSR + +#define UART_TX 1 +#define UART_LSR 7 +#define UART_MSR 8 + +#include -- cgit v1.2.3 From 34d2f4d3a4d6a6b204ebfd12d1d2c159360d41a4 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 27 Jan 2016 14:11:42 -0800 Subject: ARM: Use generic clkdev.h header Get rid of the clkdev.h file in the ARM port and use the generic one because we've gotten rid of all the machine specific mach/clkdev.h files. Acked-by: Arnd Bergmann Signed-off-by: Stephen Boyd Signed-off-by: Linus Walleij --- arch/arm/include/asm/Kbuild | 1 + arch/arm/include/asm/clkdev.h | 31 ------------------------------- 2 files changed, 1 insertion(+), 31 deletions(-) delete mode 100644 arch/arm/include/asm/clkdev.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 16da6380eb85..43cd07708cf9 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -1,6 +1,7 @@ generic-y += bitsperlong.h +generic-y += clkdev.h generic-y += cputime.h generic-y += current.h generic-y += early_ioremap.h diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h deleted file mode 100644 index 4e8a4b27d7c7..000000000000 --- a/arch/arm/include/asm/clkdev.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * arch/arm/include/asm/clkdev.h - * - * Copyright (C) 2008 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Helper for the clk API to assist looking up a struct clk. - */ -#ifndef __ASM_CLKDEV_H -#define __ASM_CLKDEV_H - -#include - -#ifndef CONFIG_COMMON_CLK -#ifdef CONFIG_HAVE_MACH_CLKDEV -#include -#else -#define __clk_get(clk) ({ 1; }) -#define __clk_put(clk) do { } while (0) -#endif -#endif - -static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) -{ - return kzalloc(size, GFP_KERNEL); -} - -#endif -- cgit v1.2.3 From 73e592f3bc2cdc68df9dbc92e681b61f9bc6c2bf Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 27 Jan 2016 20:14:00 +0100 Subject: ARM: 8504/1: __arch_xprod_64(): small optimization The tmp variable is used twice: first to pose as a register containing a value of zero, and then to provide a temporary register that initially is zero and get added some value. But somehow gcc decides to split those two usages in different registers. Example code: u64 div64const1000(u64 x) { u32 y = 1000; do_div(x, y); return x; } Result: div64const1000: push {r4, r5, r6, r7, lr} mov lr, #0 mov r6, r0 mov r7, r1 adr r5, .L8 ldrd r4, [r5] mov r1, lr umull r2, r3, r4, r6 cmn r2, r4 adcs r3, r3, r5 adc r2, lr, #0 umlal r3, r2, r5, r6 umlal r3, r1, r4, r7 mov r3, #0 adds r2, r1, r2 adc r3, r3, #0 umlal r2, r3, r5, r7 lsr r0, r2, #9 lsr r1, r3, #9 orr r0, r0, r3, lsl #23 pop {r4, r5, r6, r7, pc} .align 3 .L8: .word -1924145349 .word -2095944041 Full kernel build size: text data bss dec hex filename 13663814 1553940 351368 15569122 ed90e2 vmlinux Here the two instances of 'tmp' are assigned to r1 and lr. 
To avoid that, let's mark the first 'tmp' usage in __arch_xprod_64() with a "+r" constraint even if the register is not written to, so to create a dependency for the second usage with the effect of enforcing a single temporary register throughout. Result: div64const1000: push {r4, r5, r6, r7} movs r3, #0 adr r5, .L8 ldrd r4, [r5] umull r6, r7, r4, r0 cmn r6, r4 adcs r7, r7, r5 adc r6, r3, #0 umlal r7, r6, r5, r0 umlal r7, r3, r4, r1 mov r7, #0 adds r6, r3, r6 adc r7, r7, #0 umlal r6, r7, r5, r1 lsr r0, r6, #9 lsr r1, r7, #9 orr r0, r0, r7, lsl #23 pop {r4, r5, r6, r7} bx lr .align 3 .L8: .word -1924145349 .word -2095944041 text data bss dec hex filename 13663438 1553940 351368 15568746 ed8f6a vmlinux This time 'tmp' is assigned to r3 and used throughout. However, by being assigned to r3, that blocks usage of the r2-r3 double register slot for 64-bit values, forcing more registers to be spilled on the stack. Let's try to help it by forcing 'tmp' to the caller-saved ip register. Result: div64const1000: stmfd sp!, {r4, r5} mov ip, #0 adr r5, .L8 ldrd r4, [r5] umull r2, r3, r4, r0 cmn r2, r4 adcs r3, r3, r5 adc r2, ip, #0 umlal r3, r2, r5, r0 umlal r3, ip, r4, r1 mov r3, #0 adds r2, ip, r2 adc r3, r3, #0 umlal r2, r3, r5, r1 mov r0, r2, lsr #9 mov r1, r3, lsr #9 orr r0, r0, r3, asl #23 ldmfd sp!, {r4, r5} bx lr .align 3 .L8: .word -1924145349 .word -2095944041 text data bss dec hex filename 13662838 1553940 351368 15568146 ed8d12 vmlinux We could make the code marginally smaller yet by forcing 'tmp' to lr instead, but that would have a negative inpact on branch prediction for which "bx lr" is optimal. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/div64.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index e1f07764b0d6..7d919a9b32e5 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -74,7 +74,7 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base) static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias) { unsigned long long res; - unsigned int tmp = 0; + register unsigned int tmp asm("ip") = 0; if (!bias) { asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" @@ -90,12 +90,12 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias) : "r" (m), "r" (n) : "cc"); } else { - asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" - "cmn %Q0, %Q1\n\t" - "adcs %R0, %R0, %R1\n\t" - "adc %Q0, %3, #0" - : "=&r" (res) - : "r" (m), "r" (n), "r" (tmp) + asm ( "umull %Q0, %R0, %Q2, %Q3\n\t" + "cmn %Q0, %Q2\n\t" + "adcs %R0, %R0, %R2\n\t" + "adc %Q0, %1, #0" + : "=&r" (res), "+&r" (tmp) + : "r" (m), "r" (n) : "cc"); } -- cgit v1.2.3 From 02afa9a87b232bca15bc30808b9310c6388ca1a8 Mon Sep 17 00:00:00 2001 From: Chris Brandt Date: Tue, 9 Feb 2016 19:34:43 +0100 Subject: ARM: 8518/1: Use correct symbols for XIP_KERNEL For an XIP build, _etext does not represent the end of the binary image that needs to stay mapped into the MODULES_VADDR area. Years ago, data came before text in the memory map. However, now that the order is text/init/data, an XIP_KERNEL needs to map up to the data location in order to keep from cutting off parts of the kernel that are needed. We only map up to the beginning of data because data has already been copied, so there's no reason to keep it around anymore. A new symbol is created to make it clear what it is we are referring to. 
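As a rough sketch of the image layout this relies on (illustrative only, using the symbols from the linker-script and mmu.c hunks below; not to scale):

	XIP ROM: _xiprom .. .text .. init .. _exiprom | __data_loc (data image in ROM)
	RAM:     PAGE_OFFSET .. _sdata .. (data copied here during boot)

Everything up to _exiprom has to stay mapped in the MODULES_VADDR window; the data image that follows has already been copied to RAM, so it no longer needs to be mapped there.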
This fixes the bug where you might lose the end of your kernel area after page table setup is complete. Signed-off-by: Chris Brandt Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 1 - arch/arm/include/asm/sections.h | 8 ++++++++ arch/arm/kernel/module.c | 2 +- arch/arm/kernel/vmlinux-xip.lds.S | 2 ++ arch/arm/mm/mmu.c | 8 ++++++-- 5 files changed, 17 insertions(+), 4 deletions(-) create mode 100644 arch/arm/include/asm/sections.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 16da6380eb85..3f6616b472af 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -23,7 +23,6 @@ generic-y += preempt.h generic-y += resource.h generic-y += rwsem.h generic-y += seccomp.h -generic-y += sections.h generic-y += segment.h generic-y += sembuf.h generic-y += serial.h diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h new file mode 100644 index 000000000000..803bbf2b20b8 --- /dev/null +++ b/arch/arm/include/asm/sections.h @@ -0,0 +1,8 @@ +#ifndef _ASM_ARM_SECTIONS_H +#define _ASM_ARM_SECTIONS_H + +#include + +extern char _exiprom[]; + +#endif /* _ASM_ARM_SECTIONS_H */ diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index efdddcb97dd1..4f14b5ce6535 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -34,7 +34,7 @@ * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off. */ #undef MODULES_VADDR -#define MODULES_VADDR (((unsigned long)_etext + ~PMD_MASK) & PMD_MASK) +#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK) #endif #ifdef CONFIG_MMU diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 7b1ded6fb628..40bc4cadb959 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -85,6 +85,7 @@ SECTIONS } . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); + _xiprom = .; /* XIP ROM area to be mapped */ .head.text : { _text = .; @@ -210,6 +211,7 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) #endif + _exiprom = .; /* End of XIP ROM area */ __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 434d76f0b363..e4b681aafd6d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1253,7 +1253,7 @@ static inline void prepare_page_table(void) #ifdef CONFIG_XIP_KERNEL /* The XIP kernel is mapped in the module area -- skip over it */ - addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK; + addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; #endif for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); @@ -1335,7 +1335,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) #ifdef CONFIG_XIP_KERNEL map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); map.virtual = MODULES_VADDR; - map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; + map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; map.type = MT_ROM; create_mapping(&map); #endif @@ -1426,7 +1426,11 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { struct memblock_region *reg; +#ifdef CONFIG_XIP_KERNEL + phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE); +#else phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); +#endif phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); /* Map all the lowmem memory banks. 
*/ -- cgit v1.2.3 From db57f88e4ccbbb6dd194b3f3088b3dc1987db423 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Mon, 10 Aug 2015 20:53:11 +0100 Subject: ARM: 8411/1: Add default SPARSEMEM settings We can still override these settings via mach/memory.h, but let's provide sensible defaults so that SPARSEMEM is available in the multiplatform kernels. Two platforms currently use SECTION_SIZE_BITS < 28, but are expected to work with 28 (albeit slightly less efficiently if not all banks are populated): - mach-rpc: uses 26 bits. Based on mach/hardware.h it looks like this platform puts RAM at 0x1000_0000 - 0x1fff_ffff, and I/O below 0x1000_0000. - mach-sa1100: uses 27 bits. mach/memory.h indicates that RAM occupies the entire range of 0xc000_0000 - 0xdfff_ffff. But Arnd says in that rpc and sa1100 will never have to use the default since they cannot be part of a multiplatform kernel, and that is unlikely to change. Several platforms need MAX_PHYSMEM_BITS >= 36 so we'll pick that as the minimum. Anything higher and we'll fail the SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH test in . Some analysis from Russell King at http://lists.infradead.org/pipermail/linux-arm-kernel/2014-October/298957.html: I think this is fine in as far as it goes - this means we end up with 256 entries in the mem_section array which means it occupies one page, which I think is acceptable overhead. The other thing to be aware of here is the obvious: #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS #error Allocator MAX_ORDER exceeds SECTION_SIZE #endif Which means that with 28 bits of section, that's a maximum allocator order of 16. We appear to allow FORCE_MAX_ZONEORDER to be set up to 64 in the case of shmobile, which doesn't seem like a sensible upper limit - and certainly isn't when sparsemem is enabled. Given this, I think that FORCE_MAX_ZONEORDER's help, and the dependencies probably could do with some improvement to make the issues more transparent. [gregory.0xf0: added notes from Arnd and Russell] Signed-off-by: Kevin Cernekee Acked-by: Arnd Bergmann Tested-by: Stephen Boyd Signed-off-by: Gregory Fong Signed-off-by: Russell King --- arch/arm/include/asm/sparsemem.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h index 00098615c6f0..73e5e8513751 100644 --- a/arch/arm/include/asm/sparsemem.h +++ b/arch/arm/include/asm/sparsemem.h @@ -15,10 +15,11 @@ * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000, * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26. * - * Define these in your mach/memory.h. + * These can be overridden in your mach/memory.h. */ -#if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS) -#error Sparsemem is not supported on this platform +#if !defined(MAX_PHYSMEM_BITS) || !defined(SECTION_SIZE_BITS) +#define MAX_PHYSMEM_BITS 36 +#define SECTION_SIZE_BITS 28 #endif #endif -- cgit v1.2.3 From d78114554939aec0344b494e759d0679224562db Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 2 Feb 2016 00:14:53 +0100 Subject: ARM: 8512/1: proc-v7.S: Adjust stack address when XIP_KERNEL When XIP_KERNEL is enabled, the virt to phys address translation for RAM is not the same as the virt to phys address translation for .text. The only way to know where physical RAM is located is to use PLAT_PHYS_OFFSET. The MACRO will be useful for other places where there is a similar problem. 
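To see that this does what we want, here is the arithmetic spelled out (a sketch based on the definitions in the hunks below, with v_data a RAM symbol such as __v7_setup_stack and v_text the XIP text location that references it):

	stored word  = (v_data - v_text) + PHYS_OFFSET_FIXUP
	             = (v_data - v_text)
	               + XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) - PAGE_OFFSET
	               + PLAT_PHYS_OFFSET - CONFIG_XIP_PHYS_ADDR
	phys(v_text) = v_text - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + CONFIG_XIP_PHYS_ADDR

	stored word + phys(v_text) = v_data - PAGE_OFFSET + PLAT_PHYS_OFFSET = __pa(v_data)

So adding the stored word to the physical address of the referencing instruction (obtained with the MMU off) yields the RAM physical address of the data symbol.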
Signed-off-by: Nicolas Pitre Signed-off-by: Chris Brandt Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 8 ++++++++ arch/arm/mm/proc-v7.S | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 49bf6b1e2177..ebdaaf7dd19f 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -134,6 +134,14 @@ */ #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) +#ifdef CONFIG_XIP_KERNEL +#define PHYS_OFFSET_FIXUP \ + ( XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) - PAGE_OFFSET + \ + PLAT_PHYS_OFFSET - CONFIG_XIP_PHYS_ADDR ) +#else +#define PHYS_OFFSET_FIXUP 0 +#endif + #ifndef __ASSEMBLY__ /* diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0f92d575a304..1595fb29ec12 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -487,7 +487,7 @@ __errata_finish: .align 2 __v7_setup_stack_ptr: - .word __v7_setup_stack - . + .word __v7_setup_stack - . + PHYS_OFFSET_FIXUP ENDPROC(__v7_setup) .bss -- cgit v1.2.3 From 8ff97fa31333e8d0f4f7029798d9c7d59359b05c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 16 Feb 2016 17:33:56 +0000 Subject: ARM: make the physical-relative calculation more obvious The physical-relative calculation between the XIP text and data sections introduced by the previous patch was far from obvious. Let's simplify it by turning it into a macro which takes the two (virtual) addresses. This allows us to arrange the calculation in a more obvious manner - we can make it two sub-expressions which calculate the physical address for each symbol, and then takes the difference of those physical addresses. Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 15 +++++++++++---- arch/arm/mm/proc-v7.S | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index ebdaaf7dd19f..51fc9b3b52ea 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -135,11 +135,18 @@ #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) #ifdef CONFIG_XIP_KERNEL -#define PHYS_OFFSET_FIXUP \ - ( XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) - PAGE_OFFSET + \ - PLAT_PHYS_OFFSET - CONFIG_XIP_PHYS_ADDR ) +/* + * When referencing data in RAM from the XIP region in a relative manner + * with the MMU off, we need the relative offset between the two physical + * addresses. The macro below achieves this, which is: + * __pa(v_data) - __xip_pa(v_text) + */ +#define PHYS_RELATIVE(v_data, v_text) \ + (((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \ + ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \ + CONFIG_XIP_PHYS_ADDR)) #else -#define PHYS_OFFSET_FIXUP 0 +#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text)) #endif #ifndef __ASSEMBLY__ diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 1595fb29ec12..0f8963a7e7d9 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -487,7 +487,7 @@ __errata_finish: .align 2 __v7_setup_stack_ptr: - .word __v7_setup_stack - . + PHYS_OFFSET_FIXUP + .word PHYS_RELATIVE(__v7_setup_stack, .) ENDPROC(__v7_setup) .bss -- cgit v1.2.3 From a0bf9776cd0be4490d4675d4108e13379849fc7f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 16 Feb 2016 13:52:39 +0100 Subject: arm64: kvm: deal with kernel symbols outside of linear mapping KVM on arm64 uses a fixed offset between the linear mapping at EL1 and the HYP mapping at EL2. 
Before we can move the kernel virtual mapping out of the linear mapping, we have to make sure that references to kernel symbols that are accessed via the HYP mapping are translated to their linear equivalent. Reviewed-by: Mark Rutland Acked-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm/include/asm/kvm_asm.h | 2 ++ arch/arm/kvm/arm.c | 8 +++++--- arch/arm64/include/asm/kvm_asm.h | 17 +++++++++++++++++ arch/arm64/include/asm/kvm_host.h | 8 +++++--- arch/arm64/kvm/hyp.S | 6 +++--- 5 files changed, 32 insertions(+), 9 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 194c91b610ff..c35c349da069 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -79,6 +79,8 @@ #define rr_lo_hi(a1, a2) a1, a2 #endif +#define kvm_ksym_ref(kva) (kva) + #ifndef __ASSEMBLY__ struct kvm; struct kvm_vcpu; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index dda1959f0dde..975da6cfbf59 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -982,7 +982,7 @@ static void cpu_init_hyp_mode(void *dummy) pgd_ptr = kvm_mmu_get_httbr(); stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); hyp_stack_ptr = stack_page + PAGE_SIZE; - vector_ptr = (unsigned long)__kvm_hyp_vector; + vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector); __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); @@ -1074,13 +1074,15 @@ static int init_hyp_mode(void) /* * Map the Hyp-code called directly from the host */ - err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); + err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start), + kvm_ksym_ref(__kvm_hyp_code_end)); if (err) { kvm_err("Cannot map world-switch code\n"); goto out_free_mappings; } - err = create_hyp_mappings(__start_rodata, __end_rodata); + err = create_hyp_mappings(kvm_ksym_ref(__start_rodata), + kvm_ksym_ref(__end_rodata)); if (err) { kvm_err("Cannot map rodata section\n"); goto out_free_mappings; diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 52b777b7d407..31b56008f412 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -26,7 +26,24 @@ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) +#define kvm_ksym_ref(sym) ((void *)&sym + kvm_ksym_shift) + #ifndef __ASSEMBLY__ +#if __GNUC__ > 4 +#define kvm_ksym_shift (PAGE_OFFSET - KIMAGE_VADDR) +#else +/* + * GCC versions 4.9 and older will fold the constant below into the addend of + * the reference to 'sym' above if kvm_ksym_shift is declared static or if the + * constant is used directly. However, since we use the small code model for + * the core kernel, the reference to 'sym' will be emitted as a adrp/add pair, + * with a +/- 4 GB range, resulting in linker relocation errors if the shift + * is sufficiently large. So prevent the compiler from folding the shift into + * the addend, by making the shift a variable with external linkage. 
+ */ +__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR; +#endif + struct kvm; struct kvm_vcpu; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 689d4c95e12f..e3d67ff8798b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -307,7 +307,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, struct kvm_vcpu *kvm_arm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); -u64 kvm_call_hyp(void *hypfn, ...); +u64 __kvm_call_hyp(void *hypfn, ...); void force_vm_exit(const cpumask_t *mask); void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); @@ -328,8 +328,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, * Call initialization code, and switch to the full blown * HYP code. */ - kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, - hyp_stack_ptr, vector_ptr); + __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, + hyp_stack_ptr, vector_ptr); } static inline void kvm_arch_hardware_disable(void) {} @@ -343,4 +343,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); +#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 0ccdcbbef3c2..870578f84b1c 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -20,7 +20,7 @@ #include /* - * u64 kvm_call_hyp(void *hypfn, ...); + * u64 __kvm_call_hyp(void *hypfn, ...); * * This is not really a variadic function in the classic C-way and care must * be taken when calling this to ensure parameters are passed in registers @@ -37,7 +37,7 @@ * used to implement __hyp_get_vectors in the same way as in * arch/arm64/kernel/hyp_stub.S. */ -ENTRY(kvm_call_hyp) +ENTRY(__kvm_call_hyp) hvc #0 ret -ENDPROC(kvm_call_hyp) +ENDPROC(__kvm_call_hyp) -- cgit v1.2.3 From 8f318526a292c5e7cebb82f3f766b83c22343293 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 18 Feb 2016 19:15:45 +0000 Subject: irqchip/gic-v3: Add missing barrier to 32bit version of gic_read_iar() Commit 1a1ebd5 ("irqchip/gic-v3: Make sure read from ICC_IAR1_EL1 is visible on redestributor") fixed the missing barrier on arm64, but forgot to update the 32bit counterpart, which has the same requirements. Let's fix it. Fixes: 1a1ebd5 ("irqchip/gic-v3: Make sure read from ICC_IAR1_EL1 is visible on redestributor") Signed-off-by: Marc Zyngier --- arch/arm/include/asm/arch_gicv3.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index 7da5503c0591..e08d15184056 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void) u32 irqstat; asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat)); + dsb(sy); return irqstat; } -- cgit v1.2.3 From e267d97b83d9cecc16c54825f9f3ac7f72dc1e1e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 17 Feb 2016 14:41:12 -0800 Subject: asm-generic: Consolidate mark_rodata_ro() Instead of defining mark_rodata_ro() in each architecture, consolidate it. 
Signed-off-by: Kees Cook Acked-by: Will Deacon Cc: Andrew Morton Cc: Andy Gross Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Ashok Kumar Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Catalin Marinas Cc: Dan Williams Cc: David Brown Cc: David Hildenbrand Cc: Denys Vlasenko Cc: Emese Revfy Cc: H. Peter Anvin Cc: Helge Deller Cc: James E.J. Bottomley Cc: Linus Torvalds Cc: Luis R. Rodriguez Cc: Marc Zyngier Cc: Mark Rutland Cc: Mathias Krause Cc: Michael Ellerman Cc: Nicolas Pitre Cc: PaX Team Cc: Paul Gortmaker Cc: Peter Zijlstra Cc: Ross Zwisler Cc: Russell King Cc: Rusty Russell Cc: Stephen Boyd Cc: Thomas Gleixner Cc: Toshi Kani Cc: kernel-hardening@lists.openwall.com Cc: linux-arch Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: linux-parisc@vger.kernel.org Link: http://lkml.kernel.org/r/1455748879-21872-2-git-send-email-keescook@chromium.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/cacheflush.h | 1 - arch/arm64/include/asm/cacheflush.h | 4 ---- arch/parisc/include/asm/cacheflush.h | 4 ---- arch/x86/include/asm/cacheflush.h | 1 - include/linux/init.h | 4 ++++ 5 files changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index d5525bfc7e3e..9156fc303afd 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -491,7 +491,6 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif #ifdef CONFIG_DEBUG_RODATA -void mark_rodata_ro(void); void set_kernel_text_rw(void); void set_kernel_text_ro(void); #else diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 7fc294c3bc5b..22dda613f9c9 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -156,8 +156,4 @@ int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); -#ifdef CONFIG_DEBUG_RODATA -void mark_rodata_ro(void); -#endif - #endif diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 845272ce9cc5..7bd69bd43a01 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -121,10 +121,6 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma } } -#ifdef CONFIG_DEBUG_RODATA -void mark_rodata_ro(void); -#endif - #include #define ARCH_HAS_KMAP diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index e63aa38e85fb..c8cff75c5b21 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -92,7 +92,6 @@ void clflush_cache_range(void *addr, unsigned int size); #define mmio_flush_range(addr, size) clflush_cache_range(addr, size) #ifdef CONFIG_DEBUG_RODATA -void mark_rodata_ro(void); extern const int rodata_test_data; extern int kernel_set_to_readonly; void set_kernel_text_rw(void); diff --git a/include/linux/init.h b/include/linux/init.h index b449f378f995..aedb254abc37 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -142,6 +142,10 @@ void prepare_namespace(void); void __init load_default_modules(void); int __init init_rootfs(void); +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void); +#endif + extern void (*late_time_init)(void); extern bool initcall_debug; -- cgit v1.2.3 From 9e0087e64e67cfe0b63b629add793a4aa019a629 Mon Sep 17 00:00:00 2001 From: Arnd 
Bergmann Date: Thu, 18 Feb 2016 15:55:59 +0100 Subject: ARM: 8530/1: remove VIRT_TO_BUS All drivers that are relevant for rpc or footbridge have stopped using virt_to_bus a while ago, so we can remove it and avoid some harmless randconfig warnings for drivers that we do not care about: drivers/atm/zatm.c: In function 'poll_rx': drivers/atm/zatm.c:401:18: warning: 'bus_to_virt' is deprecated [-Wdeprecated-declarations] skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; FWIW, the remaining drivers using this are: ATM: firestream, zatm, ambassador, horizon ISDN: hisax/netjet V4L: STA2X11, zoran Net: Appletalk LTPC, Tulip DE4x5, Toshiba IrDA WAN: comtrol sv11, cosa, lanmedia, sealevel SCSI: DPT_I2O, buslogic VME: CA91C142 My best guess is that all of the above are so hopelessly obsolete that we are best off removing all of them form the kernel, but that can be done another time. Signed-off-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/Kconfig | 1 - arch/arm/include/asm/memory.h | 14 -------------- arch/arm/mach-footbridge/Kconfig | 1 - 3 files changed, 16 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index cc95ff8f07cb..d67f1aa93213 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -572,7 +572,6 @@ config ARCH_RPC select NEED_MACH_IO_H select NEED_MACH_MEMORY_H select NO_IOPORT_MAP - select VIRT_TO_BUS help On the Acorn Risc-PC, Linux can support the internal IDE disk and CD-ROM interface, serial and parallel port, and the floppy drive. diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 51fc9b3b52ea..9427fd632552 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -318,20 +318,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x) #define __bus_to_pfn(x) __phys_to_pfn(x) #endif -#ifdef CONFIG_VIRT_TO_BUS -#define virt_to_bus virt_to_bus -static inline __deprecated unsigned long virt_to_bus(void *x) -{ - return __virt_to_bus((unsigned long)x); -} - -#define bus_to_virt bus_to_virt -static inline __deprecated void *bus_to_virt(unsigned long x) -{ - return (void *)__bus_to_virt(x); -} -#endif - /* * Conversion between a struct page and a physical address. * diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index 07152d00fc50..cbbdd84cf49a 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -68,7 +68,6 @@ config ARCH_NETWINDER select ISA select ISA_DMA select PCI - select VIRT_TO_BUS help Say Y here if you intend to run this kernel on the Rebel.COM NetWinder. Information about this machine can be found at: -- cgit v1.2.3 From 7d74a5f07694050d62fcee9120d20d702d7e6333 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 Feb 2016 16:00:23 +0100 Subject: ARM: 8531/1: turn init_new_context into an inline function Almost all architectures define init_new_context() as a function, but on ARM, it's a macro and that causes a compiler warning when its return code is not used: drivers/firmware/efi/arm-runtime.c: In function 'efi_virtmap_init': arch/arm/include/asm/mmu_context.h:88:34: warning: statement with no effect [-Wunused-value] #define init_new_context(tsk,mm) 0 drivers/firmware/efi/arm-runtime.c:47:2: note: in expansion of macro 'init_new_context' init_new_context(NULL, &efi_mm); This changes the definition into an inline function, which gcc does not warn about. 
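A minimal illustration of the difference (a hypothetical stand-alone example, not code from the tree; efi_mm is the caller quoted in the warning above):

	/* old form: the call site expands to a bare constant expression */
	#define init_new_context(tsk, mm)	0
	init_new_context(NULL, &efi_mm);	/* becomes "0;" -> -Wunused-value */

	/* new form: an ignored int return from a function call is not
	 * flagged by -Wunused-value */
	static inline int init_new_context(struct task_struct *tsk,
					   struct mm_struct *mm)
	{
		return 0;
	}
	init_new_context(NULL, &efi_mm);	/* no warning */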
Signed-off-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/mmu_context.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 432ce8176498..fa5b42d44985 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -26,7 +26,12 @@ void __check_vmalloc_seq(struct mm_struct *mm); #ifdef CONFIG_CPU_HAS_ASID void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); -#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + atomic64_set(&mm->context.id, 0); + return 0; +} #ifdef CONFIG_ARM_ERRATA_798181 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, @@ -85,7 +90,12 @@ static inline void finish_arch_post_lock_switch(void) #endif /* CONFIG_MMU */ -#define init_new_context(tsk,mm) 0 +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + return 0; +} + #endif /* CONFIG_CPU_HAS_ASID */ -- cgit v1.2.3 From 8fff2f752f2c9d31414f83170157701b59aec4c1 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 15 Feb 2016 10:17:47 +0100 Subject: ARM: zynq: Move early printk virtual address to vmalloc area The patch "ARM: 8432/1: move VMALLOC_END from 0xff000000 to 0xff800000" (sha1: 6ff0966052c46efb53980b8a1add2e7b49c9f560) has moved also start of VMALLOC area because size didn't change. That's why origin location of vmalloc was vmalloc : 0xf0000000 - 0xff000000 ( 240 MB) and now is vmalloc : 0xf0800000 - 0xff800000 ( 240 MB) That's why uart virtual addresses need to be changed to reflect this new memory setup. Starting address should be vmalloc start address. Signed-off-by: Michal Simek Acked-by: Arnd Bergmann --- arch/arm/include/debug/zynq.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/debug/zynq.S b/arch/arm/include/debug/zynq.S index de86b9247564..060cb5b49bfd 100644 --- a/arch/arm/include/debug/zynq.S +++ b/arch/arm/include/debug/zynq.S @@ -20,9 +20,9 @@ #define UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */ #define UART0_PHYS 0xE0000000 -#define UART0_VIRT 0xF0000000 +#define UART0_VIRT 0xF0800000 #define UART1_PHYS 0xE0001000 -#define UART1_VIRT 0xF0001000 +#define UART1_VIRT 0xF0801000 #if IS_ENABLED(CONFIG_DEBUG_ZYNQ_UART1) # define LL_UART_PADDR UART1_PHYS -- cgit v1.2.3 From 4e0b6ca9dace8dc0731571efd2bf1897fcb11938 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Thu, 19 Nov 2015 03:24:42 +0800 Subject: asm-generic: page.h: Remove useless get_user_page and free_user_page They are not symmetric with each other, neither are used in real world (can not be found by grep command in source code root directory), so remove them. 
Signed-off-by: Chen Gang Acked-by: Russell King Acked-by: Greg Ungerer Signed-off-by: Arnd Bergmann --- arch/arc/include/asm/page.h | 3 --- arch/arm/include/asm/page-nommu.h | 3 --- arch/frv/include/asm/page.h | 3 --- arch/m68k/include/asm/page_mm.h | 3 --- arch/m68k/include/asm/page_no.h | 3 --- arch/openrisc/include/asm/page.h | 3 --- include/asm-generic/page.h | 3 --- 7 files changed, 21 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 429957f1c236..adf5cbb141a5 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -13,9 +13,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h index d1b162a18dcb..503f488053de 100644 --- a/arch/arm/include/asm/page-nommu.h +++ b/arch/arm/include/asm/page-nommu.h @@ -17,9 +17,6 @@ #define KTHREAD_SIZE PAGE_SIZE #endif -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h index 688d8076a43a..ec5eebce4fb3 100644 --- a/arch/frv/include/asm/page.h +++ b/arch/frv/include/asm/page.h @@ -8,9 +8,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index 5029f73e6294..e7a1946455a8 100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h @@ -6,9 +6,6 @@ #include #include -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - /* * We don't need to check for alignment etc. 
*/ diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h index ef209169579a..fa7f32d9836b 100644 --- a/arch/m68k/include/asm/page_no.h +++ b/arch/m68k/include/asm/page_no.h @@ -6,9 +6,6 @@ extern unsigned long memory_start; extern unsigned long memory_end; -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h index 108906f991d6..e613d3673034 100644 --- a/arch/openrisc/include/asm/page.h +++ b/arch/openrisc/include/asm/page.h @@ -40,9 +40,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h index 37d1fe28960a..67cfb7dbc284 100644 --- a/include/asm-generic/page.h +++ b/include/asm-generic/page.h @@ -24,9 +24,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) -- cgit v1.2.3 From 35a2491a624af1fa7ab6990639f5246cd5f12592 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Feb 2016 17:54:35 +0000 Subject: arm/arm64: KVM: Add hook for C-based stage2 init As we're about to move the stage2 init to C code, introduce some C hooks that will later be populated with arch-specific implementations. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 4 ++++ arch/arm/kvm/arm.c | 1 + arch/arm64/include/asm/kvm_host.h | 4 ++++ 3 files changed, 9 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index f9f27792d8ed..f1e86f1eb2e5 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -220,6 +220,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + static inline int kvm_arch_dev_ioctl_check_extension(long ext) { return 0; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index dda1959f0dde..6b76e0152e58 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -985,6 +985,7 @@ static void cpu_init_hyp_mode(void *dummy) vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); + __cpu_init_stage2(); kvm_arm_init_debug(); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 689d4c95e12f..fe86cf9f288b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -332,6 +332,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} -- cgit v1.2.3 From 1a61ae7af4d65ee311a737d550da6cf92a3aea4c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 13:57:18 +0000 Subject: ARM: KVM: Move the HYP code to its own 
section In order to be able to spread the HYP code into multiple compilation units, adopt a layout similar to that of arm64: - the HYP text is emited in its own section (.hyp.text) - two linker generated symbols are use to identify the boundaries of that section No functionnal change. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 6 ++++-- arch/arm/include/asm/virt.h | 4 ++++ arch/arm/kernel/vmlinux.lds.S | 6 ++++++ arch/arm/kvm/interrupts.S | 13 +++++-------- 4 files changed, 19 insertions(+), 10 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 194c91b610ff..fa2fd253974f 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -19,6 +19,8 @@ #ifndef __ARM_KVM_ASM_H__ #define __ARM_KVM_ASM_H__ +#include + /* 0 is reserved as an invalid value. */ #define c0_MPIDR 1 /* MultiProcessor ID Register */ #define c0_CSSELR 2 /* Cache Size Selection Register */ @@ -91,8 +93,8 @@ extern char __kvm_hyp_exit_end[]; extern char __kvm_hyp_vector[]; -extern char __kvm_hyp_code_start[]; -extern char __kvm_hyp_code_end[]; +#define __kvm_hyp_code_start __hyp_text_start +#define __kvm_hyp_code_end __hyp_text_end extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 4371f45c5784..5fdbfea6defb 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -74,6 +74,10 @@ static inline bool is_hyp_mode_mismatched(void) { return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } + +/* The section containing the hypervisor text */ +extern char __hyp_text_start[]; +extern char __hyp_text_end[]; #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 8b60fde5ce48..b4139cbbbdd9 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -18,6 +18,11 @@ *(.proc.info.init) \ VMLINUX_SYMBOL(__proc_info_end) = .; +#define HYPERVISOR_TEXT \ + VMLINUX_SYMBOL(__hyp_text_start) = .; \ + *(.hyp.text) \ + VMLINUX_SYMBOL(__hyp_text_end) = .; + #define IDMAP_TEXT \ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__idmap_text_start) = .; \ @@ -108,6 +113,7 @@ SECTIONS TEXT_TEXT SCHED_TEXT LOCK_TEXT + HYPERVISOR_TEXT KPROBES_TEXT *(.gnu.warning) *(.glue_7) diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 900ef6dd8f72..9d9cb71df449 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -28,9 +28,7 @@ #include "interrupts_head.S" .text - -__kvm_hyp_code_start: - .globl __kvm_hyp_code_start + .pushsection .hyp.text, "ax" /******************************************************************** * Flush per-VMID TLBs @@ -314,8 +312,6 @@ THUMB( orr r2, r2, #PSR_T_BIT ) eret .endm - .text - .align 5 __kvm_hyp_vector: .globl __kvm_hyp_vector @@ -511,10 +507,9 @@ hyp_fiq: .ltorg -__kvm_hyp_code_end: - .globl __kvm_hyp_code_end + .popsection - .section ".rodata" + .pushsection ".rodata" und_die_str: .ascii "unexpected undefined exception in Hyp mode at: %#08x\n" @@ -524,3 +519,5 @@ dabt_die_str: .ascii "unexpected data abort in Hyp mode at: %#08x\n" svc_die_str: .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n" + + .popsection -- cgit v1.2.3 From 42428525a9eefea9dda68de684381ce9f3dc4266 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 14:04:48 +0000 Subject: ARM: KVM: Remove __kvm_hyp_code_start/__kvm_hyp_code_end Now 
that we've unified the way we refer to the HYP text between arm and arm64, drop __kvm_hyp_code_start/end, and just use the __hyp_text_start/end symbols. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 3 --- arch/arm/kvm/arm.c | 2 +- arch/arm64/include/asm/kvm_asm.h | 3 --- 3 files changed, 1 insertion(+), 7 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index fa2fd253974f..4841225d10ea 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -93,9 +93,6 @@ extern char __kvm_hyp_exit_end[]; extern char __kvm_hyp_vector[]; -#define __kvm_hyp_code_start __hyp_text_start -#define __kvm_hyp_code_end __hyp_text_end - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 6b76e0152e58..fcf6c130c986 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1075,7 +1075,7 @@ static int init_hyp_mode(void) /* * Map the Hyp-code called directly from the host */ - err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); + err = create_hyp_mappings(__hyp_text_start, __hyp_text_end); if (err) { kvm_err("Cannot map world-switch code\n"); goto out_free_mappings; diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 52b777b7d407..2ad8930e7eb3 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -35,9 +35,6 @@ extern char __kvm_hyp_init_end[]; extern char __kvm_hyp_vector[]; -#define __kvm_hyp_code_start __hyp_text_start -#define __kvm_hyp_code_end __hyp_text_end - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); -- cgit v1.2.3 From 0ca5565df8ef7534c0d85ec87e6c74f8ebe86e88 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:01:49 +0000 Subject: ARM: KVM: Move VFP registers to a CPU context structure In order to turn the WS code into something that looks a bit more like the arm64 version, move the VFP registers into a CPU context container for both the host and the guest. 
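For orientation, the access paths after this change (as introduced by the hunks below):

	guest VFP state: vcpu->arch.ctxt.vfp          (previously vcpu->arch.vfp_guest)
	host VFP state:  vcpu->arch.host_cpu_context->vfp

so both host and guest are now described by the same struct kvm_cpu_context container.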
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 11 +++++++---- arch/arm/kernel/asm-offsets.c | 5 +++-- arch/arm/kvm/coproc.c | 20 ++++++++++---------- arch/arm/kvm/interrupts.S | 10 ++++++---- 4 files changed, 26 insertions(+), 20 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index f1e86f1eb2e5..b64ac8e4adaa 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -88,9 +88,15 @@ struct kvm_vcpu_fault_info { u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; -typedef struct vfp_hard_struct kvm_cpu_context_t; +struct kvm_cpu_context { + struct vfp_hard_struct vfp; +}; + +typedef struct kvm_cpu_context kvm_cpu_context_t; struct kvm_vcpu_arch { + struct kvm_cpu_context ctxt; + struct kvm_regs regs; int target; /* Processor target */ @@ -111,9 +117,6 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* Floating point registers (VFP and Advanced SIMD/NEON) */ - struct vfp_hard_struct vfp_guest; - /* Host FP context */ kvm_cpu_context_t *host_cpu_context; diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 871b8267d211..346bfca29720 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -173,8 +173,9 @@ int main(void) DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); - DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); - DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); + DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index f3d88dc388bc..1a643f38031d 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -901,7 +901,7 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid], id); } @@ -911,13 +911,13 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id); case KVM_REG_ARM_VFP_FPSCR: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id); case KVM_REG_ARM_VFP_FPINST: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id); case KVM_REG_ARM_VFP_FPINST2: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id); case KVM_REG_ARM_VFP_MVFR0: val = fmrx(MVFR0); return reg_to_user(uaddr, &val, id); @@ -945,7 +945,7 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; - 
return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], + return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid], uaddr, id); } @@ -955,13 +955,13 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: - return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id); case KVM_REG_ARM_VFP_FPSCR: - return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id); case KVM_REG_ARM_VFP_FPINST: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id); case KVM_REG_ARM_VFP_FPINST2: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id); /* These are invariant. */ case KVM_REG_ARM_VFP_MVFR0: if (reg_from_user(&val, uaddr, id)) diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 9d9cb71df449..7bfb28936914 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -172,10 +172,11 @@ __kvm_vcpu_return: #ifdef CONFIG_VFPv3 @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #VCPU_VFP_GUEST + add r7, vcpu, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) store_vfp_state r7 - add r7, vcpu, #VCPU_VFP_HOST + add r7, vcpu, #VCPU_HOST_CTXT ldr r7, [r7] + add r7, r7, #CPU_CTXT_VFP restore_vfp_state r7 after_vfp_restore: @@ -482,10 +483,11 @@ switch_to_guest_vfp: set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_VFP_HOST + add r7, r0, #VCPU_HOST_CTXT ldr r7, [r7] + add r7, r7, #CPU_CTXT_VFP store_vfp_state r7 - add r7, r0, #VCPU_VFP_GUEST + add r7, r0, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) restore_vfp_state r7 pop {r3-r7} -- cgit v1.2.3 From fb32a52a1d4487f3ac5b7ccb659d0beb11ec504f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:26:01 +0000 Subject: ARM: KVM: Move CP15 array into the CPU context structure Continuing our rework of the CPU context, we now move the CP15 array into the CPU context structure. As this causes quite a bit of churn, we introduce the vcpu_cp15() macro that abstract the location of the actual array. This will probably help next time we have to revisit that code. 
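For illustration, a minimal sketch of the new accessor and its use, taken from the hunks below; only the location of the array changes, not its contents:

#define vcpu_cp15(v, r)	(v)->arch.ctxt.cp15[r]

/* e.g. reading the guest SCTLR through the new indirection: */
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);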
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 2 +- arch/arm/include/asm/kvm_host.h | 6 +++--- arch/arm/include/asm/kvm_mmu.h | 2 +- arch/arm/kernel/asm-offsets.c | 2 +- arch/arm/kvm/coproc.c | 32 ++++++++++++++++---------------- arch/arm/kvm/coproc.h | 16 ++++++++-------- arch/arm/kvm/emulate.c | 22 +++++++++++----------- arch/arm/kvm/interrupts_head.S | 3 ++- 8 files changed, 43 insertions(+), 42 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 3095df091ff8..32bb52a489d0 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -192,7 +192,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) { - return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK; + return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK; } static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index b64ac8e4adaa..4203701cc7f4 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -90,6 +90,7 @@ struct kvm_vcpu_fault_info { struct kvm_cpu_context { struct vfp_hard_struct vfp; + u32 cp15[NR_CP15_REGS]; }; typedef struct kvm_cpu_context kvm_cpu_context_t; @@ -102,9 +103,6 @@ struct kvm_vcpu_arch { int target; /* Processor target */ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - /* System control coprocessor (cp15) */ - u32 cp15[NR_CP15_REGS]; - /* The CPU type we expose to the VM */ u32 midr; @@ -161,6 +159,8 @@ struct kvm_vcpu_stat { u64 exits; }; +#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r] + int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index a520b7987a29..da44be9db4fa 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -179,7 +179,7 @@ struct kvm; static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { - return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; + return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101; } static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 346bfca29720..43f8b01072c1 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -172,10 +172,10 @@ int main(void) #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); - DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); + DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 1a643f38031d..e3e86c4cfed2 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -54,8 +54,8 @@ static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, const struct 
coproc_reg *r, u64 val) { - vcpu->arch.cp15[r->reg] = val & 0xffffffff; - vcpu->arch.cp15[r->reg + 1] = val >> 32; + vcpu_cp15(vcpu, r->reg) = val & 0xffffffff; + vcpu_cp15(vcpu, r->reg + 1) = val >> 32; } static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, @@ -63,9 +63,9 @@ static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, { u64 val; - val = vcpu->arch.cp15[r->reg + 1]; + val = vcpu_cp15(vcpu, r->reg + 1); val = val << 32; - val = val | vcpu->arch.cp15[r->reg]; + val = val | vcpu_cp15(vcpu, r->reg); return val; } @@ -104,7 +104,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) * vcpu_id, but we read the 'U' bit from the underlying * hardware directly. */ - vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | + vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | (vcpu->vcpu_id & 3)); } @@ -117,7 +117,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu, if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; + *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR); return true; } @@ -139,7 +139,7 @@ static bool access_l2ctlr(struct kvm_vcpu *vcpu, if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; + *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR); return true; } @@ -156,7 +156,7 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) ncores = min(ncores, 3U); l2ctlr |= (ncores & 3) << 24; - vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; + vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr; } static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) @@ -171,7 +171,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) else actlr &= ~(1U << 6); - vcpu->arch.cp15[c1_ACTLR] = actlr; + vcpu_cp15(vcpu, c1_ACTLR) = actlr; } /* @@ -218,9 +218,9 @@ bool access_vm_reg(struct kvm_vcpu *vcpu, BUG_ON(!p->is_write); - vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); + vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1); if (p->is_64bit) - vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); + vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2); kvm_toggle_cache(vcpu, was_enabled); return true; @@ -1030,7 +1030,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) val = vcpu_cp15_reg64_get(vcpu, r); ret = reg_to_user(uaddr, &val, reg->id); } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); + ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id); } return ret; @@ -1060,7 +1060,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if (!ret) vcpu_cp15_reg64_set(vcpu, r, val); } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); + ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id); } return ret; @@ -1248,7 +1248,7 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) const struct coproc_reg *table; /* Catch someone adding a register without putting in reset entry. */ - memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); + memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15)); /* Generic chip reset first (so target could override). 
*/ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); @@ -1257,6 +1257,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) reset_coproc_regs(vcpu, table, num); for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu->arch.cp15[num] == 0x42424242) - panic("Didn't reset vcpu->arch.cp15[%zi]", num); + if (vcpu_cp15(vcpu, num) == 0x42424242) + panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 88d24a3a9778..27351323871d 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -47,7 +47,7 @@ struct coproc_reg { /* Initialization for vcpu. */ void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); - /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ + /* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */ unsigned long reg; /* Value (usually reset value) */ @@ -104,25 +104,25 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); + vcpu_cp15(vcpu, r->reg) = 0xdecafbad; } static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = r->val; + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); + vcpu_cp15(vcpu, r->reg) = r->val; } static inline void reset_unknown64(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); + BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; - vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; + vcpu_cp15(vcpu, r->reg) = 0xdecafbad; + vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee; } static inline int cmp_reg(const struct coproc_reg *i1, diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index dc99159857b4..ee161b1c66da 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -266,8 +266,8 @@ void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) static u32 exc_vector_base(struct kvm_vcpu *vcpu) { - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - u32 vbar = vcpu->arch.cp15[c12_VBAR]; + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); + u32 vbar = vcpu_cp15(vcpu, c12_VBAR); if (sctlr & SCTLR_V) return 0xffff0000; @@ -282,7 +282,7 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) { unsigned long cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; @@ -357,22 +357,22 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) if (is_pabt) { /* Set IFAR and IFSR */ - vcpu->arch.cp15[c6_IFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); + vcpu_cp15(vcpu, c6_IFAR) = addr; + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); /* Always give debug fault for now - should give guest a clue */ if (is_lpae) - vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; + vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22; else - vcpu->arch.cp15[c5_IFSR] = 2; + vcpu_cp15(vcpu, c5_IFSR) = 2; } else { /* !iabt */ /* Set DFAR and DFSR */ - vcpu->arch.cp15[c6_DFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); + vcpu_cp15(vcpu, c6_DFAR) = addr; + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); /* Always give debug fault for now - should give guest a clue */ if (is_lpae) - 
vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; + vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22; else - vcpu->arch.cp15[c5_DFSR] = 2; + vcpu_cp15(vcpu, c5_DFSR) = 2; } } diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 51a59504bef4..b9d953158877 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -4,7 +4,8 @@ #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) #define VCPU_USR_SP (VCPU_USR_REG(13)) #define VCPU_USR_LR (VCPU_USR_REG(14)) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) +#define VCPU_CP15_BASE (VCPU_GUEST_CTXT + CPU_CTXT_CP15) +#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15_BASE + (_cp15_reg_idx * 4)) /* * Many of these macros need to access the VCPU structure, which is always -- cgit v1.2.3 From c2a8dab507ca6f8990c12372052efc830f51dd3f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:26:01 +0000 Subject: ARM: KVM: Move GP registers into the CPU context structure Continuing our rework of the CPU context, we now move the GP registers into the CPU context structure. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 8 ++++---- arch/arm/include/asm/kvm_host.h | 3 +-- arch/arm/kernel/asm-offsets.c | 18 +++++++++--------- arch/arm/kvm/emulate.c | 12 ++++++------ arch/arm/kvm/guest.c | 4 ++-- arch/arm/kvm/interrupts_head.S | 11 +++++++++++ arch/arm/kvm/reset.c | 2 +- 7 files changed, 34 insertions(+), 24 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 32bb52a489d0..f710616ccadc 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -68,12 +68,12 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) { - return &vcpu->arch.regs.usr_regs.ARM_pc; + return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc; } static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu) { - return &vcpu->arch.regs.usr_regs.ARM_cpsr; + return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr; } static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) @@ -83,13 +83,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) { - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; + unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); } static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) { - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; + unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; return cpsr_mode > USR_MODE;; } diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 4203701cc7f4..02932ba8a653 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -89,6 +89,7 @@ struct kvm_vcpu_fault_info { }; struct kvm_cpu_context { + struct kvm_regs gp_regs; struct vfp_hard_struct vfp; u32 cp15[NR_CP15_REGS]; }; @@ -98,8 +99,6 @@ typedef struct kvm_cpu_context kvm_cpu_context_t; struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; - struct kvm_regs regs; - int target; /* Processor target */ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 43f8b01072c1..2f3e0b064066 100644 --- a/arch/arm/kernel/asm-offsets.c +++ 
b/arch/arm/kernel/asm-offsets.c @@ -176,15 +176,15 @@ int main(void) DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); - DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); - DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); - DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); - DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); - DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); - DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); - DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); - DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); + DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); + DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); + DEFINE(GP_REGS_SVC, offsetof(struct kvm_regs, svc_regs)); + DEFINE(GP_REGS_ABT, offsetof(struct kvm_regs, abt_regs)); + DEFINE(GP_REGS_UND, offsetof(struct kvm_regs, und_regs)); + DEFINE(GP_REGS_IRQ, offsetof(struct kvm_regs, irq_regs)); + DEFINE(GP_REGS_FIQ, offsetof(struct kvm_regs, fiq_regs)); + DEFINE(GP_REGS_PC, offsetof(struct kvm_regs, usr_regs.ARM_pc)); + DEFINE(GP_REGS_CPSR, offsetof(struct kvm_regs, usr_regs.ARM_cpsr)); DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index ee161b1c66da..a494def3f195 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -112,7 +112,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { */ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) { - unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; + unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs; unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; switch (mode) { @@ -147,15 +147,15 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; switch (mode) { case SVC_MODE: - return &vcpu->arch.regs.KVM_ARM_SVC_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr; case ABT_MODE: - return &vcpu->arch.regs.KVM_ARM_ABT_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr; case UND_MODE: - return &vcpu->arch.regs.KVM_ARM_UND_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr; case IRQ_MODE: - return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr; case FIQ_MODE: - return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr; default: BUG(); } diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 5fa69d7bae58..86e26fbd5ba3 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -55,7 +55,7 @@ static u64 core_reg_offset_from_id(u64 id) static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; + struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; u64 off; if (KVM_REG_SIZE(reg->id) != 4) @@ -72,7 +72,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { u32 __user *uaddr 
= (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; + struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; u64 off, val; if (KVM_REG_SIZE(reg->id) != 4) diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index b9d953158877..e0943cb80ab3 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -1,6 +1,17 @@ #include #include +/* Compat macro, until we get rid of this file entierely */ +#define VCPU_GP_REGS (VCPU_GUEST_CTXT + CPU_CTXT_GP_REGS) +#define VCPU_USR_REGS (VCPU_GP_REGS + GP_REGS_USR) +#define VCPU_SVC_REGS (VCPU_GP_REGS + GP_REGS_SVC) +#define VCPU_ABT_REGS (VCPU_GP_REGS + GP_REGS_ABT) +#define VCPU_UND_REGS (VCPU_GP_REGS + GP_REGS_UND) +#define VCPU_IRQ_REGS (VCPU_GP_REGS + GP_REGS_IRQ) +#define VCPU_FIQ_REGS (VCPU_GP_REGS + GP_REGS_FIQ) +#define VCPU_PC (VCPU_GP_REGS + GP_REGS_PC) +#define VCPU_CPSR (VCPU_GP_REGS + GP_REGS_CPSR) + #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) #define VCPU_USR_SP (VCPU_USR_REG(13)) #define VCPU_USR_LR (VCPU_USR_REG(14)) diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index eeb85858d6bb..0048b5a62a50 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -71,7 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } /* Reset core registers */ - memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); + memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs)); /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); -- cgit v1.2.3 From b57cd6f6407d420d522ab71b9c0dd11993e49ba1 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 12:10:58 +0000 Subject: ARM: KVM: Change kvm_call_hyp return type to unsigned long Having u64 as the kvm_call_hyp return type is problematic, as it forces all kind of tricks for the return values from HYP to be promoted to 64bit (LE has the LSB in r0, and BE has them in r1). Since the only user of the return value is perfectly happy with a 32bit value, let's make kvm_call_hyp return an unsigned long, which is 32bit on ARM. This solves yet another headache. 
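For illustration, a minimal sketch of the resulting interface; the __kvm_vcpu_run caller is shown only as an assumed example of the single user mentioned above:

unsigned long kvm_call_hyp(void *hypfn, ...);

/* the exit reason fits in 32 bits, so no 64bit promotion tricks are needed */
unsigned long exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);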
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 2 +- arch/arm/kvm/interrupts.S | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 02932ba8a653..c62d71751f7a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -165,7 +165,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -u64 kvm_call_hyp(void *hypfn, ...); +unsigned long kvm_call_hyp(void *hypfn, ...); void force_vm_exit(const cpumask_t *mask); #define KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 7bfb28936914..01eb169f38f6 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -207,20 +207,14 @@ after_vfp_restore: restore_host_regs clrex @ Clear exclusive monitor -#ifndef CONFIG_CPU_ENDIAN_BE8 mov r0, r1 @ Return the return code - mov r1, #0 @ Clear upper bits in return value -#else - @ r1 already has return code - mov r0, #0 @ Clear upper bits in return value -#endif /* CONFIG_CPU_ENDIAN_BE8 */ bx lr @ return to IOCTL /******************************************************************** * Call function in Hyp mode * * - * u64 kvm_call_hyp(void *hypfn, ...); + * unsigned long kvm_call_hyp(void *hypfn, ...); * * This is not really a variadic function in the classic C-way and care must * be taken when calling this to ensure parameters are passed in registers @@ -231,7 +225,7 @@ after_vfp_restore: * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the * function pointer can be passed). The function being called must be mapped * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. + * passed in r0 (strictly 32bit). * * A function pointer with a value of 0xffffffff has a special meaning, * and is used to implement __hyp_get_vectors in the same way as in -- cgit v1.2.3 From d4c7688c51e57be20ca5f3dffa4c8771888a42fc Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Feb 2016 19:56:31 +0000 Subject: ARM: KVM: Switch to C-based stage2 init As we now have hooks to setup VTCR from C code, let's drop the original VTCR setup and reimplement it as part of the HYP code. 
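For illustration, a minimal sketch mirroring the code added below: the kernel-side stub simply traps into HYP, and the HYP-side C function rebuilds VTCR from HTCR plus the KVM_VTCR_* fields, replacing the assembly previously done in __do_hyp_init:

static inline void __cpu_init_stage2(void)
{
	kvm_call_hyp(__init_stage2_translation);
}

void __hyp_text __init_stage2_translation(void)
{
	u64 val = read_sysreg(VTCR) & ~VTCR_MASK;

	val |= read_sysreg(HTCR) & VTCR_HTCR_SH;	/* reuse the shareability bits */
	val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S;
	write_sysreg(val, VTCR);
}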
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 2 ++ arch/arm/include/asm/kvm_host.h | 1 + arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 2 ++ arch/arm/kvm/hyp/s2-setup.c | 34 ++++++++++++++++++++++++++++++++++ arch/arm/kvm/init.S | 8 -------- 6 files changed, 40 insertions(+), 8 deletions(-) create mode 100644 arch/arm/kvm/hyp/s2-setup.c (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 4841225d10ea..3283a2f63254 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -98,6 +98,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); + +extern void __init_stage2_translation(void); #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c62d71751f7a..0fe41aaf2171 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -224,6 +224,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, static inline void __cpu_init_stage2(void) { + kvm_call_hyp(__init_stage2_translation); } static inline int kvm_arch_dev_ioctl_check_extension(long ext) diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index a7d3a7e0b702..7152369504a6 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o +obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 8b9c2eb5a9dc..ff6de6a3af2d 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -71,6 +71,8 @@ #define HCPTR __ACCESS_CP15(c1, 4, c1, 2) #define HSTR __ACCESS_CP15(c1, 4, c1, 3) #define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define HTCR __ACCESS_CP15(c2, 4, c0, 2) +#define VTCR __ACCESS_CP15(c2, 4, c1, 2) #define DACR __ACCESS_CP15(c3, 0, c0, 0) #define DFSR __ACCESS_CP15(c5, 0, c0, 0) #define IFSR __ACCESS_CP15(c5, 0, c0, 1) diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c new file mode 100644 index 000000000000..f5f49c53be28 --- /dev/null +++ b/arch/arm/kvm/hyp/s2-setup.c @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include "hyp.h" + +void __hyp_text __init_stage2_translation(void) +{ + u64 val; + + val = read_sysreg(VTCR) & ~VTCR_MASK; + + val |= read_sysreg(HTCR) & VTCR_HTCR_SH; + val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S; + + write_sysreg(val, VTCR); +} diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 3988e72d16ff..1f9ae17476f9 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -84,14 +84,6 @@ __do_hyp_init: orr r0, r0, r1 mcr p15, 4, r0, c2, c0, 2 @ HTCR - mrc p15, 4, r1, c2, c1, 2 @ VTCR - ldr r2, =VTCR_MASK - bic r1, r1, r2 - bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits - orr r1, r0, r1 - orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S) - mcr p15, 4, r1, c2, c1, 2 @ VTCR - @ Use the same memory attributes for hyp. accesses as the kernel @ (copy MAIRx ro HMAIRx). mrc p15, 0, r0, c10, c2, 0 -- cgit v1.2.3 From 4448932fb09a44d73f820afd8fa145a24b3b3995 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 22:53:33 +0000 Subject: ARM: KVM: Turn CP15 defines to an enum Just like on arm64, having the CP15 registers expressed as a set of #defines has been very conflict-prone. Let's turn it into an enum, which should make it more manageable. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 33 --------------------------------- arch/arm/include/asm/kvm_host.h | 39 +++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/guest.c | 1 - 3 files changed, 39 insertions(+), 34 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 3283a2f63254..083825f12c93 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -21,39 +21,6 @@ #include -/* 0 is reserved as an invalid value. */ -#define c0_MPIDR 1 /* MultiProcessor ID Register */ -#define c0_CSSELR 2 /* Cache Size Selection Register */ -#define c1_SCTLR 3 /* System Control Register */ -#define c1_ACTLR 4 /* Auxiliary Control Register */ -#define c1_CPACR 5 /* Coprocessor Access Control */ -#define c2_TTBR0 6 /* Translation Table Base Register 0 */ -#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */ -#define c2_TTBR1 8 /* Translation Table Base Register 1 */ -#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */ -#define c2_TTBCR 10 /* Translation Table Base Control R. 
*/ -#define c3_DACR 11 /* Domain Access Control Register */ -#define c5_DFSR 12 /* Data Fault Status Register */ -#define c5_IFSR 13 /* Instruction Fault Status Register */ -#define c5_ADFSR 14 /* Auxilary Data Fault Status R */ -#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ -#define c6_DFAR 16 /* Data Fault Address Register */ -#define c6_IFAR 17 /* Instruction Fault Address Register */ -#define c7_PAR 18 /* Physical Address Register */ -#define c7_PAR_high 19 /* PAR top 32 bits */ -#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */ -#define c10_PRRR 21 /* Primary Region Remap Register */ -#define c10_NMRR 22 /* Normal Memory Remap Register */ -#define c12_VBAR 23 /* Vector Base Address Register */ -#define c13_CID 24 /* Context ID Register */ -#define c13_TID_URW 25 /* Thread ID, User R/W */ -#define c13_TID_URO 26 /* Thread ID, User R/O */ -#define c13_TID_PRIV 27 /* Thread ID, Privileged */ -#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */ -#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */ -#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */ -#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */ - #define ARM_EXCEPTION_RESET 0 #define ARM_EXCEPTION_UNDEFINED 1 #define ARM_EXCEPTION_SOFTWARE 2 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 0fe41aaf2171..daf6a71071da 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -88,6 +88,45 @@ struct kvm_vcpu_fault_info { u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; +/* + * 0 is reserved as an invalid value. + * Order should be kept in sync with the save/restore code. + */ +enum vcpu_sysreg { + __INVALID_SYSREG__, + c0_MPIDR, /* MultiProcessor ID Register */ + c0_CSSELR, /* Cache Size Selection Register */ + c1_SCTLR, /* System Control Register */ + c1_ACTLR, /* Auxiliary Control Register */ + c1_CPACR, /* Coprocessor Access Control */ + c2_TTBR0, /* Translation Table Base Register 0 */ + c2_TTBR0_high, /* TTBR0 top 32 bits */ + c2_TTBR1, /* Translation Table Base Register 1 */ + c2_TTBR1_high, /* TTBR1 top 32 bits */ + c2_TTBCR, /* Translation Table Base Control R. */ + c3_DACR, /* Domain Access Control Register */ + c5_DFSR, /* Data Fault Status Register */ + c5_IFSR, /* Instruction Fault Status Register */ + c5_ADFSR, /* Auxilary Data Fault Status R */ + c5_AIFSR, /* Auxilary Instrunction Fault Status R */ + c6_DFAR, /* Data Fault Address Register */ + c6_IFAR, /* Instruction Fault Address Register */ + c7_PAR, /* Physical Address Register */ + c7_PAR_high, /* PAR top 32 bits */ + c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */ + c10_PRRR, /* Primary Region Remap Register */ + c10_NMRR, /* Normal Memory Remap Register */ + c12_VBAR, /* Vector Base Address Register */ + c13_CID, /* Context ID Register */ + c13_TID_URW, /* Thread ID, User R/W */ + c13_TID_URO, /* Thread ID, User R/O */ + c13_TID_PRIV, /* Thread ID, Privileged */ + c14_CNTKCTL, /* Timer Control Register (PL1) */ + c10_AMAIR0, /* Auxilary Memory Attribute Indirection Reg0 */ + c10_AMAIR1, /* Auxilary Memory Attribute Indirection Reg1 */ + NR_CP15_REGS /* Number of regs (incl. 
invalid) */ +}; + struct kvm_cpu_context { struct kvm_regs gp_regs; struct vfp_hard_struct vfp; diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 86e26fbd5ba3..12cbb6824443 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From 311b5b363cd28aa778de083178f147f32622e331 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:57:36 +0000 Subject: ARM: KVM: Remove unused hyp_pc field This field was never populated, and the panic code already does something similar. Delete the related code. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 ----- arch/arm/include/asm/kvm_host.h | 1 - arch/arm/kernel/asm-offsets.c | 1 - arch/arm/kvm/handle_exit.c | 5 ----- 4 files changed, 12 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index f710616ccadc..8a8c6ded9ca7 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -108,11 +108,6 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; } -static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.fault.hyp_pc; -} - static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index daf6a71071da..19e9aba85463 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -85,7 +85,6 @@ struct kvm_vcpu_fault_info { u32 hsr; /* Hyp Syndrome Register */ u32 hxfar; /* Hyp Data/Inst. Fault Address Register */ u32 hpfar; /* Hyp IPA Fault Address Register */ - u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; /* diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 1f24c32e11fe..27d05813ff09 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -175,7 +175,6 @@ int main(void) DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); - DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); #endif BLANK(); #ifdef CONFIG_VDSO diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 3ede90d8b20b..5377d7539a40 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -147,11 +147,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (exception_index) { case ARM_EXCEPTION_IRQ: return 1; - case ARM_EXCEPTION_UNDEFINED: - kvm_err("Undefined exception in Hyp mode at: %#08lx\n", - kvm_vcpu_get_hyp_pc(vcpu)); - BUG(); - panic("KVM: Hypervisor undefined exception!\n"); case ARM_EXCEPTION_DATA_ABORT: case ARM_EXCEPTION_PREF_ABORT: case ARM_EXCEPTION_HVC: -- cgit v1.2.3 From 402f352876ba0df574533e59d72fc3e9871f791a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 22:55:10 +0000 Subject: ARM: KVM: Remove __kvm_hyp_exit/__kvm_hyp_exit_end I have no idea what these were for - probably a leftover from an early implementation. Good bye! 
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 083825f12c93..15d58b42d5a1 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -55,9 +55,6 @@ struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; -extern char __kvm_hyp_exit[]; -extern char __kvm_hyp_exit_end[]; - extern char __kvm_hyp_vector[]; extern void __kvm_flush_vm_context(void); -- cgit v1.2.3 From 57c841f131ef295b583365d2fddd6b0d16e82c10 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 29 Jan 2016 15:01:28 +0000 Subject: arm/arm64: KVM: Handle out-of-RAM cache maintenance as a NOP So far, our handling of cache maintenance by VA has been pretty simple: Either the access is in the guest RAM and generates a S2 fault, which results in the page being mapped RW, or we go down the io_mem_abort() path, and nuke the guest. The first one is fine, but the second one is extremely weird. Treating the CM as an I/O is wrong, and nothing in the ARM ARM indicates that we should generate a fault for something that cannot end-up in the cache anyway (even if the guest maps it, it will keep on faulting at stage-2 for emulation). So let's just skip this instruction, and let the guest get away with it. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ arch/arm/kvm/mmu.c | 16 ++++++++++++++++ arch/arm64/include/asm/kvm_emulate.h | 5 +++++ 3 files changed, 26 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 8a8c6ded9ca7..ee5328fc4b06 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -138,6 +138,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; } +static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM); +} + /* Get Access Size from a data abort */ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index aba61fd3697a..c3eb10ea0971 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1430,6 +1430,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) goto out_unlock; } + /* + * Check for a cache maintenance operation. Since we + * ended-up here, we know it is outside of any memory + * slot. But we can't find out if that is for a device, + * or if the guest is just being stupid. The only thing + * we know for sure is that this range cannot be cached. + * + * So let's assume that the guest is just being + * cautious, and skip the instruction. 
+ */ + if (kvm_vcpu_dabt_is_cm(vcpu)) { + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + ret = 1; + goto out_unlock; + } + /* * The IPA is reported as [MAX:12], so we need to * complement it with the bottom 12 bits from the diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 779a5872a2c5..4df8e7a58c6b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -189,6 +189,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } +static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); +} + static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) { return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); -- cgit v1.2.3 From 82deae0fc8ba256c1061dd4de42f0ef6cb9f954f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 9 Jun 2014 19:47:09 +0100 Subject: arm/arm64: Add new is_kernel_in_hyp_mode predicate With ARMv8.1 VHE extension, it will be possible to run the kernel at EL2 (aka HYP mode). In order for the kernel to easily find out where it is running, add a new predicate that returns whether or not the kernel is in HYP mode. For completeness, the 32bit code also get such a predicate (always returning false) so that code common to both architecture (timers, KVM) can use it transparently. Acked-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier --- arch/arm/include/asm/virt.h | 5 +++++ arch/arm64/include/asm/virt.h | 10 ++++++++++ 2 files changed, 15 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 5fdbfea6defb..d4ceaf5f299b 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -75,6 +75,11 @@ static inline bool is_hyp_mode_mismatched(void) return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } +static inline bool is_kernel_in_hyp_mode(void) +{ + return false; +} + /* The section containing the hypervisor text */ extern char __hyp_text_start[]; extern char __hyp_text_end[]; diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 7a5df5252dd7..9f22dd607958 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -23,6 +23,8 @@ #ifndef __ASSEMBLY__ +#include + /* * __boot_cpu_mode records what mode CPUs were booted in. * A correctly-implemented bootloader must start all CPUs in the same mode: @@ -50,6 +52,14 @@ static inline bool is_hyp_mode_mismatched(void) return __boot_cpu_mode[0] != __boot_cpu_mode[1]; } +static inline bool is_kernel_in_hyp_mode(void) +{ + u64 el; + + asm("mrs %0, CurrentEL" : "=r" (el)); + return el == CurrentEL_EL2; +} + /* The section containing the hypervisor text */ extern char __hyp_text_start[]; extern char __hyp_text_end[]; -- cgit v1.2.3 From f1c9cad7c508f59fedd9f77eb36e5859e11ce5ab Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:31:37 +0000 Subject: ARM: KVM: Move kvm/hyp/hyp.h to include/asm/kvm_hyp.h In order to be able to use the code located in virt/kvm/arm/hyp, we need to make the global hyp.h file accessible from include/asm, similar to what we did for arm64. 
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_hyp.h | 130 +++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/banked-sr.c | 2 +- arch/arm/kvm/hyp/cp15-sr.c | 2 +- arch/arm/kvm/hyp/hyp.h | 130 ----------------------------------------- arch/arm/kvm/hyp/s2-setup.c | 3 +- arch/arm/kvm/hyp/switch.c | 2 +- arch/arm/kvm/hyp/timer-sr.c | 4 +- arch/arm/kvm/hyp/tlb.c | 2 +- arch/arm/kvm/hyp/vgic-v2-sr.c | 4 +- 9 files changed, 137 insertions(+), 142 deletions(-) create mode 100644 arch/arm/include/asm/kvm_hyp.h delete mode 100644 arch/arm/kvm/hyp/hyp.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h new file mode 100644 index 000000000000..ff6de6a3af2d --- /dev/null +++ b/arch/arm/include/asm/kvm_hyp.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM_KVM_HYP_H__ +#define __ARM_KVM_HYP_H__ + +#include +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +#define kern_hyp_va(v) (v) +#define hyp_kern_va(v) (v) + +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ + "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 +#define __ACCESS_CP15_64(Op1, CRm) \ + "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 +#define __ACCESS_VFP(CRn) \ + "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 + +#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) +#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) + +#define __read_sysreg(r, w, c, t) ({ \ + t __val; \ + asm volatile(r " " c : "=r" (__val)); \ + __val; \ +}) +#define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) + +#define write_special(v, r) \ + asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) +#define read_special(r) ({ \ + u32 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + +#define TTBR0 __ACCESS_CP15_64(0, c2) +#define TTBR1 __ACCESS_CP15_64(1, c2) +#define VTTBR __ACCESS_CP15_64(6, c2) +#define PAR __ACCESS_CP15_64(0, c7) +#define CNTV_CVAL __ACCESS_CP15_64(3, c14) +#define CNTVOFF __ACCESS_CP15_64(4, c14) + +#define MIDR __ACCESS_CP15(c0, 0, c0, 0) +#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) +#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) +#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) +#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) +#define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define HCR __ACCESS_CP15(c1, 4, c1, 0) +#define HDCR __ACCESS_CP15(c1, 4, c1, 1) +#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) +#define HSTR __ACCESS_CP15(c1, 4, c1, 3) +#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define HTCR __ACCESS_CP15(c2, 4, c0, 2) +#define VTCR __ACCESS_CP15(c2, 4, c1, 2) +#define DACR __ACCESS_CP15(c3, 0, c0, 0) +#define DFSR __ACCESS_CP15(c5, 0, c0, 0) +#define IFSR __ACCESS_CP15(c5, 0, c0, 1) +#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) +#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) +#define HSR __ACCESS_CP15(c5, 4, c2, 0) +#define DFAR __ACCESS_CP15(c6, 0, c0, 0) +#define IFAR __ACCESS_CP15(c6, 0, c0, 2) +#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) +#define HIFAR __ACCESS_CP15(c6, 4, c0, 2) +#define HPFAR __ACCESS_CP15(c6, 4, c0, 4) +#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) +#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) +#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) +#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) +#define PRRR __ACCESS_CP15(c10, 0, c2, 0) +#define NMRR __ACCESS_CP15(c10, 0, c2, 1) +#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) +#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) +#define VBAR __ACCESS_CP15(c12, 0, c0, 0) +#define CID __ACCESS_CP15(c13, 0, c0, 1) +#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) +#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) +#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) +#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) +#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) +#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) +#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) + +#define VFP_FPEXC __ACCESS_VFP(FPEXC) + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); + +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + +void __sysreg_save_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_state(struct kvm_cpu_context *ctxt); + +void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); +void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); +static inline bool __vfp_enabled(void) +{ + return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); +} + +void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); +void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); + +int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, + struct kvm_cpu_context *host); +int asmlinkage __hyp_do_panic(const char *, int, u32); + +#endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c index d02dc804f611..111bda8cdebd 100644 --- a/arch/arm/kvm/hyp/banked-sr.c +++ b/arch/arm/kvm/hyp/banked-sr.c @@ -18,7 +18,7 @@ * along with this program. If not, see . 
*/ -#include "hyp.h" +#include __asm__(".arch_extension virt"); diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c index 732abbc34bd0..c4782812714c 100644 --- a/arch/arm/kvm/hyp/cp15-sr.c +++ b/arch/arm/kvm/hyp/cp15-sr.c @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -#include "hyp.h" +#include static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) { diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h deleted file mode 100644 index ff6de6a3af2d..000000000000 --- a/arch/arm/kvm/hyp/hyp.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM_KVM_HYP_H__ -#define __ARM_KVM_HYP_H__ - -#include -#include -#include -#include - -#define __hyp_text __section(.hyp.text) notrace - -#define kern_hyp_va(v) (v) -#define hyp_kern_va(v) (v) - -#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ - "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 -#define __ACCESS_CP15_64(Op1, CRm) \ - "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 -#define __ACCESS_VFP(CRn) \ - "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 - -#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) -#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) - -#define __read_sysreg(r, w, c, t) ({ \ - t __val; \ - asm volatile(r " " c : "=r" (__val)); \ - __val; \ -}) -#define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) - -#define write_special(v, r) \ - asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) -#define read_special(r) ({ \ - u32 __val; \ - asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ - __val; \ -}) - -#define TTBR0 __ACCESS_CP15_64(0, c2) -#define TTBR1 __ACCESS_CP15_64(1, c2) -#define VTTBR __ACCESS_CP15_64(6, c2) -#define PAR __ACCESS_CP15_64(0, c7) -#define CNTV_CVAL __ACCESS_CP15_64(3, c14) -#define CNTVOFF __ACCESS_CP15_64(4, c14) - -#define MIDR __ACCESS_CP15(c0, 0, c0, 0) -#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) -#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) -#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) -#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) -#define CPACR __ACCESS_CP15(c1, 0, c0, 2) -#define HCR __ACCESS_CP15(c1, 4, c1, 0) -#define HDCR __ACCESS_CP15(c1, 4, c1, 1) -#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) -#define HSTR __ACCESS_CP15(c1, 4, c1, 3) -#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) -#define HTCR __ACCESS_CP15(c2, 4, c0, 2) -#define VTCR __ACCESS_CP15(c2, 4, c1, 2) -#define DACR __ACCESS_CP15(c3, 0, c0, 0) -#define DFSR __ACCESS_CP15(c5, 0, c0, 0) -#define IFSR __ACCESS_CP15(c5, 0, c0, 1) -#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) -#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) -#define HSR __ACCESS_CP15(c5, 4, c2, 0) -#define DFAR __ACCESS_CP15(c6, 0, c0, 0) -#define IFAR __ACCESS_CP15(c6, 0, c0, 2) -#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) -#define HIFAR __ACCESS_CP15(c6, 4, c0, 2) -#define HPFAR __ACCESS_CP15(c6, 4, c0, 4) -#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) -#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) -#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) -#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) -#define PRRR __ACCESS_CP15(c10, 0, c2, 0) -#define NMRR __ACCESS_CP15(c10, 0, c2, 1) -#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) -#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) -#define VBAR __ACCESS_CP15(c12, 0, c0, 0) -#define CID __ACCESS_CP15(c13, 0, c0, 1) -#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) -#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) -#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) -#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) -#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) -#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) -#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) - -#define VFP_FPEXC __ACCESS_VFP(FPEXC) - -void __timer_save_state(struct kvm_vcpu *vcpu); -void __timer_restore_state(struct kvm_vcpu *vcpu); - -void __vgic_v2_save_state(struct kvm_vcpu *vcpu); -void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); - -void __sysreg_save_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_state(struct kvm_cpu_context *ctxt); - -void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); -void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); -static inline bool __vfp_enabled(void) -{ - return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); -} - -void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); -void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); - -int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, - struct kvm_cpu_context *host); -int asmlinkage __hyp_do_panic(const char *, int, u32); - -#endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c index f5f49c53be28..7be39af2ed6c 100644 --- a/arch/arm/kvm/hyp/s2-setup.c +++ b/arch/arm/kvm/hyp/s2-setup.c @@ -18,8 +18,7 @@ #include #include #include - -#include "hyp.h" +#include void __hyp_text __init_stage2_translation(void) { diff --git a/arch/arm/kvm/hyp/switch.c 
b/arch/arm/kvm/hyp/switch.c index f11ede159080..b13caa90cd44 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -16,7 +16,7 @@ */ #include -#include "hyp.h" +#include __asm__(".arch_extension virt"); diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c index d7535fd0784e..2bb0c926e01c 100644 --- a/arch/arm/kvm/hyp/timer-sr.c +++ b/arch/arm/kvm/hyp/timer-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index 82958b8f6a74..a2636001e616 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -#include "hyp.h" +#include /** * Flush per-VMID TLBs diff --git a/arch/arm/kvm/hyp/vgic-v2-sr.c b/arch/arm/kvm/hyp/vgic-v2-sr.c index e71761238cfc..9514a7d90d71 100644 --- a/arch/arm/kvm/hyp/vgic-v2-sr.c +++ b/arch/arm/kvm/hyp/vgic-v2-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 68130cb5db09cb8a285a59f70ac72d2bfa8685fd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:48:42 +0000 Subject: ARM: KVM: Use common version of timer-sr.c Using the common HYP timer code is a bit more tricky, since we use system register names. Nothing a set of macros cannot work around... Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_hyp.h | 9 ++++++ arch/arm/kvm/hyp/Makefile | 2 +- arch/arm/kvm/hyp/timer-sr.c | 69 ------------------------------------------ 3 files changed, 10 insertions(+), 70 deletions(-) delete mode 100644 arch/arm/kvm/hyp/timer-sr.c (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index ff6de6a3af2d..f0e860761380 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h @@ -104,6 +104,15 @@ #define VFP_FPEXC __ACCESS_VFP(FPEXC) +/* AArch64 compatibility macros, only for the timer so far */ +#define read_sysreg_el0(r) read_sysreg(r##_el0) +#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0) + +#define cntv_ctl_el0 CNTV_CTL +#define cntv_cval_el0 CNTV_CVAL +#define cntvoff_el2 CNTVOFF +#define cnthctl_el2 CNTHCTL + void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 8f735d970ef1..8dfa5f7f9290 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -5,10 +5,10 @@ KVM=../../../../virt/kvm obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c deleted file mode 100644 index 2bb0c926e01c..000000000000 --- a/arch/arm/kvm/hyp/timer-sr.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - if (kvm->arch.timer.enabled) { - timer->cntv_ctl = read_sysreg(CNTV_CTL); - timer->cntv_cval = read_sysreg(CNTV_CVAL); - } - - /* Disable the virtual timer */ - write_sysreg(0, CNTV_CTL); - - /* Allow physical timer/counter access for the host */ - val = read_sysreg(CNTHCTL); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; - write_sysreg(val, CNTHCTL); - - /* Clear cntvoff for the host */ - write_sysreg(0, CNTVOFF); -} - -void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - /* - * Disallow physical timer access for the guest - * Physical counter access is allowed - */ - val = read_sysreg(CNTHCTL); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, CNTHCTL); - - if (kvm->arch.timer.enabled) { - write_sysreg(kvm->arch.timer.cntvoff, CNTVOFF); - write_sysreg(timer->cntv_cval, CNTV_CVAL); - isb(); - write_sysreg(timer->cntv_ctl, CNTV_CTL); - } -} -- cgit v1.2.3 From bb0c70bcca6ba3c84afc2da7426f3b923bbe6825 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 11 Jan 2016 21:35:32 +0800 Subject: arm64: KVM: Add a new vcpu device control group for PMUv3 To configure the virtual PMUv3 overflow interrupt number, we use the vcpu kvm_device ioctl, encapsulating the KVM_ARM_VCPU_PMU_V3_IRQ attribute within the KVM_ARM_VCPU_PMU_V3_CTRL group. After configuring the PMUv3, call the vcpu ioctl with attribute KVM_ARM_VCPU_PMU_V3_INIT to initialize the PMUv3. Signed-off-by: Shannon Zhao Acked-by: Peter Maydell Reviewed-by: Andrew Jones Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/devices/vcpu.txt | 25 +++++++ arch/arm/include/asm/kvm_host.h | 15 ++++ arch/arm/kvm/arm.c | 3 + arch/arm64/include/asm/kvm_host.h | 6 ++ arch/arm64/include/uapi/asm/kvm.h | 5 ++ arch/arm64/kvm/guest.c | 51 +++++++++++++ include/kvm/arm_pmu.h | 23 ++++++ virt/kvm/arm/pmu.c | 112 +++++++++++++++++++++++++++++ 8 files changed, 240 insertions(+) (limited to 'arch/arm/include') diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt index 3cc59c5e44ce..c04165868faf 100644 --- a/Documentation/virtual/kvm/devices/vcpu.txt +++ b/Documentation/virtual/kvm/devices/vcpu.txt @@ -6,3 +6,28 @@ KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct kvm_device_attr as other devices, but targets VCPU-wide settings and controls. The groups and attributes per virtual cpu, if any, are architecture specific. + +1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL +Architectures: ARM64 + +1.1. 
ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ +Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a + pointer to an int +Returns: -EBUSY: The PMU overflow interrupt is already set + -ENXIO: The overflow interrupt not set when attempting to get it + -ENODEV: PMUv3 not supported + -EINVAL: Invalid PMU overflow interrupt number supplied + +A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt +number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt +type must be same for each vcpu. As a PPI, the interrupt number is the same for +all vcpus, while as an SPI it must be a separate number per vcpu. + +1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT +Parameters: no additional parameter in kvm_device_attr.addr +Returns: -ENODEV: PMUv3 not supported + -ENXIO: PMUv3 not properly configured as required prior to calling this + attribute + -EBUSY: PMUv3 already initialized + +Request the initialization of the PMUv3. diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 19e9aba85463..385070180c25 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -287,5 +287,20 @@ static inline void kvm_arm_init_debug(void) {} static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {} +static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 166232356291..75c7fed5d14c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -835,6 +835,7 @@ static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr); break; } @@ -848,6 +849,7 @@ static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr); break; } @@ -861,6 +863,7 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr); break; } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b02ef0828f22..71fa6fe9d54a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -361,6 +361,12 @@ void kvm_arm_init_debug(void); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); +int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); /* #define kvm_call_hyp(f, ...) 
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 6aedbe314432..f209ea151dca 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -205,6 +205,11 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_VGIC_GRP_CTRL 4 #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +/* Device Control API on vcpu fd */ +#define KVM_ARM_VCPU_PMU_V3_CTRL 0 +#define KVM_ARM_VCPU_PMU_V3_IRQ 0 +#define KVM_ARM_VCPU_PMU_V3_INIT 1 + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index fcb778899a38..dbe45c364bbb 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -380,3 +380,54 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, } return 0; } + +int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_set_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} + +int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_get_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} + +int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_has_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index ee62497d46f7..fe389ac31489 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -39,6 +39,7 @@ struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); @@ -54,11 +55,18 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); bool kvm_arm_support_pmu_v3(void); +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); #else struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) (false) +#define kvm_arm_pmu_irq_initialized(v) (false) static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) { @@ -82,6 +90,21 @@ static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} static inline bool kvm_arm_support_pmu_v3(void) { return false; } +static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} #endif #endif diff --git 
a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 6e28f4f86cc6..b5754c6c5508 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -415,3 +416,114 @@ bool kvm_arm_support_pmu_v3(void) */ return (perf_num_counters() > 0); } + +static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) +{ + if (!kvm_arm_support_pmu_v3()) + return -ENODEV; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) || + !kvm_arm_pmu_irq_initialized(vcpu)) + return -ENXIO; + + if (kvm_arm_pmu_v3_ready(vcpu)) + return -EBUSY; + + kvm_pmu_vcpu_reset(vcpu); + vcpu->arch.pmu.ready = true; + + return 0; +} + +static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi) +{ + int i; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!kvm_arm_pmu_irq_initialized(vcpu)) + continue; + + if (is_ppi) { + if (vcpu->arch.pmu.irq_num != irq) + return false; + } else { + if (vcpu->arch.pmu.irq_num == irq) + return false; + } + } + + return true; +} + + +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: { + int __user *uaddr = (int __user *)(long)attr->addr; + int irq; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return -ENODEV; + + if (get_user(irq, uaddr)) + return -EFAULT; + + /* + * The PMU overflow interrupt could be a PPI or SPI, but for one + * VM the interrupt type must be same for each vcpu. As a PPI, + * the interrupt number is the same for all vcpus, while as an + * SPI it must be a separate number per vcpu. + */ + if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs || + !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS)) + return -EINVAL; + + if (kvm_arm_pmu_irq_initialized(vcpu)) + return -EBUSY; + + kvm_debug("Set kvm ARM PMU irq: %d\n", irq); + vcpu->arch.pmu.irq_num = irq; + return 0; + } + case KVM_ARM_VCPU_PMU_V3_INIT: + return kvm_arm_pmu_v3_init(vcpu); + } + + return -ENXIO; +} + +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: { + int __user *uaddr = (int __user *)(long)attr->addr; + int irq; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return -ENODEV; + + if (!kvm_arm_pmu_irq_initialized(vcpu)) + return -ENXIO; + + irq = vcpu->arch.pmu.irq_num; + return put_user(irq, uaddr); + } + } + + return -ENXIO; +} + +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: + case KVM_ARM_VCPU_PMU_V3_INIT: + if (kvm_arm_support_pmu_v3() && + test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return 0; + } + + return -ENXIO; +} -- cgit v1.2.3 From e91fb3bd757569aca48785358a4adbf41334d382 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 23 Feb 2016 15:39:38 +0100 Subject: ARM: at91: avoid defining CONFIG_* symbols in source code In an invalid randconfig build (fixed by another patch), I ran across this warning: arch/arm/include/debug/at91.S:18:0: error: "CONFIG_DEBUG_UART_VIRT" redefined [-Werror] #define CONFIG_DEBUG_UART_VIRT AT91_IO_P2V(CONFIG_DEBUG_UART_PHYS) As Russell pointed out, we should never #define a macro starting with CONFIG_ in a source file, as that is rather confusing. This renames the macro to avoid the symbol clash. 
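The clash is easy to reproduce outside the kernel. A minimal, self-contained C sketch (not part of the patch; the addresses and the P2V mapping are illustrative only, and the CONFIG_ values would normally come from the generated autoconf header rather than being written by hand):

/* Provided by Kconfig in configurations where DEBUG_UART_VIRT is set: */
#define CONFIG_DEBUG_UART_PHYS 0xfffff200
#define CONFIG_DEBUG_UART_VIRT 0xfefff200

/* Illustrative physical-to-virtual mapping, standing in for AT91_IO_P2V(): */
#define AT91_IO_P2V(x) ((x) - 0x01000000 + 0xfe000000)

/* The old code redefined the Kconfig-owned name in a source file, which is
 * what produced the "CONFIG_DEBUG_UART_VIRT redefined" diagnostic under -Werror:
 *
 *   #define CONFIG_DEBUG_UART_VIRT AT91_IO_P2V(CONFIG_DEBUG_UART_PHYS)
 */

/* The renamed macro uses a private prefix, so it can never collide with a
 * symbol generated from Kconfig: */
#define AT91_DEBUG_UART_VIRT AT91_IO_P2V(CONFIG_DEBUG_UART_PHYS)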
Signed-off-by: Arnd Bergmann Suggested-by: Russell King Acked-by: Alexandre Belloni Acked-by: Nicolas Ferre --- arch/arm/include/debug/at91.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index 43243be94cfc..d4ae3b8e2426 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S @@ -15,7 +15,7 @@ #define AT91_IO_P2V(x) (x) #endif -#define CONFIG_DEBUG_UART_VIRT AT91_IO_P2V(CONFIG_DEBUG_UART_PHYS) +#define AT91_DEBUG_UART_VIRT AT91_IO_P2V(CONFIG_DEBUG_UART_PHYS) #define AT91_DBGU_SR (0x14) /* Status Register */ #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ @@ -24,7 +24,7 @@ .macro addruart, rp, rv, tmp ldr \rp, =CONFIG_DEBUG_UART_PHYS @ System peripherals (phys address) - ldr \rv, =CONFIG_DEBUG_UART_VIRT @ System peripherals (virt address) + ldr \rv, =AT91_DEBUG_UART_VIRT @ System peripherals (virt address) .endm .macro senduart,rd,rx -- cgit v1.2.3 From bc4b024a8b8bd7dceb2697299aad2bda57d065e0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 7 Mar 2016 10:40:02 -0600 Subject: PCI: Move pci_dma_* helpers to common code For a long time all architectures implement the pci_dma_* functions using the generic DMA API, and they all use the same header to do so. Move this header, pci-dma-compat.h, to include/linux and include it from the generic pci.h instead of having each arch duplicate this include. Signed-off-by: Christoph Hellwig Signed-off-by: Bjorn Helgaas --- arch/alpha/include/asm/pci.h | 7 --- arch/arm/include/asm/pci.h | 1 - arch/arm64/include/asm/pci.h | 1 - arch/avr32/include/asm/pci.h | 2 - arch/blackfin/include/asm/pci.h | 1 - arch/cris/include/asm/pci.h | 3 - arch/frv/include/asm/pci.h | 1 - arch/ia64/include/asm/pci.h | 2 - arch/m68k/include/asm/pci.h | 1 - arch/microblaze/include/asm/pci.h | 2 - arch/mips/include/asm/pci.h | 3 - arch/mn10300/include/asm/pci.h | 3 - arch/parisc/include/asm/pci.h | 3 - arch/powerpc/include/asm/pci.h | 2 - arch/s390/include/asm/pci.h | 1 - arch/sh/include/asm/pci.h | 3 - arch/sparc/include/asm/pci.h | 3 - arch/tile/include/asm/pci.h | 3 - arch/unicore32/include/asm/pci.h | 1 - arch/x86/include/asm/pci.h | 3 - arch/xtensa/include/asm/pci.h | 3 - include/asm-generic/pci-dma-compat.h | 118 ----------------------------------- include/linux/pci-dma-compat.h | 118 +++++++++++++++++++++++++++++++++++ include/linux/pci.h | 4 ++ 24 files changed, 122 insertions(+), 167 deletions(-) delete mode 100644 include/asm-generic/pci-dma-compat.h create mode 100644 include/linux/pci-dma-compat.h (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index 75d8865d763d..a06c24b3a2e1 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -65,13 +65,6 @@ extern void pcibios_set_master(struct pci_dev *dev); decisions. */ #define PCI_DMA_BUS_IS_PHYS 0 -#ifdef CONFIG_PCI - -/* implement the pci_ DMA API in terms of the generic device dma_ one */ -#include - -#endif - /* TODO: integrate with include/asm-generic/pci.h ? 
*/ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index d7de19a77d51..057d381f4e57 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -2,7 +2,6 @@ #define ASMARM_PCI_H #ifdef __KERNEL__ -#include #include /* for pci_sys_data */ extern unsigned long pcibios_min_io; diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index f75b04e8d732..b9a7ba9ca44c 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -7,7 +7,6 @@ #include #include -#include #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM 0 diff --git a/arch/avr32/include/asm/pci.h b/arch/avr32/include/asm/pci.h index a32a02372017..0f5f134b896a 100644 --- a/arch/avr32/include/asm/pci.h +++ b/arch/avr32/include/asm/pci.h @@ -5,6 +5,4 @@ #define PCI_DMA_BUS_IS_PHYS (1) -#include - #endif /* __ASM_AVR32_PCI_H__ */ diff --git a/arch/blackfin/include/asm/pci.h b/arch/blackfin/include/asm/pci.h index 14efc0db1ade..11ea1cb35036 100644 --- a/arch/blackfin/include/asm/pci.h +++ b/arch/blackfin/include/asm/pci.h @@ -4,7 +4,6 @@ #define _ASM_BFIN_PCI_H #include -#include #include #define PCIBIOS_MIN_IO 0x00001000 diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h index c15b4b4baafa..b1b289df04c7 100644 --- a/arch/cris/include/asm/pci.h +++ b/arch/cris/include/asm/pci.h @@ -48,9 +48,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, #endif /* __KERNEL__ */ -/* implement the pci_ DMA API in terms of the generic device dma_ one */ -#include - /* generic pci stuff */ #include diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h index 7e96b572f728..809cfc6707ab 100644 --- a/arch/frv/include/asm/pci.h +++ b/arch/frv/include/asm/pci.h @@ -15,7 +15,6 @@ #include #include -#include #include struct pci_dev; diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 07039d168f37..c0835b0dc722 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -50,8 +50,6 @@ struct pci_dev; extern unsigned long ia64_max_iommu_merge_mask; #define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) -#include - #define HAVE_PCI_MMAP extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); diff --git a/arch/m68k/include/asm/pci.h b/arch/m68k/include/asm/pci.h index 848c3dfaad50..3a3dbcf4051d 100644 --- a/arch/m68k/include/asm/pci.h +++ b/arch/m68k/include/asm/pci.h @@ -1,7 +1,6 @@ #ifndef _ASM_M68K_PCI_H #define _ASM_M68K_PCI_H -#include #include /* The PCI address space does equal the physical memory diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index dc9eb6657e3a..fc3ecb55f1b2 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -22,8 +22,6 @@ #include #include -#include - #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM 0x10000000 diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 108d19376bb1..8c16fb7b8fdb 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -124,9 +124,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) #endif /* __KERNEL__ */ -/* implement the pci_ DMA API in terms of the generic device dma_ one */ -#include - /* Do platform specific device initialization at pci_enable_device() time */ extern int pcibios_plat_dev_init(struct pci_dev *dev); diff --git 
a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index be3debb8fc02..51159fff025a 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h @@ -80,9 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, #endif /* __KERNEL__ */ -/* implement the pci_ DMA API in terms of the generic device dma_ one */ -#include - static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return channel ? 15 : 14; diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 89c53bfff055..defebd956585 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -194,9 +194,6 @@ extern void pcibios_init_bridge(struct pci_dev *); #define PCIBIOS_MIN_IO 0x10 #define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */ -/* export the pci_ DMA API in terms of the dma_ one */ -#include - static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return channel ? 15 : 14; diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 6f8065a7d487..a6f3ac0d4602 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -20,8 +20,6 @@ #include #include -#include - /* Return values for pci_controller_ops.probe_mode function */ #define PCI_PROBE_NONE -1 /* Don't look at this bus at all */ #define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index c873e682b67f..fdba308649d7 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -9,7 +9,6 @@ #include #include #include -#include #include #include diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index e343dbd02e41..644314f2b1ef 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -105,9 +105,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 15 : 14; } -/* generic DMA-mapping stuff */ -#include - #endif /* __KERNEL__ */ #endif /* __ASM_SH_PCI_H */ diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index d9c031f9910f..6e14fd179335 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h @@ -5,7 +5,4 @@ #else #include #endif - -#include - #endif diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h index dfedd7ac7298..fe3de505b024 100644 --- a/arch/tile/include/asm/pci.h +++ b/arch/tile/include/asm/pci.h @@ -226,7 +226,4 @@ static inline int pcibios_assign_all_busses(void) /* Use any cpu for PCI. 
*/ #define cpumask_of_pcibus(bus) cpu_online_mask -/* implement the pci_ DMA API in terms of the generic device dma_ one */ -#include - #endif /* _ASM_TILE_PCI_H */ diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h index eb9dccecb338..37e55d018de5 100644 --- a/arch/unicore32/include/asm/pci.h +++ b/arch/unicore32/include/asm/pci.h @@ -13,7 +13,6 @@ #define __UNICORE_PCI_H__ #ifdef __KERNEL__ -#include #include #include /* for PCIBIOS_MIN_* */ diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 462594320d39..7fd8eaa8714d 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -105,9 +105,6 @@ void native_restore_msi_irqs(struct pci_dev *dev); #include #endif -/* implement the pci_ DMA API in terms of the generic device dma_ one */ -#include - /* generic pci stuff */ #include diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index e438a00fbd63..5d6bd932ba4e 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h @@ -55,9 +55,6 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, #endif /* __KERNEL__ */ -/* Implement the pci_ DMA API in terms of the generic device dma_ one */ -#include - /* Generic PCI */ #include diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h deleted file mode 100644 index eafce7b6f052..000000000000 --- a/include/asm-generic/pci-dma-compat.h +++ /dev/null @@ -1,118 +0,0 @@ -/* include this file if the platform implements the dma_ DMA Mapping API - * and wants to provide the pci_ DMA Mapping API in terms of it */ - -#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H -#define _ASM_GENERIC_PCI_DMA_COMPAT_H - -#include - -static inline void * -pci_alloc_consistent(struct pci_dev *hwdev, size_t size, - dma_addr_t *dma_handle) -{ - return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); -} - -static inline void * -pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, - dma_addr_t *dma_handle) -{ - return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, - size, dma_handle, GFP_ATOMIC); -} - -static inline void -pci_free_consistent(struct pci_dev *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); -} - -static inline dma_addr_t -pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) -{ - return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, - size_t size, int direction) -{ - dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); -} - -static inline dma_addr_t -pci_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) -{ - return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, - size_t size, int direction) -{ - dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); -} - -static inline int -pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nents, int direction) -{ - return dma_map_sg(hwdev == NULL ? 
NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nents, int direction) -{ - dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, - size_t size, int direction) -{ - dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, - size_t size, int direction) -{ - dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, - int nelems, int direction) -{ - dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, - int nelems, int direction) -{ - dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); -} - -static inline int -pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) -{ - return dma_mapping_error(&pdev->dev, dma_addr); -} - -#ifdef CONFIG_PCI -static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) -{ - return dma_set_mask(&dev->dev, mask); -} - -static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) -{ - return dma_set_coherent_mask(&dev->dev, mask); -} -#endif - -#endif diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h new file mode 100644 index 000000000000..eafce7b6f052 --- /dev/null +++ b/include/linux/pci-dma-compat.h @@ -0,0 +1,118 @@ +/* include this file if the platform implements the dma_ DMA Mapping API + * and wants to provide the pci_ DMA Mapping API in terms of it */ + +#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H +#define _ASM_GENERIC_PCI_DMA_COMPAT_H + +#include + +static inline void * +pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); +} + +static inline void * +pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, + size, dma_handle, GFP_ATOMIC); +} + +static inline void +pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); +} + +static inline dma_addr_t +pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) +{ + return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, + size_t size, int direction) +{ + dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); +} + +static inline dma_addr_t +pci_map_page(struct pci_dev *hwdev, struct page *page, + unsigned long offset, size_t size, int direction) +{ + return dma_map_page(hwdev == NULL ? 
NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, + size_t size, int direction) +{ + dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); +} + +static inline int +pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); +} + +static inline int +pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) +{ + return dma_mapping_error(&pdev->dev, dma_addr); +} + +#ifdef CONFIG_PCI +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_mask(&dev->dev, mask); +} + +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_coherent_mask(&dev->dev, mask); +} +#endif + +#endif diff --git a/include/linux/pci.h b/include/linux/pci.h index 3d371c150753..5db6e0ce9504 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2011,4 +2011,8 @@ static inline bool pci_ari_enabled(struct pci_bus *bus) { return bus->self && bus->self->ari_enabled; } + +/* provide the legacy pci_dma_* API */ +#include + #endif /* LINUX_PCI_H */ -- cgit v1.2.3 From 01cfbad79a5e2b835abf6a8154a341d75a6fc8cd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 Mar 2016 14:05:34 -0800 Subject: ipv4: Update parameters for csum_tcpudp_magic to their original types This patch updates all instances of csum_tcpudp_magic and csum_tcpudp_nofold to reflect the types that are usually used as the source inputs. For example the protocol field is populated based on nexthdr which is actually an unsigned 8 bit value. The length is usually populated based on skb->len which is an unsigned integer. This addresses an issue in which the IPv6 function csum_ipv6_magic was generating a checksum using the full 32b of skb->len while csum_tcpudp_magic was only using the lower 16 bits. As a result we could run into issues when attempting to adjust the checksum as there was no protocol agnostic way to update it. 
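The information loss can be shown with a small standalone C sketch (simplified and not the kernel implementation: the real pseudo-header sum folds carries with one's-complement arithmetic, and the function names below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Old-style prototype: a 16-bit len silently truncates anything above 64K. */
static uint32_t pseudo_sum_old(uint32_t saddr, uint32_t daddr,
                               uint16_t len, uint16_t proto)
{
        return saddr + daddr + (uint32_t)len + proto;
}

/* New-style prototype: 32-bit len and 8-bit proto, matching the types of
 * skb->len and nexthdr that callers actually pass in. */
static uint32_t pseudo_sum_new(uint32_t saddr, uint32_t daddr,
                               uint32_t len, uint8_t proto)
{
        return saddr + daddr + len + proto;
}

int main(void)
{
        uint32_t len = 70000;   /* illustrative length above 64K */

        /* 70000 wraps to 4464 when forced through the 16-bit parameter,
         * so the two sums no longer agree. */
        printf("old: %u  new: %u\n",
               pseudo_sum_old(1, 2, len, 6),
               pseudo_sum_new(1, 2, len, 6));
        return 0;
}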
With this change the value is still truncated as many architectures use "(len + proto) << 8", however this truncation only occurs for values greater than 16776960 in length and as such is unlikely to occur as we stop the inner headers at ~64K in size. I did have to make a few minor changes in the arm, mn10300, nios2, and score versions of the function in order to support these changes as they were either using things such as an OR to combine the protocol and length, or were using ntohs to convert the length which would have truncated the value. I also updated a few spots in terms of whitespace and type differences for the addresses. Most of this was just to make sure all of the definitions were in sync going forward. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- arch/alpha/include/asm/checksum.h | 9 +++------ arch/alpha/lib/checksum.c | 8 ++------ arch/arc/include/asm/checksum.h | 4 ++-- arch/arm/include/asm/checksum.h | 10 +++++----- arch/avr32/include/asm/checksum.h | 10 ++++------ arch/blackfin/include/asm/checksum.h | 4 ++-- arch/c6x/include/asm/checksum.h | 4 ++-- arch/cris/include/arch-v10/arch/checksum.h | 4 ++-- arch/cris/include/arch-v32/arch/checksum.h | 2 +- arch/cris/include/asm/checksum.h | 5 ++--- arch/frv/include/asm/checksum.h | 8 ++++---- arch/hexagon/include/asm/checksum.h | 8 ++++---- arch/hexagon/lib/checksum.c | 10 ++++------ arch/ia64/include/asm/checksum.h | 12 ++++-------- arch/ia64/lib/checksum.c | 8 ++++---- arch/m32r/include/asm/checksum.h | 10 ++++------ arch/metag/include/asm/checksum.h | 7 +++---- arch/microblaze/include/asm/checksum.h | 4 ++-- arch/mips/include/asm/checksum.h | 6 +++--- arch/mn10300/include/asm/checksum.h | 17 +++++------------ arch/nios2/include/asm/checksum.h | 9 ++++----- arch/parisc/include/asm/checksum.h | 10 ++++------ arch/s390/include/asm/checksum.h | 6 ++---- arch/score/include/asm/checksum.h | 10 +++++----- arch/sh/include/asm/checksum_32.h | 6 ++---- arch/sparc/include/asm/checksum_32.h | 10 ++++------ arch/sparc/include/asm/checksum_64.h | 6 ++---- arch/unicore32/include/asm/checksum.h | 4 ++-- arch/x86/include/asm/checksum_32.h | 6 ++---- arch/x86/include/asm/checksum_64.h | 8 ++++---- arch/x86/um/asm/checksum.h | 9 ++++----- arch/xtensa/include/asm/checksum.h | 10 ++++------ include/asm-generic/checksum.h | 8 ++++---- lib/checksum.c | 4 +--- 34 files changed, 106 insertions(+), 150 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h index d3854bbf0a9e..cba34b1c738c 100644 --- a/arch/alpha/include/asm/checksum.h +++ b/arch/alpha/include/asm/checksum.h @@ -13,14 +13,11 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented */ -extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum); +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, unsigned short proto, - __wsum sum); + __u32 len, __u8 proto, __wsum sum); /* * computes the checksum of a memory block at buff, length len, diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c index 199f6efa83fa..377f9e34eb97 100644 --- a/arch/alpha/lib/checksum.c +++ b/arch/alpha/lib/checksum.c @@ -42,9 +42,7 @@ static inline unsigned short from64to16(unsigned long x) * returns a 16-bit checksum, 
already complemented. */ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { return (__force __sum16)~from64to16( (__force u64)saddr + (__force u64)daddr + @@ -52,9 +50,7 @@ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, } __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { unsigned long result; diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h index 10957298b7a3..913eb4aab05b 100644 --- a/arch/arc/include/asm/checksum.h +++ b/arch/arc/include/asm/checksum.h @@ -70,8 +70,8 @@ ip_fast_csum(const void *iph, unsigned int ihl) * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2] */ static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { __asm__ __volatile__( " add.f %0, %0, %1 \n" diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h index 523315115478..42d020b7dfba 100644 --- a/arch/arm/include/asm/checksum.h +++ b/arch/arm/include/asm/checksum.h @@ -84,10 +84,10 @@ ip_fast_csum(const void *iph, unsigned int ihl) } static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { - u32 lenprot = len | proto << 16; + u32 lenprot = len + proto; if (__builtin_constant_p(sum) && sum == 0) { __asm__( "adds %0, %1, %2 @ csum_tcpudp_nofold0 \n\t" @@ -121,8 +121,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, * returns a 16-bit checksum, already complemented */ static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/arch/avr32/include/asm/checksum.h b/arch/avr32/include/asm/checksum.h index 4ddbfd2486af..4ab7d5bdaf53 100644 --- a/arch/avr32/include/asm/checksum.h +++ b/arch/avr32/include/asm/checksum.h @@ -111,9 +111,8 @@ static inline __sum16 csum_fold(__wsum sum) } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { asm(" add %0, %1\n" " adc %0, %0, %2\n" @@ -132,9 +131,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/arch/blackfin/include/asm/checksum.h b/arch/blackfin/include/asm/checksum.h index 623cc7fb00bc..e7134bf94e3c 100644 --- a/arch/blackfin/include/asm/checksum.h +++ b/arch/blackfin/include/asm/checksum.h @@ -14,8 +14,8 @@ */ static inline __wsum -__csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +__csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { unsigned int carry; diff --git a/arch/c6x/include/asm/checksum.h 
b/arch/c6x/include/asm/checksum.h index 7246816d6e4d..249b0e421ddc 100644 --- a/arch/c6x/include/asm/checksum.h +++ b/arch/c6x/include/asm/checksum.h @@ -10,8 +10,8 @@ #define _ASM_C6X_CHECKSUM_H static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { unsigned long long tmp; diff --git a/arch/cris/include/arch-v10/arch/checksum.h b/arch/cris/include/arch-v10/arch/checksum.h index b8000c5d7fe1..d1d1bd9e1090 100644 --- a/arch/cris/include/arch-v10/arch/checksum.h +++ b/arch/cris/include/arch-v10/arch/checksum.h @@ -9,8 +9,8 @@ */ static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { __wsum res; __asm__ ("add.d %2, %0\n\t" diff --git a/arch/cris/include/arch-v32/arch/checksum.h b/arch/cris/include/arch-v32/arch/checksum.h index e5dcfce6e0dc..65cf205b1329 100644 --- a/arch/cris/include/arch-v32/arch/checksum.h +++ b/arch/cris/include/arch-v32/arch/checksum.h @@ -11,7 +11,7 @@ */ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, unsigned short proto, __wsum sum) + __u32 len, __u8 proto, __wsum sum) { __wsum res; diff --git a/arch/cris/include/asm/checksum.h b/arch/cris/include/asm/checksum.h index 75dcb77d6cb0..ea949c60b190 100644 --- a/arch/cris/include/asm/checksum.h +++ b/arch/cris/include/asm/checksum.h @@ -63,9 +63,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/arch/frv/include/asm/checksum.h b/arch/frv/include/asm/checksum.h index 269da09ff637..cd59cd4fd2d9 100644 --- a/arch/frv/include/asm/checksum.h +++ b/arch/frv/include/asm/checksum.h @@ -105,8 +105,8 @@ static inline __sum16 csum_fold(__wsum sum) * returns a 16-bit checksum, already complemented */ static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { asm(" addcc %1,%0,%0,icc0 \n" " addxcc %2,%0,%0,icc0 \n" @@ -120,8 +120,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, } static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h index 46ec8a7fd65f..d9f58d696238 100644 --- a/arch/hexagon/include/asm/checksum.h +++ b/arch/hexagon/include/asm/checksum.h @@ -38,12 +38,12 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, * returns a 16-bit checksum, already complemented */ #define csum_tcpudp_nofold csum_tcpudp_nofold -__wsum csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, - unsigned short len, unsigned short proto, __wsum sum); +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); #define csum_tcpudp_magic csum_tcpudp_magic -__sum16 csum_tcpudp_magic(unsigned long saddr, 
unsigned long daddr, - unsigned short len, unsigned short proto, __wsum sum); +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); #include diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c index 8169f78a46a7..617506d1a559 100644 --- a/arch/hexagon/lib/checksum.c +++ b/arch/hexagon/lib/checksum.c @@ -60,18 +60,16 @@ static inline unsigned short from64to16(u64 x) * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented. */ -__sum16 csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, - unsigned short len, unsigned short proto, - __wsum sum) +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) { return (__force __sum16)~from64to16( (__force u64)saddr + (__force u64)daddr + (__force u64)sum + ((len + proto) << 8)); } -__wsum csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, - unsigned short len, unsigned short proto, - __wsum sum) +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) { u64 result; diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h index 97af155057e4..ac9c687e8384 100644 --- a/arch/ia64/include/asm/checksum.h +++ b/arch/ia64/include/asm/checksum.h @@ -16,15 +16,11 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit * checksum, already complemented */ -extern __sum16 csum_tcpudp_magic (__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum); +extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); -extern __wsum csum_tcpudp_nofold (__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum); +extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); /* * Computes the checksum of a memory block at buff, length len, diff --git a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c index 9fc955026f86..2cb23cb0c2e1 100644 --- a/arch/ia64/lib/checksum.c +++ b/arch/ia64/lib/checksum.c @@ -34,8 +34,8 @@ from64to16 (unsigned long x) * returns a 16-bit checksum, already complemented. 
*/ __sum16 -csum_tcpudp_magic (__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return (__force __sum16)~from64to16( (__force u64)saddr + (__force u64)daddr + @@ -45,8 +45,8 @@ csum_tcpudp_magic (__be32 saddr, __be32 daddr, unsigned short len, EXPORT_SYMBOL(csum_tcpudp_magic); __wsum -csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { unsigned long result; diff --git a/arch/m32r/include/asm/checksum.h b/arch/m32r/include/asm/checksum.h index a7a7c4f44abe..d68e93c9bd62 100644 --- a/arch/m32r/include/asm/checksum.h +++ b/arch/m32r/include/asm/checksum.h @@ -114,9 +114,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { #if defined(__LITTLE_ENDIAN) unsigned long len_proto = (proto + len) << 8; @@ -145,9 +144,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/arch/metag/include/asm/checksum.h b/arch/metag/include/asm/checksum.h index 08dd1cc65799..f65fe83b1730 100644 --- a/arch/metag/include/asm/checksum.h +++ b/arch/metag/include/asm/checksum.h @@ -59,8 +59,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); * returns a 16-bit checksum, already complemented */ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { unsigned long len_proto = (proto + len) << 8; @@ -78,8 +77,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, } static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/arch/microblaze/include/asm/checksum.h b/arch/microblaze/include/asm/checksum.h index 0185cbefdda4..adeecebbb0d1 100644 --- a/arch/microblaze/include/asm/checksum.h +++ b/arch/microblaze/include/asm/checksum.h @@ -16,8 +16,8 @@ */ #define csum_tcpudp_nofold csum_tcpudp_nofold static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { __asm__("add %0, %0, %1\n\t" "addc %0, %0, %2\n\t" diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 3ceacde5eb6e..c635541d40b8 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -160,9 +160,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } #define ip_fast_csum ip_fast_csum -static inline __wsum csum_tcpudp_nofold(__be32 saddr, - __be32 daddr, unsigned short len, unsigned short proto, - __wsum sum) +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, + __wsum sum) { 
__asm__( " .set push # csum_tcpudp_nofold\n" diff --git a/arch/mn10300/include/asm/checksum.h b/arch/mn10300/include/asm/checksum.h index 9fb2a8d8826a..c80df5b504ac 100644 --- a/arch/mn10300/include/asm/checksum.h +++ b/arch/mn10300/include/asm/checksum.h @@ -37,16 +37,11 @@ static inline __sum16 csum_fold(__wsum sum) return (~sum) >> 16; } -static inline __wsum csum_tcpudp_nofold(unsigned long saddr, - unsigned long daddr, - unsigned short len, - unsigned short proto, +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) { - __wsum tmp; - - tmp = (__wsum) ntohs(len) << 16; - tmp += (__wsum) proto << 8; + __wsum tmp = (__wsum)((len + proto) << 8); asm( " add %1,%0 \n" @@ -64,10 +59,8 @@ static inline __wsum csum_tcpudp_nofold(unsigned long saddr, * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented */ -static inline __sum16 csum_tcpudp_magic(unsigned long saddr, - unsigned long daddr, - unsigned short len, - unsigned short proto, +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h index 6bc1f0d5df7b..703c5ee63421 100644 --- a/arch/nios2/include/asm/checksum.h +++ b/arch/nios2/include/asm/checksum.h @@ -45,8 +45,7 @@ static inline __sum16 csum_fold(__wsum sum) */ #define csum_tcpudp_nofold csum_tcpudp_nofold static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { __asm__ __volatile__( @@ -60,7 +59,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, "cmpltu r8, %0, %3\n" "add %0, %0, r8\n" /* add carry */ : "=r" (sum), "=r" (saddr) - : "r" (daddr), "r" ((ntohs(len) << 16) + (proto * 256)), + : "r" (daddr), "r" ((len + proto) << 8), "0" (sum), "1" (saddr) : "r8"); @@ -69,8 +68,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, } static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h index c84b2fcb18a9..9815ab1fc8aa 100644 --- a/arch/parisc/include/asm/checksum.h +++ b/arch/parisc/include/asm/checksum.h @@ -85,9 +85,8 @@ static inline __sum16 csum_fold(__wsum csum) } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { __asm__( " add %1, %0, %0\n" @@ -104,9 +103,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index 740364856355..d7f100c53f07 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h @@ -91,8 +91,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) * returns a 32-bit checksum */ static inline __wsum -csum_tcpudp_nofold(__be32 
saddr, __be32 daddr, - unsigned short len, unsigned short proto, +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { __u32 csum = (__force __u32)sum; @@ -118,8 +117,7 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, */ static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, unsigned short proto, +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h index 961bd64015a8..a375bc2700be 100644 --- a/arch/score/include/asm/checksum.h +++ b/arch/score/include/asm/checksum.h @@ -127,10 +127,10 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { - unsigned long tmp = (ntohs(len) << 16) + proto * 256; + unsigned long tmp = (len + proto) << 8; __asm__ __volatile__( ".set volatile\n\t" "add\t%0, %0, %2\n\t" @@ -161,8 +161,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, * returns a 16-bit checksum, already complemented */ static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h index 14b7ac2f0a07..fd730f140c06 100644 --- a/arch/sh/include/asm/checksum_32.h +++ b/arch/sh/include/asm/checksum_32.h @@ -115,8 +115,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { #ifdef __LITTLE_ENDIAN__ @@ -142,8 +141,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h index 426b2389a1c2..86ae655a3c0f 100644 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@ -170,9 +170,8 @@ static inline __sum16 csum_fold(__wsum sum) } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { __asm__ __volatile__("addcc\t%1, %0, %0\n\t" "addxcc\t%2, %0, %0\n\t" @@ -190,9 +189,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h index b8779a6a5911..ef0c6f48189a 100644 --- a/arch/sparc/include/asm/checksum_64.h +++ 
b/arch/sparc/include/asm/checksum_64.h @@ -96,8 +96,7 @@ static inline __sum16 csum_fold(__wsum sum) } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned int len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { __asm__ __volatile__( @@ -116,8 +115,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); diff --git a/arch/unicore32/include/asm/checksum.h b/arch/unicore32/include/asm/checksum.h index f55c3f937c3e..23ceb9e3a89b 100644 --- a/arch/unicore32/include/asm/checksum.h +++ b/arch/unicore32/include/asm/checksum.h @@ -20,8 +20,8 @@ */ static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { __asm__( "add.a %0, %1, %2\n" diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index f50de6951738..6f380605403d 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -112,8 +112,7 @@ static inline __sum16 csum_fold(__wsum sum) } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { asm("addl %1, %0 ;\n" @@ -131,8 +130,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h index cd00e1774491..97b98e2039bc 100644 --- a/arch/x86/include/asm/checksum_64.h +++ b/arch/x86/include/asm/checksum_64.h @@ -84,8 +84,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) * 32bit unfolded. */ static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { asm(" addl %1, %0\n" " adcl %2, %0\n" @@ -110,8 +110,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, * complemented and ready to be filled in. */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h index ee940185e89f..54d96f1e3594 100644 --- a/arch/x86/um/asm/checksum.h +++ b/arch/x86/um/asm/checksum.h @@ -87,8 +87,8 @@ static inline __sum16 csum_fold(__wsum sum) * 32bit unfolded. 
*/ static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { asm(" addl %1, %0\n" " adcl %2, %0\n" @@ -104,9 +104,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index 0593de689b56..62254e6688f5 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -123,9 +123,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { #ifdef __XTENSA_EL__ @@ -157,9 +156,8 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 59811df58c5b..3150cbd8eb21 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -65,14 +65,14 @@ static inline __sum16 csum_fold(__wsum csum) * returns a 16-bit checksum, already complemented */ extern __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum); +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum); #endif #ifndef csum_tcpudp_magic static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/lib/checksum.c b/lib/checksum.c index 8b39e86dbab5..d3ec93f9e5f3 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -191,9 +191,7 @@ static inline u32 from64to32(u64 x) } __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { unsigned long long s = (__force u32)sum; -- cgit v1.2.3 From 1e94082963747b551b129528714827f76a090e93 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 Mar 2016 14:05:41 -0800 Subject: ipv6: Pass proto to csum_ipv6_magic as __u8 instead of unsigned short This patch updates csum_ipv6_magic so that it correctly recognizes that protocol is a unsigned 8 bit value. This will allow us to better understand what limitations may or may not be present in how we handle the data. For example there are a number of places that call htonl on the protocol value. This is likely not necessary and can be replaced with a multiplication by ntohl(1) which will be converted to a shift by the compiler. Signed-off-by: Alexander Duyck Signed-off-by: David S. 
Miller --- arch/alpha/include/asm/checksum.h | 3 +-- arch/arm/include/asm/checksum.h | 4 ++-- arch/frv/include/asm/checksum.h | 2 +- arch/ia64/include/asm/checksum.h | 4 ++-- arch/m68k/include/asm/checksum.h | 2 +- arch/mips/include/asm/checksum.h | 2 +- arch/parisc/include/asm/checksum.h | 2 +- arch/score/include/asm/checksum.h | 5 ++--- arch/sh/include/asm/checksum_32.h | 3 +-- arch/sparc/include/asm/checksum_32.h | 3 +-- arch/sparc/include/asm/checksum_64.h | 3 +-- arch/x86/include/asm/checksum_32.h | 3 +-- arch/x86/include/asm/checksum_64.h | 2 +- arch/x86/lib/csum-wrappers_64.c | 2 +- arch/x86/um/asm/checksum_32.h | 2 +- arch/xtensa/include/asm/checksum.h | 2 +- include/net/ip6_checksum.h | 3 +-- net/ipv6/ip6_checksum.c | 3 +-- 18 files changed, 21 insertions(+), 29 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h index cba34b1c738c..f2bbdd2ace51 100644 --- a/arch/alpha/include/asm/checksum.h +++ b/arch/alpha/include/asm/checksum.h @@ -67,6 +67,5 @@ static inline __sum16 csum_fold(__wsum csum) #define _HAVE_ARCH_IPV6_CSUM extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum); + __u32 len, __u8 proto, __wsum sum); #endif diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h index 42d020b7dfba..524692f4acab 100644 --- a/arch/arm/include/asm/checksum.h +++ b/arch/arm/include/asm/checksum.h @@ -144,8 +144,8 @@ __csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __ __be32 proto, __wsum sum); static inline __sum16 -csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len, - unsigned short proto, __wsum sum) +csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum) { return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len), htonl(proto), sum)); diff --git a/arch/frv/include/asm/checksum.h b/arch/frv/include/asm/checksum.h index cd59cd4fd2d9..b77388c5901d 100644 --- a/arch/frv/include/asm/checksum.h +++ b/arch/frv/include/asm/checksum.h @@ -135,7 +135,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len); #define _HAVE_ARCH_IPV6_CSUM static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, __wsum sum) + __u32 len, __u8 proto, __wsum sum) { unsigned long tmp, tmp2; diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h index ac9c687e8384..7accf54162b2 100644 --- a/arch/ia64/include/asm/checksum.h +++ b/arch/ia64/include/asm/checksum.h @@ -69,7 +69,7 @@ static inline __sum16 csum_fold(__wsum csum) #define _HAVE_ARCH_IPV6_CSUM 1 struct in6_addr; extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, __u32 len, unsigned short proto, - __wsum csum); + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum); #endif /* _ASM_IA64_CHECKSUM_H */ diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h index 2f88d867c711..75e91f03b178 100644 --- a/arch/m68k/include/asm/checksum.h +++ b/arch/m68k/include/asm/checksum.h @@ -117,7 +117,7 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) #define _HAVE_ARCH_IPV6_CSUM static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, __wsum sum) + __u32 len, __u8 proto, __wsum sum) { register 
unsigned long tmp; __asm__("addl %2@,%0\n\t" diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index c635541d40b8..bce1ce53149a 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -215,7 +215,7 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) #define _HAVE_ARCH_IPV6_CSUM static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { __wsum tmp; diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h index 9815ab1fc8aa..60c2c42619c9 100644 --- a/arch/parisc/include/asm/checksum.h +++ b/arch/parisc/include/asm/checksum.h @@ -122,7 +122,7 @@ static inline __sum16 ip_compute_csum(const void *buf, int len) #define _HAVE_ARCH_IPV6_CSUM static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { __asm__ __volatile__ ( diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h index a375bc2700be..539d9fd45d21 100644 --- a/arch/score/include/asm/checksum.h +++ b/arch/score/include/asm/checksum.h @@ -179,9 +179,8 @@ static inline unsigned short ip_compute_csum(const void *buff, int len) #define _HAVE_ARCH_IPV6_CSUM static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum) { __asm__ __volatile__( ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t" diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h index fd730f140c06..9c84386d35cb 100644 --- a/arch/sh/include/asm/checksum_32.h +++ b/arch/sh/include/asm/checksum_32.h @@ -159,8 +159,7 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) #define _HAVE_ARCH_IPV6_CSUM static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { unsigned int __dummy; __asm__("clrt\n\t" diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h index 86ae655a3c0f..eff748c871ec 100644 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@ -199,8 +199,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { __asm__ __volatile__ ( "addcc %3, %4, %%g4\n\t" diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h index ef0c6f48189a..0395d75322e9 100644 --- a/arch/sparc/include/asm/checksum_64.h +++ b/arch/sparc/include/asm/checksum_64.h @@ -125,8 +125,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { __asm__ __volatile__ ( " addcc %3, %4, %%g7\n" diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index 6f380605403d..532f85e6651f 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -149,8 +149,7 @@ static inline __sum16 
ip_compute_csum(const void *buff, int len) #define _HAVE_ARCH_IPV6_CSUM static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { asm("addl 0(%1), %0 ;\n" "adcl 4(%1), %0 ;\n" diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h index 97b98e2039bc..c020ee75dce7 100644 --- a/arch/x86/include/asm/checksum_64.h +++ b/arch/x86/include/asm/checksum_64.h @@ -177,7 +177,7 @@ struct in6_addr; #define _HAVE_ARCH_IPV6_CSUM 1 extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, __wsum sum); + __u32 len, __u8 proto, __wsum sum); static inline unsigned add32_with_carry(unsigned a, unsigned b) { diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 1318f75d56e4..28a6654f0d08 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -135,7 +135,7 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck); __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, __wsum sum) + __u32 len, __u8 proto, __wsum sum) { __u64 rest, sum64; diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h index ab77b6f9a4bf..83a75f8a1233 100644 --- a/arch/x86/um/asm/checksum_32.h +++ b/arch/x86/um/asm/checksum_32.h @@ -13,7 +13,7 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) #define _HAVE_ARCH_IPV6_CSUM static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { __asm__( diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index 62254e6688f5..ec35074fcb03 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -175,7 +175,7 @@ static __inline__ __sum16 ip_compute_csum(const void *buff, int len) #define _HAVE_ARCH_IPV6_CSUM static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, + __u32 len, __u8 proto, __wsum sum) { unsigned int __dummy; diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 1a49b73f7f6e..cca840584c88 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -37,8 +37,7 @@ #ifndef _HAVE_ARCH_IPV6_CSUM __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum csum); + __u32 len, __u8 proto, __wsum csum); #endif static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c index 8f920580976f..b2025bf3da4a 100644 --- a/net/ipv6/ip6_checksum.c +++ b/net/ipv6/ip6_checksum.c @@ -6,8 +6,7 @@ #ifndef _HAVE_ARCH_IPV6_CSUM __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum csum) + __u32 len, __u8 proto, __wsum csum) { int carry; -- cgit v1.2.3 From be7635e7287e0e8013af3c89a6354a9e0182594c Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Fri, 25 Mar 2016 14:22:05 -0700 Subject: arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections KASAN needs to know whether the allocation happens in an IRQ handler. 
This lets us strip everything below the IRQ entry point to reduce the number of unique stack traces needed to be stored. Move the definition of __irq_entry to <linux/interrupt.h> so that the users don't need to pull in <linux/ftrace.h>. Also introduce the __softirq_entry macro which is similar to __irq_entry, but puts the corresponding functions to the .softirqentry.text section. Signed-off-by: Alexander Potapenko Acked-by: Steven Rostedt Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrey Konovalov Cc: Dmitry Vyukov Cc: Andrey Ryabinin Cc: Konstantin Serebryany Cc: Dmitry Chernenkov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/exception.h | 2 +- arch/arm/kernel/vmlinux.lds.S | 1 + arch/arm64/include/asm/exception.h | 2 +- arch/arm64/kernel/vmlinux.lds.S | 1 + arch/blackfin/kernel/vmlinux.lds.S | 1 + arch/c6x/kernel/vmlinux.lds.S | 1 + arch/metag/kernel/vmlinux.lds.S | 1 + arch/microblaze/kernel/vmlinux.lds.S | 1 + arch/mips/kernel/vmlinux.lds.S | 1 + arch/nios2/kernel/vmlinux.lds.S | 1 + arch/openrisc/kernel/vmlinux.lds.S | 1 + arch/parisc/kernel/vmlinux.lds.S | 1 + arch/powerpc/kernel/vmlinux.lds.S | 1 + arch/s390/kernel/vmlinux.lds.S | 1 + arch/sh/kernel/vmlinux.lds.S | 1 + arch/sparc/kernel/vmlinux.lds.S | 1 + arch/tile/kernel/vmlinux.lds.S | 1 + arch/x86/kernel/vmlinux.lds.S | 1 + include/asm-generic/vmlinux.lds.h | 12 +++++++++++- include/linux/ftrace.h | 11 ----------- include/linux/interrupt.h | 20 ++++++++++++++++++++ kernel/softirq.c | 2 +- kernel/trace/trace_functions_graph.c | 1 + 23 files changed, 51 insertions(+), 15 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h index 5abaf5bbd985..bf1991263d2d 100644 --- a/arch/arm/include/asm/exception.h +++ b/arch/arm/include/asm/exception.h @@ -7,7 +7,7 @@ #ifndef __ASM_ARM_EXCEPTION_H #define __ASM_ARM_EXCEPTION_H -#include +#include #define __exception __attribute__((section(".exception.text"))) #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 1fab979daeaf..e2c6da096cef 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -108,6 +108,7 @@ SECTIONS *(.exception.text) __exception_text_end = .; IRQENTRY_TEXT + SOFTIRQENTRY_TEXT TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 6cb7e1a6bc02..0c2eec490abf 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -18,7 +18,7 @@ #ifndef __ASM_EXCEPTION_H #define __ASM_EXCEPTION_H -#include +#include #define __exception __attribute__((section(".exception.text"))) #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 37f624df68fa..5a1939a74ff3 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -103,6 +103,7 @@ SECTIONS *(.exception.text) __exception_text_end = .; IRQENTRY_TEXT + SOFTIRQENTRY_TEXT TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index c9eec84aa258..d920b959ff3a 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -35,6 +35,7 @@ SECTIONS #endif LOCK_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT KPROBES_TEXT #ifdef CONFIG_ROMKERNEL __sinittext = .; diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index 5a6e141d1641..50bc10f97bcb 100644 ---
a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S @@ -72,6 +72,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT KPROBES_TEXT *(.fixup) *(.gnu.warning) diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S index e12055e88bfe..150ace92c7ad 100644 --- a/arch/metag/kernel/vmlinux.lds.S +++ b/arch/metag/kernel/vmlinux.lds.S @@ -24,6 +24,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.text.*) *(.gnu.warning) } diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index be9488d69734..0a47f0410554 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -36,6 +36,7 @@ SECTIONS { LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT . = ALIGN (4) ; _etext = . ; } diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 0a93e83cd014..54d653ee17e1 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -58,6 +58,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.text.*) *(.fixup) *(.gnu.warning) diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S index 326fab40a9de..e23e89539967 100644 --- a/arch/nios2/kernel/vmlinux.lds.S +++ b/arch/nios2/kernel/vmlinux.lds.S @@ -39,6 +39,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT KPROBES_TEXT } =0 _etext = .; diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 2d69a853b742..d936de4c07ca 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -50,6 +50,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.text.__*) _etext = .; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 308f29081d46..f3ead0b6ce46 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -72,6 +72,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.text.do_softirq) *(.text.sys_exit) *(.text.do_sigaltstack) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index d41fd0af8980..2dd91f79de05 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -55,6 +55,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT #ifdef CONFIG_PPC32 *(.got1) diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 445657fe658c..0f41a8286378 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -28,6 +28,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) } :text = 0x0700 diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index db88cbf9eafd..235a4101999f 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -39,6 +39,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) _etext = .; /* End of text section */ diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index f1a2f688b28a..aadd321aa05d 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -48,6 +48,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.gnu.warning) } = 0 _etext = .; diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S index 0e059a0101ea..378f5d8d1ec8 100644 --- a/arch/tile/kernel/vmlinux.lds.S +++ 
b/arch/tile/kernel/vmlinux.lds.S @@ -45,6 +45,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT __fix_text_end = .; /* tile-cpack won't rearrange before this */ ALIGN_FUNCTION(); *(.hottext*) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d239639e0c1d..4c941f88d405 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -101,6 +101,7 @@ SECTIONS KPROBES_TEXT ENTRY_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) /* End of text section */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8f5a12ab2f2b..339125bb4d2c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -456,7 +456,7 @@ *(.entry.text) \ VMLINUX_SYMBOL(__entry_text_end) = .; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) #define IRQENTRY_TEXT \ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__irqentry_text_start) = .; \ @@ -466,6 +466,16 @@ #define IRQENTRY_TEXT #endif +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) +#define SOFTIRQENTRY_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__softirqentry_text_start) = .; \ + *(.softirqentry.text) \ + VMLINUX_SYMBOL(__softirqentry_text_end) = .; +#else +#define SOFTIRQENTRY_TEXT +#endif + /* Section used for early init (in .S files) */ #define HEAD_TEXT *(.head.text) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 6d9df3f7e334..dea12a6e413b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -811,16 +811,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, */ #define __notrace_funcgraph notrace -/* - * We want to which function is an entrypoint of a hardirq. - * That will help us to put a signal on output. - */ -#define __irq_entry __attribute__((__section__(".irqentry.text"))) - -/* Limits of hardirq entrypoints */ -extern char __irqentry_text_start[]; -extern char __irqentry_text_end[]; - #define FTRACE_NOTRACE_DEPTH 65536 #define FTRACE_RETFUNC_DEPTH 50 #define FTRACE_RETSTACK_ALLOC_SIZE 32 @@ -857,7 +847,6 @@ static inline void unpause_graph_tracing(void) #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ #define __notrace_funcgraph -#define __irq_entry #define INIT_FTRACE_GRAPH static inline void ftrace_graph_init_task(struct task_struct *t) { } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 358076eda364..9fcabeb07787 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -683,4 +683,24 @@ extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) +/* + * We want to know which function is an entrypoint of a hardirq or a softirq. 
+ */ +#define __irq_entry __attribute__((__section__(".irqentry.text"))) +#define __softirq_entry \ + __attribute__((__section__(".softirqentry.text"))) + +/* Limits of hardirq entrypoints */ +extern char __irqentry_text_start[]; +extern char __irqentry_text_end[]; +/* Limits of softirq entrypoints */ +extern char __softirqentry_text_start[]; +extern char __softirqentry_text_end[]; + +#else +#define __irq_entry +#define __softirq_entry +#endif + #endif diff --git a/kernel/softirq.c b/kernel/softirq.c index 8aae49dd7da8..17caf4b63342 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -227,7 +227,7 @@ static inline bool lockdep_softirq_start(void) { return false; } static inline void lockdep_softirq_end(bool in_hardirq) { } #endif -asmlinkage __visible void __do_softirq(void) +asmlinkage __visible void __softirq_entry __do_softirq(void) { unsigned long end = jiffies + MAX_SOFTIRQ_TIME; unsigned long old_flags = current->flags; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 91d6a63a2ea7..3a0244ff7ea8 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -8,6 +8,7 @@ */ #include #include +#include #include #include -- cgit v1.2.3
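A note on the arithmetic in the csum_tcpudp_nofold conversions earlier in this series: the pseudo-header term (ntohs(len) << 16) + proto * 256 is replaced by (len + proto) << 8. The two expressions are not bit-for-bit equal, but they are congruent modulo 0xffff on a little-endian host (where ntohs() actually byte-swaps), and congruence modulo 0xffff is all that survives the one's-complement fold. The following stand-alone user-space check is a sketch, not kernel code and not part of the patch; it assumes a little-endian host and verifies the equivalence exhaustively for every 16-bit length and 8-bit protocol value:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator to 16 bits with end-around carry. */
static uint16_t fold32(uint32_t x)
{
        x = (x & 0xffff) + (x >> 16);
        x = (x & 0xffff) + (x >> 16);
        return (uint16_t)x;
}

int main(void)
{
        for (uint32_t len = 0; len <= 0xffff; len++) {
                for (uint32_t proto = 0; proto <= 0xff; proto++) {
                        uint32_t old_term = ((uint32_t)ntohs((uint16_t)len) << 16) + proto * 256;
                        uint32_t new_term = (len + proto) << 8;

                        /* Equal mod 0xffff <=> same folded checksum contribution. */
                        assert(fold32(old_term) % 0xffff == fold32(new_term) % 0xffff);
                }
        }
        printf("old and new pseudo-header terms are checksum-equivalent\n");
        return 0;
}

The % 0xffff in the comparison treats 0 and 0xffff as the same residue, which matches one's-complement arithmetic.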
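The csum_ipv6_magic commit message above remarks that callers doing htonl() on the protocol value could instead multiply by ntohl(1), which the compiler turns into a shift. For an 8-bit protocol value the two really are interchangeable on either endianness; a small stand-alone check (again a sketch, not kernel code) makes that concrete:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /*
         * For proto <= 0xff, swapping the value and multiplying by the
         * swapped constant 1 land the byte in the same position: on a
         * little-endian host both yield proto << 24, on big-endian both
         * are the identity.
         */
        for (uint32_t proto = 0; proto <= 0xff; proto++)
                assert(htonl(proto) == proto * ntohl(1));
        printf("htonl(proto) == proto * ntohl(1) for all 8-bit protocol values\n");
        return 0;
}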
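To make the purpose of the new section bounds concrete: a consumer such as KASAN can test whether a frame's return address falls inside .irqentry.text or .softirqentry.text and cut the captured stack trace off at that point, which is the "strip everything below the IRQ entry point" behaviour the commit message describes. The helper below is only an illustrative sketch of that pattern; the function names in_irq_entry_section() and truncate_at_irq_entry() are invented here, and the actual KASAN code may differ. It relies solely on the symbols exported by the include/linux/interrupt.h hunk above.

#include <linux/interrupt.h>    /* __irqentry_text_* and __softirqentry_text_* bounds */
#include <linux/types.h>

/* Does this return address point into a hard- or soft-IRQ entry section? */
static bool in_irq_entry_section(unsigned long addr)
{
        return (addr >= (unsigned long)&__irqentry_text_start &&
                addr <  (unsigned long)&__irqentry_text_end) ||
               (addr >= (unsigned long)&__softirqentry_text_start &&
                addr <  (unsigned long)&__softirqentry_text_end);
}

/* Truncate a captured trace at the first IRQ entry frame, keeping that frame. */
static void truncate_at_irq_entry(unsigned long *entries, unsigned int *nr_entries)
{
        unsigned int i;

        for (i = 0; i < *nr_entries; i++) {
                if (in_irq_entry_section(entries[i])) {
                        *nr_entries = i + 1;
                        break;
                }
        }
}

Because __do_softirq() is now placed in .softirqentry.text by the patch, the same truncation works for allocations made from softirq context, not just from hardirq handlers annotated with __irq_entry.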