From 57853e8906a04a86c32fb96d8421b923c6d64162 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 7 Jul 2015 18:19:38 +0100 Subject: ARM: 8403/1: kbuild: don't use generic mcs_spinlock.h header We provide our own implementation of asm/mcs_spinlock.h, so there's no need to ask for the (empty) generic version. Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 83c50193626c..517ef6dd22b9 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += irq_regs.h generic-y += kdebug.h generic-y += local.h generic-y += local64.h -generic-y += mcs_spinlock.h generic-y += msgbuf.h generic-y += param.h generic-y += parport.h -- cgit v1.2.3 From 20c305f66077d2e646b23336d4404261dc283cf9 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Fri, 10 Jul 2015 02:09:47 +0800 Subject: ARM: imx: add low-level debug support for i.mx6ul Enable low-level debug support for i.MX6UL by adding the debug port definitions for the SoC. Singed-off-by: Anson Huang Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: Shawn Guo --- arch/arm/Kconfig.debug | 9 +++++++++ arch/arm/include/debug/imx-uart.h | 13 +++++++++++++ 2 files changed, 22 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index f1b157971366..f21daa87ce18 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -411,6 +411,13 @@ choice Say Y here if you want kernel low-level debugging support on i.MX6SX. + config DEBUG_IMX6UL_UART + bool "i.MX6UL Debug UART" + depends on SOC_IMX6UL + help + Say Y here if you want kernel low-level debugging support + on i.MX6UL. 
+ config DEBUG_IMX7D_UART bool "i.MX7D Debug UART" depends on SOC_IMX7D @@ -1269,6 +1276,7 @@ config DEBUG_IMX_UART_PORT DEBUG_IMX6Q_UART || \ DEBUG_IMX6SL_UART || \ DEBUG_IMX6SX_UART || \ + DEBUG_IMX6UL_UART || \ DEBUG_IMX7D_UART default 1 depends on ARCH_MXC @@ -1320,6 +1328,7 @@ config DEBUG_LL_INCLUDE DEBUG_IMX6Q_UART || \ DEBUG_IMX6SL_UART || \ DEBUG_IMX6SX_UART || \ + DEBUG_IMX6UL_UART || \ DEBUG_IMX7D_UART default "debug/ks8695.S" if DEBUG_KS8695_UART default "debug/msm.S" if DEBUG_QCOM_UARTDM diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h index 66f736f74684..bce58e975ad1 100644 --- a/arch/arm/include/debug/imx-uart.h +++ b/arch/arm/include/debug/imx-uart.h @@ -90,6 +90,17 @@ #define IMX6SX_UART_BASE_ADDR(n) IMX6SX_UART##n##_BASE_ADDR #define IMX6SX_UART_BASE(n) IMX6SX_UART_BASE_ADDR(n) +#define IMX6UL_UART1_BASE_ADDR 0x02020000 +#define IMX6UL_UART2_BASE_ADDR 0x021e8000 +#define IMX6UL_UART3_BASE_ADDR 0x021ec000 +#define IMX6UL_UART4_BASE_ADDR 0x021f0000 +#define IMX6UL_UART5_BASE_ADDR 0x021f4000 +#define IMX6UL_UART6_BASE_ADDR 0x021fc000 +#define IMX6UL_UART7_BASE_ADDR 0x02018000 +#define IMX6UL_UART8_BASE_ADDR 0x02024000 +#define IMX6UL_UART_BASE_ADDR(n) IMX6UL_UART##n##_BASE_ADDR +#define IMX6UL_UART_BASE(n) IMX6UL_UART_BASE_ADDR(n) + #define IMX7D_UART1_BASE_ADDR 0x30860000 #define IMX7D_UART2_BASE_ADDR 0x30890000 #define IMX7D_UART3_BASE_ADDR 0x30880000 @@ -124,6 +135,8 @@ #define UART_PADDR IMX_DEBUG_UART_BASE(IMX6SL) #elif defined(CONFIG_DEBUG_IMX6SX_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX6SX) +#elif defined(CONFIG_DEBUG_IMX6UL_UART) +#define UART_PADDR IMX_DEBUG_UART_BASE(IMX6UL) #elif defined(CONFIG_DEBUG_IMX7D_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX7D) -- cgit v1.2.3 From 96f0e00378d4a1fc1b79933ef84e1595015de808 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 3 Sep 2014 23:57:13 +0100 Subject: ARM: add basic support for on-demand backtrace of other CPUs As we now have generic infrastructure to support backtracing of other CPUs in the system on lockups, we can start to implement this for ARM. Initially, we add an IPI based implementation, as the GIC code needs modification to support the generation of FIQ IPIs, and not all ARM platforms have the ability to raise a FIQ in the non-secure world. This provides us with a "best efforts" implementation in the absence of FIQs. 
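
As an illustrative sketch (not part of the patch; report_lockup() is a hypothetical caller), this is how the new hook is exercised once it is wired up through <linux/nmi.h>: trigger_all_cpu_backtrace() expands to arch_trigger_all_cpu_backtrace(true), which raises IPI_CPU_BACKTRACE on the online CPUs, and each CPU then dumps its stack from handle_IPI() via nmi_cpu_backtrace().

    #include <linux/nmi.h>
    #include <linux/printk.h>

    /* Hypothetical lockup-report path; only the trigger/IPI plumbing is real. */
    static void report_lockup(void)
    {
    	if (!trigger_all_cpu_backtrace())
    		pr_info("backtrace of other CPUs not supported on this arch\n");
    }
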
Signed-off-by: Russell King --- arch/arm/include/asm/irq.h | 5 +++++ arch/arm/kernel/smp.c | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 53c15dec7af6..be1d07d59ee9 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -35,6 +35,11 @@ extern void (*handle_arch_irq)(struct pt_regs *); extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); #endif +#ifdef CONFIG_SMP +extern void arch_trigger_all_cpu_backtrace(bool); +#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x) +#endif + #endif #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 90dfbedfbfb8..3a20c386fd33 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -72,6 +73,7 @@ enum ipi_msg_type { IPI_CPU_STOP, IPI_IRQ_WORK, IPI_COMPLETION, + IPI_CPU_BACKTRACE = 15, }; static DECLARE_COMPLETION(cpu_running); @@ -630,6 +632,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs) irq_exit(); break; + case IPI_CPU_BACKTRACE: + irq_enter(); + nmi_cpu_backtrace(regs); + irq_exit(); + break; + default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); @@ -724,3 +732,13 @@ static int __init register_cpufreq_notifier(void) core_initcall(register_cpufreq_notifier); #endif + +static void raise_nmi(cpumask_t *mask) +{ + smp_cross_call(mask, IPI_CPU_BACKTRACE); +} + +void arch_trigger_all_cpu_backtrace(bool include_self) +{ + nmi_trigger_all_cpu_backtrace(include_self, raise_nmi); +} -- cgit v1.2.3 From 56c7f5e77f797fd0dcf2376ce1496f4238e6be33 Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Tue, 7 Jul 2015 17:29:56 +0100 Subject: KVM: arm: introduce kvm_arm_init/setup/clear_debug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a precursor for later patches which will need to do more to setup debug state before entering the hyp.S switch code. The existing functionality for setting mdcr_el2 has been moved out of hyp.S and now uses the value kept in vcpu->arch.mdcr_el2. As the assembler used to previously mask and preserve MDCR_EL2.HPMN I've had to add a mechanism to save the value of mdcr_el2 as a per-cpu variable during the initialisation code. The kernel never sets this number so we are assuming the bootcode has set up the correct value here. This also moves the conditional setting of the TDA bit from the hyp code into the C code which is currently used for the lazy debug register context switch code. 
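
Condensed as a summary sketch (taken from kvm_arm_setup_debug() in the patch below, not a separate implementation), the per-vcpu value computed on every guest entry is:

    u32 host_mdcr = __this_cpu_read(mdcr_el2);		/* saved by kvm_arm_init_debug() */

    vcpu->arch.mdcr_el2  = host_mdcr & MDCR_EL2_HPMN_MASK;	/* preserve the boot HPMN */
    vcpu->arch.mdcr_el2 |= MDCR_EL2_TPM | MDCR_EL2_TPMCR |
    		       MDCR_EL2_TDRA | MDCR_EL2_TDOSA;
    if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
    	vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;		/* trap debug register access */

hyp.S then simply loads VCPU_MDCR_EL2 into mdcr_el2 instead of rebuilding it in assembly.
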
Signed-off-by: Alex Bennée Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 4 ++ arch/arm/kvm/arm.c | 6 +++ arch/arm64/include/asm/kvm_asm.h | 2 + arch/arm64/include/asm/kvm_host.h | 5 +++ arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/debug.c | 81 +++++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp.S | 19 ++++----- 8 files changed, 108 insertions(+), 12 deletions(-) create mode 100644 arch/arm64/kvm/debug.c (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index e896d2c196e6..2b0bc8c57552 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -231,4 +231,8 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arm_init_debug(void) {} +static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} +static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 1b693cb2d5b2..77151b111d32 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -543,6 +543,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) continue; } + kvm_arm_setup_debug(vcpu); + /************************************************************** * Enter the guest */ @@ -557,6 +559,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * Back from guest *************************************************************/ + kvm_arm_clear_debug(vcpu); + /* * We may have taken a host interrupt in HYP mode (ie * while executing the guest). 
This interrupt is still @@ -914,6 +918,8 @@ static void cpu_init_hyp_mode(void *dummy) vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); + + kvm_arm_init_debug(); } static int hyp_init_cpu_notify(struct notifier_block *self, diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 3c5fe685a2d6..f5e40dae291a 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -132,6 +132,8 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); extern u64 __vgic_v3_get_ich_vtr_el2(void); +extern u32 __kvm_get_mdcr_el2(void); + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2709db2a7eac..c90c6a41c448 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -103,6 +103,7 @@ struct kvm_vcpu_arch { /* HYP configuration */ u64 hcr_el2; + u32 mdcr_el2; /* Exception Information */ struct kvm_vcpu_fault_info fault; @@ -227,4 +228,8 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +void kvm_arm_init_debug(void); +void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); +void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index c99701a34d7b..5c900d49b906 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -117,6 +117,7 @@ int main(void) DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags)); DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); + DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2)); DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index f90f4aa7f88d..1949fe5f5424 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o -kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o +kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c new file mode 100644 index 000000000000..faf0e1fdba9e --- /dev/null +++ b/arch/arm64/kvm/debug.c @@ -0,0 +1,81 @@ +/* + * Debug and Guest Debug support + * + * Copyright (C) 2015 - Linaro Ltd + * Author: Alex Bennée + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +#include + +static DEFINE_PER_CPU(u32, mdcr_el2); + +/** + * kvm_arm_init_debug - grab what we need for debug + * + * Currently the sole task of this function is to retrieve the initial + * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has + * presumably been set-up by some knowledgeable bootcode. + * + * It is called once per-cpu during CPU hyp initialisation. + */ + +void kvm_arm_init_debug(void) +{ + __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2)); +} + + +/** + * kvm_arm_setup_debug - set up debug related stuff + * + * @vcpu: the vcpu pointer + * + * This is called before each entry into the hypervisor to setup any + * debug related registers. Currently this just ensures we will trap + * access to: + * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR) + * - Debug ROM Address (MDCR_EL2_TDRA) + * - OS related registers (MDCR_EL2_TDOSA) + * + * Additionally, KVM only traps guest accesses to the debug registers if + * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY + * flag on vcpu->arch.debug_flags). Since the guest must not interfere + * with the hardware state when debugging the guest, we must ensure that + * trapping is enabled whenever we are debugging the guest using the + * debug registers. + */ + +void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) +{ + bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY); + + vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; + vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM | + MDCR_EL2_TPMCR | + MDCR_EL2_TDRA | + MDCR_EL2_TDOSA); + + /* Trap on access to debug registers? */ + if (trap_debug) + vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA; + +} + +void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) +{ + /* Nothing to do yet */ +} diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 17a8fb14f428..b3176e6e51d1 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -770,17 +770,8 @@ mov x2, #(1 << 15) // Trap CP15 Cr=15 msr hstr_el2, x2 - mrs x2, mdcr_el2 - and x2, x2, #MDCR_EL2_HPMN_MASK - orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR) - orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA) - - // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap - // if not dirty. - ldr x3, [x0, #VCPU_DEBUG_FLAGS] - tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f - orr x2, x2, #MDCR_EL2_TDA -1: + // Monitor Debug Config - see kvm_arm_setup_debug() + ldr x2, [x0, #VCPU_MDCR_EL2] msr mdcr_el2, x2 .endm @@ -1285,4 +1276,10 @@ ENTRY(__kvm_hyp_vector) ventry el1_error_invalid // Error 32-bit EL1 ENDPROC(__kvm_hyp_vector) + +ENTRY(__kvm_get_mdcr_el2) + mrs x0, mdcr_el2 + ret +ENDPROC(__kvm_get_mdcr_el2) + .popsection -- cgit v1.2.3 From 84e690bfbed1d1ecb45d8eccd4c7b6c8e878da1c Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Tue, 7 Jul 2015 17:30:00 +0100 Subject: KVM: arm64: introduce vcpu->arch.debug_ptr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This introduces a level of indirection for the debug registers. Instead of using the sys_regs[] directly we store registers in a structure in the vcpu. The new kvm_arm_reset_debug_ptr() sets the debug ptr to the guest context. Because we no longer give the sys_regs offset for the sys_reg_desc->reg field, but instead the index into a debug-specific struct we need to add a number of additional trap functions for each register. 
Also as the generic generic user-space access code no longer works we have introduced a new pair of function pointers to the sys_reg_desc structure to override the generic code when needed. Reviewed-by: Christoffer Dall Signed-off-by: Alex Bennée Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 1 + arch/arm/kvm/arm.c | 2 + arch/arm64/include/asm/kvm_asm.h | 24 ++-- arch/arm64/include/asm/kvm_host.h | 17 ++- arch/arm64/kernel/asm-offsets.c | 6 + arch/arm64/kvm/debug.c | 9 ++ arch/arm64/kvm/hyp.S | 24 ++-- arch/arm64/kvm/sys_regs.c | 274 +++++++++++++++++++++++++++++++++++--- arch/arm64/kvm/sys_regs.h | 6 + 9 files changed, 316 insertions(+), 47 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 2b0bc8c57552..dcba0fa5176e 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -234,5 +234,6 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arm_init_debug(void) {} static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} +static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {} #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 77151b111d32..9ce5cf02ed17 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -278,6 +278,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) /* Set up the timer */ kvm_timer_vcpu_init(vcpu); + kvm_arm_reset_debug_ptr(vcpu); + return 0; } diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index f5e40dae291a..67fa0de3d483 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -46,24 +46,16 @@ #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ #define PAR_EL1 21 /* Physical Address Register */ #define MDSCR_EL1 22 /* Monitor Debug System Control Register */ -#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */ -#define DBGBCR15_EL1 38 -#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */ -#define DBGBVR15_EL1 54 -#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */ -#define DBGWCR15_EL1 70 -#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */ -#define DBGWVR15_EL1 86 -#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */ +#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */ /* 32bit specific registers. 
Keep them at the end of the range */ -#define DACR32_EL2 88 /* Domain Access Control Register */ -#define IFSR32_EL2 89 /* Instruction Fault Status Register */ -#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */ -#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */ -#define TEECR32_EL1 92 /* ThumbEE Configuration Register */ -#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */ -#define NR_SYS_REGS 94 +#define DACR32_EL2 24 /* Domain Access Control Register */ +#define IFSR32_EL2 25 /* Instruction Fault Status Register */ +#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ +#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ +#define TEECR32_EL1 28 /* ThumbEE Configuration Register */ +#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */ +#define NR_SYS_REGS 30 /* 32bit mapping */ #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cfb675466e86..9b99402b14df 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -108,11 +108,25 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* Debug state */ + /* Guest debug state */ u64 debug_flags; + /* + * We maintain more than a single set of debug registers to support + * debugging the guest from the host and to maintain separate host and + * guest state during world switches. vcpu_debug_state are the debug + * registers of the vcpu as the guest sees them. host_debug_state are + * the host registers which are saved and restored during world switches. + * + * debug_ptr points to the set of debug registers that should be loaded + * onto the hardware when running the guest. 
+ */ + struct kvm_guest_debug_arch *debug_ptr; + struct kvm_guest_debug_arch vcpu_debug_state; + /* Pointer to host CPU context */ kvm_cpu_context_t *host_cpu_context; + struct kvm_guest_debug_arch host_debug_state; /* VGIC state */ struct vgic_cpu vgic_cpu; @@ -242,5 +256,6 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} void kvm_arm_init_debug(void); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); +void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 5c900d49b906..d88630899a24 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -116,10 +116,16 @@ int main(void) DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags)); + DEFINE(VCPU_DEBUG_PTR, offsetof(struct kvm_vcpu, arch.debug_ptr)); + DEFINE(DEBUG_BCR, offsetof(struct kvm_guest_debug_arch, dbg_bcr)); + DEFINE(DEBUG_BVR, offsetof(struct kvm_guest_debug_arch, dbg_bvr)); + DEFINE(DEBUG_WCR, offsetof(struct kvm_guest_debug_arch, dbg_wcr)); + DEFINE(DEBUG_WVR, offsetof(struct kvm_guest_debug_arch, dbg_wvr)); DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2)); DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state)); DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index d439eb8f3239..e0947b77faaa 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -66,6 +66,15 @@ void kvm_arm_init_debug(void) __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2)); } +/** + * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state + */ + +void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state; +} + /** * kvm_arm_setup_debug - set up debug related stuff * diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 8264f5b4817c..d93c0a23630e 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -596,6 +596,7 @@ __restore_sysregs: /* Save debug state */ __save_debug: // x2: ptr to CPU context + // x3: ptr to debug reg struct // x4/x5/x6-22/x24-26: trashed mrs x26, id_aa64dfr0_el1 @@ -606,15 +607,15 @@ __save_debug: sub w25, w26, w25 // How many WPs to skip mov x5, x24 - add x4, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1) + add x4, x3, #DEBUG_BCR save_debug dbgbcr - add x4, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1) + add x4, x3, #DEBUG_BVR save_debug dbgbvr mov x5, x25 - add x4, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1) + add x4, x3, #DEBUG_WCR save_debug dbgwcr - add x4, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1) + add x4, x3, #DEBUG_WVR save_debug dbgwvr mrs x21, mdccint_el1 @@ -624,6 +625,7 @@ __save_debug: /* Restore debug state */ __restore_debug: // x2: ptr to CPU context + // x3: ptr to debug reg struct // x4/x5/x6-22/x24-26: trashed mrs x26, id_aa64dfr0_el1 @@ -634,15 +636,15 @@ __restore_debug: sub w25, w26, w25 // 
How many WPs to skip mov x5, x24 - add x4, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1) + add x4, x3, #DEBUG_BCR restore_debug dbgbcr - add x4, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1) + add x4, x3, #DEBUG_BVR restore_debug dbgbvr mov x5, x25 - add x4, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1) + add x4, x3, #DEBUG_WCR restore_debug dbgwcr - add x4, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1) + add x4, x3, #DEBUG_WVR restore_debug dbgwvr ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)] @@ -682,6 +684,7 @@ ENTRY(__kvm_vcpu_run) bl __save_sysregs compute_debug_state 1f + add x3, x0, #VCPU_HOST_DEBUG_STATE bl __save_debug 1: activate_traps @@ -697,6 +700,8 @@ ENTRY(__kvm_vcpu_run) bl __restore_fpsimd skip_debug_state x3, 1f + ldr x3, [x0, #VCPU_DEBUG_PTR] + kern_hyp_va x3 bl __restore_debug 1: restore_guest_32bit_state @@ -717,6 +722,8 @@ __kvm_vcpu_return: bl __save_sysregs skip_debug_state x3, 1f + ldr x3, [x0, #VCPU_DEBUG_PTR] + kern_hyp_va x3 bl __save_debug 1: save_guest_32bit_state @@ -739,6 +746,7 @@ __kvm_vcpu_return: // already been saved. Note that we nuke the whole 64bit word. // If we ever add more flags, we'll have to be more careful... str xzr, [x0, #VCPU_DEBUG_FLAGS] + add x3, x0, #VCPU_HOST_DEBUG_STATE bl __restore_debug 1: restore_host_regs diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index c370b4014799..158bae7c52cc 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -211,6 +211,203 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, return true; } +/* + * reg_to_dbg/dbg_to_reg + * + * A 32 bit write to a debug register leave top bits alone + * A 32 bit read from a debug register only returns the bottom bits + * + * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the + * hyp.S code switches between host and guest values in future. 
+ */ +static inline void reg_to_dbg(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + u64 *dbg_reg) +{ + u64 val = *vcpu_reg(vcpu, p->Rt); + + if (p->is_32bit) { + val &= 0xffffffffUL; + val |= ((*dbg_reg >> 32) << 32); + } + + *dbg_reg = val; + vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; +} + +static inline void dbg_to_reg(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + u64 *dbg_reg) +{ + u64 val = *dbg_reg; + + if (p->is_32bit) + val &= 0xffffffffUL; + + *vcpu_reg(vcpu, p->Rt) = val; +} + +static inline bool trap_bvr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; + + if (p->is_write) + reg_to_dbg(vcpu, p, dbg_reg); + else + dbg_to_reg(vcpu, p, dbg_reg); + + return true; +} + +static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; + + if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + return 0; +} + +static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; + + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + return 0; +} + +static inline void reset_bvr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; +} + +static inline bool trap_bcr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; + + if (p->is_write) + reg_to_dbg(vcpu, p, dbg_reg); + else + dbg_to_reg(vcpu, p, dbg_reg); + + return true; +} + +static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; + + if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + + return 0; +} + +static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; + + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + return 0; +} + +static inline void reset_bcr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; +} + +static inline bool trap_wvr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; + + if (p->is_write) + reg_to_dbg(vcpu, p, dbg_reg); + else + dbg_to_reg(vcpu, p, dbg_reg); + + return true; +} + +static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; + + if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + return 0; +} + +static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; + + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + return 0; +} + +static inline void reset_wvr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + 
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; +} + +static inline bool trap_wcr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; + + if (p->is_write) + reg_to_dbg(vcpu, p, dbg_reg); + else + dbg_to_reg(vcpu, p, dbg_reg); + + return true; +} + +static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; + + if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + return 0; +} + +static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; + + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + return -EFAULT; + return 0; +} + +static inline void reset_wcr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; +} + static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 amair; @@ -240,16 +437,16 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \ - trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \ + trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ /* DBGBCRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \ - trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \ + trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ /* DBGWVRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \ - trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \ + trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ /* DBGWCRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \ - trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 } + trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } /* * Architected system registers. @@ -516,28 +713,55 @@ static bool trap_debug32(struct kvm_vcpu *vcpu, return true; } -#define DBG_BCR_BVR_WCR_WVR(n) \ - /* DBGBVRn */ \ - { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \ - NULL, (cp14_DBGBVR0 + (n) * 2) }, \ - /* DBGBCRn */ \ - { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \ - NULL, (cp14_DBGBCR0 + (n) * 2) }, \ - /* DBGWVRn */ \ - { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \ - NULL, (cp14_DBGWVR0 + (n) * 2) }, \ - /* DBGWCRn */ \ - { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \ - NULL, (cp14_DBGWCR0 + (n) * 2) } - -#define DBGBXVR(n) \ - { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \ - NULL, cp14_DBGBXVR0 + n * 2 } +/* AArch32 debug register mappings + * + * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] + * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] + * + * All control registers and watchpoint value registers are mapped to + * the lower 32 bits of their AArch64 equivalents. We share the trap + * handlers with the above AArch64 code which checks what mode the + * system is in. 
+ */ + +static inline bool trap_xvr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; + + if (p->is_write) { + u64 val = *dbg_reg; + + val &= 0xffffffffUL; + val |= *vcpu_reg(vcpu, p->Rt) << 32; + *dbg_reg = val; + + vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + } else { + *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32; + } + + return true; +} + +#define DBG_BCR_BVR_WCR_WVR(n) \ + /* DBGBVRn */ \ + { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \ + /* DBGBCRn */ \ + { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \ + /* DBGWVRn */ \ + { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \ + /* DBGWCRn */ \ + { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n } + +#define DBGBXVR(n) \ + { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n } /* * Trapped cp14 registers. We generally ignore most of the external * debug, on the principle that they don't really make sense to a - * guest. Revisit this one day, whould this principle change. + * guest. Revisit this one day, would this principle change. */ static const struct sys_reg_desc cp14_regs[] = { /* DBGIDR */ @@ -1303,6 +1527,9 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg if (!r) return get_invariant_sys_reg(reg->id, uaddr); + if (r->get_user) + return (r->get_user)(vcpu, r, reg, uaddr); + return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); } @@ -1321,6 +1548,9 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg if (!r) return set_invariant_sys_reg(reg->id, uaddr); + if (r->set_user) + return (r->set_user)(vcpu, r, reg, uaddr); + return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); } diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index d411e251412c..eaa324e4db4d 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -55,6 +55,12 @@ struct sys_reg_desc { /* Value (usually reset value) */ u64 val; + + /* Custom get/set_user functions, fallback to generic if NULL */ + int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr); + int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr); }; static inline void print_sys_reg_instr(const struct sys_reg_params *p) -- cgit v1.2.3 From 974a2aba99d29bada9212134e7565d2364967636 Mon Sep 17 00:00:00 2001 From: Arun Chandran Date: Fri, 12 Jun 2015 12:23:24 +0530 Subject: ARM: zynq: Fix earlyprintk in big endian mode earlyprintk messages are not appearing on the terminal emulator during a big endian kernel boot. In BE mode sending full words to UART will result in unprintable characters as they are byte swapped versions of printable ones. So send only bytes. 
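
The failure mode can be illustrated in C (a hedged sketch only; the real change is the one-instruction fix to the senduart macro below, and base/UART_FIFO_OFFSET stand in for the debug UART mapping, mirroring the 0x30 offset in debug/zynq.S):

    #include <linux/io.h>

    #define UART_FIFO_OFFSET 0x30	/* TX/RX FIFO register, as in debug/zynq.S */

    static void debug_putc(void __iomem *base, char c)
    {
    	/*
    	 * __raw_writel is the C equivalent of the old 'str': no byte
    	 * swapping is applied, so on a big-endian kernel the character
    	 * sits in the top byte of the 32-bit value and the byte lane the
    	 * little-endian UART latches carries 0x00 -> unprintable output.
    	 *
    	 *	__raw_writel(c, base + UART_FIFO_OFFSET);
    	 */

    	/* __raw_writeb matches the new 'strb': the single byte always
    	 * reaches the addressed byte lane, whatever the CPU endianness. */
    	__raw_writeb(c, base + UART_FIFO_OFFSET);
    }
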
Signed-off-by: Arun Chandran Tested-by: Michal Simek Signed-off-by: Michal Simek --- arch/arm/include/debug/zynq.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/debug/zynq.S b/arch/arm/include/debug/zynq.S index bd13dedbdeff..de86b9247564 100644 --- a/arch/arm/include/debug/zynq.S +++ b/arch/arm/include/debug/zynq.S @@ -38,7 +38,7 @@ .endm .macro senduart,rd,rx - str \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA + strb \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA .endm .macro waituart,rd,rx -- cgit v1.2.3 From f81309067ff2d84788316c513a415f6bb8c9171f Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 1 Jun 2015 23:44:46 +0100 Subject: ARM: move heavy barrier support out of line The existing memory barrier macro causes a significant amount of code to be inserted inline at every call site. For example, in gpio_set_irq_type(), we have this for mb(): c0344c08: f57ff04e dsb st c0344c0c: e59f8190 ldr r8, [pc, #400] ; c0344da4 c0344c10: e3590004 cmp r9, #4 c0344c14: e5983014 ldr r3, [r8, #20] c0344c18: 0a000054 beq c0344d70 c0344c1c: e3530000 cmp r3, #0 c0344c20: 0a000004 beq c0344c38 c0344c24: e50b2030 str r2, [fp, #-48] ; 0xffffffd0 c0344c28: e50bc034 str ip, [fp, #-52] ; 0xffffffcc c0344c2c: e12fff33 blx r3 c0344c30: e51bc034 ldr ip, [fp, #-52] ; 0xffffffcc c0344c34: e51b2030 ldr r2, [fp, #-48] ; 0xffffffd0 c0344c38: e5963004 ldr r3, [r6, #4] Moving the outer_cache_sync() call out of line reduces the impact of the barrier: c0344968: f57ff04e dsb st c034496c: e35a0004 cmp sl, #4 c0344970: e50b2030 str r2, [fp, #-48] ; 0xffffffd0 c0344974: 0a000044 beq c0344a8c c0344978: ebf363dd bl c001d8f4 c034497c: e5953004 ldr r3, [r5, #4] This should reduce the cache footprint of this code. Overall, this results in a reduction of around 20K in the kernel size: text data bss dec hex filename 10773970 667392 10369656 21811018 14ccf4a ../build/imx6/vmlinux-old 10754219 667392 10369656 21791267 14c8223 ../build/imx6/vmlinux-new Another advantage to this approach is that we can finally resolve the issue of SoCs which have their own memory barrier requirements within multiplatform kernels (such as OMAP.) Here, the bus interconnects need additional handling to ensure that writes become visible in the correct order (eg, between dma_map() operations, writes to DMA coherent memory, and MMIO accesses.) Acked-by: Tony Lindgren Acked-by: Richard Woodruff Signed-off-by: Russell King --- arch/arm/include/asm/barrier.h | 12 +++++++++--- arch/arm/include/asm/outercache.h | 17 ----------------- arch/arm/kernel/irq.c | 1 + arch/arm/mach-mmp/pm-pxa910.c | 1 + arch/arm/mach-prima2/pm.c | 1 + arch/arm/mach-ux500/cache-l2x0.c | 1 + arch/arm/mm/Kconfig | 4 ++++ arch/arm/mm/flush.c | 11 +++++++++++ 8 files changed, 28 insertions(+), 20 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 6c2327e1c732..fea99b0e2087 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -2,7 +2,6 @@ #define __ASM_BARRIER_H #ifndef __ASSEMBLY__ -#include #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); @@ -37,12 +36,19 @@ #define dmb(x) __asm__ __volatile__ ("" : : : "memory") #endif +#ifdef CONFIG_ARM_HEAVY_MB +extern void arm_heavy_mb(void); +#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0) +#else +#define __arm_heavy_mb(x...) 
dsb(x) +#endif + #ifdef CONFIG_ARCH_HAS_BARRIERS #include #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#define mb() do { dsb(); outer_sync(); } while (0) +#define mb() __arm_heavy_mb() #define rmb() dsb() -#define wmb() do { dsb(st); outer_sync(); } while (0) +#define wmb() __arm_heavy_mb(st) #define dma_rmb() dmb(osh) #define dma_wmb() dmb(oshst) #else diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index 563b92fc2f41..c2bf24f40177 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -129,21 +129,4 @@ static inline void outer_resume(void) { } #endif -#ifdef CONFIG_OUTER_CACHE_SYNC -/** - * outer_sync - perform a sync point for outer cache - * - * Ensure that all outer cache operations are complete and any store - * buffers are drained. - */ -static inline void outer_sync(void) -{ - if (outer_cache.sync) - outer_cache.sync(); -} -#else -static inline void outer_sync(void) -{ } -#endif - #endif /* __ASM_OUTERCACHE_H */ diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 350f188c92d2..b96c8ed1723a 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -39,6 +39,7 @@ #include #include +#include #include #include #include diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c index 04c9daf9f8d7..7db5870d127f 100644 --- a/arch/arm/mach-mmp/pm-pxa910.c +++ b/arch/arm/mach-mmp/pm-pxa910.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index d99d08eeb966..83e94c95e314 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c index 7557bede7ae6..780bd13cd7e3 100644 --- a/arch/arm/mach-ux500/cache-l2x0.c +++ b/arch/arm/mach-ux500/cache-l2x0.c @@ -8,6 +8,7 @@ #include #include +#include #include #include "db8500-regs.h" diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 7c6b976ab8d3..df7537f12469 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -883,6 +883,7 @@ config OUTER_CACHE config OUTER_CACHE_SYNC bool + select ARM_HEAVY_MB help The outer cache has a outer_cache_fns.sync function pointer that can be used to drain the write buffer of the outer cache. @@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS This option allows the use of custom mandatory barriers included via the mach/barriers.h file. +config ARM_HEAVY_MB + bool + config ARCH_SUPPORTS_BIG_ENDIAN bool help diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 34b66af516ea..ce6c2960d5ac 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -21,6 +21,17 @@ #include "mm.h" +#ifdef CONFIG_ARM_HEAVY_MB +void arm_heavy_mb(void) +{ +#ifdef CONFIG_OUTER_CACHE_SYNC + if (outer_cache.sync) + outer_cache.sync(); +#endif +} +EXPORT_SYMBOL(arm_heavy_mb); +#endif + #ifdef CONFIG_CPU_CACHE_VIPT static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) -- cgit v1.2.3 From 4e1f8a6f1d978f033f1751e2887b3a69fab3f878 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 3 Jun 2015 13:10:16 +0100 Subject: ARM: add soc memory barrier extension Add an extension to the heavy barrier code to allow a SoC specific memory barrier function to be provided. 
This is needed for platforms where the interconnect has weak ordering, and thus needs assistance to ensure that memory writes are properly visible in the correct order to other parts of the system. Acked-by: Tony Lindgren Acked-by: Richard Woodruff Signed-off-by: Russell King --- arch/arm/include/asm/barrier.h | 1 + arch/arm/mm/flush.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index fea99b0e2087..3d8f1d3ad9a7 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -37,6 +37,7 @@ #endif #ifdef CONFIG_ARM_HEAVY_MB +extern void (*soc_mb)(void); extern void arm_heavy_mb(void); #define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0) #else diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index ce6c2960d5ac..1ec8e7590fc6 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -22,12 +22,16 @@ #include "mm.h" #ifdef CONFIG_ARM_HEAVY_MB +void (*soc_mb)(void); + void arm_heavy_mb(void) { #ifdef CONFIG_OUTER_CACHE_SYNC if (outer_cache.sync) outer_cache.sync(); #endif + if (soc_mb) + soc_mb(); } EXPORT_SYMBOL(arm_heavy_mb); #endif -- cgit v1.2.3 From 125897908c718972351b589da89b7f990892d4df Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Apr 2014 20:04:39 +0200 Subject: arm: Provide atomic_{or,xor,and} Implement atomic logic ops -- atomic_{or,xor,and}. These will replace the atomic_{set,clear}_mask functions that are available on some archs. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner --- arch/arm/include/asm/atomic.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index e22c11970b7b..ff214bac9cb4 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -194,6 +194,14 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) +#define CONFIG_ARCH_HAS_ATOMIC_OR +#define atomic_andnot atomic_andnot + +ATOMIC_OP(and, &=, and) +ATOMIC_OP(andnot, &= ~, bic) +ATOMIC_OP(or, |=, orr) +ATOMIC_OP(xor, ^=, eor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -321,6 +329,13 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(sub, subs, sbc) +#define atomic64_andnot atomic64_andnot + +ATOMIC64_OP(and, and, and) +ATOMIC64_OP(andnot, bic, bic) +ATOMIC64_OP(or, orr, orr) +ATOMIC64_OP(xor, eor, eor) + #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -- cgit v1.2.3 From e6942b7de2dfe44ebde9bae57dadece5abca9de8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Apr 2014 19:32:50 +0200 Subject: atomic: Provide atomic_{or,xor,and} Implement atomic logic ops -- atomic_{or,xor,and}. These will replace the atomic_{set,clear}_mask functions that are available on some archs. 
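
As a rough sketch of the conversion this enables (the flag word and helpers below are hypothetical; only atomic_or/atomic_and and the old mask helpers are real), callers migrate like this:

    #include <linux/atomic.h>
    #include <linux/bitops.h>

    static atomic_t irq_flags = ATOMIC_INIT(0);	/* hypothetical flag word */

    static void set_busy(void)
    {
    	/* was: atomic_set_mask(BIT(0), &irq_flags); */
    	atomic_or(BIT(0), &irq_flags);
    }

    static void clear_busy(void)
    {
    	/* was: atomic_clear_mask(BIT(0), &irq_flags); */
    	atomic_and(~BIT(0), &irq_flags);
    }
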
Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner --- arch/alpha/include/asm/atomic.h | 1 - arch/arc/include/asm/atomic.h | 1 - arch/arm/include/asm/atomic.h | 1 - arch/arm64/include/asm/atomic.h | 1 - arch/avr32/include/asm/atomic.h | 2 -- arch/blackfin/include/asm/atomic.h | 2 -- arch/frv/include/asm/atomic.h | 2 -- arch/h8300/include/asm/atomic.h | 2 -- arch/hexagon/include/asm/atomic.h | 2 -- arch/ia64/include/asm/atomic.h | 2 -- arch/m32r/include/asm/atomic.h | 2 -- arch/m68k/include/asm/atomic.h | 2 -- arch/metag/include/asm/atomic_lnkget.h | 2 -- arch/mips/include/asm/atomic.h | 2 -- arch/mn10300/include/asm/atomic.h | 2 -- arch/parisc/include/asm/atomic.h | 2 -- arch/powerpc/include/asm/atomic.h | 2 -- arch/s390/include/asm/atomic.h | 2 -- arch/sh/include/asm/atomic-grb.h | 2 -- arch/sparc/include/asm/atomic_32.h | 2 -- arch/sparc/include/asm/atomic_64.h | 2 -- arch/tile/include/asm/atomic_32.h | 2 -- arch/tile/include/asm/atomic_64.h | 2 -- arch/x86/include/asm/atomic.h | 2 -- arch/xtensa/include/asm/atomic.h | 2 -- include/asm-generic/atomic.h | 21 ++++++++++++--------- include/asm-generic/atomic64.h | 4 ++++ include/linux/atomic.h | 13 ------------- lib/atomic64.c | 3 +++ 29 files changed, 19 insertions(+), 68 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 0eff853398d2..e8c956098424 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -110,7 +110,6 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot #define atomic64_andnot atomic64_andnot diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index e90b701fc6a8..2a847821dee1 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -144,7 +144,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot ATOMIC_OP(and, &=, and) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index ff214bac9cb4..82b75a7cb762 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -194,7 +194,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot ATOMIC_OP(and, &=, and) diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 2876173397b2..866a71fca9a3 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -85,7 +85,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, add) ATOMIC_OPS(sub, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot ATOMIC_OP(and, and) diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index 115d3005e4bc..97c9bdf83409 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -51,8 +51,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ (void)__atomic_##op##_return(i, v); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, and) ATOMIC_OP(or, or) ATOMIC_OP(xor, eor) diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index eafa55b81a7b..2d6a7a3823c3 100644 --- a/arch/blackfin/include/asm/atomic.h +++ 
b/arch/blackfin/include/asm/atomic.h @@ -28,8 +28,6 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value); #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i) #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i)) -#define CONFIG_ARCH_HAS_ATOMIC_OR - #define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i) #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i) #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i) diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 74d22454d7c6..fc48bea26b40 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -192,8 +192,6 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \ (void)__atomic64_fetch_##op(i, &v->counter); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(or) ATOMIC_OP(and) ATOMIC_OP(xor) diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index f181f820be33..c4d061f09c44 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -41,8 +41,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ ATOMIC_OP_RETURN(add, +=) ATOMIC_OP_RETURN(sub, -=) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=) ATOMIC_OP(or, |=) ATOMIC_OP(xor, ^=) diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 4efe2c7c0dd8..811d61f6422d 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -132,8 +132,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 0809ef5d6b9a..be4beeb77d57 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -69,8 +69,6 @@ ATOMIC_OP(sub, -) : ia64_atomic_sub(__ia64_asr_i, v); \ }) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &) ATOMIC_OP(or, |) ATOMIC_OP(xor, ^) diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 7245463c1e98..b2a13fbd5be0 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -94,8 +94,6 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index c30e43ea49a3..93ebd96aa494 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -77,8 +77,6 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=, and) ATOMIC_OP(or, |=, or) ATOMIC_OP(xor, ^=, eor) diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h index 930c12cb8d37..0642606de901 100644 --- a/arch/metag/include/asm/atomic_lnkget.h +++ b/arch/metag/include/asm/atomic_lnkget.h @@ -74,8 +74,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 0430ba6ab762..4c42fd9af777 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -137,8 +137,6 @@ static __inline__ int 
atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add, +=, addu) ATOMIC_OPS(sub, -=, subu) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=, and) ATOMIC_OP(or, |=, or) ATOMIC_OP(xor, ^=, xor) diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 03eea8158cf9..f5a63f0bda46 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -89,8 +89,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index be2c50ddebd6..2536965d00ea 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -126,8 +126,6 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=) ATOMIC_OPS(sub, -=) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=) ATOMIC_OP(or, |=) ATOMIC_OP(xor, ^=) diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 6ca89e2aca15..55f106ed12bf 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -67,8 +67,6 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \ ATOMIC_OPS(add, add) ATOMIC_OPS(sub, subf) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, and) ATOMIC_OP(or, or) ATOMIC_OP(xor, xor) diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index b3859d8e001f..d761aeff72da 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -282,8 +282,6 @@ static inline void atomic64_##op(long i, atomic64_t *v) \ __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC64_OP(and, AND) ATOMIC64_OP(or, OR) ATOMIC64_OP(xor, XOR) diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h index 4b03830d48c7..b94df40e5f2d 100644 --- a/arch/sh/include/asm/atomic-grb.h +++ b/arch/sh/include/asm/atomic-grb.h @@ -48,8 +48,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index e19d8880b146..7dcbebbcaec6 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -17,8 +17,6 @@ #include #include -#define CONFIG_ARCH_HAS_ATOMIC_OR - #define ATOMIC_INIT(i) { (i) } int atomic_add_return(int, atomic_t *); diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index d6af27c93450..917084ace49d 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -33,8 +33,6 @@ long atomic64_##op##_return(long, atomic64_t *); ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index 94237922f0dd..d320ce253d86 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -41,8 +41,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ _atomic_##op((unsigned long *)&v->counter, i); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index d07d9fc6e2a1..096a56d6ead4 100644 --- 
a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -58,8 +58,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return oldval; } -#define CONFIG_ARCH_HAS_ATOMIC_OR - static inline void atomic_and(int i, atomic_t *v) { __insn_fetchand4((void *)&v->counter, i); diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index f3a3ec040694..b3493023efda 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -191,8 +191,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ : "memory"); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 4dd2450300a6..31371f43c23b 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -145,8 +145,6 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 92947e0a532a..a41b0b8f7404 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -102,24 +102,27 @@ ATOMIC_OP_RETURN(sub, -) ATOMIC_OP(and, &) #endif -#ifndef atomic_clear_mask -#define atomic_clear_mask(i, v) atomic_and(~(i), (v)) -#endif - #ifndef atomic_or -#ifndef CONFIG_ARCH_HAS_ATOMIC_OR -#define CONFIG_ARCH_HAS_ATOMIC_OR -#endif ATOMIC_OP(or, |) #endif -#ifndef atomic_set_mask -#define atomic_set_mask(i, v) atomic_or((i), (v)) +#ifndef atomic_xor +ATOMIC_OP(xor, ^) #endif #undef ATOMIC_OP_RETURN #undef ATOMIC_OP +static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + atomic_and(~mask, v); +} + +static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + atomic_or(mask, v); +} + /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. 
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 30ad9c86cebb..d48e78ccad3d 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v); ATOMIC64_OPS(add) ATOMIC64_OPS(sub) +ATOMIC64_OP(and) +ATOMIC64_OP(or) +ATOMIC64_OP(xor) + #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 5b08a8540ecf..7d6279012a1f 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -111,19 +111,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif -#ifndef CONFIG_ARCH_HAS_ATOMIC_OR -static inline void atomic_or(int i, atomic_t *v) -{ - int old; - int new; - - do { - old = atomic_read(v); - new = old | i; - } while (atomic_cmpxchg(v, old, new) != old); -} -#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ - #include #ifdef CONFIG_GENERIC_ATOMIC64 #include diff --git a/lib/atomic64.c b/lib/atomic64.c index 1298c05ef528..2886ebac6567 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return); ATOMIC64_OPS(add, +=) ATOMIC64_OPS(sub, -=) +ATOMIC64_OP(and, &=) +ATOMIC64_OP(or, |=) +ATOMIC64_OP(xor, ^=) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN -- cgit v1.2.3 From fa8ad7889d83bcf0a6cdbf6d3622f3ec019cde14 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 6 Jul 2015 12:23:53 +0100 Subject: arm: perf: factor arm_pmu core out to drivers To enable sharing of the arm_pmu code with arm64, this patch factors it out to drivers/perf/. A new drivers/perf directory is added for performance monitor drivers to live under. MAINTAINERS is updated accordingly. Files added previously without a corresponding MAINTAINERS update (perf_regs.c, perf_callchain.c, and perf_event.h) are also added.
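As a rough sketch of what a back-end looks like against the relocated framework (illustrative only, not code from this patch), a CPU PMU driver now includes <linux/perf/arm_pmu.h> instead of <asm/pmu.h> and hands its match tables to arm_pmu_device_probe(); the "example" identifiers, the compatible string and the empty probe table below are placeholders:

#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/* Placeholder init: a real back-end fills in the arm_pmu callbacks here. */
static int example_pmu_init(struct arm_pmu *pmu)
{
	pmu->name = "example_pmu";
	/* pmu->handle_irq, pmu->enable, pmu->map_event, ... would be set here. */
	return 0;
}

static const struct of_device_id example_pmu_of_ids[] = {
	{ .compatible = "example,pmu", .data = example_pmu_init },
	{ /* sentinel */ },
};

static const struct pmu_probe_info example_pmu_probe_table[] = {
	/* ARM_PMU_PROBE(midr, init_fn) entries cover non-DT boots. */
	{ /* sentinel */ },
};

static int example_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, example_pmu_of_ids,
				    example_pmu_probe_table);
}

static struct platform_driver example_pmu_driver = {
	.driver = {
		.name		= "example-arm-pmu",
		.of_match_table	= example_pmu_of_ids,
	},
	.probe = example_pmu_device_probe,
};

static int __init example_pmu_register_driver(void)
{
	return platform_driver_register(&example_pmu_driver);
}
device_initcall(example_pmu_register_driver);

Existing back-ends such as the ARMv6/ARMv7/XScale event code only need their include lines switched over, which is what the hunks below do.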
Cc: Arnaldo Carvalho de Melo Cc: Greg Kroah-Hartman Cc: Ingo Molnar Cc: Linus Walleij Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Russell King Cc: Will Deacon Signed-off-by: Mark Rutland [will: augmented Kconfig help slightly] Signed-off-by: Will Deacon --- MAINTAINERS | 6 +- arch/arm/Kconfig | 8 +- arch/arm/include/asm/pmu.h | 154 ------ arch/arm/kernel/Makefile | 3 +- arch/arm/kernel/perf_event.c | 921 ------------------------------------ arch/arm/kernel/perf_event_v6.c | 2 +- arch/arm/kernel/perf_event_v7.c | 2 +- arch/arm/kernel/perf_event_xscale.c | 2 +- arch/arm/mach-ux500/cpu-db8500.c | 2 +- drivers/Kconfig | 2 + drivers/Makefile | 1 + drivers/perf/Kconfig | 15 + drivers/perf/Makefile | 1 + drivers/perf/arm_pmu.c | 921 ++++++++++++++++++++++++++++++++++++ include/linux/perf/arm_pmu.h | 154 ++++++ 15 files changed, 1105 insertions(+), 1089 deletions(-) delete mode 100644 arch/arm/include/asm/pmu.h delete mode 100644 arch/arm/kernel/perf_event.c create mode 100644 drivers/perf/Kconfig create mode 100644 drivers/perf/Makefile create mode 100644 drivers/perf/arm_pmu.c create mode 100644 include/linux/perf/arm_pmu.h (limited to 'arch/arm/include') diff --git a/MAINTAINERS b/MAINTAINERS index fd6078443083..485c92ced47d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -799,11 +799,13 @@ F: arch/arm/include/asm/floppy.h ARM PMU PROFILING AND DEBUGGING M: Will Deacon S: Maintained -F: arch/arm/kernel/perf_event* +F: arch/arm/kernel/perf_* F: arch/arm/oprofile/common.c -F: arch/arm/include/asm/pmu.h F: arch/arm/kernel/hw_breakpoint.c F: arch/arm/include/asm/hw_breakpoint.h +F: arch/arm/include/asm/perf_event.h +F: drivers/perf/arm_pmu.c +F: include/linux/perf/arm_pmu.h ARM PORT M: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 1c5021002fe4..4f7bc3d4b186 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1701,12 +1701,8 @@ config HIGHPTE user-space 2nd level page tables to reside in high memory. config HW_PERF_EVENTS - bool "Enable hardware performance counter support for perf events" - depends on PERF_EVENTS - default y - help - Enable hardware performance counter support for perf events. If - disabled, perf events will use software events only. + def_bool y + depends on ARM_PMU config SYS_SUPPORTS_HUGETLBFS def_bool y diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h deleted file mode 100644 index 3fc87dfd77e6..000000000000 --- a/arch/arm/include/asm/pmu.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * linux/arch/arm/include/asm/pmu.h - * - * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#ifndef __ARM_PMU_H__ -#define __ARM_PMU_H__ - -#include -#include - -#include - -/* - * struct arm_pmu_platdata - ARM PMU platform data - * - * @handle_irq: an optional handler which will be called from the - * interrupt and passed the address of the low level handler, - * and can be used to implement any platform specific handling - * before or after calling it. - */ -struct arm_pmu_platdata { - irqreturn_t (*handle_irq)(int irq, void *dev, - irq_handler_t pmu_handler); -}; - -#ifdef CONFIG_HW_PERF_EVENTS - -/* - * The ARMv7 CPU PMU supports up to 32 event counters. - */ -#define ARMPMU_MAX_HWEVENTS 32 - -#define HW_OP_UNSUPPORTED 0xFFFF -#define C(_x) PERF_COUNT_HW_CACHE_##_x -#define CACHE_OP_UNSUPPORTED 0xFFFF - -#define PERF_MAP_ALL_UNSUPPORTED \ - [0 ... 
PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED - -#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ -[0 ... C(MAX) - 1] = { \ - [0 ... C(OP_MAX) - 1] = { \ - [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \ - }, \ -} - -/* The events for a given PMU register set. */ -struct pmu_hw_events { - /* - * The events that are active on the PMU for the given index. - */ - struct perf_event *events[ARMPMU_MAX_HWEVENTS]; - - /* - * A 1 bit for an index indicates that the counter is being used for - * an event. A 0 means that the counter can be used. - */ - DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS); - - /* - * Hardware lock to serialize accesses to PMU registers. Needed for the - * read/modify/write sequences. - */ - raw_spinlock_t pmu_lock; - - /* - * When using percpu IRQs, we need a percpu dev_id. Place it here as we - * already have to allocate this struct per cpu. - */ - struct arm_pmu *percpu_pmu; -}; - -struct arm_pmu { - struct pmu pmu; - cpumask_t active_irqs; - cpumask_t supported_cpus; - int *irq_affinity; - char *name; - irqreturn_t (*handle_irq)(int irq_num, void *dev); - void (*enable)(struct perf_event *event); - void (*disable)(struct perf_event *event); - int (*get_event_idx)(struct pmu_hw_events *hw_events, - struct perf_event *event); - void (*clear_event_idx)(struct pmu_hw_events *hw_events, - struct perf_event *event); - int (*set_event_filter)(struct hw_perf_event *evt, - struct perf_event_attr *attr); - u32 (*read_counter)(struct perf_event *event); - void (*write_counter)(struct perf_event *event, u32 val); - void (*start)(struct arm_pmu *); - void (*stop)(struct arm_pmu *); - void (*reset)(void *); - int (*request_irq)(struct arm_pmu *, irq_handler_t handler); - void (*free_irq)(struct arm_pmu *); - int (*map_event)(struct perf_event *event); - int num_events; - atomic_t active_events; - struct mutex reserve_mutex; - u64 max_period; - struct platform_device *plat_device; - struct pmu_hw_events __percpu *hw_events; - struct notifier_block hotplug_nb; -}; - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -int armpmu_register(struct arm_pmu *armpmu, int type); - -u64 armpmu_event_update(struct perf_event *event); - -int armpmu_event_set_period(struct perf_event *event); - -int armpmu_map_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask); - -struct pmu_probe_info { - unsigned int cpuid; - unsigned int mask; - int (*init)(struct arm_pmu *); -}; - -#define PMU_PROBE(_cpuid, _mask, _fn) \ -{ \ - .cpuid = (_cpuid), \ - .mask = (_mask), \ - .init = (_fn), \ -} - -#define ARM_PMU_PROBE(_cpuid, _fn) \ - PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn) - -#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK) - -#define XSCALE_PMU_PROBE(_version, _fn) \ - PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn) - -int arm_pmu_device_probe(struct platform_device *pdev, - const struct of_device_id *of_table, - const struct pmu_probe_info *probe_table); - -#endif /* CONFIG_HW_PERF_EVENTS */ - -#endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index e69f7a19735d..fcb25c1c5c21 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -71,8 +71,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o 
-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o \ - perf_event_xscale.o perf_event_v6.o \ +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \ perf_event_v7.o CFLAGS_pj4-cp0.o := -marm AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c deleted file mode 100644 index 1cb40651d783..000000000000 --- a/arch/arm/kernel/perf_event.c +++ /dev/null @@ -1,921 +0,0 @@ -#undef DEBUG - -/* - * ARM performance counter support. - * - * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles - * Copyright (C) 2010 ARM Ltd., Will Deacon - * - * This code is based on the sparc64 perf event code, which is in turn based - * on the x86 code. - */ -#define pr_fmt(fmt) "hw perfevents: " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -static int -armpmu_map_cache_event(const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u64 config) -{ - unsigned int cache_type, cache_op, cache_result, ret; - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return -EINVAL; - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return -EINVAL; - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return -EINVAL; - - ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; - - if (ret == CACHE_OP_UNSUPPORTED) - return -ENOENT; - - return ret; -} - -static int -armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) -{ - int mapping; - - if (config >= PERF_COUNT_HW_MAX) - return -EINVAL; - - mapping = (*event_map)[config]; - return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; -} - -static int -armpmu_map_raw_event(u32 raw_event_mask, u64 config) -{ - return (int)(config & raw_event_mask); -} - -int -armpmu_map_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask) -{ - u64 config = event->attr.config; - int type = event->attr.type; - - if (type == event->pmu->type) - return armpmu_map_raw_event(raw_event_mask, config); - - switch (type) { - case PERF_TYPE_HARDWARE: - return armpmu_map_hw_event(event_map, config); - case PERF_TYPE_HW_CACHE: - return armpmu_map_cache_event(cache_map, config); - case PERF_TYPE_RAW: - return armpmu_map_raw_event(raw_event_mask, config); - } - - return -ENOENT; -} - -int armpmu_event_set_period(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - s64 left = local64_read(&hwc->period_left); - s64 period = hwc->sample_period; - int ret = 0; - - if (unlikely(left <= -period)) { - left = period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - if (unlikely(left <= 0)) { - left += period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - /* - * Limit the maximum period to prevent the counter value - * from overtaking the one we are about to program. In - * effect we are reducing max_period to account for - * interrupt latency (and we are being very conservative). 
- */ - if (left > (armpmu->max_period >> 1)) - left = armpmu->max_period >> 1; - - local64_set(&hwc->prev_count, (u64)-left); - - armpmu->write_counter(event, (u64)(-left) & 0xffffffff); - - perf_event_update_userpage(event); - - return ret; -} - -u64 armpmu_event_update(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - u64 delta, prev_raw_count, new_raw_count; - -again: - prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = armpmu->read_counter(event); - - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; - - delta = (new_raw_count - prev_raw_count) & armpmu->max_period; - - local64_add(delta, &event->count); - local64_sub(delta, &hwc->period_left); - - return new_raw_count; -} - -static void -armpmu_read(struct perf_event *event) -{ - armpmu_event_update(event); -} - -static void -armpmu_stop(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to update the counter, so ignore - * PERF_EF_UPDATE, see comments in armpmu_start(). - */ - if (!(hwc->state & PERF_HES_STOPPED)) { - armpmu->disable(event); - armpmu_event_update(event); - hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; - } -} - -static void armpmu_start(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to reprogram the period, so ignore - * PERF_EF_RELOAD, see the comment below. - */ - if (flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); - - hwc->state = 0; - /* - * Set the period again. Some counters can't be stopped, so when we - * were stopped we simply disabled the IRQ source and the counter - * may have been left counting. If we don't do this step then we may - * get an interrupt too soon or *way* too late if the overflow has - * happened since disabling. - */ - armpmu_event_set_period(event); - armpmu->enable(event); -} - -static void -armpmu_del(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - armpmu_stop(event, PERF_EF_UPDATE); - hw_events->events[idx] = NULL; - clear_bit(idx, hw_events->used_mask); - if (armpmu->clear_event_idx) - armpmu->clear_event_idx(hw_events, event); - - perf_event_update_userpage(event); -} - -static int -armpmu_add(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - /* An event following a process won't be stopped earlier */ - if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) - return -ENOENT; - - perf_pmu_disable(event->pmu); - - /* If we don't have a space for the counter then finish early. */ - idx = armpmu->get_event_idx(hw_events, event); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then make - * sure it is disabled. 
- */ - event->hw.idx = idx; - armpmu->disable(event); - hw_events->events[idx] = event; - - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; - if (flags & PERF_EF_START) - armpmu_start(event, PERF_EF_RELOAD); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - perf_pmu_enable(event->pmu); - return err; -} - -static int -validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, - struct perf_event *event) -{ - struct arm_pmu *armpmu; - - if (is_software_event(event)) - return 1; - - /* - * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The - * core perf code won't check that the pmu->ctx == leader->ctx - * until after pmu->event_init(event). - */ - if (event->pmu != pmu) - return 0; - - if (event->state < PERF_EVENT_STATE_OFF) - return 1; - - if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) - return 1; - - armpmu = to_arm_pmu(event->pmu); - return armpmu->get_event_idx(hw_events, event) >= 0; -} - -static int -validate_group(struct perf_event *event) -{ - struct perf_event *sibling, *leader = event->group_leader; - struct pmu_hw_events fake_pmu; - - /* - * Initialise the fake PMU. We only need to populate the - * used_mask for the purposes of validation. - */ - memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask)); - - if (!validate_event(event->pmu, &fake_pmu, leader)) - return -EINVAL; - - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(event->pmu, &fake_pmu, sibling)) - return -EINVAL; - } - - if (!validate_event(event->pmu, &fake_pmu, event)) - return -EINVAL; - - return 0; -} - -static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) -{ - struct arm_pmu *armpmu; - struct platform_device *plat_device; - struct arm_pmu_platdata *plat; - int ret; - u64 start_clock, finish_clock; - - /* - * we request the IRQ with a (possibly percpu) struct arm_pmu**, but - * the handlers expect a struct arm_pmu*. The percpu_irq framework will - * do any necessary shifting, we just need to perform the first - * dereference. 
- */ - armpmu = *(void **)dev; - plat_device = armpmu->plat_device; - plat = dev_get_platdata(&plat_device->dev); - - start_clock = sched_clock(); - if (plat && plat->handle_irq) - ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); - else - ret = armpmu->handle_irq(irq, armpmu); - finish_clock = sched_clock(); - - perf_sample_event_took(finish_clock - start_clock); - return ret; -} - -static void -armpmu_release_hardware(struct arm_pmu *armpmu) -{ - armpmu->free_irq(armpmu); -} - -static int -armpmu_reserve_hardware(struct arm_pmu *armpmu) -{ - int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); - if (err) { - armpmu_release_hardware(armpmu); - return err; - } - - return 0; -} - -static void -hw_perf_event_destroy(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - atomic_t *active_events = &armpmu->active_events; - struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; - - if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { - armpmu_release_hardware(armpmu); - mutex_unlock(pmu_reserve_mutex); - } -} - -static int -event_requires_mode_exclusion(struct perf_event_attr *attr) -{ - return attr->exclude_idle || attr->exclude_user || - attr->exclude_kernel || attr->exclude_hv; -} - -static int -__hw_perf_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - int mapping; - - mapping = armpmu->map_event(event); - - if (mapping < 0) { - pr_debug("event %x:%llx not supported\n", event->attr.type, - event->attr.config); - return mapping; - } - - /* - * We don't assign an index until we actually place the event onto - * hardware. Use -1 to signify that we haven't decided where to put it - * yet. For SMP systems, each core has it's own PMU so we can't do any - * clever allocation or constraints checking at this point. - */ - hwc->idx = -1; - hwc->config_base = 0; - hwc->config = 0; - hwc->event_base = 0; - - /* - * Check whether we need to exclude the counter from certain modes. - */ - if ((!armpmu->set_event_filter || - armpmu->set_event_filter(hwc, &event->attr)) && - event_requires_mode_exclusion(&event->attr)) { - pr_debug("ARM performance counters do not support " - "mode exclusion\n"); - return -EOPNOTSUPP; - } - - /* - * Store the event encoding into the config_base field. - */ - hwc->config_base |= (unsigned long)mapping; - - if (!is_sampling_event(event)) { - /* - * For non-sampling runs, limit the sample_period to half - * of the counter width. That way, the new counter value - * is far less likely to overtake the previous one unless - * you have some serious IRQ latency issues. - */ - hwc->sample_period = armpmu->max_period >> 1; - hwc->last_period = hwc->sample_period; - local64_set(&hwc->period_left, hwc->sample_period); - } - - if (event->group_leader != event) { - if (validate_group(event) != 0) - return -EINVAL; - } - - return 0; -} - -static int armpmu_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - int err = 0; - atomic_t *active_events = &armpmu->active_events; - - /* - * Reject CPU-affine events for CPUs that are of a different class to - * that which this PMU handles. Process-following events (where - * event->cpu == -1) can be migrated between CPUs, and thus we have to - * reject them later (in armpmu_add) if they're scheduled on a - * different class of CPU. 
- */ - if (event->cpu != -1 && - !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus)) - return -ENOENT; - - /* does not support taken branch sampling */ - if (has_branch_stack(event)) - return -EOPNOTSUPP; - - if (armpmu->map_event(event) == -ENOENT) - return -ENOENT; - - event->destroy = hw_perf_event_destroy; - - if (!atomic_inc_not_zero(active_events)) { - mutex_lock(&armpmu->reserve_mutex); - if (atomic_read(active_events) == 0) - err = armpmu_reserve_hardware(armpmu); - - if (!err) - atomic_inc(active_events); - mutex_unlock(&armpmu->reserve_mutex); - } - - if (err) - return err; - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err; -} - -static void armpmu_enable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); - - /* For task-bound events we may be called on other CPUs */ - if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) - return; - - if (enabled) - armpmu->start(armpmu); -} - -static void armpmu_disable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - - /* For task-bound events we may be called on other CPUs */ - if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) - return; - - armpmu->stop(armpmu); -} - -/* - * In heterogeneous systems, events are specific to a particular - * microarchitecture, and aren't suitable for another. Thus, only match CPUs of - * the same microarchitecture. - */ -static int armpmu_filter_match(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - unsigned int cpu = smp_processor_id(); - return cpumask_test_cpu(cpu, &armpmu->supported_cpus); -} - -static void armpmu_init(struct arm_pmu *armpmu) -{ - atomic_set(&armpmu->active_events, 0); - mutex_init(&armpmu->reserve_mutex); - - armpmu->pmu = (struct pmu) { - .pmu_enable = armpmu_enable, - .pmu_disable = armpmu_disable, - .event_init = armpmu_event_init, - .add = armpmu_add, - .del = armpmu_del, - .start = armpmu_start, - .stop = armpmu_stop, - .read = armpmu_read, - .filter_match = armpmu_filter_match, - }; -} - -int armpmu_register(struct arm_pmu *armpmu, int type) -{ - armpmu_init(armpmu); - pr_info("enabled with %s PMU driver, %d counters available\n", - armpmu->name, armpmu->num_events); - return perf_pmu_register(&armpmu->pmu, armpmu->name, type); -} - -/* Set at runtime when we know what CPU type we are. */ -static struct arm_pmu *__oprofile_cpu_pmu; - -/* - * Despite the names, these two functions are CPU-specific and are used - * by the OProfile/perf code. 
- */ -const char *perf_pmu_name(void) -{ - if (!__oprofile_cpu_pmu) - return NULL; - - return __oprofile_cpu_pmu->name; -} -EXPORT_SYMBOL_GPL(perf_pmu_name); - -int perf_num_counters(void) -{ - int max_events = 0; - - if (__oprofile_cpu_pmu != NULL) - max_events = __oprofile_cpu_pmu->num_events; - - return max_events; -} -EXPORT_SYMBOL_GPL(perf_num_counters); - -static void cpu_pmu_enable_percpu_irq(void *data) -{ - int irq = *(int *)data; - - enable_percpu_irq(irq, IRQ_TYPE_NONE); -} - -static void cpu_pmu_disable_percpu_irq(void *data) -{ - int irq = *(int *)data; - - disable_percpu_irq(irq); -} - -static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) -{ - int i, irq, irqs; - struct platform_device *pmu_device = cpu_pmu->plat_device; - struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - - irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { - on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); - free_percpu_irq(irq, &hw_events->percpu_pmu); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - if (cpu_pmu->irq_affinity) - cpu = cpu_pmu->irq_affinity[i]; - - if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq >= 0) - free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); - } - } -} - -static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) -{ - int i, err, irq, irqs; - struct platform_device *pmu_device = cpu_pmu->plat_device; - struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; - - if (!pmu_device) - return -ENODEV; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (irqs < 1) { - pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n"); - return 0; - } - - irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { - err = request_percpu_irq(irq, handler, "arm-pmu", - &hw_events->percpu_pmu); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - return err; - } - on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq < 0) - continue; - - if (cpu_pmu->irq_affinity) - cpu = cpu_pmu->irq_affinity[i]; - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. - */ - if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { - pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); - continue; - } - - err = request_irq(irq, handler, - IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", - per_cpu_ptr(&hw_events->percpu_pmu, cpu)); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - return err; - } - - cpumask_set_cpu(cpu, &cpu_pmu->active_irqs); - } - } - - return 0; -} - -/* - * PMU hardware loses all context when a CPU goes offline. - * When a CPU is hotplugged back in, since some hardware registers are - * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading - * junk values out of them. 
- */ -static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, - void *hcpu) -{ - int cpu = (unsigned long)hcpu; - struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb); - - if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) - return NOTIFY_DONE; - - if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) - return NOTIFY_DONE; - - if (pmu->reset) - pmu->reset(pmu); - else - return NOTIFY_DONE; - - return NOTIFY_OK; -} - -static int cpu_pmu_init(struct arm_pmu *cpu_pmu) -{ - int err; - int cpu; - struct pmu_hw_events __percpu *cpu_hw_events; - - cpu_hw_events = alloc_percpu(struct pmu_hw_events); - if (!cpu_hw_events) - return -ENOMEM; - - cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify; - err = register_cpu_notifier(&cpu_pmu->hotplug_nb); - if (err) - goto out_hw_events; - - for_each_possible_cpu(cpu) { - struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); - raw_spin_lock_init(&events->pmu_lock); - events->percpu_pmu = cpu_pmu; - } - - cpu_pmu->hw_events = cpu_hw_events; - cpu_pmu->request_irq = cpu_pmu_request_irq; - cpu_pmu->free_irq = cpu_pmu_free_irq; - - /* Ensure the PMU has sane values out of reset. */ - if (cpu_pmu->reset) - on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset, - cpu_pmu, 1); - - /* If no interrupts available, set the corresponding capability flag */ - if (!platform_get_irq(cpu_pmu->plat_device, 0)) - cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; - - return 0; - -out_hw_events: - free_percpu(cpu_hw_events); - return err; -} - -static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) -{ - unregister_cpu_notifier(&cpu_pmu->hotplug_nb); - free_percpu(cpu_pmu->hw_events); -} - -/* - * CPU PMU identification and probing. - */ -static int probe_current_pmu(struct arm_pmu *pmu, - const struct pmu_probe_info *info) -{ - int cpu = get_cpu(); - unsigned int cpuid = read_cpuid_id(); - int ret = -ENODEV; - - pr_info("probing PMU on CPU %d\n", cpu); - - for (; info->init != NULL; info++) { - if ((cpuid & info->mask) != info->cpuid) - continue; - ret = info->init(pmu); - break; - } - - put_cpu(); - return ret; -} - -static int of_pmu_irq_cfg(struct arm_pmu *pmu) -{ - int *irqs, i = 0; - bool using_spi = false; - struct platform_device *pdev = pmu->plat_device; - - irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); - if (!irqs) - return -ENOMEM; - - do { - struct device_node *dn; - int cpu, irq; - - /* See if we have an affinity entry */ - dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i); - if (!dn) - break; - - /* Check the IRQ type and prohibit a mix of PPIs and SPIs */ - irq = platform_get_irq(pdev, i); - if (irq >= 0) { - bool spi = !irq_is_percpu(irq); - - if (i > 0 && spi != using_spi) { - pr_err("PPI/SPI IRQ type mismatch for %s!\n", - dn->name); - kfree(irqs); - return -EINVAL; - } - - using_spi = spi; - } - - /* Now look up the logical CPU number */ - for_each_possible_cpu(cpu) - if (dn == of_cpu_device_node_get(cpu)) - break; - - if (cpu >= nr_cpu_ids) { - pr_warn("Failed to find logical CPU for %s\n", - dn->name); - of_node_put(dn); - cpumask_setall(&pmu->supported_cpus); - break; - } - of_node_put(dn); - - /* For SPIs, we need to track the affinity per IRQ */ - if (using_spi) { - if (i >= pdev->num_resources) { - of_node_put(dn); - break; - } - - irqs[i] = cpu; - } - - /* Keep track of the CPUs containing this PMU type */ - cpumask_set_cpu(cpu, &pmu->supported_cpus); - of_node_put(dn); - i++; - } while (1); - - /* If we didn't manage to parse anything, claim to support all CPUs */ - if 
(cpumask_weight(&pmu->supported_cpus) == 0) - cpumask_setall(&pmu->supported_cpus); - - /* If we matched up the IRQ affinities, use them to route the SPIs */ - if (using_spi && i == pdev->num_resources) - pmu->irq_affinity = irqs; - else - kfree(irqs); - - return 0; -} - -int arm_pmu_device_probe(struct platform_device *pdev, - const struct of_device_id *of_table, - const struct pmu_probe_info *probe_table) -{ - const struct of_device_id *of_id; - const int (*init_fn)(struct arm_pmu *); - struct device_node *node = pdev->dev.of_node; - struct arm_pmu *pmu; - int ret = -ENODEV; - - pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); - if (!pmu) { - pr_info("failed to allocate PMU device!\n"); - return -ENOMEM; - } - - if (!__oprofile_cpu_pmu) - __oprofile_cpu_pmu = pmu; - - pmu->plat_device = pdev; - - if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { - init_fn = of_id->data; - - ret = of_pmu_irq_cfg(pmu); - if (!ret) - ret = init_fn(pmu); - } else { - ret = probe_current_pmu(pmu, probe_table); - cpumask_setall(&pmu->supported_cpus); - } - - if (ret) { - pr_info("failed to probe PMU!\n"); - goto out_free; - } - - ret = cpu_pmu_init(pmu); - if (ret) - goto out_free; - - ret = armpmu_register(pmu, -1); - if (ret) - goto out_destroy; - - return 0; - -out_destroy: - cpu_pmu_destroy(pmu); -out_free: - pr_info("failed to register PMU devices!\n"); - kfree(pmu); - return ret; -} diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 09f83e414a72..09413e7b49aa 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -34,9 +34,9 @@ #include #include -#include #include +#include #include enum armv6_perf_types { diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index f9b37f876e20..126dc679b230 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -21,11 +21,11 @@ #include #include #include -#include #include #include "../vfp/vfpinstr.h" #include +#include #include /* diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 304d056d5b25..aa0499e2eef7 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -16,9 +16,9 @@ #include #include -#include #include +#include #include enum xscale_perf_types { diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 16913800bbf9..5578dc1ab52b 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -20,10 +20,10 @@ #include #include #include +#include #include #include -#include #include #include "setup.h" diff --git a/drivers/Kconfig b/drivers/Kconfig index 6e973b8e3a3b..3497485f5eab 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -176,6 +176,8 @@ source "drivers/powercap/Kconfig" source "drivers/mcb/Kconfig" +source "drivers/perf/Kconfig" + source "drivers/ras/Kconfig" source "drivers/thunderbolt/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index b64b49f6e01b..f245f2291b8a 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -161,6 +161,7 @@ obj-$(CONFIG_NTB) += ntb/ obj-$(CONFIG_FMC) += fmc/ obj-$(CONFIG_POWERCAP) += powercap/ obj-$(CONFIG_MCB) += mcb/ +obj-$(CONFIG_PERF_EVENTS) += perf/ obj-$(CONFIG_RAS) += ras/ obj-$(CONFIG_THUNDERBOLT) += thunderbolt/ obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/ diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig new file mode 100644 index 000000000000..d9de36ee165d --- /dev/null +++ b/drivers/perf/Kconfig @@ -0,0 +1,15 @@ +# 
+# Performance Monitor Drivers +# + +menu "Performance monitor support" + +config ARM_PMU + depends on PERF_EVENTS && ARM + bool "ARM PMU framework" + default y + help + Say y if you want to use CPU performance monitors on ARM-based + systems. + +endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile new file mode 100644 index 000000000000..acd2397ded94 --- /dev/null +++ b/drivers/perf/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ARM_PMU) += arm_pmu.o diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c new file mode 100644 index 000000000000..2365a32a595e --- /dev/null +++ b/drivers/perf/arm_pmu.c @@ -0,0 +1,921 @@ +#undef DEBUG + +/* + * ARM performance counter support. + * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * Copyright (C) 2010 ARM Ltd., Will Deacon + * + * This code is based on the sparc64 perf event code, which is in turn based + * on the x86 code. + */ +#define pr_fmt(fmt) "hw perfevents: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static int +armpmu_map_cache_event(const unsigned (*cache_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u64 config) +{ + unsigned int cache_type, cache_op, cache_result, ret; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; + + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + + return ret; +} + +static int +armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) +{ + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + mapping = (*event_map)[config]; + return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; +} + +static int +armpmu_map_raw_event(u32 raw_event_mask, u64 config) +{ + return (int)(config & raw_event_mask); +} + +int +armpmu_map_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask) +{ + u64 config = event->attr.config; + int type = event->attr.type; + + if (type == event->pmu->type) + return armpmu_map_raw_event(raw_event_mask, config); + + switch (type) { + case PERF_TYPE_HARDWARE: + return armpmu_map_hw_event(event_map, config); + case PERF_TYPE_HW_CACHE: + return armpmu_map_cache_event(cache_map, config); + case PERF_TYPE_RAW: + return armpmu_map_raw_event(raw_event_mask, config); + } + + return -ENOENT; +} + +int armpmu_event_set_period(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + /* + * Limit the maximum period to prevent the counter value + * from overtaking the one we are about to program. 
In + * effect we are reducing max_period to account for + * interrupt latency (and we are being very conservative). + */ + if (left > (armpmu->max_period >> 1)) + left = armpmu->max_period >> 1; + + local64_set(&hwc->prev_count, (u64)-left); + + armpmu->write_counter(event, (u64)(-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +u64 armpmu_event_update(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = armpmu->read_counter(event); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - prev_raw_count) & armpmu->max_period; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void +armpmu_read(struct perf_event *event) +{ + armpmu_event_update(event); +} + +static void +armpmu_stop(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * ARM pmu always has to update the counter, so ignore + * PERF_EF_UPDATE, see comments in armpmu_start(). + */ + if (!(hwc->state & PERF_HES_STOPPED)) { + armpmu->disable(event); + armpmu_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static void armpmu_start(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * ARM pmu always has to reprogram the period, so ignore + * PERF_EF_RELOAD, see the comment below. + */ + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + /* + * Set the period again. Some counters can't be stopped, so when we + * were stopped we simply disabled the IRQ source and the counter + * may have been left counting. If we don't do this step then we may + * get an interrupt too soon or *way* too late if the overflow has + * happened since disabling. + */ + armpmu_event_set_period(event); + armpmu->enable(event); +} + +static void +armpmu_del(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + armpmu_stop(event, PERF_EF_UPDATE); + hw_events->events[idx] = NULL; + clear_bit(idx, hw_events->used_mask); + if (armpmu->clear_event_idx) + armpmu->clear_event_idx(hw_events, event); + + perf_event_update_userpage(event); +} + +static int +armpmu_add(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + /* An event following a process won't be stopped earlier */ + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) + return -ENOENT; + + perf_pmu_disable(event->pmu); + + /* If we don't have a space for the counter then finish early. */ + idx = armpmu->get_event_idx(hw_events, event); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then make + * sure it is disabled. 
+ */ + event->hw.idx = idx; + armpmu->disable(event); + hw_events->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + armpmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static int +validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, + struct perf_event *event) +{ + struct arm_pmu *armpmu; + + if (is_software_event(event)) + return 1; + + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). + */ + if (event->pmu != pmu) + return 0; + + if (event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) + return 1; + + armpmu = to_arm_pmu(event->pmu); + return armpmu->get_event_idx(hw_events, event) >= 0; +} + +static int +validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct pmu_hw_events fake_pmu; + + /* + * Initialise the fake PMU. We only need to populate the + * used_mask for the purposes of validation. + */ + memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask)); + + if (!validate_event(event->pmu, &fake_pmu, leader)) + return -EINVAL; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(event->pmu, &fake_pmu, sibling)) + return -EINVAL; + } + + if (!validate_event(event->pmu, &fake_pmu, event)) + return -EINVAL; + + return 0; +} + +static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) +{ + struct arm_pmu *armpmu; + struct platform_device *plat_device; + struct arm_pmu_platdata *plat; + int ret; + u64 start_clock, finish_clock; + + /* + * we request the IRQ with a (possibly percpu) struct arm_pmu**, but + * the handlers expect a struct arm_pmu*. The percpu_irq framework will + * do any necessary shifting, we just need to perform the first + * dereference. 
+ */ + armpmu = *(void **)dev; + plat_device = armpmu->plat_device; + plat = dev_get_platdata(&plat_device->dev); + + start_clock = sched_clock(); + if (plat && plat->handle_irq) + ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); + else + ret = armpmu->handle_irq(irq, armpmu); + finish_clock = sched_clock(); + + perf_sample_event_took(finish_clock - start_clock); + return ret; +} + +static void +armpmu_release_hardware(struct arm_pmu *armpmu) +{ + armpmu->free_irq(armpmu); +} + +static int +armpmu_reserve_hardware(struct arm_pmu *armpmu) +{ + int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); + if (err) { + armpmu_release_hardware(armpmu); + return err; + } + + return 0; +} + +static void +hw_perf_event_destroy(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + atomic_t *active_events = &armpmu->active_events; + struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; + + if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { + armpmu_release_hardware(armpmu); + mutex_unlock(pmu_reserve_mutex); + } +} + +static int +event_requires_mode_exclusion(struct perf_event_attr *attr) +{ + return attr->exclude_idle || attr->exclude_user || + attr->exclude_kernel || attr->exclude_hv; +} + +static int +__hw_perf_event_init(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int mapping; + + mapping = armpmu->map_event(event); + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. For SMP systems, each core has it's own PMU so we can't do any + * clever allocation or constraints checking at this point. + */ + hwc->idx = -1; + hwc->config_base = 0; + hwc->config = 0; + hwc->event_base = 0; + + /* + * Check whether we need to exclude the counter from certain modes. + */ + if ((!armpmu->set_event_filter || + armpmu->set_event_filter(hwc, &event->attr)) && + event_requires_mode_exclusion(&event->attr)) { + pr_debug("ARM performance counters do not support " + "mode exclusion\n"); + return -EOPNOTSUPP; + } + + /* + * Store the event encoding into the config_base field. + */ + hwc->config_base |= (unsigned long)mapping; + + if (!is_sampling_event(event)) { + /* + * For non-sampling runs, limit the sample_period to half + * of the counter width. That way, the new counter value + * is far less likely to overtake the previous one unless + * you have some serious IRQ latency issues. + */ + hwc->sample_period = armpmu->max_period >> 1; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + if (event->group_leader != event) { + if (validate_group(event) != 0) + return -EINVAL; + } + + return 0; +} + +static int armpmu_event_init(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + int err = 0; + atomic_t *active_events = &armpmu->active_events; + + /* + * Reject CPU-affine events for CPUs that are of a different class to + * that which this PMU handles. Process-following events (where + * event->cpu == -1) can be migrated between CPUs, and thus we have to + * reject them later (in armpmu_add) if they're scheduled on a + * different class of CPU. 
+ */ + if (event->cpu != -1 && + !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus)) + return -ENOENT; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + if (armpmu->map_event(event) == -ENOENT) + return -ENOENT; + + event->destroy = hw_perf_event_destroy; + + if (!atomic_inc_not_zero(active_events)) { + mutex_lock(&armpmu->reserve_mutex); + if (atomic_read(active_events) == 0) + err = armpmu_reserve_hardware(armpmu); + + if (!err) + atomic_inc(active_events); + mutex_unlock(&armpmu->reserve_mutex); + } + + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static void armpmu_enable(struct pmu *pmu) +{ + struct arm_pmu *armpmu = to_arm_pmu(pmu); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); + + /* For task-bound events we may be called on other CPUs */ + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) + return; + + if (enabled) + armpmu->start(armpmu); +} + +static void armpmu_disable(struct pmu *pmu) +{ + struct arm_pmu *armpmu = to_arm_pmu(pmu); + + /* For task-bound events we may be called on other CPUs */ + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) + return; + + armpmu->stop(armpmu); +} + +/* + * In heterogeneous systems, events are specific to a particular + * microarchitecture, and aren't suitable for another. Thus, only match CPUs of + * the same microarchitecture. + */ +static int armpmu_filter_match(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + unsigned int cpu = smp_processor_id(); + return cpumask_test_cpu(cpu, &armpmu->supported_cpus); +} + +static void armpmu_init(struct arm_pmu *armpmu) +{ + atomic_set(&armpmu->active_events, 0); + mutex_init(&armpmu->reserve_mutex); + + armpmu->pmu = (struct pmu) { + .pmu_enable = armpmu_enable, + .pmu_disable = armpmu_disable, + .event_init = armpmu_event_init, + .add = armpmu_add, + .del = armpmu_del, + .start = armpmu_start, + .stop = armpmu_stop, + .read = armpmu_read, + .filter_match = armpmu_filter_match, + }; +} + +int armpmu_register(struct arm_pmu *armpmu, int type) +{ + armpmu_init(armpmu); + pr_info("enabled with %s PMU driver, %d counters available\n", + armpmu->name, armpmu->num_events); + return perf_pmu_register(&armpmu->pmu, armpmu->name, type); +} + +/* Set at runtime when we know what CPU type we are. */ +static struct arm_pmu *__oprofile_cpu_pmu; + +/* + * Despite the names, these two functions are CPU-specific and are used + * by the OProfile/perf code. 
+ */ +const char *perf_pmu_name(void) +{ + if (!__oprofile_cpu_pmu) + return NULL; + + return __oprofile_cpu_pmu->name; +} +EXPORT_SYMBOL_GPL(perf_pmu_name); + +int perf_num_counters(void) +{ + int max_events = 0; + + if (__oprofile_cpu_pmu != NULL) + max_events = __oprofile_cpu_pmu->num_events; + + return max_events; +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +static void cpu_pmu_enable_percpu_irq(void *data) +{ + int irq = *(int *)data; + + enable_percpu_irq(irq, IRQ_TYPE_NONE); +} + +static void cpu_pmu_disable_percpu_irq(void *data) +{ + int irq = *(int *)data; + + disable_percpu_irq(irq); +} + +static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) +{ + int i, irq, irqs; + struct platform_device *pmu_device = cpu_pmu->plat_device; + struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + + irq = platform_get_irq(pmu_device, 0); + if (irq >= 0 && irq_is_percpu(irq)) { + on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); + free_percpu_irq(irq, &hw_events->percpu_pmu); + } else { + for (i = 0; i < irqs; ++i) { + int cpu = i; + + if (cpu_pmu->irq_affinity) + cpu = cpu_pmu->irq_affinity[i]; + + if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) + continue; + irq = platform_get_irq(pmu_device, i); + if (irq >= 0) + free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); + } + } +} + +static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) +{ + int i, err, irq, irqs; + struct platform_device *pmu_device = cpu_pmu->plat_device; + struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; + + if (!pmu_device) + return -ENODEV; + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (irqs < 1) { + pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n"); + return 0; + } + + irq = platform_get_irq(pmu_device, 0); + if (irq >= 0 && irq_is_percpu(irq)) { + err = request_percpu_irq(irq, handler, "arm-pmu", + &hw_events->percpu_pmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + return err; + } + on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); + } else { + for (i = 0; i < irqs; ++i) { + int cpu = i; + + err = 0; + irq = platform_get_irq(pmu_device, i); + if (irq < 0) + continue; + + if (cpu_pmu->irq_affinity) + cpu = cpu_pmu->irq_affinity[i]; + + /* + * If we have a single PMU interrupt that we can't shift, + * assume that we're running on a uniprocessor machine and + * continue. Otherwise, continue without this interrupt. + */ + if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + continue; + } + + err = request_irq(irq, handler, + IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", + per_cpu_ptr(&hw_events->percpu_pmu, cpu)); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + return err; + } + + cpumask_set_cpu(cpu, &cpu_pmu->active_irqs); + } + } + + return 0; +} + +/* + * PMU hardware loses all context when a CPU goes offline. + * When a CPU is hotplugged back in, since some hardware registers are + * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading + * junk values out of them. 
+ */ +static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, + void *hcpu) +{ + int cpu = (unsigned long)hcpu; + struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb); + + if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) + return NOTIFY_DONE; + + if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) + return NOTIFY_DONE; + + if (pmu->reset) + pmu->reset(pmu); + else + return NOTIFY_DONE; + + return NOTIFY_OK; +} + +static int cpu_pmu_init(struct arm_pmu *cpu_pmu) +{ + int err; + int cpu; + struct pmu_hw_events __percpu *cpu_hw_events; + + cpu_hw_events = alloc_percpu(struct pmu_hw_events); + if (!cpu_hw_events) + return -ENOMEM; + + cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify; + err = register_cpu_notifier(&cpu_pmu->hotplug_nb); + if (err) + goto out_hw_events; + + for_each_possible_cpu(cpu) { + struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); + raw_spin_lock_init(&events->pmu_lock); + events->percpu_pmu = cpu_pmu; + } + + cpu_pmu->hw_events = cpu_hw_events; + cpu_pmu->request_irq = cpu_pmu_request_irq; + cpu_pmu->free_irq = cpu_pmu_free_irq; + + /* Ensure the PMU has sane values out of reset. */ + if (cpu_pmu->reset) + on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset, + cpu_pmu, 1); + + /* If no interrupts available, set the corresponding capability flag */ + if (!platform_get_irq(cpu_pmu->plat_device, 0)) + cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + + return 0; + +out_hw_events: + free_percpu(cpu_hw_events); + return err; +} + +static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) +{ + unregister_cpu_notifier(&cpu_pmu->hotplug_nb); + free_percpu(cpu_pmu->hw_events); +} + +/* + * CPU PMU identification and probing. + */ +static int probe_current_pmu(struct arm_pmu *pmu, + const struct pmu_probe_info *info) +{ + int cpu = get_cpu(); + unsigned int cpuid = read_cpuid_id(); + int ret = -ENODEV; + + pr_info("probing PMU on CPU %d\n", cpu); + + for (; info->init != NULL; info++) { + if ((cpuid & info->mask) != info->cpuid) + continue; + ret = info->init(pmu); + break; + } + + put_cpu(); + return ret; +} + +static int of_pmu_irq_cfg(struct arm_pmu *pmu) +{ + int *irqs, i = 0; + bool using_spi = false; + struct platform_device *pdev = pmu->plat_device; + + irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + + do { + struct device_node *dn; + int cpu, irq; + + /* See if we have an affinity entry */ + dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i); + if (!dn) + break; + + /* Check the IRQ type and prohibit a mix of PPIs and SPIs */ + irq = platform_get_irq(pdev, i); + if (irq >= 0) { + bool spi = !irq_is_percpu(irq); + + if (i > 0 && spi != using_spi) { + pr_err("PPI/SPI IRQ type mismatch for %s!\n", + dn->name); + kfree(irqs); + return -EINVAL; + } + + using_spi = spi; + } + + /* Now look up the logical CPU number */ + for_each_possible_cpu(cpu) + if (dn == of_cpu_device_node_get(cpu)) + break; + + if (cpu >= nr_cpu_ids) { + pr_warn("Failed to find logical CPU for %s\n", + dn->name); + of_node_put(dn); + cpumask_setall(&pmu->supported_cpus); + break; + } + of_node_put(dn); + + /* For SPIs, we need to track the affinity per IRQ */ + if (using_spi) { + if (i >= pdev->num_resources) { + of_node_put(dn); + break; + } + + irqs[i] = cpu; + } + + /* Keep track of the CPUs containing this PMU type */ + cpumask_set_cpu(cpu, &pmu->supported_cpus); + of_node_put(dn); + i++; + } while (1); + + /* If we didn't manage to parse anything, claim to support all CPUs */ + if 
(cpumask_weight(&pmu->supported_cpus) == 0) + cpumask_setall(&pmu->supported_cpus); + + /* If we matched up the IRQ affinities, use them to route the SPIs */ + if (using_spi && i == pdev->num_resources) + pmu->irq_affinity = irqs; + else + kfree(irqs); + + return 0; +} + +int arm_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table, + const struct pmu_probe_info *probe_table) +{ + const struct of_device_id *of_id; + const int (*init_fn)(struct arm_pmu *); + struct device_node *node = pdev->dev.of_node; + struct arm_pmu *pmu; + int ret = -ENODEV; + + pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); + if (!pmu) { + pr_info("failed to allocate PMU device!\n"); + return -ENOMEM; + } + + if (!__oprofile_cpu_pmu) + __oprofile_cpu_pmu = pmu; + + pmu->plat_device = pdev; + + if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { + init_fn = of_id->data; + + ret = of_pmu_irq_cfg(pmu); + if (!ret) + ret = init_fn(pmu); + } else { + ret = probe_current_pmu(pmu, probe_table); + cpumask_setall(&pmu->supported_cpus); + } + + if (ret) { + pr_info("failed to probe PMU!\n"); + goto out_free; + } + + ret = cpu_pmu_init(pmu); + if (ret) + goto out_free; + + ret = armpmu_register(pmu, -1); + if (ret) + goto out_destroy; + + return 0; + +out_destroy: + cpu_pmu_destroy(pmu); +out_free: + pr_info("failed to register PMU devices!\n"); + kfree(pmu); + return ret; +} diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h new file mode 100644 index 000000000000..bfa673bb822d --- /dev/null +++ b/include/linux/perf/arm_pmu.h @@ -0,0 +1,154 @@ +/* + * linux/arch/arm/include/asm/pmu.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PMU_H__ +#define __ARM_PMU_H__ + +#include +#include + +#include + +/* + * struct arm_pmu_platdata - ARM PMU platform data + * + * @handle_irq: an optional handler which will be called from the + * interrupt and passed the address of the low level handler, + * and can be used to implement any platform specific handling + * before or after calling it. + */ +struct arm_pmu_platdata { + irqreturn_t (*handle_irq)(int irq, void *dev, + irq_handler_t pmu_handler); +}; + +#ifdef CONFIG_ARM_PMU + +/* + * The ARMv7 CPU PMU supports up to 32 event counters. + */ +#define ARMPMU_MAX_HWEVENTS 32 + +#define HW_OP_UNSUPPORTED 0xFFFF +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xFFFF + +#define PERF_MAP_ALL_UNSUPPORTED \ + [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED + +#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ +[0 ... C(MAX) - 1] = { \ + [0 ... C(OP_MAX) - 1] = { \ + [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \ + }, \ +} + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event *events[ARMPMU_MAX_HWEVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS); + + /* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. + */ + raw_spinlock_t pmu_lock; + + /* + * When using percpu IRQs, we need a percpu dev_id. Place it here as we + * already have to allocate this struct per cpu. 
+ */ + struct arm_pmu *percpu_pmu; +}; + +struct arm_pmu { + struct pmu pmu; + cpumask_t active_irqs; + cpumask_t supported_cpus; + int *irq_affinity; + char *name; + irqreturn_t (*handle_irq)(int irq_num, void *dev); + void (*enable)(struct perf_event *event); + void (*disable)(struct perf_event *event); + int (*get_event_idx)(struct pmu_hw_events *hw_events, + struct perf_event *event); + void (*clear_event_idx)(struct pmu_hw_events *hw_events, + struct perf_event *event); + int (*set_event_filter)(struct hw_perf_event *evt, + struct perf_event_attr *attr); + u32 (*read_counter)(struct perf_event *event); + void (*write_counter)(struct perf_event *event, u32 val); + void (*start)(struct arm_pmu *); + void (*stop)(struct arm_pmu *); + void (*reset)(void *); + int (*request_irq)(struct arm_pmu *, irq_handler_t handler); + void (*free_irq)(struct arm_pmu *); + int (*map_event)(struct perf_event *event); + int num_events; + atomic_t active_events; + struct mutex reserve_mutex; + u64 max_period; + struct platform_device *plat_device; + struct pmu_hw_events __percpu *hw_events; + struct notifier_block hotplug_nb; +}; + +#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) + +int armpmu_register(struct arm_pmu *armpmu, int type); + +u64 armpmu_event_update(struct perf_event *event); + +int armpmu_event_set_period(struct perf_event *event); + +int armpmu_map_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask); + +struct pmu_probe_info { + unsigned int cpuid; + unsigned int mask; + int (*init)(struct arm_pmu *); +}; + +#define PMU_PROBE(_cpuid, _mask, _fn) \ +{ \ + .cpuid = (_cpuid), \ + .mask = (_mask), \ + .init = (_fn), \ +} + +#define ARM_PMU_PROBE(_cpuid, _fn) \ + PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn) + +#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK) + +#define XSCALE_PMU_PROBE(_version, _fn) \ + PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn) + +int arm_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table, + const struct pmu_probe_info *probe_table); + +#endif /* CONFIG_ARM_PMU */ + +#endif /* __ARM_PMU_H__ */ -- cgit v1.2.3 From 787047eea24a2443c366679ae6b5a3873a33b64e Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 29 Jul 2015 00:34:48 +0100 Subject: ARM: 8392/3: smp: Only expose /sys/.../cpuX/online if hotpluggable Writes to /sys/.../cpuX/online fail if we determine the platform doesn't support hotplug for that CPU. Furthermore, if the cpu_die op isn't specified the system hangs when we try to offline a CPU and it comes right back online unexpectedly. Let's figure this stuff out before we make the sysfs nodes so that the online file doesn't even exist if it isn't (at least sometimes) possible to hotplug the CPU. Add a new 'cpu_can_disable' op and repoint all 'cpu_disable' implementations at it because all implementers use the op to indicate if a CPU can be hotplugged or not in a static fashion. With PSCI we may need to add a 'cpu_disable' op so that the secure OS can be migrated off the CPU we're trying to hotplug. In this case, the 'cpu_can_disable' op will indicate that all CPUs are hotpluggable by returning true, but the 'cpu_disable' op will make a PSCI migration call and occasionally fail, denying the hotplug of a CPU. 
This shouldn't be any worse than x86 where we may indicate that all CPUs are hotpluggable but occasionally we can't offline a CPU due to check_irq_vectors_for_cpu_disable() failing to find a CPU to move vectors to. Cc: Mark Rutland Cc: Nicolas Pitre Cc: Dave Martin Acked-by: Simon Horman [shmobile portion] Tested-by: Simon Horman Cc: Magnus Damm Cc: Tested-by: Tyler Baker Cc: Geert Uytterhoeven Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/common/mcpm_platsmp.c | 12 ++++-------- arch/arm/include/asm/smp.h | 1 + arch/arm/include/asm/smp_plat.h | 9 +++++++++ arch/arm/kernel/setup.c | 2 +- arch/arm/kernel/smp.c | 15 ++++++++++++++- arch/arm/mach-shmobile/common.h | 2 +- arch/arm/mach-shmobile/platsmp.c | 4 ++-- arch/arm/mach-shmobile/smp-r8a7790.c | 2 +- arch/arm/mach-shmobile/smp-r8a7791.c | 2 +- arch/arm/mach-shmobile/smp-sh73a0.c | 2 +- 10 files changed, 35 insertions(+), 16 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c index 92e54d7c6f46..2b25b6038f66 100644 --- a/arch/arm/common/mcpm_platsmp.c +++ b/arch/arm/common/mcpm_platsmp.c @@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu) return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster); } -static int mcpm_cpu_disable(unsigned int cpu) +static bool mcpm_cpu_can_disable(unsigned int cpu) { - /* - * We assume all CPUs may be shut down. - * This would be the hook to use for eventual Secure - * OS migration requests as described in the PSCI spec. - */ - return 0; + /* We assume all CPUs may be shut down. */ + return true; } static void mcpm_cpu_die(unsigned int cpu) @@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = { .smp_secondary_init = mcpm_secondary_init, #ifdef CONFIG_HOTPLUG_CPU .cpu_kill = mcpm_cpu_kill, - .cpu_disable = mcpm_cpu_disable, + .cpu_can_disable = mcpm_cpu_can_disable, .cpu_die = mcpm_cpu_die, #endif }; diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2f3ac1ba6fb4..318ce89eeff7 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -105,6 +105,7 @@ struct smp_operations { #ifdef CONFIG_HOTPLUG_CPU int (*cpu_kill)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); + bool (*cpu_can_disable)(unsigned int cpu); int (*cpu_disable)(unsigned int cpu); #endif #endif diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 993e5224d8f7..f9080717fc88 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void) extern int platform_can_secondary_boot(void); extern int platform_can_cpu_hotplug(void); +#ifdef CONFIG_HOTPLUG_CPU +extern int platform_can_hotplug_cpu(unsigned int cpu); +#else +static inline int platform_can_hotplug_cpu(unsigned int cpu) +{ + return 0; +} +#endif + #endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 36c18b73c1f4..6bbec6042052 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -1015,7 +1015,7 @@ static int __init topology_init(void) for_each_possible_cpu(cpu) { struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); - cpuinfo->cpu.hotpluggable = 1; + cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu); register_cpu(&cpuinfo->cpu, cpu); } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 90dfbedfbfb8..3cd846f48eaf 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu) if 
(smp_ops.cpu_disable) return smp_ops.cpu_disable(cpu); + return 0; +} + +int platform_can_hotplug_cpu(unsigned int cpu) +{ + /* cpu_die must be specified to support hotplug */ + if (!smp_ops.cpu_die) + return 0; + + if (smp_ops.cpu_can_disable) + return smp_ops.cpu_can_disable(cpu); + /* * By default, allow disabling all CPUs except the first one, * since this is special on a lot of platforms, e.g. because * of clock tick interrupts. */ - return cpu == 0 ? -EPERM : 0; + return cpu != 0; } + /* * __cpu_disable runs on the processor to be shutdown. */ diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h index 476092b86c6e..8d27ec546a35 100644 --- a/arch/arm/mach-shmobile/common.h +++ b/arch/arm/mach-shmobile/common.h @@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void); extern void shmobile_smp_sleep(void); extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg); -extern int shmobile_smp_cpu_disable(unsigned int cpu); +extern bool shmobile_smp_cpu_can_disable(unsigned int cpu); extern void shmobile_boot_scu(void); extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus); extern void shmobile_smp_scu_cpu_die(unsigned int cpu); diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index 3923e09e966d..b23378f3d7e1 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c @@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg) } #ifdef CONFIG_HOTPLUG_CPU -int shmobile_smp_cpu_disable(unsigned int cpu) +bool shmobile_smp_cpu_can_disable(unsigned int cpu) { - return 0; /* Hotplug of any CPU is supported */ + return true; /* Hotplug of any CPU is supported */ } #endif diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c index 930f45cbc08a..947e437cab68 100644 --- a/arch/arm/mach-shmobile/smp-r8a7790.c +++ b/arch/arm/mach-shmobile/smp-r8a7790.c @@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = { .smp_prepare_cpus = r8a7790_smp_prepare_cpus, .smp_boot_secondary = shmobile_smp_apmu_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = shmobile_smp_cpu_disable, + .cpu_can_disable = shmobile_smp_cpu_can_disable, .cpu_die = shmobile_smp_apmu_cpu_die, .cpu_kill = shmobile_smp_apmu_cpu_kill, #endif diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c index 5e2d1db79afa..b2508c0d276b 100644 --- a/arch/arm/mach-shmobile/smp-r8a7791.c +++ b/arch/arm/mach-shmobile/smp-r8a7791.c @@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = { .smp_prepare_cpus = r8a7791_smp_prepare_cpus, .smp_boot_secondary = r8a7791_smp_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = shmobile_smp_cpu_disable, + .cpu_can_disable = shmobile_smp_cpu_can_disable, .cpu_die = shmobile_smp_apmu_cpu_die, .cpu_kill = shmobile_smp_apmu_cpu_kill, #endif diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index 2106d6b76a06..ae7c764fd6b4 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c @@ -68,7 +68,7 @@ struct smp_operations sh73a0_smp_ops __initdata = { .smp_prepare_cpus = sh73a0_smp_prepare_cpus, .smp_boot_secondary = sh73a0_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = shmobile_smp_cpu_disable, + .cpu_can_disable = shmobile_smp_cpu_can_disable, .cpu_die = shmobile_smp_scu_cpu_die, .cpu_kill = shmobile_smp_scu_cpu_kill, #endif -- cgit v1.2.3 From 
9ac87c5a0b19417f925dc61cac7198d872159a2c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 31 Jul 2015 11:28:54 +0100 Subject: ARM: 8407/1: switch_to: Remove finish_arch_switch Fold finish_arch_switch() into switch_to(), in preparation for the removal of the finish_arch_switch call from core sched code. Signed-off-by: Will Deacon Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Russell King --- arch/arm/include/asm/switch_to.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index c99e259469f7..12ebfcc1d539 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -10,7 +10,9 @@ * CPU. */ #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) -#define finish_arch_switch(prev) dsb(ish) +#define __complete_pending_tlbi() dsb(ish) +#else +#define __complete_pending_tlbi() #endif /* @@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ + __complete_pending_tlbi(); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) -- cgit v1.2.3 From 1234e3fda9aa24b2d650bbcd9ef09d5f6a12dc86 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 24 Jul 2015 09:10:55 +0100 Subject: ARM: reduce visibility of dmac_* functions The dmac_* functions are private to the ARM DMA API implementation, and should not be used by drivers. In order to discourage their use, remove their prototypes and macros from asm/*.h. We have to leave dmac_flush_range() behind as Exynos and MSM IOMMU code use these; once these sites are fixed, this can be moved also. Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 4 ---- arch/arm/include/asm/glue-cache.h | 2 -- arch/arm/mm/dma-mapping.c | 1 + arch/arm/mm/dma.h | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 arch/arm/mm/dma.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 4812cda8fd17..c5230a44eeca 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_map_area cpu_cache.dma_map_area -#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. 
*/ -extern void dmac_map_area(const void *, size_t, int); -extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); #endif diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index a3c24cd5b7c8..cab07f69382d 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } #define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) -#define dmac_map_area __glue(_CACHE,_dma_map_area) -#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) #endif diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1ced8a0f7a52..5edf17cf043d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -39,6 +39,7 @@ #include #include +#include "dma.h" #include "mm.h" /* diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h new file mode 100644 index 000000000000..70ea6852f94e --- /dev/null +++ b/arch/arm/mm/dma.h @@ -0,0 +1,32 @@ +#ifndef DMA_H +#define DMA_H + +#include + +#ifndef MULTI_CACHE +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) + +/* + * These are private to the dma-mapping API. Do not use directly. + * Their sole purpose is to ensure that data held in the cache + * is visible to DMA, or data written by DMA to system memory is + * visible to the CPU. + */ +extern void dmac_map_area(const void *, size_t, int); +extern void dmac_unmap_area(const void *, size_t, int); + +#else + +/* + * These are private to the dma-mapping API. Do not use directly. + * Their sole purpose is to ensure that data held in the cache + * is visible to DMA, or data written by DMA to system memory is + * visible to the CPU. + */ +#define dmac_map_area cpu_cache.dma_map_area +#define dmac_unmap_area cpu_cache.dma_unmap_area + +#endif + +#endif -- cgit v1.2.3 From 76695af20c015206cffb84b15912be6797d0cca2 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Sun, 2 Aug 2015 17:11:04 +0200 Subject: locking, arch: use WRITE_ONCE()/READ_ONCE() in smp_store_release()/smp_load_acquire() Replace ACCESS_ONCE() macro in smp_store_release() and smp_load_acquire() with WRITE_ONCE() and READ_ONCE() on x86, arm, arm64, ia64, metag, mips, powerpc, s390, sparc and asm-generic since ACCESS_ONCE() does not work reliably on non-scalar types. WRITE_ONCE() and READ_ONCE() were introduced in the following commits: 230fa253df63 ("kernel: Provide READ_ONCE and ASSIGN_ONCE") 43239cbe79fc ("kernel: Change ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val)") Signed-off-by: Andrey Konovalov Signed-off-by: Peter Zijlstra (Intel) Acked-by: Davidlohr Bueso Acked-by: Michael Ellerman (powerpc) Acked-by: Ralf Baechle Cc: Alexander Duyck Cc: Andre Przywara Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David S. Miller Cc: Davidlohr Bueso Cc: Dmitry Vyukov Cc: Fenghua Yu Cc: H. Peter Anvin Cc: Heiko Carstens Cc: James Hogan Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Paul E. 
McKenney Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Russell King Cc: Thomas Gleixner Cc: Tony Luck Cc: Will Deacon Cc: linux-arch@vger.kernel.org Link: http://lkml.kernel.org/r/1438528264-714-1-git-send-email-andreyknvl@google.com Signed-off-by: Ingo Molnar --- arch/arm/include/asm/barrier.h | 4 ++-- arch/arm64/include/asm/barrier.h | 4 ++-- arch/ia64/include/asm/barrier.h | 4 ++-- arch/metag/include/asm/barrier.h | 4 ++-- arch/mips/include/asm/barrier.h | 4 ++-- arch/powerpc/include/asm/barrier.h | 4 ++-- arch/s390/include/asm/barrier.h | 4 ++-- arch/sparc/include/asm/barrier_64.h | 4 ++-- arch/x86/include/asm/barrier.h | 8 ++++---- include/asm-generic/barrier.h | 4 ++-- 10 files changed, 22 insertions(+), 22 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 6c2327e1c732..70393574e0fa 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -67,12 +67,12 @@ do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ ___p1; \ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 0fa47c4275cb..ef93b20bc964 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -44,12 +44,12 @@ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ ___p1; \ diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 843ba435e43b..df896a1c41d3 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -66,12 +66,12 @@ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ ___p1; \ diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index 5a696e507930..172b7e5efc53 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -90,12 +90,12 @@ static inline void fence(void) do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ ___p1; \ diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index 7ecba84656d4..752e0b86c171 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -133,12 +133,12 @@ do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ ___p1; \ diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 51ccc7232042..0eca6efc0631 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -76,12 
+76,12 @@ do { \ compiletime_assert_atomic_type(*p); \ smp_lwsync(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_lwsync(); \ ___p1; \ diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index e6f8615a11eb..d48fe0162331 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -42,12 +42,12 @@ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ ___p1; \ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 809941e33e12..14a928601657 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -60,12 +60,12 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ ___p1; \ diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index e51a8f803f55..d2bcfbed11c3 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -57,12 +57,12 @@ do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ ___p1; \ @@ -74,12 +74,12 @@ do { \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ ___p1; \ diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 55e3abc2d027..b42afada1280 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -108,12 +108,12 @@ do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ ___p1; \ -- cgit v1.2.3 From 11276d5306b8e5b438a36bbff855fe792d7eaa61 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 24 Jul 2015 15:09:55 +0200 Subject: locking/static_keys: Add a new static_key interface There are various problems and short-comings with the current static_key interface: - static_key_{true,false}() read like a branch depending on the key value, instead of the actual likely/unlikely branch depending on init value. - static_key_{true,false}() are, as stated above, tied to the static_key init values STATIC_KEY_INIT_{TRUE,FALSE}. - we're limited to the 2 (out of 4) possible options that compile to a default NOP because that's what our arch_static_branch() assembly emits. 
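As a minimal sketch of the first two points (the key, helper and call-site names below are hypothetical, not taken from this patch): with the current interface a default-off key has to be declared with STATIC_KEY_INIT_FALSE and then tested with static_key_false(), so the call site reads as if it were checking the key's value rather than marking the unlikely branch.

        #include <linux/jump_label.h>

        /* Hypothetical default-off key using the pre-rework interface. */
        static struct static_key use_debug_path = STATIC_KEY_INIT_FALSE;

        extern void do_debug_work(void);        /* hypothetical slow-path helper */

        void hot_path(void)
        {
                /*
                 * Despite the name, this does not ask "is the key false?": it is
                 * the unlikely-branch form that must be paired with
                 * STATIC_KEY_INIT_FALSE, and it returns true once the key has
                 * been enabled via static_key_slow_inc().
                 */
                if (static_key_false(&use_debug_path))
                        do_debug_work();
        }
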
So provide a new static_key interface: DEFINE_STATIC_KEY_TRUE(name); DEFINE_STATIC_KEY_FALSE(name); Which define a key of different types with an initial true/false value. Then allow: static_branch_likely() static_branch_unlikely() to take a key of either type and emit the right instruction for the case. This means adding a second arch_static_branch_jump() assembly helper which emits a JMP per default. In order to determine the right instruction for the right state, encode the branch type in the LSB of jump_entry::key. This is the final step in removing the naming confusion that has led to a stream of avoidable bugs such as: a833581e372a ("x86, perf: Fix static_key bug in load_mm_cr4()") ... but it also allows new static key combinations that will give us performance enhancements in the subsequent patches. Tested-by: Rabin Vincent # arm Signed-off-by: Peter Zijlstra (Intel) Acked-by: Michael Ellerman # ppc Acked-by: Heiko Carstens # s390 Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/jump_label.h | 25 ++++-- arch/arm64/include/asm/jump_label.h | 18 +++- arch/mips/include/asm/jump_label.h | 19 ++++- arch/powerpc/include/asm/jump_label.h | 19 ++++- arch/s390/include/asm/jump_label.h | 19 ++++- arch/sparc/include/asm/jump_label.h | 35 ++++++-- arch/x86/include/asm/jump_label.h | 21 ++++- include/linux/jump_label.h | 149 +++++++++++++++++++++++++++++++--- kernel/jump_label.c | 37 +++++++-- 9 files changed, 298 insertions(+), 44 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h index 5f337dc5c108..34f7b6980d21 100644 --- a/arch/arm/include/asm/jump_label.h +++ b/arch/arm/include/asm/jump_label.h @@ -4,23 +4,32 @@ #ifndef __ASSEMBLY__ #include +#include #define JUMP_LABEL_NOP_SIZE 4 -#ifdef CONFIG_THUMB2_KERNEL -#define JUMP_LABEL_NOP "nop.w" -#else -#define JUMP_LABEL_NOP "nop" -#endif +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:\n\t" + WASM(nop) "\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b, %l[l_yes], %c0\n\t" + ".popsection\n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { asm_volatile_goto("1:\n\t" - JUMP_LABEL_NOP "\n\t" + WASM(b) " %l[l_yes]\n\t" ".pushsection __jump_table, \"aw\"\n\t" ".word 1b, %l[l_yes], %c0\n\t" ".popsection\n\t" - : : "i" (key) : : l_yes); + : : "i" (&((char *)key)[branch]) : : l_yes); return false; l_yes: diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h index c0e5165c2f76..1b5e0e843c3a 100644 --- a/arch/arm64/include/asm/jump_label.h +++ b/arch/arm64/include/asm/jump_label.h @@ -26,14 +26,28 @@ #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm goto("1: nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" ".align 3\n\t" ".quad 1b, %l[l_yes], %c0\n\t" ".popsection\n\t" - : : "i"(key) : : l_yes); + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm 
goto("1: b %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %c0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); return false; l_yes: diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index 608aa57799c8..e77672539e8e 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -26,14 +26,29 @@ #define NOP_INSN "nop" #endif -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("1:\t" NOP_INSN "\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" ".popsection\n\t" - : : "i" (key) : : l_yes); + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:\tj %l[l_yes]\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + WORD_INSN " 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + return false; l_yes: return true; diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index efbf9a322a23..47e155f15433 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -18,14 +18,29 @@ #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) #define JUMP_LABEL_NOP_SIZE 4 -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("1:\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" ".popsection \n\t" - : : "i" (key) : : l_yes); + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:\n\t" + "b %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" + ".popsection \n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + return false; l_yes: return true; diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 69972b7957ee..7f9fd5e3f1bf 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -12,14 +12,29 @@ * We use a brcl 0,2 instruction for jump labels at compile time so it * can be easily distinguished from a hotpatch generated instruction. 
*/ -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" ".pushsection __jump_table, \"aw\"\n" ".balign 8\n" ".quad 0b, %l[label], %0\n" ".popsection\n" - : : "X" (key) : : label); + : : "X" (&((char *)key)[branch]) : : label); + + return false; +label: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("0: brcl 15, %l[label]\n" + ".pushsection __jump_table, \"aw\"\n" + ".balign 8\n" + ".quad 0b, %l[label], %0\n" + ".popsection\n" + : : "X" (&((char *)key)[branch]) : : label); + return false; label: return true; diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h index cc9b04a2b11b..62d0354d1727 100644 --- a/arch/sparc/include/asm/jump_label.h +++ b/arch/sparc/include/asm/jump_label.h @@ -7,16 +7,33 @@ #define JUMP_LABEL_NOP_SIZE 4 -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { - asm_volatile_goto("1:\n\t" - "nop\n\t" - "nop\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - ".align 4\n\t" - ".word 1b, %l[l_yes], %c0\n\t" - ".popsection \n\t" - : : "i" (key) : : l_yes); + asm_volatile_goto("1:\n\t" + "nop\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 4\n\t" + ".word 1b, %l[l_yes], %c0\n\t" + ".popsection \n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:\n\t" + "b %l[l_yes]\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 4\n\t" + ".word 1b, %l[l_yes], %c0\n\t" + ".popsection \n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + return false; l_yes: return true; diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index a4c1cf7e93f8..28d7a857f9d1 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -16,7 +16,7 @@ # define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC #endif -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("1:" ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" @@ -24,7 +24,24 @@ static __always_inline bool arch_static_branch(struct static_key *key) _ASM_ALIGN "\n\t" _ASM_PTR "1b, %l[l_yes], %c0 \n\t" ".popsection \n\t" - : : "i" (key) : : l_yes); + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:" + ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t" + "2:\n\t" + ".pushsection __jump_table, \"aw\" \n\t" + _ASM_ALIGN "\n\t" + _ASM_PTR "1b, %l[l_yes], %c0 \n\t" + ".popsection \n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + return false; l_yes: return true; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 65f0ebac63cf..e337a1961933 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -107,12 +107,12 @@ static inline int static_key_count(struct static_key *key) static __always_inline bool static_key_false(struct static_key *key) { - return arch_static_branch(key); + return 
arch_static_branch(key, false); } static __always_inline bool static_key_true(struct static_key *key) { - return !static_key_false(key); + return !arch_static_branch(key, true); } extern struct jump_entry __start___jump_table[]; @@ -130,12 +130,12 @@ extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); extern void jump_label_apply_nops(struct module *mod); -#define STATIC_KEY_INIT_TRUE ((struct static_key) \ +#define STATIC_KEY_INIT_TRUE \ { .enabled = ATOMIC_INIT(1), \ - .entries = (void *)JUMP_TYPE_TRUE }) -#define STATIC_KEY_INIT_FALSE ((struct static_key) \ + .entries = (void *)JUMP_TYPE_TRUE } +#define STATIC_KEY_INIT_FALSE \ { .enabled = ATOMIC_INIT(0), \ - .entries = (void *)JUMP_TYPE_FALSE }) + .entries = (void *)JUMP_TYPE_FALSE } #else /* !HAVE_JUMP_LABEL */ @@ -183,10 +183,8 @@ static inline int jump_label_apply_nops(struct module *mod) return 0; } -#define STATIC_KEY_INIT_TRUE ((struct static_key) \ - { .enabled = ATOMIC_INIT(1) }) -#define STATIC_KEY_INIT_FALSE ((struct static_key) \ - { .enabled = ATOMIC_INIT(0) }) +#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } +#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } #endif /* HAVE_JUMP_LABEL */ @@ -218,6 +216,137 @@ static inline void static_key_disable(struct static_key *key) static_key_slow_dec(key); } +/* -------------------------------------------------------------------------- */ + +/* + * Two type wrappers around static_key, such that we can use compile time + * type differentiation to emit the right code. + * + * All the below code is macros in order to play type games. + */ + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, } +#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, } + +#define DEFINE_STATIC_KEY_TRUE(name) \ + struct static_key_true name = STATIC_KEY_TRUE_INIT + +#define DEFINE_STATIC_KEY_FALSE(name) \ + struct static_key_false name = STATIC_KEY_FALSE_INIT + +#ifdef HAVE_JUMP_LABEL + +/* + * Combine the right initial value (type) with the right branch order + * to generate the desired result. + * + * + * type\branch| likely (1) | unlikely (0) + * -----------+-----------------------+------------------ + * | | + * true (1) | ... | ... + * | NOP | JMP L + * | | 1: ... + * | L: ... | + * | | + * | | L: + * | | jmp 1b + * | | + * -----------+-----------------------+------------------ + * | | + * false (0) | ... | ... + * | JMP L | NOP + * | | 1: ... + * | L: ... | + * | | + * | | L: + * | | jmp 1b + * | | + * -----------+-----------------------+------------------ + * + * The initial value is encoded in the LSB of static_key::entries, + * type: 0 = false, 1 = true. + * + * The branch type is encoded in the LSB of jump_entry::key, + * branch: 0 = unlikely, 1 = likely. + * + * This gives the following logic table: + * + * enabled type branch instuction + * -----------------------------+----------- + * 0 0 0 | NOP + * 0 0 1 | JMP + * 0 1 0 | NOP + * 0 1 1 | JMP + * + * 1 0 0 | JMP + * 1 0 1 | NOP + * 1 1 0 | JMP + * 1 1 1 | NOP + * + * Which gives the following functions: + * + * dynamic: instruction = enabled ^ branch + * static: instruction = type ^ branch + * + * See jump_label_type() / jump_label_init_type(). 
+ */ + +extern bool ____wrong_branch_error(void); + +#define static_branch_likely(x) \ +({ \ + bool branch; \ + if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \ + branch = !arch_static_branch(&(x)->key, true); \ + else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + branch = !arch_static_branch_jump(&(x)->key, true); \ + else \ + branch = ____wrong_branch_error(); \ + branch; \ +}) + +#define static_branch_unlikely(x) \ +({ \ + bool branch; \ + if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \ + branch = arch_static_branch_jump(&(x)->key, false); \ + else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + branch = arch_static_branch(&(x)->key, false); \ + else \ + branch = ____wrong_branch_error(); \ + branch; \ +}) + +#else /* !HAVE_JUMP_LABEL */ + +#define static_branch_likely(x) likely(static_key_enabled(&(x)->key)) +#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) + +#endif /* HAVE_JUMP_LABEL */ + +/* + * Advanced usage; refcount, branch is enabled when: count != 0 + */ + +#define static_branch_inc(x) static_key_slow_inc(&(x)->key) +#define static_branch_dec(x) static_key_slow_dec(&(x)->key) + +/* + * Normal usage; boolean enable/disable. + */ + +#define static_branch_enable(x) static_key_enable(&(x)->key) +#define static_branch_disable(x) static_key_disable(&(x)->key) + #endif /* _LINUX_JUMP_LABEL_H */ #endif /* __ASSEMBLY__ */ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 2e7cc1e4b4b5..8fd00d892286 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -165,16 +165,22 @@ static inline bool static_key_type(struct static_key *key) static inline struct static_key *jump_entry_key(struct jump_entry *entry) { - return (struct static_key *)((unsigned long)entry->key); + return (struct static_key *)((unsigned long)entry->key & ~1UL); +} + +static bool jump_entry_branch(struct jump_entry *entry) +{ + return (unsigned long)entry->key & 1UL; } static enum jump_label_type jump_label_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); bool enabled = static_key_enabled(key); - bool type = static_key_type(key); + bool branch = jump_entry_branch(entry); - return enabled ^ type; + /* See the comment in linux/jump_label.h */ + return enabled ^ branch; } static void __jump_label_update(struct static_key *key, @@ -205,7 +211,10 @@ void __init jump_label_init(void) for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk; - arch_jump_label_transform_static(iter, jump_label_type(iter)); + /* rewrite NOPs */ + if (jump_label_type(iter) == JUMP_LABEL_NOP) + arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); + iterk = jump_entry_key(iter); if (iterk == key) continue; @@ -225,6 +234,16 @@ void __init jump_label_init(void) #ifdef CONFIG_MODULES +static enum jump_label_type jump_label_init_type(struct jump_entry *entry) +{ + struct static_key *key = jump_entry_key(entry); + bool type = static_key_type(key); + bool branch = jump_entry_branch(entry); + + /* See the comment in linux/jump_label.h */ + return type ^ branch; +} + struct static_key_mod { struct static_key_mod *next; struct jump_entry *entries; @@ -276,8 +295,11 @@ void jump_label_apply_nops(struct module *mod) if (iter_start == iter_stop) return; - for (iter = iter_start; iter < iter_stop; iter++) - arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); + for (iter = iter_start; iter < iter_stop; iter++) { + /* Only write NOPs for arch_branch_static(). 
*/ + if (jump_label_init_type(iter) == JUMP_LABEL_NOP) + arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); + } } static int jump_label_add_module(struct module *mod) @@ -318,7 +340,8 @@ static int jump_label_add_module(struct module *mod) jlm->next = key->next; key->next = jlm; - if (jump_label_type(iter) == JUMP_LABEL_JMP) + /* Only update if we've changed from our initial state */ + if (jump_label_type(iter) != jump_label_init_type(iter)) __jump_label_update(key, iter, iter_stop); } -- cgit v1.2.3 From be120397e7709d9d5ed88317a385ce864a2603bc Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 31 Jul 2015 15:46:19 +0100 Subject: ARM: migrate to common PSCI client code Now that the common PSCI client code has been factored out to drivers/firmware, and made safe for 32-bit use, move the 32-bit ARM code over to it. This results in a moderate reduction of duplicated lines, and will prevent further duplication as the PSCI client code is updated for PSCI 1.0 and beyond. The two legacy platform users of the PSCI invocation code are updated to account for interface changes. In both cases the power state parameter (which is constant) is now generated using macros, so that the pack/unpack logic can be killed in preparation for PSCI 1.0 power state changes. Signed-off-by: Mark Rutland Acked-by: Rob Herring Cc: Catalin Marinas Cc: Ashwin Chaugule Cc: Lorenzo Pieralisi Cc: Russell King Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm/Kconfig | 1 + arch/arm/include/asm/psci.h | 23 --- arch/arm/kernel/Makefile | 2 +- arch/arm/kernel/psci.c | 299 -------------------------------------- arch/arm/kernel/psci_smp.c | 29 +++- arch/arm/kernel/setup.c | 3 +- arch/arm/mach-highbank/highbank.c | 2 +- arch/arm/mach-highbank/pm.c | 16 +- drivers/cpuidle/cpuidle-calxeda.c | 15 +- 9 files changed, 46 insertions(+), 344 deletions(-) delete mode 100644 arch/arm/kernel/psci.c (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 1c5021002fe4..f5cd68425d8d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1496,6 +1496,7 @@ config HOTPLUG_CPU config ARM_PSCI bool "Support for the ARM Power State Coordination Interface (PSCI)" depends on CPU_V7 + select ARM_PSCI_FW help Say Y here if you want Linux to communicate with system firmware implementing the PSCI specification for CPU-centric power diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h index c25ef3ec6d1f..68ee3ce17b82 100644 --- a/arch/arm/include/asm/psci.h +++ b/arch/arm/include/asm/psci.h @@ -14,34 +14,11 @@ #ifndef __ASM_ARM_PSCI_H #define __ASM_ARM_PSCI_H -#define PSCI_POWER_STATE_TYPE_STANDBY 0 -#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 - -struct psci_power_state { - u16 id; - u8 type; - u8 affinity_level; -}; - -struct psci_operations { - int (*cpu_suspend)(struct psci_power_state state, - unsigned long entry_point); - int (*cpu_off)(struct psci_power_state state); - int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); - int (*migrate)(unsigned long cpuid); - int (*affinity_info)(unsigned long target_affinity, - unsigned long lowest_affinity_level); - int (*migrate_info_type)(void); -}; - -extern struct psci_operations psci_ops; extern struct smp_operations psci_smp_ops; #ifdef CONFIG_ARM_PSCI -int psci_init(void); bool psci_smp_available(void); #else -static inline int psci_init(void) { return 0; } static inline bool psci_smp_available(void) { return false; } #endif diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index e69f7a19735d..3b995f5c524d 100644 --- 
a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -89,7 +89,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o ifeq ($(CONFIG_ARM_PSCI),y) -obj-y += psci.o psci-call.o +obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c deleted file mode 100644 index f90fdf4ce7c7..000000000000 --- a/arch/arm/kernel/psci.c +++ /dev/null @@ -1,299 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2012 ARM Limited - * - * Author: Will Deacon - */ - -#define pr_fmt(fmt) "psci: " fmt - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -struct psci_operations psci_ops; - -static int (*invoke_psci_fn)(u32, u32, u32, u32); -typedef int (*psci_initcall_t)(const struct device_node *); - -asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32); -asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32); - -enum psci_function { - PSCI_FN_CPU_SUSPEND, - PSCI_FN_CPU_ON, - PSCI_FN_CPU_OFF, - PSCI_FN_MIGRATE, - PSCI_FN_AFFINITY_INFO, - PSCI_FN_MIGRATE_INFO_TYPE, - PSCI_FN_MAX, -}; - -static u32 psci_function_id[PSCI_FN_MAX]; - -static int psci_to_linux_errno(int errno) -{ - switch (errno) { - case PSCI_RET_SUCCESS: - return 0; - case PSCI_RET_NOT_SUPPORTED: - return -EOPNOTSUPP; - case PSCI_RET_INVALID_PARAMS: - return -EINVAL; - case PSCI_RET_DENIED: - return -EPERM; - }; - - return -EINVAL; -} - -static u32 psci_power_state_pack(struct psci_power_state state) -{ - return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT) - & PSCI_0_2_POWER_STATE_ID_MASK) | - ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) - & PSCI_0_2_POWER_STATE_TYPE_MASK) | - ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) - & PSCI_0_2_POWER_STATE_AFFL_MASK); -} - -static int psci_get_version(void) -{ - int err; - - err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); - return err; -} - -static int psci_cpu_suspend(struct psci_power_state state, - unsigned long entry_point) -{ - int err; - u32 fn, power_state; - - fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; - power_state = psci_power_state_pack(state); - err = invoke_psci_fn(fn, power_state, entry_point, 0); - return psci_to_linux_errno(err); -} - -static int psci_cpu_off(struct psci_power_state state) -{ - int err; - u32 fn, power_state; - - fn = psci_function_id[PSCI_FN_CPU_OFF]; - power_state = psci_power_state_pack(state); - err = invoke_psci_fn(fn, power_state, 0, 0); - return psci_to_linux_errno(err); -} - -static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_CPU_ON]; - err = invoke_psci_fn(fn, cpuid, entry_point, 0); - return psci_to_linux_errno(err); -} - -static int psci_migrate(unsigned long cpuid) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_MIGRATE]; - err = invoke_psci_fn(fn, cpuid, 0, 0); - return psci_to_linux_errno(err); -} - -static int psci_affinity_info(unsigned long target_affinity, - unsigned long lowest_affinity_level) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_AFFINITY_INFO]; - err = 
invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0); - return err; -} - -static int psci_migrate_info_type(void) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE]; - err = invoke_psci_fn(fn, 0, 0, 0); - return err; -} - -static int get_set_conduit_method(struct device_node *np) -{ - const char *method; - - pr_info("probing for conduit method from DT.\n"); - - if (of_property_read_string(np, "method", &method)) { - pr_warn("missing \"method\" property\n"); - return -ENXIO; - } - - if (!strcmp("hvc", method)) { - invoke_psci_fn = __invoke_psci_fn_hvc; - } else if (!strcmp("smc", method)) { - invoke_psci_fn = __invoke_psci_fn_smc; - } else { - pr_warn("invalid \"method\" property: %s\n", method); - return -EINVAL; - } - return 0; -} - -static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) -{ - invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); -} - -static void psci_sys_poweroff(void) -{ - invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); -} - -/* - * PSCI Function IDs for v0.2+ are well defined so use - * standard values. - */ -static int psci_0_2_init(struct device_node *np) -{ - int err, ver; - - err = get_set_conduit_method(np); - - if (err) - goto out_put_node; - - ver = psci_get_version(); - - if (ver == PSCI_RET_NOT_SUPPORTED) { - /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */ - pr_err("PSCI firmware does not comply with the v0.2 spec.\n"); - err = -EOPNOTSUPP; - goto out_put_node; - } else { - pr_info("PSCIv%d.%d detected in firmware.\n", - PSCI_VERSION_MAJOR(ver), - PSCI_VERSION_MINOR(ver)); - - if (PSCI_VERSION_MAJOR(ver) == 0 && - PSCI_VERSION_MINOR(ver) < 2) { - err = -EINVAL; - pr_err("Conflicting PSCI version detected.\n"); - goto out_put_node; - } - } - - pr_info("Using standard PSCI v0.2 function IDs\n"); - psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND; - psci_ops.cpu_suspend = psci_cpu_suspend; - - psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; - psci_ops.cpu_off = psci_cpu_off; - - psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON; - psci_ops.cpu_on = psci_cpu_on; - - psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE; - psci_ops.migrate = psci_migrate; - - psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO; - psci_ops.affinity_info = psci_affinity_info; - - psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] = - PSCI_0_2_FN_MIGRATE_INFO_TYPE; - psci_ops.migrate_info_type = psci_migrate_info_type; - - arm_pm_restart = psci_sys_reset; - - pm_power_off = psci_sys_poweroff; - -out_put_node: - of_node_put(np); - return err; -} - -/* - * PSCI < v0.2 get PSCI Function IDs via DT. 
- */ -static int psci_0_1_init(struct device_node *np) -{ - u32 id; - int err; - - err = get_set_conduit_method(np); - - if (err) - goto out_put_node; - - pr_info("Using PSCI v0.1 Function IDs from DT\n"); - - if (!of_property_read_u32(np, "cpu_suspend", &id)) { - psci_function_id[PSCI_FN_CPU_SUSPEND] = id; - psci_ops.cpu_suspend = psci_cpu_suspend; - } - - if (!of_property_read_u32(np, "cpu_off", &id)) { - psci_function_id[PSCI_FN_CPU_OFF] = id; - psci_ops.cpu_off = psci_cpu_off; - } - - if (!of_property_read_u32(np, "cpu_on", &id)) { - psci_function_id[PSCI_FN_CPU_ON] = id; - psci_ops.cpu_on = psci_cpu_on; - } - - if (!of_property_read_u32(np, "migrate", &id)) { - psci_function_id[PSCI_FN_MIGRATE] = id; - psci_ops.migrate = psci_migrate; - } - -out_put_node: - of_node_put(np); - return err; -} - -static const struct of_device_id psci_of_match[] __initconst = { - { .compatible = "arm,psci", .data = psci_0_1_init}, - { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, - {}, -}; - -int __init psci_init(void) -{ - struct device_node *np; - const struct of_device_id *matched_np; - psci_initcall_t init_fn; - - np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); - if (!np) - return -ENODEV; - - init_fn = (psci_initcall_t)matched_np->data; - return init_fn(np); -} diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c index 244aaddfbfda..61c04b02faeb 100644 --- a/arch/arm/kernel/psci_smp.c +++ b/arch/arm/kernel/psci_smp.c @@ -17,6 +17,8 @@ #include #include #include +#include + #include #include @@ -56,17 +58,29 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) } #ifdef CONFIG_HOTPLUG_CPU +int psci_cpu_disable(unsigned int cpu) +{ + /* Fail early if we don't have CPU_OFF support */ + if (!psci_ops.cpu_off) + return -EOPNOTSUPP; + + /* Trusted OS will deny CPU_OFF */ + if (psci_tos_resident_on(cpu)) + return -EPERM; + + return 0; +} + void __ref psci_cpu_die(unsigned int cpu) { - const struct psci_power_state ps = { - .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, - }; + u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << + PSCI_0_2_POWER_STATE_TYPE_SHIFT; - if (psci_ops.cpu_off) - psci_ops.cpu_off(ps); + if (psci_ops.cpu_off) + psci_ops.cpu_off(state); - /* We should never return */ - panic("psci: cpu %d failed to shutdown\n", cpu); + /* We should never return */ + panic("psci: cpu %d failed to shutdown\n", cpu); } int __ref psci_cpu_kill(unsigned int cpu) @@ -109,6 +123,7 @@ bool __init psci_smp_available(void) struct smp_operations __initdata psci_smp_ops = { .smp_boot_secondary = psci_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = psci_cpu_disable, .cpu_die = psci_cpu_die, .cpu_kill = psci_cpu_kill, #endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 36c18b73c1f4..9c38bd42f04b 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -972,7 +973,7 @@ void __init setup_arch(char **cmdline_p) unflatten_device_tree(); arm_dt_init_cpu_maps(); - psci_init(); + psci_dt_init(); xen_early_init(); #ifdef CONFIG_SMP if (is_smp()) { diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 231fba0d03e5..6050a14faee6 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -28,8 +28,8 @@ #include #include #include +#include -#include #include #include #include diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c index 7f2bd85eb935..400311695548 
100644 --- a/arch/arm/mach-highbank/pm.c +++ b/arch/arm/mach-highbank/pm.c @@ -16,19 +16,21 @@ #include #include +#include #include #include -#include + +#include + +#define HIGHBANK_SUSPEND_PARAM \ + ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int highbank_suspend_finish(unsigned long val) { - const struct psci_power_state ps = { - .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, - .affinity_level = 1, - }; - - return psci_ops.cpu_suspend(ps, __pa(cpu_resume)); + return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume)); } static int highbank_pm_enter(suspend_state_t state) diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c index c13feec89ea1..ea9728fde9b3 100644 --- a/drivers/cpuidle/cpuidle-calxeda.c +++ b/drivers/cpuidle/cpuidle-calxeda.c @@ -25,16 +25,21 @@ #include #include #include +#include + #include #include -#include + +#include + +#define CALXEDA_IDLE_PARAM \ + ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int calxeda_idle_finish(unsigned long val) { - const struct psci_power_state ps = { - .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, - }; - return psci_ops.cpu_suspend(ps, __pa(cpu_resume)); + return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume)); } static int calxeda_pwrdown_idle(struct cpuidle_device *dev, -- cgit v1.2.3 From 7baa7aecdd2f009ddd00a4ad0690c6918bab5b01 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 29 Jul 2015 12:41:49 +0100 Subject: sched, arm: Remove finish_arch_switch() Fold finish_arch_switch() into switch_to(). Signed-off-by: Will Deacon Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Cc: linux@arm.linux.org.uk [ Fixed up the SOB chain. ] Signed-off-by: Ingo Molnar --- arch/arm/include/asm/switch_to.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index c99e259469f7..12ebfcc1d539 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -10,7 +10,9 @@ * CPU. */ #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) -#define finish_arch_switch(prev) dsb(ish) +#define __complete_pending_tlbi() dsb(ish) +#else +#define __complete_pending_tlbi() #endif /* @@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ + __complete_pending_tlbi(); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) -- cgit v1.2.3 From c268a743103aebba8d81d3365107f7170653099e Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Thu, 30 Jul 2015 19:12:12 +0200 Subject: ARM: at91/soc: add basic support for new sama5d2 SoC Add Kconfig entries, header file changes and addition to the documentation. The early debug infrastructure is also added for easy development. 
Signed-off-by: Ludovic Desroches Signed-off-by: Nicolas Ferre Acked-by: Alexandre Belloni Signed-off-by: Olof Johansson --- Documentation/arm/Atmel/README | 5 +++++ Documentation/devicetree/bindings/arm/atmel-at91.txt | 2 ++ arch/arm/Kconfig.debug | 6 ++++++ arch/arm/include/debug/at91.S | 5 ++++- arch/arm/mach-at91/Kconfig | 12 ++++++++++++ arch/arm/mach-at91/sama5.c | 3 +++ arch/arm/mach-at91/soc.h | 3 +++ 7 files changed, 35 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/Documentation/arm/Atmel/README b/Documentation/arm/Atmel/README index c53a19b4aab2..0931cf7e2e56 100644 --- a/Documentation/arm/Atmel/README +++ b/Documentation/arm/Atmel/README @@ -90,6 +90,11 @@ the Atmel website: http://www.atmel.com. + Datasheet http://www.atmel.com/Images/Atmel-11238-32-bit-Cortex-A5-Microcontroller-SAMA5D4_Datasheet.pdf + - sama5d2 family + - sama5d27 + + Datasheet + Coming soon + Linux kernel information ------------------------ diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt index 424ac8cbfa08..7f04b8ae4ca9 100644 --- a/Documentation/devicetree/bindings/arm/atmel-at91.txt +++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt @@ -27,6 +27,8 @@ compatible: must be one of: o "atmel,at91sam9xe" * "atmel,sama5" for SoCs using a Cortex-A5, shall be extended with the specific SoC family: + o "atmel,sama5d2" shall be extended with the specific SoC compatible: + - "atmel,sama5d27" o "atmel,sama5d3" shall be extended with the specific SoC compatible: - "atmel,sama5d31" - "atmel,sama5d33" diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index a2e16f940394..946c8c0fa1fb 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -141,6 +141,12 @@ choice depends on ARCH_AT91 depends on SOC_SAMA5 + config AT91_DEBUG_LL_DBGU3 + bool "Kernel low-level debugging on sama5d2" + select DEBUG_AT91_UART + depends on ARCH_AT91 + depends on SOC_SAMA5 + config DEBUG_BCM2835 bool "Kernel low-level debugging on BCM2835 PL011 UART" depends on ARCH_BCM2835 diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index c3c45e628e33..2556a8801c8c 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S @@ -13,9 +13,12 @@ #define AT91_DBGU 0xfffff200 /* AT91_BASE_DBGU0 */ #elif defined(CONFIG_AT91_DEBUG_LL_DBGU1) #define AT91_DBGU 0xffffee00 /* AT91_BASE_DBGU1 */ -#else +#elif defined(CONFIG_AT91_DEBUG_LL_DBGU2) /* On sama5d4, use USART3 as low level serial console */ #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ +#else +/* On sama5d2, use UART1 as low level serial console */ +#define AT91_DBGU 0xf8020000 #endif #ifdef CONFIG_MMU diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index fd95f34945f4..89a755b90db2 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -8,6 +8,18 @@ menuconfig ARCH_AT91 select SOC_BUS if ARCH_AT91 +config SOC_SAMA5D2 + bool "SAMA5D2 family" if ARCH_MULTI_V7 + select SOC_SAMA5 + select CACHE_L2X0 + select HAVE_FB_ATMEL + select HAVE_AT91_UTMI + select HAVE_AT91_USB_CLK + select HAVE_AT91_H32MX + select HAVE_AT91_GENERATED_CLK + help + Select this if ou are using one of Atmel's SAMA5D2 family SoC. 
+ config SOC_SAMA5D3 bool "SAMA5D3 family" if ARCH_MULTI_V7 select SOC_SAMA5 diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c index 41d829d8e7d5..90c3c3051ae7 100644 --- a/arch/arm/mach-at91/sama5.c +++ b/arch/arm/mach-at91/sama5.c @@ -18,6 +18,8 @@ #include "soc.h" static const struct at91_soc sama5_socs[] = { + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27_EXID_MATCH, + "sama5d27", "sama5d2"), AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH, "sama5d31", "sama5d3"), AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH, @@ -64,6 +66,7 @@ DT_MACHINE_START(sama5_dt, "Atmel SAMA5") MACHINE_END static const char *sama5_alt_dt_board_compat[] __initconst = { + "atmel,sama5d2", "atmel,sama5d4", NULL }; diff --git a/arch/arm/mach-at91/soc.h b/arch/arm/mach-at91/soc.h index be23c400596b..8ede0ef86172 100644 --- a/arch/arm/mach-at91/soc.h +++ b/arch/arm/mach-at91/soc.h @@ -62,6 +62,9 @@ at91_soc_init(const struct at91_soc *socs); #define AT91SAM9XE256_CIDR_MATCH 0x329a93a0 #define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0 +#define SAMA5D2_CIDR_MATCH 0x0a5c08c0 +#define SAMA5D27_EXID_MATCH 0x00000021 + #define SAMA5D3_CIDR_MATCH 0x0a5c07c0 #define SAMA5D31_EXID_MATCH 0x00444300 #define SAMA5D33_EXID_MATCH 0x00414300 -- cgit v1.2.3 From efaa6e266ba70439da00e7f1c8a218e243ae140a Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 24 Jul 2015 10:21:02 +0100 Subject: firmware: qcom_scm-32: replace open-coded call to __cpuc_flush_dcache_area() Rather than directly accessing architecture internal functions, provide a "method"-centric wrapper for qcom_scm-32 to do what's necessary to ensure that the secure monitor can see the data. This is called "secure_flush_area" and ensures that the specified memory area is coherent across the secure boundary. Acked-by: Andy Gross Reviewed-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 17 +++++++++++++++++ drivers/firmware/qcom_scm-32.c | 4 +--- 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index c5230a44eeca..d5525bfc7e3e 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -502,4 +502,21 @@ static inline void set_kernel_text_ro(void) { } void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, void *kaddr, unsigned long len); +/** + * secure_flush_area - ensure coherency across the secure boundary + * @addr: virtual address + * @size: size of region + * + * Ensure that the specified area of memory is coherent across the secure + * boundary from the non-secure side. This is used when calling secure + * firmware where the secure firmware does not ensure coherency. + */ +static inline void secure_flush_area(const void *addr, size_t size) +{ + phys_addr_t phys = __pa(addr); + + __cpuc_flush_dcache_area((void *)addr, size); + outer_flush_range(phys, phys + size); +} + #endif diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index 1bd6f9c34331..29e6850665eb 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -24,7 +24,6 @@ #include #include -#include #include #include "qcom_scm.h" @@ -219,8 +218,7 @@ static int __qcom_scm_call(const struct qcom_scm_command *cmd) * Flush the command buffer so that the secure world sees * the correct data.
*/ - __cpuc_flush_dcache_area((void *)cmd, cmd->len); - outer_flush_range(cmd_addr, cmd_addr + cmd->len); + secure_flush_area(cmd, cmd->len); ret = smc(cmd_addr); if (ret < 0) -- cgit v1.2.3 From 0ca326de7aa9cb253db9c1a3eb3f0487c8dbf912 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 6 Aug 2015 17:54:44 +0100 Subject: locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations By defining our SMP atomics in terms of relaxed operations, we gain a small reduction in code size and have acquire/release/fence variants generated automatically by the core code. Signed-off-by: Will Deacon Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman.Long@hp.com Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1438880084-18856-9-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- arch/arm/include/asm/atomic.h | 37 ++++++++++++++------------------- arch/arm/include/asm/cmpxchg.h | 47 +++++++----------------------------------- 2 files changed, 24 insertions(+), 60 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 82b75a7cb762..fe3ef397f5a4 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ \ - smp_mb(); \ prefetchw(&v->counter); \ \ __asm__ __volatile__("@ atomic_" #op "_return\n" \ @@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ : "r" (&v->counter), "Ir" (i) \ : "cc"); \ \ - smp_mb(); \ - \ return result; \ } -static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +#define atomic_add_return_relaxed atomic_add_return_relaxed +#define atomic_sub_return_relaxed atomic_sub_return_relaxed + +static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) { int oldval; unsigned long res; - smp_mb(); prefetchw(&ptr->counter); do { @@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) : "cc"); } while (res); - smp_mb(); - return oldval; } +#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed static inline int __atomic_add_unless(atomic_t *v, int a, int u) { @@ -297,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \ } \ #define ATOMIC64_OP_RETURN(op, op1, op2) \ -static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ +static inline long long \ +atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \ { \ long long result; \ unsigned long tmp; \ \ - smp_mb(); \ prefetchw(&v->counter); \ \ __asm__ __volatile__("@ atomic64_" #op "_return\n" \ @@ -316,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ : "r" (&v->counter), "r" (i) \ : "cc"); \ \ - smp_mb(); \ - \ return result; \ } @@ -328,6 +324,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(sub, subs, sbc) +#define atomic64_add_return_relaxed atomic64_add_return_relaxed +#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed + #define atomic64_andnot atomic64_andnot ATOMIC64_OP(and, and, and) @@ -339,13 +338,12 @@ ATOMIC64_OP(xor, eor, eor) #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -static inline long long 
atomic64_cmpxchg(atomic64_t *ptr, long long old, - long long new) +static inline long long +atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new) { long long oldval; unsigned long res; - smp_mb(); prefetchw(&ptr->counter); do { @@ -360,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, : "cc"); } while (res); - smp_mb(); - return oldval; } +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed -static inline long long atomic64_xchg(atomic64_t *ptr, long long new) +static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new) { long long result; unsigned long tmp; - smp_mb(); prefetchw(&ptr->counter); __asm__ __volatile__("@ atomic64_xchg\n" @@ -382,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) : "r" (&ptr->counter), "r" (new) : "cc"); - smp_mb(); - return result; } +#define atomic64_xchg_relaxed atomic64_xchg_relaxed static inline long long atomic64_dec_if_positive(atomic64_t *v) { diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h index 1692a05d3207..916a2744d5c6 100644 --- a/arch/arm/include/asm/cmpxchg.h +++ b/arch/arm/include/asm/cmpxchg.h @@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size unsigned int tmp; #endif - smp_mb(); prefetchw((const void *)ptr); switch (size) { @@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size __bad_xchg(ptr, size), ret = 0; break; } - smp_mb(); return ret; } -#define xchg(ptr, x) ({ \ +#define xchg_relaxed(ptr, x) ({ \ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ sizeof(*(ptr))); \ }) @@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size #error "SMP is not supported on this platform" #endif +#define xchg xchg_relaxed + /* * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * them available. 
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, return oldval; } -static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long ret; - - smp_mb(); - ret = __cmpxchg(ptr, old, new, size); - smp_mb(); - - return ret; -} - -#define cmpxchg(ptr,o,n) ({ \ - (__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr))); \ +#define cmpxchg_relaxed(ptr,o,n) ({ \ + (__typeof__(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr))); \ }) static inline unsigned long __cmpxchg_local(volatile void *ptr, @@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr, #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) -static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr, - unsigned long long old, - unsigned long long new) -{ - unsigned long long ret; - - smp_mb(); - ret = __cmpxchg64(ptr, old, new); - smp_mb(); - - return ret; -} - -#define cmpxchg64(ptr, o, n) ({ \ - (__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ - (unsigned long long)(o), \ - (unsigned long long)(n)); \ -}) - #endif /* __LINUX_ARM_ARCH__ >= 6 */ #endif /* __ASM_ARM_CMPXCHG_H */ -- cgit v1.2.3 From a5f4c561b3b19a9bc43a81da6382b0098ebbc1fb Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Thu, 13 Aug 2015 00:01:52 +0100 Subject: ARM: 8415/1: early fixmap support for earlycon Add early fixmap support, initially to support permanent, fixed mapping support for early console. A temporary, early pte is created which is migrated to a permanent mapping in paging_init. This is also needed since the attributes may change as the memory types are initialized. The 3MiB range of fixmap spans two pte tables, but currently only one pte is created for early fixmap support. Re-add FIX_KMAP_BEGIN to the index calculation in highmem.c since the index for kmap does not start at zero anymore. This reverts 4221e2e6b316 ("ARM: 8031/1: fixmap: remove FIX_KMAP_BEGIN and FIX_KMAP_END") to some extent. Cc: Mark Salter Cc: Kees Cook Cc: Laura Abbott Cc: Arnd Bergmann Cc: Ard Biesheuvel Signed-off-by: Rob Herring Signed-off-by: Stefan Agner Signed-off-by: Russell King --- arch/arm/Kconfig | 3 ++ arch/arm/include/asm/fixmap.h | 15 +++++++- arch/arm/kernel/setup.c | 4 ++ arch/arm/mm/highmem.c | 6 +-- arch/arm/mm/mmu.c | 88 +++++++++++++++++++++++++++++++++++++++---- 5 files changed, 105 insertions(+), 11 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a750c1425c3a..1bcda7cb2e04 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64 config ARCH_HAS_BANDGAP bool +config FIX_EARLYCON_MEM + def_bool y if MMU + config GENERIC_HWEIGHT bool default y diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index 0415eae1df27..58cfe9f1a687 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -6,9 +6,13 @@ #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) #include +#include enum fixed_addresses { - FIX_KMAP_BEGIN, + FIX_EARLYCON_MEM_BASE, + __end_of_permanent_fixed_addresses, + + FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, /* Support writing RO kernel text via kprobes, jump labels, etc. 
*/ @@ -18,7 +22,16 @@ enum fixed_addresses { __end_of_fixed_addresses }; +#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY) + +#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK) + +/* Used by set_fixmap_(io|nocache), both meant for mapping a device */ +#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED) +#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); +void __init early_fixmap_init(void); #include diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 6bbec6042052..e2ecee6b70ca 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -954,6 +955,9 @@ void __init setup_arch(char **cmdline_p) strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = cmd_line; + if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM)) + early_fixmap_init(); + parse_early_param(); #ifdef CONFIG_MMU diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index ee8dfa793989..9df5f09585ca 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page) type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); + idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM /* @@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr) if (kvaddr >= (void *)FIXADDR_START) { type = kmap_atomic_idx(); - idx = type + KM_TYPE_NR * smp_processor_id(); + idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); if (cache_is_vivt()) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); @@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn) return page_address(page); type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); + idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(get_fixmap_pte(vaddr))); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6ca7d9aa896f..fb9e817d08bb 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type) } EXPORT_SYMBOL(get_mem_type); +static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr); + +static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS] + __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata; + +static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr) +{ + return &bm_pte[pte_index(addr)]; +} + +static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr) +{ + return pte_offset_kernel(dir, addr); +} + +static inline pmd_t * __init fixmap_pmd(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + pud_t *pud = pud_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); + + return pmd; +} + +void __init early_fixmap_init(void) +{ + pmd_t *pmd; + + /* + * The early fixmap range spans multiple pmds, for which + * we are not prepared: + */ + BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT) + != FIXADDR_TOP >> PMD_SHIFT); + + pmd = fixmap_pmd(FIXADDR_TOP); + pmd_populate_kernel(&init_mm, pmd, bm_pte); + + pte_offset_fixmap = pte_offset_early_fixmap; +} + /* * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range(). 
* As a result, this can only be called with preemption disabled, as under @@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type); void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { unsigned long vaddr = __fix_to_virt(idx); - pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr); + pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr); /* Make sure fixmap region does not exceed available allocation. */ BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) > @@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md) } if ((md->type == MT_DEVICE || md->type == MT_ROM) && - md->virtual >= PAGE_OFFSET && + md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n", (long long)__pfn_to_phys((u64)md->pfn), md->virtual); @@ -1213,10 +1254,10 @@ void __init arm_mm_memblock_reserve(void) /* * Set up the device mappings. Since we clear out the page tables for all - * mappings above VMALLOC_START, we will remove any debug device mappings. - * This means you have to be careful how you debug this function, or any - * called function. This means you can't use any function or debugging - * method which may touch any device, otherwise the kernel _will_ crash. + * mappings above VMALLOC_START, except early fixmap, we might remove debug + * device mappings. This means earlycon can be used to debug this function + * Any other function or debugging method which may touch any device _will_ + * crash the kernel. */ static void __init devicemaps_init(const struct machine_desc *mdesc) { @@ -1231,7 +1272,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) early_trap_init(vectors); - for (addr = VMALLOC_START; addr; addr += PMD_SIZE) + /* + * Clear page table except top pmd used by early fixmaps + */ + for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); /* @@ -1483,6 +1527,35 @@ void __init early_paging_init(const struct machine_desc *mdesc) #endif +static void __init early_fixmap_shutdown(void) +{ + int i; + unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1); + + pte_offset_fixmap = pte_offset_late_fixmap; + pmd_clear(fixmap_pmd(va)); + local_flush_tlb_kernel_page(va); + + for (i = 0; i < __end_of_permanent_fixed_addresses; i++) { + pte_t *pte; + struct map_desc map; + + map.virtual = fix_to_virt(i); + pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual); + + /* Only i/o device mappings are supported ATM */ + if (pte_none(*pte) || + (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED) + continue; + + map.pfn = pte_pfn(*pte); + map.type = MT_DEVICE; + map.length = PAGE_SIZE; + + create_mapping(&map); + } +} + /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. @@ -1495,6 +1568,7 @@ void __init paging_init(const struct machine_desc *mdesc) prepare_page_table(); map_lowmem(); dma_contiguous_remap(); + early_fixmap_shutdown(); devicemaps_init(mdesc); kmap_init(); tcm_init(); -- cgit v1.2.3 From 8901925d32232adb57ba1b4c26d0c0f9d521a78c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 17 Aug 2015 03:59:52 +0100 Subject: ARM: 8417/1: refactor bitops functions with BIT_MASK() and BIT_WORD() Use BIT_MASK() and BIT_WORD() rather than hard-coding the size of the "long" type. 
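For reference, the two generic helpers reduce to simple divide/modulo arithmetic on BITS_PER_LONG. The standalone sketch below spells that out with local copies of the macros (the word size is pinned to 32 to mirror 32-bit ARM); it is an illustration of the refactoring above, not kernel code, and the bit-setting helper is deliberately non-atomic.

#include <stdio.h>

#define EX_BITS_PER_LONG 32                                /* as on 32-bit ARM */
#define EX_BIT_WORD(nr)  ((nr) / EX_BITS_PER_LONG)         /* which word holds the bit; was "bit >> 5"       */
#define EX_BIT_MASK(nr)  (1UL << ((nr) % EX_BITS_PER_LONG)) /* mask within that word; was "1UL << (bit & 31)" */

/* Non-atomic sketch of the pattern used in the bitops above. */
static void set_bit_sketch(unsigned int bit, unsigned long *p)
{
        p[EX_BIT_WORD(bit)] |= EX_BIT_MASK(bit);
}

int main(void)
{
        unsigned long words[2] = { 0, 0 };

        set_bit_sketch(37, words);      /* bit 37 lands in word 1, mask 0x20 */
        printf("words[0]=%#lx words[1]=%#lx\n", words[0], words[1]);
        return 0;
}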
Signed-off-by: Masahiro Yamada Signed-off-by: Russell King --- arch/arm/include/asm/bitops.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 56380995f4c3..e943e6cee254 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -35,9 +35,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p |= mask; @@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long * static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p &= ~mask; @@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p ^= mask; @@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; @@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; @@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; -- cgit v1.2.3 From 96231b2686b53f71838a335bdc404cb5285d1a01 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 18 Aug 2015 09:14:15 +0100 Subject: ARM: 8419/1: dma-mapping: harmonize definition of DMA_ERROR_CODE All architectures except arm that define DMA_ERROR_CODE are casting it to (dma_addr_t) - as it is always compared to dma_addr_t in arm as well this could be harmonized. 
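For context, the constant only ever surfaces through dma_mapping_error(): ARM's mapping-error hook compares the returned handle against DMA_ERROR_CODE, and with LPAE dma_addr_t can be 64 bits wide, which is what makes spelling out the (dma_addr_t) cast worthwhile. Below is a minimal driver-style sketch of that check; it is not part of this patch, and "dev", "buf" and "len" are assumed to come from the caller.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Usage sketch only. */
static int map_tx_buffer(struct device *dev, void *buf, size_t len,
                         dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* On ARM this reduces to: *handle == DMA_ERROR_CODE */
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;

        return 0;
}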
Signed-off-by: Nicholas Mc Guire Acked-by: Marek Szyprowski Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index b52101d37ec7..a68b9d8a71fe 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -14,7 +14,7 @@ #include #include -#define DMA_ERROR_CODE (~0) +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) extern struct dma_map_ops arm_dma_ops; extern struct dma_map_ops arm_coherent_dma_ops; -- cgit v1.2.3 From 4a5b69464e51f4a8dd432e8c2a1468630df1a53c Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Tue, 28 Jul 2015 10:10:42 +0100 Subject: xen/events: Support event channel rebind on ARM Currently, the event channel rebind code is gated with the presence of the vector callback. The virtual interrupt controller on ARM has the concept of per-CPU interrupt (PPI) which allows us to support per-VCPU event channels. Therefore there is no need for a vector callback on ARM. Xen is already using a free PPI to notify the guest VCPU of an event. Furthermore, the xen code initialization in Linux (see arch/arm/xen/enlighten.c) is correctly requesting a per-CPU IRQ. Introduce a new helper, xen_support_evtchn_rebind, to allow the architecture to decide whether rebinding an event is supported or not. It will always return true on ARM and keep the same behavior on x86. This also allows us to drop the usage of xen_have_vector_callback entirely in the ARM code. Signed-off-by: Julien Grall Signed-off-by: David Vrabel --- arch/arm/include/asm/xen/events.h | 6 ++++++ arch/arm/xen/enlighten.c | 4 ---- arch/arm64/include/asm/xen/events.h | 6 ++++++ arch/x86/include/asm/xen/events.h | 11 +++++++++++ drivers/xen/events/events_base.c | 6 +----- include/xen/events.h | 1 - 6 files changed, 24 insertions(+), 10 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h index 8b1f37bfeeec..71e473d05fcc 100644 --- a/arch/arm/include/asm/xen/events.h +++ b/arch/arm/include/asm/xen/events.h @@ -20,4 +20,10 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) atomic64_t, \ counter), (val)) +/* Rebind event channel is supported by default */ +static inline bool xen_support_evtchn_rebind(void) +{ + return true; +} + #endif /* _ASM_ARM_XEN_EVENTS_H */ diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 6c09cc440a2b..40b961d8e953 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -45,10 +45,6 @@ static struct vcpu_info __percpu *xen_vcpu_info; unsigned long xen_released_pages; struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; -/* TODO: to be removed */ -__read_mostly int xen_have_vector_callback; -EXPORT_SYMBOL_GPL(xen_have_vector_callback); - int xen_platform_pci_unplug = XEN_UNPLUG_ALL; EXPORT_SYMBOL_GPL(xen_platform_pci_unplug); diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h index 86553213c132..4318866d053c 100644 --- a/arch/arm64/include/asm/xen/events.h +++ b/arch/arm64/include/asm/xen/events.h @@ -18,4 +18,10 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) #define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) +/* Rebind event channel is supported by default */ +static inline bool xen_support_evtchn_rebind(void) +{ + return true; +} + #endif /* _ASM_ARM64_XEN_EVENTS_H */ diff --git a/arch/x86/include/asm/xen/events.h
b/arch/x86/include/asm/xen/events.h index 608a79d5a466..e6911caf5bbf 100644 --- a/arch/x86/include/asm/xen/events.h +++ b/arch/x86/include/asm/xen/events.h @@ -20,4 +20,15 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) /* No need for a barrier -- XCHG is a barrier on x86. */ #define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) +extern int xen_have_vector_callback; + +/* + * Events delivered via platform PCI interrupts are always + * routed to vcpu 0 and hence cannot be rebound. + */ +static inline bool xen_support_evtchn_rebind(void) +{ + return (!xen_hvm_domain() || xen_have_vector_callback); +} + #endif /* _ASM_X86_XEN_EVENTS_H */ diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 96093ae369a5..ed620e5857a1 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -1301,11 +1301,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) if (!VALID_EVTCHN(evtchn)) return -1; - /* - * Events delivered via platform PCI interrupts are always - * routed to vcpu 0 and hence cannot be rebound. - */ - if (xen_hvm_domain() && !xen_have_vector_callback) + if (!xen_support_evtchn_rebind()) return -1; /* Send future instances of this interrupt to other vcpu. */ diff --git a/include/xen/events.h b/include/xen/events.h index 7d95fdf9cf3e..88da2abaf535 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -92,7 +92,6 @@ void xen_hvm_callback_vector(void); #ifdef CONFIG_TRACING #define trace_xen_hvm_callback_vector xen_hvm_callback_vector #endif -extern int xen_have_vector_callback; int xen_set_callback_via(uint64_t via); void xen_evtchn_do_upcall(struct pt_regs *regs); void xen_hvm_evtchn_do_upcall(void); -- cgit v1.2.3 From 724afaea2020f3bd98891b535f3ce5d3935bcf63 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Fri, 7 Aug 2015 17:34:34 +0100 Subject: arm/xen: Remove helpers which are PV specific ARM guests are always HVM. The current implementation is assuming a 1:1 mapping which is only true for DOM0 and may not be at all in the future. Furthermore, all the helpers but arbitrary_virt_to_machine are used in x86 specific code (or only compiled for). The helper arbitrary_virt_to_machine is only used in PV specific code. Therefore we should never call the function. Add a BUG() in this helper and drop all the others. Signed-off-by: Julien Grall Acked-by: Stefano Stabellini Signed-off-by: David Vrabel --- arch/arm/include/asm/xen/page.h | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 1bee8ca12494..98b1084f8282 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -54,26 +54,14 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) #define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn) -static inline xmaddr_t phys_to_machine(xpaddr_t phys) -{ - unsigned offset = phys.paddr & ~PAGE_MASK; - return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset); -} - -static inline xpaddr_t machine_to_phys(xmaddr_t machine) -{ - unsigned offset = machine.maddr & ~PAGE_MASK; - return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); -} /* VIRT <-> MACHINE conversion */ -#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) #define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) +/* Only used in PV code. But ARM guests are always HVM. 
*/ static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) { - /* TODO: assuming it is mapped in the kernel 1:1 */ - return virt_to_machine(vaddr); + BUG(); } /* TODO: this shouldn't be here but it is because the frontend drivers -- cgit v1.2.3 From 8953aab1e80fd299d6185a57edaff733fa5c6a55 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Wed, 29 Jul 2015 12:33:18 +0100 Subject: ARM/PCI, designware, xilinx: Use pci_scan_root_bus_msi() ARM previously stored the msi_controller pointer in its sysdata, struct pci_sys_data, and implemented pcibios_msi_controller() to retrieve it. That made PCI host controller drivers specific to ARM because they had to put the msi_controller pointer in the ARM-specific pci_sys_data. There is now a generic mechanism, pci_scan_root_bus_msi(), for giving the msi_controller pointer to the PCI core. Use this for all ARM systems and for the DesignWare and Xilinx PCI host controller drivers. This removes an ARM dependency from the DesignWare, DRA7xx, EXYNOS, i.MX6, Keystone, Layerscape, SPEAr13xx, and Xilinx drivers. [bhelgaas: changelog, split into separate patch] Suggested-by: Russell King Signed-off-by: Lorenzo Pieralisi Signed-off-by: Bjorn Helgaas Acked-by: Jingoo Han CC: Pratyush Anand CC: Arnd Bergmann CC: Simon Horman CC: Russell King CC: Thomas Petazzoni CC: Thierry Reding CC: Michal Simek CC: Marc Zyngier --- arch/arm/include/asm/mach/pci.h | 2 -- arch/arm/kernel/bios32.c | 5 +++-- drivers/pci/host/pcie-designware.c | 12 +++++++++--- drivers/pci/host/pcie-xilinx.c | 11 ++++++++--- 4 files changed, 20 insertions(+), 10 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 28b9bb35949e..c074e7a319e7 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -19,9 +19,7 @@ struct pci_bus; struct device; struct hw_pci { -#ifdef CONFIG_PCI_MSI struct msi_controller *msi_ctrl; -#endif struct pci_ops *ops; int nr_controllers; void **private_data; diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 4e95260efb39..283bc1c7b502 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -486,8 +486,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, if (hw->scan) sys->bus = hw->scan(nr, sys); else - sys->bus = pci_scan_root_bus(parent, sys->busnr, - hw->ops, sys, &sys->resources); + sys->bus = pci_scan_root_bus_msi(parent, + sys->busnr, hw->ops, sys, + &sys->resources, hw->msi_ctrl); if (WARN(!sys->bus, "PCI: unable to scan bus!")) { kfree(sys); diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 69486be7181e..fe11cc175f77 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -526,7 +526,6 @@ int dw_pcie_host_init(struct pcie_port *pp) #ifdef CONFIG_PCI_MSI dw_pcie_msi_chip.dev = pp->dev; - dw_pci.msi_ctrl = &dw_pcie_msi_chip; #endif dw_pci.nr_controllers = 1; @@ -708,8 +707,15 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) struct pcie_port *pp = sys_to_pcie(sys); pp->root_bus_nr = sys->busnr; - bus = pci_scan_root_bus(pp->dev, sys->busnr, - &dw_pcie_ops, sys, &sys->resources); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + bus = pci_scan_root_bus_msi(pp->dev, sys->busnr, &dw_pcie_ops, + sys, &sys->resources, + &dw_pcie_msi_chip); + else + bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops, + sys, &sys->resources); + if (!bus) return NULL; diff --git a/drivers/pci/host/pcie-xilinx.c 
b/drivers/pci/host/pcie-xilinx.c index f1a06a091ccb..38d3114b97d3 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -647,9 +647,15 @@ static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys) struct pci_bus *bus; port->root_busno = sys->busnr; - bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops, - sys, &sys->resources); + if (IS_ENABLED(CONFIG_PCI_MSI)) + bus = pci_scan_root_bus_msi(port->dev, sys->busnr, + &xilinx_pcie_ops, sys, + &sys->resources, + &xilinx_pcie_msi_chip); + else + bus = pci_scan_root_bus(port->dev, sys->busnr, + &xilinx_pcie_ops, sys, &sys->resources); return bus; } @@ -847,7 +853,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev) #ifdef CONFIG_PCI_MSI xilinx_pcie_msi_chip.dev = port->dev; - hw.msi_ctrl = &xilinx_pcie_msi_chip; #endif pci_common_init_dev(dev, &hw); -- cgit v1.2.3 From b5e5e8a13e19ee17e6ffbe2c3d344182b7be20f6 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Tue, 4 Aug 2015 11:58:49 -0500 Subject: ARM/PCI: Remove msi_controller from struct pci_sys_data ARM now uses pci_bus->msi to store the msi_controller pointer, so we don't need to save it in struct pci_sys_data, and we don't need to implement pcibios_msi_controller() to get it out of pci_sys_data. Remove msi_controller from struct pci_sys_data and pcibios_msi_controller(). [bhelgaas: changelog, split into separate patch] Signed-off-by: Lorenzo Pieralisi Signed-off-by: Bjorn Helgaas Reviewed-by: Jingoo Han --- arch/arm/include/asm/mach/pci.h | 3 --- arch/arm/kernel/bios32.c | 12 ------------ 2 files changed, 15 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index c074e7a319e7..8857d2869a5f 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -40,9 +40,6 @@ struct hw_pci { * Per-controller structure */ struct pci_sys_data { -#ifdef CONFIG_PCI_MSI - struct msi_controller *msi_ctrl; -#endif struct list_head node; int busnr; /* primary bus number */ u64 mem_offset; /* bus->cpu memory mapping offset */ diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 283bc1c7b502..874e1823f803 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -18,15 +18,6 @@ static int debug_pci; -#ifdef CONFIG_PCI_MSI -struct msi_controller *pcibios_msi_controller(struct pci_dev *dev) -{ - struct pci_sys_data *sysdata = dev->bus->sysdata; - - return sysdata->msi_ctrl; -} -#endif - /* * We can't use pci_get_device() here since we are * called from interrupt context. @@ -462,9 +453,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, if (WARN(!sys, "PCI: unable to allocate sys data!")) break; -#ifdef CONFIG_PCI_MSI - sys->msi_ctrl = hw->msi_ctrl; -#endif sys->busnr = busnr; sys->swizzle = hw->swizzle; sys->map_irq = hw->map_irq; -- cgit v1.2.3 From 1eef5d2f1b461c120bcd82077edee5ec706ac53b Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 19 Aug 2015 21:23:48 +0100 Subject: ARM: domains: switch to keeping domain value in register Rather than modifying both the domain access control register and our per-thread copy, modify only the domain access control register, and use the per-thread copy to save and restore the register over context switches. We can also avoid the explicit initialisation of the init thread_info structure. This allows us to avoid needing to gain access to the thread information at the uaccess control sites. 
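Expressed in C, the context-switch handling that the entry-armv.S hunk below adds (under CONFIG_CPU_USE_DOMAINS) amounts to the following sketch. The real implementation is the assembly in __switch_to; this is only a readability aid.

#include <asm/domain.h>         /* get_domain()/set_domain()      */
#include <asm/thread_info.h>    /* struct thread_info::cpu_domain */

/* Readability sketch of the DACR save/restore now done in __switch_to. */
static inline void switch_dacr(struct thread_info *prev,
                               struct thread_info *next)
{
        prev->cpu_domain = get_domain();        /* save the outgoing thread's DACR    */
        set_domain(next->cpu_domain);           /* install the incoming thread's DACR */
}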
Signed-off-by: Russell King --- arch/arm/include/asm/domain.h | 20 +++++++++++++++----- arch/arm/include/asm/thread_info.h | 3 --- arch/arm/kernel/entry-armv.S | 2 ++ arch/arm/kernel/process.c | 13 ++++++++++--- 4 files changed, 27 insertions(+), 11 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 6ddbe446425e..7f2941905714 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -59,6 +59,17 @@ #ifndef __ASSEMBLY__ +static inline unsigned int get_domain(void) +{ + unsigned int domain; + + asm( + "mrc p15, 0, %0, c3, c0 @ get domain" + : "=r" (domain)); + + return domain; +} + #ifdef CONFIG_CPU_USE_DOMAINS static inline void set_domain(unsigned val) { @@ -70,11 +81,10 @@ static inline void set_domain(unsigned val) #define modify_domain(dom,type) \ do { \ - struct thread_info *thread = current_thread_info(); \ - unsigned int domain = thread->cpu_domain; \ - domain &= ~domain_val(dom, DOMAIN_MANAGER); \ - thread->cpu_domain = domain | domain_val(dom, type); \ - set_domain(thread->cpu_domain); \ + unsigned int domain = get_domain(); \ + domain &= ~domain_val(dom, DOMAIN_MANAGER); \ + domain = domain | domain_val(dom, type); \ + set_domain(domain); \ } while (0) #else diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index bd32eded3e50..0a0aec410d8c 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -74,9 +74,6 @@ struct thread_info { .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 7dac3086e361..d19adcf6c580 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -770,6 +770,8 @@ ENTRY(__switch_to) ldr r4, [r2, #TI_TP_VALUE] ldr r5, [r2, #TI_TP_VALUE + 4] #ifdef CONFIG_CPU_USE_DOMAINS + mrc p15, 0, r6, c3, c0, 0 @ Get domain register + str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register ldr r6, [r2, #TI_CPU_DOMAIN] #endif switch_tls r1, r4, r5, r3, r7 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index f192a2a41719..e722f9b3c9b1 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -146,10 +146,9 @@ void __show_regs(struct pt_regs *regs) buf[0] = '\0'; #ifdef CONFIG_CPU_CP15_MMU { - unsigned int transbase, dac; + unsigned int transbase, dac = get_domain(); asm("mrc p15, 0, %0, c2, c0\n\t" - "mrc p15, 0, %1, c3, c0\n" - : "=r" (transbase), "=r" (dac)); + : "=r" (transbase)); snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", transbase, dac); } @@ -210,6 +209,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); + /* + * Copy the initial value of the domain access control register + * from the current thread: thread->addr_limit will have been + * copied from the current thread via setup_thread_stack() in + * kernel/fork.c + */ + thread->cpu_domain = get_domain(); + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; -- cgit v1.2.3 From 8e798706f7e9cd7f096aa194de90269dde83773e Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 19 Aug 2015 22:36:24 +0100 Subject: ARM: domains: provide domain_mask() Provide a 
macro to generate the mask for a domain, rather than using domain_val(, DOMAIN_MANAGER) which won't work when CPU_USE_DOMAINS is turned off. Signed-off-by: Russell King --- arch/arm/include/asm/domain.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 7f2941905714..045b9b453bcd 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -55,7 +55,8 @@ #define DOMAIN_MANAGER 1 #endif -#define domain_val(dom,type) ((type) << (2*(dom))) +#define domain_mask(dom) ((3) << (2 * (dom))) +#define domain_val(dom,type) ((type) << (2 * (dom))) #ifndef __ASSEMBLY__ @@ -82,7 +83,7 @@ static inline void set_domain(unsigned val) #define modify_domain(dom,type) \ do { \ unsigned int domain = get_domain(); \ - domain &= ~domain_val(dom, DOMAIN_MANAGER); \ + domain &= ~domain_mask(dom); \ domain = domain | domain_val(dom, type); \ set_domain(domain); \ } while (0) -- cgit v1.2.3 From 0171356a7708af01ad3224702b7f0aaa5b7a1399 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Aug 2015 09:23:26 +0100 Subject: ARM: domains: move initial domain setting value to asm/domains.h Signed-off-by: Russell King --- arch/arm/include/asm/domain.h | 6 ++++++ arch/arm/kernel/head.S | 5 +---- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 045b9b453bcd..4218f88e8f7e 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -58,6 +58,12 @@ #define domain_mask(dom) ((3) << (2 * (dom))) #define domain_val(dom,type) ((type) << (2 * (dom))) +#define DACR_INIT \ + (domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT)) + #ifndef __ASSEMBLY__ static inline unsigned int get_domain(void) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index bd755d97e459..d56e5e9a9e1e 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -461,10 +461,7 @@ __enable_mmu: #ifdef CONFIG_ARM_LPAE mcrr p15, 0, r4, r5, c2 @ load TTBR0 #else - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) + mov r5, #DACR_INIT mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer #endif -- cgit v1.2.3 From 3c2aed5b28819564e1a07b4686bd89802bcc4d6b Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Aug 2015 09:30:16 +0100 Subject: ARM: domains: get rid of manager mode for user domain Since we switched to early trap initialisation in 94e5a85b3be0 ("ARM: earlier initialization of vectors page") we haven't been writing directly to the vectors page, and so there's no need for this domain to be in manager mode. Switch it to client mode. 
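A quick refresher on what these values encode, since the next few patches keep adjusting them: the ARM DACR holds two bits per domain, where 0b00 means no access, 0b01 is client mode (page-table permissions are enforced) and 0b11 is manager mode (permissions are ignored). The standalone sketch below walks through the modify_domain() arithmetic for switching the user domain to client, using local illustrative copies of the macros rather than the kernel headers.

#include <stdio.h>

/* Illustrative copies of the macros above (values for the !CONFIG_IO_36 layout). */
#define EX_DOMAIN_USER          1
#define EX_DOMAIN_CLIENT        1
#define ex_domain_mask(dom)     (3u << (2 * (dom)))
#define ex_domain_val(dom, t)   ((t) << (2 * (dom)))

int main(void)
{
        unsigned int dacr = 0xffffffff;         /* pretend every domain is manager */

        /* modify_domain(DOMAIN_USER, DOMAIN_CLIENT): clear bits [3:2], then set 0b01 */
        dacr &= ~ex_domain_mask(EX_DOMAIN_USER);
        dacr |= ex_domain_val(EX_DOMAIN_USER, EX_DOMAIN_CLIENT);

        printf("DACR = %#010x\n", dacr);        /* prints 0xfffffff7 */
        return 0;
}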
Signed-off-by: Russell King --- arch/arm/include/asm/domain.h | 2 +- arch/arm/kernel/traps.c | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 4218f88e8f7e..08b601e69ddc 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -59,7 +59,7 @@ #define domain_val(dom,type) ((type) << (2 * (dom))) #define DACR_INIT \ - (domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT)) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index d358226236f2..969f9d9e665f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base) kuser_init(vectors_base); flush_icache_range(vectors, vectors + PAGE_SIZE * 2); - modify_domain(DOMAIN_USER, DOMAIN_CLIENT); #else /* ifndef CONFIG_CPU_V7M */ /* * on V7-M there is no need to copy the vector table to a dedicated -- cgit v1.2.3 From a02d8dfd54cdf3b1b0464ccc2c1c4afe2c003a35 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Aug 2015 09:38:31 +0100 Subject: ARM: domains: keep vectors in separate domain Keep the machine vectors in its own domain to avoid software based user access control from making the vector code inaccessible, and thereby deadlocking the machine. Signed-off-by: Russell King --- arch/arm/include/asm/domain.h | 4 +++- arch/arm/include/asm/pgtable-2level-hwdef.h | 1 + arch/arm/mm/mmu.c | 4 ++-- arch/arm/mm/pgd.c | 10 ++++++++++ 4 files changed, 16 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 08b601e69ddc..396a12e486fe 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -43,6 +43,7 @@ #define DOMAIN_USER 1 #define DOMAIN_IO 0 #endif +#define DOMAIN_VECTORS 3 /* * Domain types @@ -62,7 +63,8 @@ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) #ifndef __ASSEMBLY__ diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h index 5e68278e953e..d0131ee6f6af 100644 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -23,6 +23,7 @@ #define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) +#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f) #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ /* * - section diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6ca7d9aa896f..a016de248034 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -291,13 +291,13 @@ static struct mem_type mem_types[] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY, .prot_l1 = PMD_TYPE_TABLE, - .domain = DOMAIN_USER, + .domain = DOMAIN_VECTORS, }, [MT_HIGH_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_USER | L_PTE_RDONLY, .prot_l1 = PMD_TYPE_TABLE, - .domain = DOMAIN_USER, + .domain = DOMAIN_VECTORS, }, [MT_MEMORY_RWX] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c 
index a3681f11dd9f..e683db1b90a3 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm) if (!new_pte) goto no_pte; +#ifndef CONFIG_ARM_LPAE + /* + * Modify the PTE pointer to have the correct domain. This + * needs to be the vectors domain to avoid the low vectors + * being unmapped. + */ + pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK; + pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS); +#endif + init_pud = pud_offset(init_pgd, 0); init_pmd = pmd_offset(init_pud, 0); init_pte = pte_offset_map(init_pmd, 0); -- cgit v1.2.3 From 1fb6755f16872ad256c18cce2830f9087502dffd Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Aug 2015 09:42:10 +0100 Subject: ARM: domains: remove DOMAIN_TABLE DOMAIN_TABLE is not used; in any case, it aliases to the kernel domain. Remove this definition. Signed-off-by: Russell King --- arch/arm/include/asm/domain.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 396a12e486fe..2be929549938 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -34,12 +34,10 @@ */ #ifndef CONFIG_IO_36 #define DOMAIN_KERNEL 0 -#define DOMAIN_TABLE 0 #define DOMAIN_USER 1 #define DOMAIN_IO 2 #else #define DOMAIN_KERNEL 2 -#define DOMAIN_TABLE 2 #define DOMAIN_USER 1 #define DOMAIN_IO 0 #endif @@ -62,7 +60,6 @@ #define DACR_INIT \ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) -- cgit v1.2.3 From b64d1f66517a89b9b0f6bd0bca86b05a55a5e742 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 18 Aug 2015 23:06:25 +0100 Subject: ARM: uaccess: simplify user access assembly The user assembly for byte and word accesses was virtually identical. Rather than duplicating this, use a macro instead. 
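For context on how the unified macro is consumed: __get_user() expands to one of these asm blocks (ldrb for a byte, ldr for a word), returns 0 on success and -EFAULT if the user access faults, and as the double-underscore variant it assumes the caller has already validated the pointer with access_ok(). A short usage sketch, not part of this patch:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Usage sketch only. */
static int read_user_byte(const unsigned char __user *uptr, unsigned char *out)
{
        unsigned char val;

        if (__get_user(val, uptr))      /* byte-sized, so this uses __get_user_asm(..., ldrb) */
                return -EFAULT;

        *out = val;
        return 0;
}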
Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 47 +++++++++++------------------------------- 1 file changed, 12 insertions(+), 35 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 74b17d09ef7a..4cf54ebe408a 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -311,9 +311,9 @@ do { \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_asm_byte(x, addr, err) \ +#define __get_user_asm(x, addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(ldrb) " %1,[%2],#0\n" \ + "1: " TUSER(instr) " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -329,6 +329,9 @@ do { \ : "r" (addr), "i" (-EFAULT) \ : "cc") +#define __get_user_asm_byte(x, addr, err) \ + __get_user_asm(x, addr, err, ldrb) + #ifndef __ARMEB__ #define __get_user_asm_half(x, __gu_addr, err) \ ({ \ @@ -348,22 +351,7 @@ do { \ #endif #define __get_user_asm_word(x, addr, err) \ - __asm__ __volatile__( \ - "1: " TUSER(ldr) " %1,[%2],#0\n" \ - "2:\n" \ - " .pushsection .text.fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " mov %1, #0\n" \ - " b 2b\n" \ - " .popsection\n" \ - " .pushsection __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .popsection" \ - : "+r" (err), "=&r" (x) \ - : "r" (addr), "i" (-EFAULT) \ - : "cc") + __get_user_asm(x, addr, err, ldr) #define __put_user(x, ptr) \ ({ \ @@ -393,9 +381,9 @@ do { \ } \ } while (0) -#define __put_user_asm_byte(x, __pu_addr, err) \ +#define __put_user_asm(x, __pu_addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(strb) " %1,[%2],#0\n" \ + "1: " TUSER(instr) " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -410,6 +398,9 @@ do { \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") +#define __put_user_asm_byte(x, __pu_addr, err) \ + __put_user_asm(x, __pu_addr, err, strb) + #ifndef __ARMEB__ #define __put_user_asm_half(x, __pu_addr, err) \ ({ \ @@ -427,21 +418,7 @@ do { \ #endif #define __put_user_asm_word(x, __pu_addr, err) \ - __asm__ __volatile__( \ - "1: " TUSER(str) " %1,[%2],#0\n" \ - "2:\n" \ - " .pushsection .text.fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " b 2b\n" \ - " .popsection\n" \ - " .pushsection __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .popsection" \ - : "+r" (err) \ - : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ - : "cc") + __put_user_asm(x, __pu_addr, err, str) #ifndef __ARMEB__ #define __reg_oper0 "%R2" -- cgit v1.2.3 From 01e09a28167c338684606b70797422da3bbb6650 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 20 Aug 2015 14:22:48 +0100 Subject: ARM: entry: get rid of asm_trace_hardirqs_on_cond There's no need for this macro, it can use a default for the condition argument. 
Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 4abe57279c66..742495eb5526 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -116,7 +116,7 @@ #endif .endm - .macro asm_trace_hardirqs_on_cond, cond + .macro asm_trace_hardirqs_on, cond=al #if defined(CONFIG_TRACE_IRQFLAGS) /* * actually the registers should be pushed and pop'd conditionally, but @@ -128,10 +128,6 @@ #endif .endm - .macro asm_trace_hardirqs_on - asm_trace_hardirqs_on_cond al - .endm - .macro disable_irq disable_irq_notrace asm_trace_hardirqs_off @@ -173,7 +169,7 @@ .macro restore_irqs, oldcpsr tst \oldcpsr, #PSR_I_BIT - asm_trace_hardirqs_on_cond eq + asm_trace_hardirqs_on cond=eq restore_irqs_notrace \oldcpsr .endm -- cgit v1.2.3 From 3302caddf10ad50710dbb7a94ccbdb3ad5bf1412 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 20 Aug 2015 16:13:37 +0100 Subject: ARM: entry: efficiency cleanups Make the "fast" syscall return path fast again. The addition of IRQ tracing and context tracking has made this path grossly inefficient. We can do much better if these options are enabled if we save the syscall return code on the stack - we then don't need to save a bunch of registers around every single callout to C code. Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 16 +++++++--- arch/arm/include/asm/thread_info.h | 20 +++++-------- arch/arm/kernel/entry-common.S | 61 ++++++++++++++++++++++++++++---------- arch/arm/kernel/signal.c | 6 ++++ 4 files changed, 71 insertions(+), 32 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 742495eb5526..5a5504f90d5f 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -108,29 +108,37 @@ .endm #endif - .macro asm_trace_hardirqs_off + .macro asm_trace_hardirqs_off, save=1 #if defined(CONFIG_TRACE_IRQFLAGS) + .if \save stmdb sp!, {r0-r3, ip, lr} + .endif bl trace_hardirqs_off + .if \save ldmia sp!, {r0-r3, ip, lr} + .endif #endif .endm - .macro asm_trace_hardirqs_on, cond=al + .macro asm_trace_hardirqs_on, cond=al, save=1 #if defined(CONFIG_TRACE_IRQFLAGS) /* * actually the registers should be pushed and pop'd conditionally, but * after bl the flags are certainly clobbered */ + .if \save stmdb sp!, {r0-r3, ip, lr} + .endif bl\cond trace_hardirqs_on + .if \save ldmia sp!, {r0-r3, ip, lr} + .endif #endif .endm - .macro disable_irq + .macro disable_irq, save=1 disable_irq_notrace - asm_trace_hardirqs_off + asm_trace_hardirqs_off \save .endm .macro enable_irq diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index bd32eded3e50..71e0ffcedf8e 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -136,22 +136,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, /* * thread information flags: - * TIF_SYSCALL_TRACE - syscall trace active - * TIF_SYSCAL_AUDIT - syscall auditing active - * TIF_SIGPENDING - signal pending - * TIF_NEED_RESCHED - rescheduling necessary - * TIF_NOTIFY_RESUME - callback before returning to user * TIF_USEDFPU - FPU was used by this task this quantum (SMP) * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_SIGPENDING 0 -#define TIF_NEED_RESCHED 1 
+#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ -#define TIF_UPROBE 7 -#define TIF_SYSCALL_TRACE 8 -#define TIF_SYSCALL_AUDIT 9 -#define TIF_SYSCALL_TRACEPOINT 10 -#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ +#define TIF_UPROBE 3 /* breakpointed or singlestepping */ +#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ + #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 92828a1dec80..dd3721d1185e 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -24,35 +24,55 @@ .align 5 +#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING)) /* - * This is the fast syscall return path. We do as little as - * possible here, and this includes saving r0 back into the SVC - * stack. + * This is the fast syscall return path. We do as little as possible here, + * such as avoiding writing r0 to the stack. We only use this path if we + * have tracing and context tracking disabled - the overheads from those + * features make this path too inefficient. */ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) - disable_irq @ disable interrupts + disable_irq_notrace @ disable interrupts ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK - bne __sys_trace_return - tst r1, #_TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK bne fast_work_pending - asm_trace_hardirqs_on /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr - ct_user_enter restore_user_regs fast = 1, offset = S_OFF UNWIND(.fnend ) +ENDPROC(ret_fast_syscall) -/* - * Ok, we need to do extra processing, enter the slow path. - */ + /* Ok, we need to do extra processing, enter the slow path. */ fast_work_pending: str r0, [sp, #S_R0+S_OFF]! @ returned r0 -work_pending: + /* fall through to work_pending */ +#else +/* + * The "replacement" ret_fast_syscall for when tracing or context tracking + * is enabled. As we will need to call out to some C functions, we save + * r0 first to avoid needing to save registers around each C function call. + */ +ret_fast_syscall: + UNWIND(.fnstart ) + UNWIND(.cantunwind ) + str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 + disable_irq_notrace @ disable interrupts + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + beq no_work_pending + UNWIND(.fnend ) +ENDPROC(ret_fast_syscall) + + /* Slower path - fall through to work_pending */ +#endif + + tst r1, #_TIF_SYSCALL_WORK + bne __sys_trace_return_nosave +slow_work_pending: mov r0, sp @ 'regs' mov r2, why @ 'syscall' bl do_work_pending @@ -64,16 +84,19 @@ work_pending: /* * "slow" syscall return path. "why" tells us if this was a real syscall. + * IRQs may be enabled here, so always disable them. Note that we use the + * "notrace" version to avoid calling into the tracing code unnecessarily. + * do_work_pending() will update this state if necessary. 
*/ ENTRY(ret_to_user) ret_slow_syscall: - disable_irq @ disable interrupts + disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK - bne work_pending + bne slow_work_pending no_work_pending: - asm_trace_hardirqs_on + asm_trace_hardirqs_on save = 0 /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr @@ -251,6 +274,12 @@ __sys_trace_return: bl syscall_trace_exit b ret_slow_syscall +__sys_trace_return_nosave: + asm_trace_hardirqs_off save=0 + mov r0, sp + bl syscall_trace_exit + b ret_slow_syscall + .align 5 #ifdef CONFIG_ALIGNMENT_TRAP .type __cr_alignment, #object diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 423663e23791..b6cda06b455f 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall) asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { + /* + * The assembly code enters us with IRQs off, but it hasn't + * informed the tracing code of that for efficiency reasons. + * Update the trace code with the current status. + */ + trace_hardirqs_off(); do { if (likely(thread_flags & _TIF_NEED_RESCHED)) { schedule(); -- cgit v1.2.3 From 3fba7e23f754a9a6e639b640fa2a393712ffe1b8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 19 Aug 2015 11:02:28 +0100 Subject: ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore() Provide uaccess_save_and_enable() and uaccess_restore() to permit control of userspace visibility to the kernel, and hook these into the appropriate places in the kernel where we need to access userspace. Signed-off-by: Russell King --- arch/arm/include/asm/futex.h | 19 ++++++++-- arch/arm/include/asm/uaccess.h | 71 +++++++++++++++++++++++++++++++++++--- arch/arm/kernel/armksyms.c | 6 ++-- arch/arm/lib/clear_user.S | 6 ++-- arch/arm/lib/copy_from_user.S | 6 ++-- arch/arm/lib/copy_to_user.S | 6 ++-- arch/arm/lib/uaccess_with_memcpy.c | 4 +-- 7 files changed, 97 insertions(+), 21 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 5eed82809d82..6795368ad023 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -22,8 +22,11 @@ #ifdef CONFIG_SMP #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ +({ \ + unsigned int __ua_flags; \ smp_mb(); \ prefetchw(uaddr); \ + __ua_flags = uaccess_save_and_enable(); \ __asm__ __volatile__( \ "1: ldrex %1, [%3]\n" \ " " insn "\n" \ @@ -34,12 +37,15 @@ __futex_atomic_ex_table("%5") \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "cc", "memory"); \ + uaccess_restore(__ua_flags); \ +}) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + unsigned int __ua_flags; int ret; u32 val; @@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, smp_mb(); /* Prefetching cannot fault */ prefetchw(uaddr); + __ua_flags = uaccess_save_and_enable(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: ldrex %1, [%4]\n" " teq %1, %2\n" @@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "=&r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); + uaccess_restore(__ua_flags); smp_mb(); *uval = val; @@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 
#include #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ +({ \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ __asm__ __volatile__( \ "1: " TUSER(ldr) " %1, [%3]\n" \ " " insn "\n" \ @@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __futex_atomic_ex_table("%5") \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "cc", "memory"); \ + uaccess_restore(__ua_flags); \ +}) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + unsigned int __ua_flags; int ret = 0; u32 val; @@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; preempt_disable(); + __ua_flags = uaccess_save_and_enable(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: " TUSER(ldr) " %1, [%4]\n" " teq %1, %2\n" @@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "+r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); + uaccess_restore(__ua_flags); *uval = val; preempt_enable(); diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 74b17d09ef7a..82880132f941 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -49,6 +49,21 @@ struct exception_table_entry extern int fixup_exception(struct pt_regs *regs); +/* + * These two functions allow hooking accesses to userspace to increase + * system integrity by ensuring that the kernel can not inadvertantly + * perform such accesses (eg, via list poison values) which could then + * be exploited for priviledge escalation. + */ +static inline unsigned int uaccess_save_and_enable(void) +{ + return 0; +} + +static inline void uaccess_restore(unsigned int flags) +{ +} + /* * These two are intentionally not defined anywhere - if the kernel * code generates any references to them, that's a bug. 
@@ -165,6 +180,7 @@ extern int __get_user_64t_4(void *); register typeof(x) __r2 asm("r2"); \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(__p))) { \ case 1: \ if (sizeof((x)) >= 8) \ @@ -192,6 +208,7 @@ extern int __get_user_64t_4(void *); break; \ default: __e = __get_user_bad(); break; \ } \ + uaccess_restore(__ua_flags); \ x = (typeof(*(p))) __r2; \ __e; \ }) @@ -224,6 +241,7 @@ extern int __put_user_8(void *, unsigned long long); register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(__p))) { \ case 1: \ __put_user_x(__r2, __p, __e, __l, 1); \ @@ -239,6 +257,7 @@ extern int __put_user_8(void *, unsigned long long); break; \ default: __e = __put_user_bad(); break; \ } \ + uaccess_restore(__ua_flags); \ __e; \ }) @@ -300,14 +319,17 @@ static inline void set_fs(mm_segment_t fs) do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ + unsigned int __ua_flags; \ __chk_user_ptr(ptr); \ might_fault(); \ + __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(ptr))) { \ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ default: (__gu_val) = __get_user_bad(); \ } \ + uaccess_restore(__ua_flags); \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) @@ -381,9 +403,11 @@ do { \ #define __put_user_err(x, ptr, err) \ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ + unsigned int __ua_flags; \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ might_fault(); \ + __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(ptr))) { \ case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ @@ -391,6 +415,7 @@ do { \ case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ default: __put_user_bad(); \ } \ + uaccess_restore(__ua_flags); \ } while (0) #define __put_user_asm_byte(x, __pu_addr, err) \ @@ -474,11 +499,46 @@ do { \ #ifdef CONFIG_MMU -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); -extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); +extern unsigned long __must_check +arm_copy_from_user(void *to, const void __user *from, unsigned long n); + +static inline unsigned long __must_check +__copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_copy_from_user(to, from, n); + uaccess_restore(__ua_flags); + return n; +} + +extern unsigned long __must_check +arm_copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __must_check +__copy_to_user_std(void __user *to, const void *from, unsigned long n); + +static inline unsigned long __must_check +__copy_to_user(void __user *to, const void *from, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = 
arm_copy_to_user(to, from, n); + uaccess_restore(__ua_flags); + return n; +} + +extern unsigned long __must_check +arm_clear_user(void __user *addr, unsigned long n); +extern unsigned long __must_check +__clear_user_std(void __user *addr, unsigned long n); + +static inline unsigned long __must_check +__clear_user(void __user *addr, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_clear_user(addr, n); + uaccess_restore(__ua_flags); + return n; +} + #else #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) @@ -511,6 +571,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo return n; } +/* These are from lib/ code, and use __get_user() and friends */ extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index a88671cfe1ff..a35d72d30b56 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero); #ifdef CONFIG_MMU EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(__copy_from_user); -EXPORT_SYMBOL(__copy_to_user); -EXPORT_SYMBOL(__clear_user); +EXPORT_SYMBOL(arm_copy_from_user); +EXPORT_SYMBOL(arm_copy_to_user); +EXPORT_SYMBOL(arm_clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index 1710fd7db2d5..970d6c043774 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S @@ -12,14 +12,14 @@ .text -/* Prototype: int __clear_user(void *addr, size_t sz) +/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear * Returns : number of bytes NOT cleared */ ENTRY(__clear_user_std) -WEAK(__clear_user) +WEAK(arm_clear_user) stmfd sp!, {r1, lr} mov r2, #0 cmp r1, #4 @@ -44,7 +44,7 @@ WEAK(__clear_user) USER( strnebt r2, [r0]) mov r0, #0 ldmfd sp!, {r1, pc} -ENDPROC(__clear_user) +ENDPROC(arm_clear_user) ENDPROC(__clear_user_std) .pushsection .text.fixup,"ax" diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 7a235b9952be..1512bebfbf1b 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -17,7 +17,7 @@ /* * Prototype: * - * size_t __copy_from_user(void *to, const void *from, size_t n) + * size_t arm_copy_from_user(void *to, const void *from, size_t n) * * Purpose: * @@ -89,11 +89,11 @@ .text -ENTRY(__copy_from_user) +ENTRY(arm_copy_from_user) #include "copy_template.S" -ENDPROC(__copy_from_user) +ENDPROC(arm_copy_from_user) .pushsection .fixup,"ax" .align 0 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 9648b0675a3e..caf5019d8161 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -17,7 +17,7 @@ /* * Prototype: * - * size_t __copy_to_user(void *to, const void *from, size_t n) + * size_t arm_copy_to_user(void *to, const void *from, size_t n) * * Purpose: * @@ -93,11 +93,11 @@ .text ENTRY(__copy_to_user_std) -WEAK(__copy_to_user) +WEAK(arm_copy_to_user) #include "copy_template.S" -ENDPROC(__copy_to_user) +ENDPROC(arm_copy_to_user) ENDPROC(__copy_to_user_std) .pushsection .text.fixup,"ax" diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 3e58d710013c..77f020e75ccd 
100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -136,7 +136,7 @@ out: } unsigned long -__copy_to_user(void __user *to, const void *from, unsigned long n) +arm_copy_to_user(void __user *to, const void *from, unsigned long n) { /* * This test is stubbed out of the main function above to keep @@ -190,7 +190,7 @@ out: return n; } -unsigned long __clear_user(void __user *addr, unsigned long n) +unsigned long arm_clear_user(void __user *addr, unsigned long n) { /* See rational for this in __copy_to_user() above. */ if (n < 64) -- cgit v1.2.3 From 9205b797dbe519a629267ec8c5766cd973d35063 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 24 Aug 2015 21:49:30 +0100 Subject: ARM: 8421/1: smp: Collapse arch_cpu_idle_dead() into cpu_die() The only caller of cpu_die() on ARM is arch_cpu_idle_dead(), so let's simplify the code by renaming cpu_die() to arch_cpu_idle_dead(). While were here, drop the __ref annotation because __cpuinit is gone nowadays. Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/include/asm/smp.h | 1 - arch/arm/kernel/process.c | 7 ------- arch/arm/kernel/smp.c | 2 +- 3 files changed, 1 insertion(+), 9 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 318ce89eeff7..ef356659b4f4 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -74,7 +74,6 @@ extern void secondary_startup_arm(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); -extern void cpu_die(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index f192a2a41719..358984b7f249 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void) ledtrig_cpu(CPU_LED_IDLE_END); } -#ifdef CONFIG_HOTPLUG_CPU -void arch_cpu_idle_dead(void) -{ - cpu_die(); -} -#endif - void __show_regs(struct pt_regs *regs) { unsigned long flags; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3cd846f48eaf..0aad7cdf2e58 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -266,7 +266,7 @@ void __cpu_die(unsigned int cpu) * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. */ -void __ref cpu_die(void) +void arch_cpu_idle_dead(void) { unsigned int cpu = smp_processor_id(); -- cgit v1.2.3 From 2190fed67ba6f3e8129513929f2395843645e928 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 20 Aug 2015 10:32:02 +0100 Subject: ARM: entry: provide uaccess assembly macro hooks Provide hooks into the kernel entry and exit paths to permit control of userspace visibility to the kernel. The intended use is: - on entry to kernel from user, uaccess_disable will be called to disable userspace visibility - on exit from kernel to user, uaccess_enable will be called to enable userspace visibility - on entry from a kernel exception, uaccess_save_and_disable will be called to save the current userspace visibility setting, and disable access - on exit from a kernel exception, uaccess_restore will be called to restore the userspace visibility as it was before the exception occurred. These hooks allows us to keep userspace visibility disabled for the vast majority of the kernel, except for localised regions where we want to explicitly access userspace. 
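As an illustrative C sketch only (not part of this patch; the struct and helper name below are invented), the bracketing these hooks rely on pairs with the uaccess_save_and_enable()/uaccess_restore() wrappers from the earlier patch in this series roughly as follows:

	/*
	 * Hypothetical example.  struct example_req and example_get_user_req()
	 * are made up for illustration; arm_copy_from_user() and the
	 * save/restore helpers are the ones introduced earlier in this series.
	 */
	struct example_req {
		u32 op;
		u32 arg;
	};

	static unsigned long example_get_user_req(struct example_req *dst,
				const struct example_req __user *src)
	{
		unsigned int ua_flags;
		unsigned long ret;

		ua_flags = uaccess_save_and_enable();	/* briefly open the user window */
		ret = arm_copy_from_user(dst, src, sizeof(*dst));
		uaccess_restore(ua_flags);		/* close it again */

		return ret;	/* bytes NOT copied, as with copy_from_user() */
	}

Everywhere else, the entry/exit hooks added here are meant to keep userspace inaccessible once a real implementation (such as the software PAN patch later in this series) is plugged in.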
Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 17 +++++++++++++++++ arch/arm/kernel/entry-armv.S | 30 ++++++++++++++++++++++-------- arch/arm/kernel/entry-common.S | 2 ++ arch/arm/kernel/entry-header.S | 3 +++ arch/arm/mm/abort-ev4.S | 1 + arch/arm/mm/abort-ev5t.S | 1 + arch/arm/mm/abort-ev5tj.S | 1 + arch/arm/mm/abort-ev6.S | 7 ++++--- arch/arm/mm/abort-ev7.S | 1 + arch/arm/mm/abort-lv4t.S | 2 ++ arch/arm/mm/abort-macro.S | 1 + 11 files changed, 55 insertions(+), 11 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 4abe57279c66..a91177043467 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -445,6 +445,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro uaccess_disable, tmp, isb=1 + .endm + + .macro uaccess_enable, tmp, isb=1 + .endm + + .macro uaccess_save, tmp + .endm + + .macro uaccess_restore + .endm + + .macro uaccess_save_and_disable, tmp + uaccess_save \tmp + uaccess_disable \tmp + .endm + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index d19adcf6c580..61f00a3f3047 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -149,10 +149,10 @@ ENDPROC(__und_invalid) #define SPFIX(code...) #endif - .macro svc_entry, stack_hole=0, trace=1 + .macro svc_entry, stack_hole=0, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.save {r0 - pc} ) - sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) + sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) #ifdef CONFIG_THUMB2_KERNEL SPFIX( str r0, [sp] ) @ temporarily saved SPFIX( mov r0, sp ) @@ -167,7 +167,7 @@ ENDPROC(__und_invalid) ldmia r0, {r3 - r5} add r7, sp, #S_SP - 4 @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" - add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) + add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) SPFIX( addeq r2, r2, #4 ) str r3, [sp, #-4]! 
@ save the "real" r0 copied @ from the exception stack @@ -185,6 +185,11 @@ ENDPROC(__und_invalid) @ stmia r7, {r2 - r6} + uaccess_save r0 + .if \uaccess + uaccess_disable r0 + .endif + .if \trace #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off @@ -194,7 +199,7 @@ ENDPROC(__und_invalid) .align 5 __dabt_svc: - svc_entry + svc_entry uaccess=0 mov r2, sp dabt_helper THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR @@ -368,7 +373,7 @@ ENDPROC(__fiq_abt) #error "sizeof(struct pt_regs) must be a multiple of 8" #endif - .macro usr_entry, trace=1 + .macro usr_entry, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.cantunwind ) @ don't unwind the user space sub sp, sp, #S_FRAME_SIZE @@ -400,6 +405,10 @@ ENDPROC(__fiq_abt) ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) + .if \uaccess + uaccess_disable ip + .endif + @ Enable the alignment trap while in kernel mode ATRAP( teq r8, r7) ATRAP( mcrne p15, 0, r8, c1, c0, 0) @@ -435,7 +444,7 @@ ENDPROC(__fiq_abt) .align 5 __dabt_usr: - usr_entry + usr_entry uaccess=0 kuser_cmpxchg_check mov r2, sp dabt_helper @@ -458,7 +467,7 @@ ENDPROC(__irq_usr) .align 5 __und_usr: - usr_entry + usr_entry uaccess=0 mov r2, r4 mov r3, r5 @@ -484,6 +493,8 @@ __und_usr: 1: ldrt r0, [r4] ARM_BE8(rev r0, r0) @ little endian instruction + uaccess_disable ip + @ r0 = 32-bit ARM instruction which caused the exception @ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r4 = PC value for the faulting instruction @@ -518,9 +529,10 @@ __und_usr_thumb: 2: ldrht r5, [r4] ARM_BE8(rev16 r5, r5) @ little endian instruction cmp r5, #0xe800 @ 32bit instruction if xx != 0 - blo __und_usr_fault_16 @ 16bit undefined instruction + blo __und_usr_fault_16_pan @ 16bit undefined instruction 3: ldrht r0, [r2] ARM_BE8(rev16 r0, r0) @ little endian instruction + uaccess_disable ip add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update orr r0, r0, r5, lsl #16 @@ -715,6 +727,8 @@ ENDPROC(no_fp) __und_usr_fault_32: mov r1, #4 b 1f +__und_usr_fault_16_pan: + uaccess_disable ip __und_usr_fault_16: mov r1, #2 1: mov r0, sp diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 92828a1dec80..189154980703 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -173,6 +173,8 @@ ENTRY(vector_swi) USER( ldr scno, [lr, #-4] ) @ get SWI instruction #endif + uaccess_disable tbl + adr tbl, sys_call_table @ load syscall table pointer #if defined(CONFIG_OABI_COMPAT) diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index d47b5161b029..0d22ad206d52 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -215,6 +215,7 @@ blne trace_hardirqs_off #endif .endif + uaccess_restore #ifndef CONFIG_THUMB2_KERNEL @ ARM mode SVC restore @@ -258,6 +259,7 @@ @ on the stack remains correct). 
@ .macro svc_exit_via_fiq + uaccess_restore #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore mov r0, sp @@ -287,6 +289,7 @@ .macro restore_user_regs, fast = 0, offset = 0 + uaccess_enable r1, isb=0 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore mov r2, sp diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 54473cd4aba9..b3b31e30cadd 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S @@ -19,6 +19,7 @@ ENTRY(v4_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR ldr r3, [r4] @ read aborted ARM instruction + uaccess_disable ip @ disable userspace access bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index c913031b79cc..a6a381a6caa5 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -21,6 +21,7 @@ ENTRY(v5t_early_abort) mrc p15, 0, r0, c6, c0, 0 @ get FAR do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction + uaccess_disable ip @ disable user access bic r1, r1, #1 << 11 @ clear bits 11 of FSR teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? beq do_DataAbort @ yes diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index 1b80d71adb0f..00ab011bef58 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -24,6 +24,7 @@ ENTRY(v5tj_early_abort) bne do_DataAbort do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction + uaccess_disable ip @ disable userspace access teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? beq do_DataAbort @ yes tst r3, #1 << 20 @ L = 0 -> write diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 113704f30e9f..8801a15aa105 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -26,17 +26,18 @@ ENTRY(v6_early_abort) ldr ip, =0x4107b36 mrc p15, 0, r3, c0, c0, 0 @ get processor id teq ip, r3, lsr #4 @ r0 ARM1136? - bne do_DataAbort + bne 1f tst r5, #PSR_J_BIT @ Java? tsteq r5, #PSR_T_BIT @ Thumb? - bne do_DataAbort + bne 1f bic r1, r1, #1 << 11 @ clear bit 11 of FSR ldr r3, [r4] @ read aborted ARM instruction ARM_BE8(rev r3, r3) teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? - beq do_DataAbort @ yes + beq 1f @ yes tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. #endif +1: uaccess_disable ip @ disable userspace access b do_DataAbort diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 4812ad054214..e8d0e08c227f 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -15,6 +15,7 @@ ENTRY(v7_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR + uaccess_disable ip @ disable userspace access /* * V6 code adjusts the returned DFSR. diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index f3982580c273..6d8e8e3365d1 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -26,6 +26,7 @@ ENTRY(v4t_late_abort) #endif bne .data_thumb_abort ldr r8, [r4] @ read arm instruction + uaccess_disable ip @ disable userspace access tst r8, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 @@ -155,6 +156,7 @@ ENTRY(v4t_late_abort) .data_thumb_abort: ldrh r8, [r4] @ read instruction + uaccess_disable ip @ disable userspace access tst r8, #1 << 11 @ L = 1 -> write? 
orreq r1, r1, #1 << 8 @ yes and r7, r8, #15 << 12 diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index 50d6c0a900b1..4509bee4e081 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -13,6 +13,7 @@ tst \psr, #PSR_T_BIT beq not_thumb ldrh \tmp, [\pc] @ Read aborted Thumb instruction + uaccess_disable ip @ disable userspace access and \tmp, \tmp, # 0xfe00 @ Mask opcode field cmp \tmp, # 0x5600 @ Is it ldrsb? orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes -- cgit v1.2.3 From a5e090acbf545c0a3b04080f8a488b17ec41fe02 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 19 Aug 2015 20:40:41 +0100 Subject: ARM: software-based priviledged-no-access support Provide a software-based implementation of the priviledged no access support found in ARMv8.1. Userspace pages are mapped using a different domain number from the kernel and IO mappings. If we switch the user domain to "no access" when we enter the kernel, we can prevent the kernel from touching userspace. However, the kernel needs to be able to access userspace via the various user accessor functions. With the wrapping in the previous patch, we can temporarily enable access when the kernel needs user access, and re-disable it afterwards. This allows us to trap non-intended accesses to userspace, eg, caused by an inadvertent dereference of the LIST_POISON* values, which, with appropriate user mappings setup, can be made to succeed. This in turn can allow use-after-free bugs to be further exploited than would otherwise be possible. Signed-off-by: Russell King --- arch/arm/Kconfig | 15 +++++++++++++++ arch/arm/include/asm/assembler.h | 30 ++++++++++++++++++++++++++++++ arch/arm/include/asm/domain.h | 21 +++++++++++++++++++-- arch/arm/include/asm/uaccess.h | 14 ++++++++++++++ arch/arm/kernel/process.c | 36 ++++++++++++++++++++++++++++++------ arch/arm/kernel/swp_emulate.c | 3 +++ arch/arm/lib/csumpartialcopyuser.S | 14 ++++++++++++++ 7 files changed, 125 insertions(+), 8 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a750c1425c3a..e15d5ed4d5f1 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1694,6 +1694,21 @@ config HIGHPTE bool "Allocate 2nd-level pagetables from highmem" depends on HIGHMEM +config CPU_SW_DOMAIN_PAN + bool "Enable use of CPU domains to implement privileged no-access" + depends on MMU && !ARM_LPAE + default y + help + Increase kernel security by ensuring that normal kernel accesses + are unable to access userspace addresses. This can help prevent + use-after-free bugs becoming an exploitable privilege escalation + by ensuring that magic values (such as LIST_POISON) will always + fault when dereferenced. + + CPUs with low-vector mappings use a best-efforts implementation. + Their lower 1MB needs to remain accessible for the vectors, but + the remainder of userspace will become appropriately inaccessible. + config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" depends on PERF_EVENTS diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index a91177043467..3ae0eda5e64f 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -446,15 +446,45 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .endm .macro uaccess_disable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. 
+ */ + mov \tmp, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register + .if \isb + instr_sync + .endif +#endif .endm .macro uaccess_enable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_ENABLE + mcr p15, 0, \tmp, c3, c0, 0 + .if \isb + instr_sync + .endif +#endif .endm .macro uaccess_save, tmp +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + mrc p15, 0, \tmp, c3, c0, 0 + str \tmp, [sp, #S_FRAME_SIZE] +#endif .endm .macro uaccess_restore +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + ldr r0, [sp, #S_FRAME_SIZE] + mcr p15, 0, r0, c3, c0, 0 +#endif .endm .macro uaccess_save_and_disable, tmp diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 2be929549938..e878129f2fee 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -57,11 +57,29 @@ #define domain_mask(dom) ((3) << (2 * (dom))) #define domain_val(dom,type) ((type) << (2 * (dom))) +#ifdef CONFIG_CPU_SW_DOMAIN_PAN +#define DACR_INIT \ + (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) +#else #define DACR_INIT \ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) +#endif + +#define __DACR_DEFAULT \ + domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT) + +#define DACR_UACCESS_DISABLE \ + (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) +#define DACR_UACCESS_ENABLE \ + (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT)) #ifndef __ASSEMBLY__ @@ -76,7 +94,6 @@ static inline unsigned int get_domain(void) return domain; } -#ifdef CONFIG_CPU_USE_DOMAINS static inline void set_domain(unsigned val) { asm volatile( @@ -85,6 +102,7 @@ static inline void set_domain(unsigned val) isb(); } +#ifdef CONFIG_CPU_USE_DOMAINS #define modify_domain(dom,type) \ do { \ unsigned int domain = get_domain(); \ @@ -94,7 +112,6 @@ static inline void set_domain(unsigned val) } while (0) #else -static inline void set_domain(unsigned val) { } static inline void modify_domain(unsigned dom, unsigned type) { } #endif diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 82880132f941..01bae13b2cea 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -57,11 +57,25 @@ extern int fixup_exception(struct pt_regs *regs); */ static inline unsigned int uaccess_save_and_enable(void) { +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + unsigned int old_domain = get_domain(); + + /* Set the current domain access to permit user accesses */ + set_domain((old_domain & ~domain_mask(DOMAIN_USER)) | + domain_val(DOMAIN_USER, DOMAIN_CLIENT)); + + return old_domain; +#else return 0; +#endif } static inline void uaccess_restore(unsigned int flags) { +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* Restore the user access mask */ + set_domain(flags); +#endif } /* diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index e722f9b3c9b1..3f18098dfd08 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs) buf[4] = '\0'; #ifndef CONFIG_CPU_V7M - printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", - buf, interrupts_enabled(regs) ? 
"n" : "ff", - fast_interrupts_enabled(regs) ? "n" : "ff", - processor_modes[processor_mode(regs)], - isa_modes[isa_mode(regs)], - get_fs() == get_ds() ? "kernel" : "user"); + { + unsigned int domain = get_domain(); + const char *segment; + +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Get the domain register for the parent context. In user + * mode, we don't save the DACR, so lets use what it should + * be. For other modes, we place it after the pt_regs struct. + */ + if (user_mode(regs)) + domain = DACR_UACCESS_ENABLE; + else + domain = *(unsigned int *)(regs + 1); +#endif + + if ((domain & domain_mask(DOMAIN_USER)) == + domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) + segment = "none"; + else if (get_fs() == get_ds()) + segment = "kernel"; + else + segment = "user"; + + printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", + buf, interrupts_enabled(regs) ? "n" : "ff", + fast_interrupts_enabled(regs) ? "n" : "ff", + processor_modes[processor_mode(regs)], + isa_modes[isa_mode(regs)], segment); + } #else printk("xPSR: %08lx\n", regs->ARM_cpsr); #endif diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 1361756782c7..5b26e7efa9ea 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data, while (1) { unsigned long temp; + unsigned int __ua_flags; + __ua_flags = uaccess_save_and_enable(); if (type == TYPE_SWPB) __user_swpb_asm(*data, address, res, temp); else __user_swp_asm(*data, address, res, temp); + uaccess_restore(__ua_flags); if (likely(res != -EAGAIN) || signal_pending(current)) break; diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 1d0957e61f89..1712f132b80d 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -17,6 +17,19 @@ .text +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + .macro save_regs + mrc p15, 0, ip, c3, c0, 0 + stmfd sp!, {r1, r2, r4 - r8, ip, lr} + uaccess_enable ip + .endm + + .macro load_regs + ldmfd sp!, {r1, r2, r4 - r8, ip, lr} + mcr p15, 0, ip, c3, c0, 0 + ret lr + .endm +#else .macro save_regs stmfd sp!, {r1, r2, r4 - r8, lr} .endm @@ -24,6 +37,7 @@ .macro load_regs ldmfd sp!, {r1, r2, r4 - r8, pc} .endm +#endif .macro load1b, reg1 ldrusr \reg1, r0, 1 -- cgit v1.2.3 From 012dcef3f058385268630c0003e9b7f8dcafbeb4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Aug 2015 17:41:01 -0400 Subject: mm: move __phys_to_pfn and __pfn_to_phys to asm/generic/memory_model.h Three architectures already define these, and we'll need them genericly soon. 
Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- arch/arm/include/asm/memory.h | 6 ------ arch/arm64/include/asm/memory.h | 6 ------ arch/unicore32/include/asm/memory.h | 6 ------ include/asm-generic/memory_model.h | 6 ++++++ 4 files changed, 6 insertions(+), 18 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index b7f6fb462ea0..98d58bb04ac5 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -118,12 +118,6 @@ #define DTCM_OFFSET UL(0xfffe8000) #endif -/* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - /* * Convert a page to/from a physical address */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f800d45ea226..d808bb688751 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -80,12 +80,6 @@ #define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET)) #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET)) -/* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - /* * Convert a page to/from a physical address */ diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h index debafc40200a..3bb0a29fd2d7 100644 --- a/arch/unicore32/include/asm/memory.h +++ b/arch/unicore32/include/asm/memory.h @@ -60,12 +60,6 @@ #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) #endif -/* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) -#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) - /* * Convert a page to/from a physical address */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 14909b0b9cae..f20f407ce45d 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -69,6 +69,12 @@ }) #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ +/* + * Convert a physical address to a Page Frame Number and back + */ +#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) + #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page -- cgit v1.2.3 From 32e09870eedfb501a6cb5729d8c23f44f8a7cbdd Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Fri, 7 Aug 2015 17:34:35 +0100 Subject: xen: Make clear that swiotlb and biomerge are dealing with DMA address The swiotlb is required when programming a DMA address on ARM when a device is not protected by an IOMMU. In this case, the DMA address should always be equal to the machine address. For DOM0 memory, Xen ensure it by have an identity mapping between the guest address and host address. However, when mapping a foreign grant reference, the 1:1 model doesn't work. For ARM guest, most of the callers of pfn_to_mfn expects to get a GFN (Guest Frame Number), i.e a PFN (Page Frame Number) from the Linux point of view given that all ARM guest are auto-translated. Even though the name pfn_to_mfn is misleading, we need to ensure that those caller get a GFN and not by mistake a MFN. In pratical, I haven't seen error related to this but we should fix it for the sake of correctness. 
In order to fix the implementation of pfn_to_mfn on ARM in a follow-up patch, we have to introduce new helpers to return the DMA from a PFN and the invert. On x86, the new helpers will be an alias of pfn_to_mfn and mfn_to_pfn. The helpers will be used in swiotlb and xen_biovec_phys_mergeable. This is necessary in the latter because we have to ensure that the biovec code will not try to merge a biovec using foreign page and another using Linux memory. Lastly, the helper mfn_to_local_pfn has been renamed to bfn_to_local_pfn given that the only usage was in swiotlb. Signed-off-by: Julien Grall Reviewed-by: Stefano Stabellini Signed-off-by: David Vrabel --- arch/arm/include/asm/xen/page.h | 23 +++++++++++++++++++++-- arch/arm/xen/mm.c | 4 ++-- arch/x86/include/asm/xen/page.h | 8 ++++++-- drivers/xen/biomerge.c | 6 +++--- drivers/xen/swiotlb-xen.c | 16 ++++++++-------- 5 files changed, 40 insertions(+), 17 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 98b1084f8282..5f76a9e7ef1b 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -52,7 +52,26 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) return mfn; } -#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn) +/* Pseudo-physical <-> BUS conversion */ +static inline unsigned long pfn_to_bfn(unsigned long pfn) +{ + unsigned long mfn; + + if (phys_to_mach.rb_node != NULL) { + mfn = __pfn_to_mfn(pfn); + if (mfn != INVALID_P2M_ENTRY) + return mfn; + } + + return pfn; +} + +static inline unsigned long bfn_to_pfn(unsigned long bfn) +{ + return bfn; +} + +#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) /* VIRT <-> MACHINE conversion */ #define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) @@ -96,7 +115,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) bool xen_arch_need_swiotlb(struct device *dev, unsigned long pfn, - unsigned long mfn); + unsigned long bfn); unsigned long xen_get_swiotlb_free_pages(unsigned int order); #endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 03e75fef15b8..6dd911d1f0ac 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -139,9 +139,9 @@ void __xen_dma_sync_single_for_device(struct device *hwdev, bool xen_arch_need_swiotlb(struct device *dev, unsigned long pfn, - unsigned long mfn) + unsigned long bfn) { - return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev)); + return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev)); } int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index a3804fbe1f36..724bffbde54d 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -176,6 +176,10 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine) return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); } +/* Pseudo-physical <-> Bus conversion */ +#define pfn_to_bfn(pfn) pfn_to_mfn(pfn) +#define bfn_to_pfn(bfn) mfn_to_pfn(bfn) + /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry @@ -196,7 +200,7 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine) * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
*/ -static inline unsigned long mfn_to_local_pfn(unsigned long mfn) +static inline unsigned long bfn_to_local_pfn(unsigned long mfn) { unsigned long pfn; @@ -262,7 +266,7 @@ void make_lowmem_page_readwrite(void *vaddr); static inline bool xen_arch_need_swiotlb(struct device *dev, unsigned long pfn, - unsigned long mfn) + unsigned long bfn) { return false; } diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index 0edb91c0de6b..8ae2fc90e1ea 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c @@ -6,10 +6,10 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, const struct bio_vec *vec2) { - unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page)); - unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page)); + unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); + unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && - ((mfn1 == mfn2) || ((mfn1+1) == mfn2)); + ((bfn1 == bfn2) || ((bfn1+1) == bfn2)); } EXPORT_SYMBOL(xen_biovec_phys_mergeable); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 4c549323c605..d757a3e610c6 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -82,8 +82,8 @@ static u64 start_dma_addr; */ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr) { - unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr)); - dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT; + unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr)); + dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT; dma |= paddr & ~PAGE_MASK; @@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr) static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) { - unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr)); + unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr)); dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; phys_addr_t paddr = dma; @@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn, unsigned int offset, size_t length) { - unsigned long next_mfn; + unsigned long next_bfn; int i; int nr_pages; - next_mfn = pfn_to_mfn(pfn); + next_bfn = pfn_to_bfn(pfn); nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT; for (i = 1; i < nr_pages; i++) { - if (pfn_to_mfn(++pfn) != ++next_mfn) + if (pfn_to_bfn(++pfn) != ++next_bfn) return 0; } return 1; @@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) { - unsigned long mfn = PFN_DOWN(dma_addr); - unsigned long pfn = mfn_to_local_pfn(mfn); + unsigned long bfn = PFN_DOWN(dma_addr); + unsigned long pfn = bfn_to_local_pfn(bfn); phys_addr_t paddr; /* If the address is outside our domain, it CAN -- cgit v1.2.3 From 5192b35de47e47a0f736fe30da199f32030680e7 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Fri, 7 Aug 2015 17:34:36 +0100 Subject: arm/xen: implement correctly pfn_to_mfn After the commit introducing convertion between DMA and guest addresses, all the callers of pfn_to_mfn are expecting to get a GFN (Guest Frame Number). On ARM, all the guests are auto-translated so the GFN is equal to the Linux PFN (Pseudo-physical Frame Number). The current implementation may return an MFN if the caller is passing a PFN associated to a mapped foreign grant. In pratice, I haven't seen the problem on running guest but we should fix it for the sake of correctness. Correct the implementation by always returning the pfn passed in parameter. A follow-up patch will take care to rename pfn_to_mfn to a suitable name. 
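Illustration only (the surrounding variables are hypothetical; gnttab_grant_foreign_access_ref() is the existing grant-table call already used elsewhere in this series): with the identity implementation, a typical ARM caller now hands a guest frame number straight through, e.g.:

	/*
	 * Hypothetical caller.  On an auto-translated ARM guest, pfn_to_mfn()
	 * returns the PFN unchanged, so the frame granted is the GFN that
	 * callers expect.
	 */
	unsigned long gfn = pfn_to_mfn(page_to_pfn(page));	/* identity on ARM */

	gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0 /* read-write */);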
Signed-off-by: Julien Grall Reviewed-by: Stefano Stabellini Signed-off-by: David Vrabel --- arch/arm/include/asm/xen/page.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 5f76a9e7ef1b..911d62b4df26 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -36,14 +36,6 @@ extern struct rb_root phys_to_mach; static inline unsigned long pfn_to_mfn(unsigned long pfn) { - unsigned long mfn; - - if (phys_to_mach.rb_node != NULL) { - mfn = __pfn_to_mfn(pfn); - if (mfn != INVALID_P2M_ENTRY) - return mfn; - } - return pfn; } -- cgit v1.2.3 From 0df4f266b3af90442bbeb5e685a84a80745beba0 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Fri, 7 Aug 2015 17:34:37 +0100 Subject: xen: Use correctly the Xen memory terminologies Based on include/xen/mm.h [1], Linux is mistakenly using MFN when GFN is meant, I suspect this is because the first support for Xen was for PV. This resulted in some misimplementation of helpers on ARM and confused developers about the expected behavior. For instance, with pfn_to_mfn, we expect to get an MFN based on the name. Although, if we look at the implementation on x86, it's returning a GFN. For clarity and avoid new confusion, replace any reference to mfn with gfn in any helpers used by PV drivers. The x86 code will still keep some reference of pfn_to_mfn which may be used by all kind of guests No changes as been made in the hypercall field, even though they may be invalid, in order to keep the same as the defintion in xen repo. Note that page_to_mfn has been renamed to xen_page_to_gfn to avoid a name to close to the KVM function gfn_to_page. Take also the opportunity to simplify simple construction such as pfn_to_mfn(page_to_pfn(page)) into xen_page_to_gfn. More complex clean up will come in follow-up patches. 
[1] http://xenbits.xen.org/gitweb/?p=xen.git;a=commitdiff;h=e758ed14f390342513405dd766e874934573e6cb Signed-off-by: Julien Grall Reviewed-by: Stefano Stabellini Acked-by: Dmitry Torokhov Acked-by: Wei Liu Signed-off-by: David Vrabel --- arch/arm/include/asm/xen/page.h | 13 ++++++------ arch/x86/include/asm/xen/page.h | 35 +++++++++++++++++++++++++++++++-- arch/x86/xen/smp.c | 2 +- drivers/block/xen-blkfront.c | 6 +++--- drivers/input/misc/xen-kbdfront.c | 4 ++-- drivers/net/xen-netback/netback.c | 4 ++-- drivers/net/xen-netfront.c | 12 ++++++----- drivers/scsi/xen-scsifront.c | 10 +++++----- drivers/tty/hvc/hvc_xen.c | 5 +++-- drivers/video/fbdev/xen-fbfront.c | 4 ++-- drivers/xen/balloon.c | 2 +- drivers/xen/events/events_base.c | 2 +- drivers/xen/events/events_fifo.c | 4 ++-- drivers/xen/gntalloc.c | 3 ++- drivers/xen/manage.c | 2 +- drivers/xen/tmem.c | 4 ++-- drivers/xen/xenbus/xenbus_client.c | 2 +- drivers/xen/xenbus/xenbus_dev_backend.c | 2 +- drivers/xen/xenbus/xenbus_probe.c | 8 +++----- include/xen/page.h | 4 ++-- 20 files changed, 81 insertions(+), 47 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 911d62b4df26..127956353b00 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -34,14 +34,15 @@ typedef struct xpaddr { unsigned long __pfn_to_mfn(unsigned long pfn); extern struct rb_root phys_to_mach; -static inline unsigned long pfn_to_mfn(unsigned long pfn) +/* Pseudo-physical <-> Guest conversion */ +static inline unsigned long pfn_to_gfn(unsigned long pfn) { return pfn; } -static inline unsigned long mfn_to_pfn(unsigned long mfn) +static inline unsigned long gfn_to_pfn(unsigned long gfn) { - return mfn; + return gfn; } /* Pseudo-physical <-> BUS conversion */ @@ -65,9 +66,9 @@ static inline unsigned long bfn_to_pfn(unsigned long bfn) #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) -/* VIRT <-> MACHINE conversion */ -#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) -#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) +/* VIRT <-> GUEST conversion */ +#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v))) +#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << PAGE_SHIFT)) /* Only used in PV code. But ARM guests are always HVM. */ static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 724bffbde54d..0679e11d2cf7 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -101,6 +101,11 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) { unsigned long mfn; + /* + * Some x86 code are still using pfn_to_mfn instead of + * pfn_to_mfn. This will have to be removed when we figured + * out which call. + */ if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; @@ -147,6 +152,11 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; + /* + * Some x86 code are still using mfn_to_pfn instead of + * gfn_to_pfn. This will have to be removed when we figure + * out which call. 
+ */ if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; @@ -176,9 +186,26 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine) return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); } +/* Pseudo-physical <-> Guest conversion */ +static inline unsigned long pfn_to_gfn(unsigned long pfn) +{ + if (xen_feature(XENFEAT_auto_translated_physmap)) + return pfn; + else + return pfn_to_mfn(pfn); +} + +static inline unsigned long gfn_to_pfn(unsigned long gfn) +{ + if (xen_feature(XENFEAT_auto_translated_physmap)) + return gfn; + else + return mfn_to_pfn(gfn); +} + /* Pseudo-physical <-> Bus conversion */ -#define pfn_to_bfn(pfn) pfn_to_mfn(pfn) -#define bfn_to_pfn(bfn) mfn_to_pfn(bfn) +#define pfn_to_bfn(pfn) pfn_to_gfn(pfn) +#define bfn_to_pfn(bfn) gfn_to_pfn(bfn) /* * We detect special mappings in one of two ways: @@ -219,6 +246,10 @@ static inline unsigned long bfn_to_local_pfn(unsigned long mfn) #define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) +/* VIRT <-> GUEST conversion */ +#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v))) +#define gfn_to_virt(g) (__va(gfn_to_pfn(g) << PAGE_SHIFT)) + static inline unsigned long pte_mfn(pte_t pte) { return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 2a9ff7342791..3f4ebf0261f2 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -453,7 +453,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) } #endif ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); - ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); + ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) BUG(); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 5dd591d6c859..432e1058721f 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -250,7 +250,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, struct blkfront_info *info) { struct grant *gnt_list_entry; - unsigned long buffer_mfn; + unsigned long buffer_gfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, @@ -269,10 +269,10 @@ static struct grant *get_grant(grant_ref_t *gref_head, BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } - buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); + buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, - buffer_mfn, 0); + buffer_gfn, 0); return gnt_list_entry; } diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 95599e478e19..23d0549539d4 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -232,7 +232,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev, struct xenbus_transaction xbt; ret = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_mfn(info->page), 0); + virt_to_gfn(info->page), 0); if (ret < 0) return ret; info->gref = ret; @@ -255,7 +255,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev, goto error_irqh; } ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", - virt_to_mfn(info->page)); + virt_to_gfn(info->page)); if (ret) goto error_xenbus; ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 3f44b522b831..7c64c74711e8 100644 --- 
a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -314,7 +314,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb } else { copy_gop->source.domid = DOMID_SELF; copy_gop->source.u.gmfn = - virt_to_mfn(page_address(page)); + virt_to_gfn(page_address(page)); } copy_gop->source.offset = offset; @@ -1296,7 +1296,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; queue->tx_copy_ops[*copy_ops].dest.u.gmfn = - virt_to_mfn(skb->data); + virt_to_gfn(skb->data); queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; queue->tx_copy_ops[*copy_ops].dest.offset = offset_in_page(skb->data); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f948c46d5132..47f791e60851 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) struct sk_buff *skb; unsigned short id; grant_ref_t ref; - unsigned long pfn; + unsigned long gfn; struct xen_netif_rx_request *req; skb = xennet_alloc_one_rx_buffer(queue); @@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) BUG_ON((signed short)ref < 0); queue->grant_rx_ref[id] = ref; - pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); + gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); req = RING_GET_REQUEST(&queue->rx, req_prod); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, - pfn_to_mfn(pfn), + gfn, 0); req->id = id; @@ -430,8 +430,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq( ref = gnttab_claim_grant_reference(&queue->gref_tx_head); BUG_ON((signed short)ref < 0); - gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, - page_to_mfn(page), GNTMAP_readonly); + gnttab_grant_foreign_access_ref(ref, + queue->info->xbdev->otherend_id, + xen_page_to_gfn(page), + GNTMAP_readonly); queue->tx_skbs[id].skb = skb; queue->grant_tx_page[id] = page; diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index fad22caf0eff..9dc8687bf048 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -377,7 +377,6 @@ static int map_data_for_request(struct vscsifrnt_info *info, unsigned int data_len = scsi_bufflen(sc); unsigned int data_grants = 0, seg_grants = 0; struct scatterlist *sg; - unsigned long mfn; struct scsiif_request_segment *seg; ring_req->nr_segments = 0; @@ -420,9 +419,9 @@ static int map_data_for_request(struct vscsifrnt_info *info, ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); - mfn = pfn_to_mfn(page_to_pfn(page)); gnttab_grant_foreign_access_ref(ref, - info->dev->otherend_id, mfn, 1); + info->dev->otherend_id, + xen_page_to_gfn(page), 1); shadow->gref[ref_cnt] = ref; ring_req->seg[ref_cnt].gref = ref; ring_req->seg[ref_cnt].offset = (uint16_t)off; @@ -454,9 +453,10 @@ static int map_data_for_request(struct vscsifrnt_info *info, ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); - mfn = pfn_to_mfn(page_to_pfn(page)); gnttab_grant_foreign_access_ref(ref, - info->dev->otherend_id, mfn, grant_ro); + info->dev->otherend_id, + xen_page_to_gfn(page), + grant_ro); shadow->gref[ref_cnt] = ref; seg->gref = ref; diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index a9d837f83ce8..efe5124a368d 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -265,7 +265,8 @@ static int 
xen_pv_console_init(void) return 0; } info->evtchn = xen_start_info->console.domU.evtchn; - info->intf = mfn_to_virt(xen_start_info->console.domU.mfn); + /* GFN == MFN for PV guest */ + info->intf = gfn_to_virt(xen_start_info->console.domU.mfn); info->vtermno = HVC_COOKIE; spin_lock(&xencons_lock); @@ -390,7 +391,7 @@ static int xencons_connect_backend(struct xenbus_device *dev, if (IS_ERR(info->hvc)) return PTR_ERR(info->hvc); if (xen_pv_domain()) - mfn = virt_to_mfn(info->intf); + mfn = virt_to_gfn(info->intf); else mfn = __pa(info->intf) >> PAGE_SHIFT; ret = gnttab_alloc_grant_references(1, &gref_head); diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index 09dc44736c1a..25e3cce0c35f 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -539,7 +539,7 @@ static int xenfb_remove(struct xenbus_device *dev) static unsigned long vmalloc_to_mfn(void *address) { - return pfn_to_mfn(vmalloc_to_pfn(address)); + return pfn_to_gfn(vmalloc_to_pfn(address)); } static void xenfb_init_shared_page(struct xenfb_info *info, @@ -586,7 +586,7 @@ static int xenfb_connect_backend(struct xenbus_device *dev, goto unbind_irq; } ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", - virt_to_mfn(info->page)); + virt_to_gfn(info->page)); if (ret) goto error_xenbus; ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 1fa633b2d556..c79329fcfa78 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) /* Update direct mapping, invalidate P2M, and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = frame_list[i]; - frame_list[i] = pfn_to_mfn(pfn); + frame_list[i] = pfn_to_gfn(pfn); page = pfn_to_page(pfn); #ifdef CONFIG_XEN_HAVE_PVMMU diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index ed620e5857a1..c49bb7a5be8f 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -1688,7 +1688,7 @@ void __init xen_init_IRQ(void) struct physdev_pirq_eoi_gmfn eoi_gmfn; pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); - eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map); + eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map); rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); /* TODO: No PVH support for PIRQ EOI */ if (rc != 0) { diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index ed673e1acd61..1d4baf56c36b 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -111,7 +111,7 @@ static int init_control_block(int cpu, for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++) q->head[i] = 0; - init_control.control_gfn = virt_to_mfn(control_block); + init_control.control_gfn = virt_to_gfn(control_block); init_control.offset = 0; init_control.vcpu = cpu; @@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info) /* Mask all events in this page before adding it. 
*/ init_array_page(array_page); - expand_array.array_gfn = virt_to_mfn(array_page); + expand_array.array_gfn = virt_to_gfn(array_page); ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array); if (ret < 0) diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index e53fe191738c..14370df9ac1c 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, /* Grant foreign access to the page. */ rc = gnttab_grant_foreign_access(op->domid, - pfn_to_mfn(page_to_pfn(gref->page)), readonly); + xen_page_to_gfn(gref->page), + readonly); if (rc < 0) goto undo; gref_ids[i] = gref->gref_id = rc; diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index d10effee9b9e..e12bd3635f83 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -80,7 +80,7 @@ static int xen_suspend(void *data) * is resuming in a new domain. */ si->cancelled = HYPERVISOR_suspend(xen_pv_domain() - ? virt_to_mfn(xen_start_info) + ? virt_to_gfn(xen_start_info) : 0); xen_arch_post_suspend(si->cancelled); diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index 239738f944ba..28c97ff606f4 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c @@ -131,7 +131,7 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid, static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) { - unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; + unsigned long gmfn = pfn_to_gfn(pfn); return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, gmfn, 0, 0, 0); @@ -140,7 +140,7 @@ static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) { - unsigned long gmfn = xen_pv_domain() ? 
pfn_to_mfn(pfn) : pfn; + unsigned long gmfn = pfn_to_gfn(pfn); return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, gmfn, 0, 0, 0); diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e30353575d5d..2ba09c1195c8 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, for (i = 0; i < nr_pages; i++) { err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_mfn(vaddr), 0); + virt_to_gfn(vaddr), 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c index b17707ee07d4..ee6d9efd7b76 100644 --- a/drivers/xen/xenbus/xenbus_dev_backend.c +++ b/drivers/xen/xenbus/xenbus_dev_backend.c @@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid) goto out_err; gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid, - virt_to_mfn(xen_store_interface), 0 /* writable */); + virt_to_gfn(xen_store_interface), 0 /* writable */); arg.dom = DOMID_SELF; arg.remote_dom = domid; diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 4308fb3cf7c2..b3870f4ca1d0 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -711,9 +711,7 @@ static int __init xenstored_local_init(void) if (!page) goto out_err; - xen_store_mfn = xen_start_info->store_mfn = - pfn_to_mfn(virt_to_phys((void *)page) >> - PAGE_SHIFT); + xen_store_mfn = xen_start_info->store_mfn = virt_to_gfn((void *)page); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; @@ -787,12 +785,12 @@ static int __init xenbus_init(void) err = xenstored_local_init(); if (err) goto out_error; - xen_store_interface = mfn_to_virt(xen_store_mfn); + xen_store_interface = gfn_to_virt(xen_store_mfn); break; case XS_PV: xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; - xen_store_interface = mfn_to_virt(xen_store_mfn); + xen_store_interface = gfn_to_virt(xen_store_mfn); break; case XS_HVM: err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); diff --git a/include/xen/page.h b/include/xen/page.h index a5983da2f5cd..1daae485e336 100644 --- a/include/xen/page.h +++ b/include/xen/page.h @@ -3,9 +3,9 @@ #include -static inline unsigned long page_to_mfn(struct page *page) +static inline unsigned long xen_page_to_gfn(struct page *page) { - return pfn_to_mfn(page_to_pfn(page)); + return pfn_to_gfn(page_to_pfn(page)); } struct xen_memory_region { -- cgit v1.2.3 From 296254f3223d201f2aa53f5f717eedfdc63f3db8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 7 Sep 2015 00:30:06 +0100 Subject: ARM: uaccess: remove unneeded uaccess_save_and_disable macro This macro is never referenced, remove it. 
Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 3ae0eda5e64f..9007c518d1d8 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -487,11 +487,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm - .macro uaccess_save_and_disable, tmp - uaccess_save \tmp - uaccess_disable \tmp - .endm - .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 -- cgit v1.2.3 From 6894258eda2f9badc28c878086c0e54bd5b7fb30 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:39 -0700 Subject: dma-mapping: consolidate dma_{alloc,free}_{attrs,coherent} Since 2009 we have a nice asm-generic header implementing lots of DMA API functions for architectures using struct dma_map_ops, but unfortunately it's still missing a lot of APIs that all architectures still have to duplicate. This series consolidates the remaining functions, although we still need arch opt outs for two of them as a few architectures have very non-standard implementations. This patch (of 5): The coherent DMA allocator works the same over all architectures supporting dma_map operations. This patch consolidates them and converges the minor differences: - the debug_dma helpers are now called from all architectures, including those that were previously missing them - dma_alloc_from_coherent and dma_release_from_coherent are now always called from the generic alloc/free routines instead of the ops dma-mapping-common.h always includes dma-coherent.h to get the definitions for them, or the stubs if the architecture doesn't support this feature - checks for ->alloc / ->free presence are removed. There is only one instance of dma_map_ops without them (mic_dma_ops), and that one is x86-only anyway. Besides that, only x86 needs special treatment to substitute a default device if none is passed and tweak the gfp_flags. An optional arch hook is provided for that. [linux@roeck-us.net: fix build] [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H.
Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Guenter Roeck Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 18 ---------- arch/arm/include/asm/dma-mapping.h | 29 ---------------- arch/arm/mm/dma-mapping.c | 12 ------- arch/arm64/include/asm/dma-mapping.h | 33 ------------------ arch/h8300/include/asm/dma-mapping.h | 26 -------------- arch/hexagon/include/asm/dma-mapping.h | 33 ------------------ arch/ia64/include/asm/dma-mapping.h | 25 ------------- arch/microblaze/include/asm/dma-mapping.h | 31 ----------------- arch/mips/cavium-octeon/dma-octeon.c | 8 ----- arch/mips/include/asm/dma-mapping.h | 31 ----------------- arch/mips/loongson64/common/dma-swiotlb.c | 8 ----- arch/mips/mm/dma-default.c | 7 ---- arch/mips/netlogic/common/nlm-dma.c | 10 ------ arch/openrisc/include/asm/dma-mapping.h | 30 ---------------- arch/powerpc/include/asm/dma-mapping.h | 33 ------------------ arch/s390/include/asm/dma-mapping.h | 31 ----------------- arch/sh/include/asm/dma-mapping.h | 37 -------------------- arch/sparc/include/asm/dma-mapping.h | 26 -------------- arch/tile/include/asm/dma-mapping.h | 27 -------------- arch/unicore32/include/asm/dma-mapping.h | 24 ------------- arch/x86/include/asm/dma-mapping.h | 16 ++------- arch/x86/kernel/pci-dma.c | 49 +++++--------------------- arch/xtensa/include/asm/dma-mapping.h | 31 ----------------- drivers/xen/swiotlb-xen.c | 6 ---- include/asm-generic/dma-mapping-common.h | 58 +++++++++++++++++++++++++++++++ 25 files changed, 70 insertions(+), 569 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index dfa32f061320..9fef5bd59a82 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,24 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs); -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs); -} - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return get_dma_ops(dev)->mapping_error(dev, dma_addr); diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a68b9d8a71fe..bc404473f1ca 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -209,21 +208,6 @@ extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - BUG_ON(!ops); - - cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - return cpu_addr; -} - /** * arm_dma_free 
- free memory allocated by arm_dma_alloc * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -241,19 +225,6 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs); -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - /** * arm_dma_mmap - map a coherent DMA allocation into user space * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index bf35abcc7d59..e62604384945 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -676,10 +676,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); - void *memory; - - if (dma_alloc_from_coherent(dev, size, handle, &memory)) - return memory; return __dma_alloc(dev, size, handle, gfp, prot, false, attrs, __builtin_return_address(0)); @@ -688,11 +684,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, static void *arm_coherent_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { - void *memory; - - if (dma_alloc_from_coherent(dev, size, handle, &memory)) - return memory; - return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, attrs, __builtin_return_address(0)); } @@ -752,9 +743,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); - if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) - return; - size = PAGE_ALIGN(size); if (nommu()) { diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index f0d6d0bfe55c..5e11b3f0fe3a 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -22,8 +22,6 @@ #include #include -#include - #include #include @@ -120,37 +118,6 @@ static inline void dma_mark_clean(void *addr, size_t size) { } -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *vaddr; - - if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) - return vaddr; - - vaddr = ops->alloc(dev, size, dma_handle, flags, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); - return vaddr; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dev_addr, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; - - debug_dma_free_coherent(dev, size, vaddr, dev_addr); - ops->free(dev, size, vaddr, dev_addr, attrs); -} - /* * There is no dma_cache_sync() implementation, so just return NULL here. 
*/ diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 6e67a90902f2..826aa9b519b7 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -1,8 +1,6 @@ #ifndef _H8300_DMA_MAPPING_H #define _H8300_DMA_MAPPING_H -#include - extern struct dma_map_ops h8300_dma_map_ops; static inline struct dma_map_ops *get_dma_ops(struct device *dev) @@ -25,30 +23,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - memory = ops->alloc(dev, size, dma_handle, flag, attrs); - return memory; -} - -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 16965427f6b4..c20d3caa7dad 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -70,37 +70,4 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return (dma_addr == bad_dma_address); } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - void *ret; - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - ret = ops->alloc(dev, size, dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, ret); - - return ret; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); -} - #endif diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index cf3ab7e784b5..d36f83cc226a 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -23,31 +23,6 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *daddr, gfp_t gfp, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - void *caddr; - - caddr = ops->alloc(dev, size, daddr, gfp, attrs); - debug_dma_alloc_coherent(dev, size, *daddr, caddr); - return caddr; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void 
*caddr, dma_addr_t daddr, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - debug_dma_free_coherent(dev, size, caddr, daddr); - ops->free(dev, size, caddr, daddr, attrs); -} - #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index ab353723076a..801dbe215a8c 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -27,7 +27,6 @@ #include #include #include -#include #include #define DMA_ERROR_CODE (~(dma_addr_t)0x0) @@ -102,36 +101,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - BUG_ON(!ops); - - memory = ops->alloc(dev, size, dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - return memory; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!ops); - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index d8960d46417b..2cd45f5f9481 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -161,9 +161,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, { void *ret; - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); @@ -194,11 +191,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, static void octeon_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - int order = get_order(size); - - if (dma_release_from_coherent(dev, order, vaddr)) - return; - swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 360b3387182a..b197595134ba 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -4,7 +4,6 @@ #include #include #include -#include #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ #include @@ -65,36 +64,6 @@ dma_set_mask(struct device *dev, u64 mask) extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - void *ret; - struct dma_map_ops *ops = get_dma_ops(dev); - - ret = ops->alloc(dev, size, 
dma_handle, gfp, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, ret); - - return ret; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - ops->free(dev, size, vaddr, dma_handle, attrs); - - debug_dma_free_coherent(dev, size, vaddr, dma_handle); -} - - void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index 2c6b989c1bc4..ef9da3b5c543 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -14,9 +14,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, { void *ret; - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); @@ -46,11 +43,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, static void loongson_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - int order = get_order(size); - - if (dma_release_from_coherent(dev, order, vaddr)) - return; - swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 8f23cf08f4ba..6c0fd13fa8e8 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -137,9 +137,6 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, struct page *page = NULL; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - gfp = massage_gfp_flags(dev, gfp); if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) @@ -176,13 +173,9 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { unsigned long addr = (unsigned long) vaddr; - int order = get_order(size); unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = NULL; - if (dma_release_from_coherent(dev, order, vaddr)) - return; - plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); if (!plat_device_is_coherent(dev) && !hw_coherentio) diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c index f3d4ae87abc7..3758715d4ab6 100644 --- a/arch/mips/netlogic/common/nlm-dma.c +++ b/arch/mips/netlogic/common/nlm-dma.c @@ -47,11 +47,6 @@ static char *nlm_swiotlb; static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { - void *ret; - - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); @@ -69,11 +64,6 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, static void nlm_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - int order = get_order(size); - - if (dma_release_from_coherent(dev, order, vaddr)) - return; - swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index fab8628e1b6e..a81d6f68e9c8 100644 --- a/arch/openrisc/include/asm/dma-mapping.h 
+++ b/arch/openrisc/include/asm/dma-mapping.h @@ -23,7 +23,6 @@ */ #include -#include #include #include @@ -38,35 +37,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - memory = ops->alloc(dev, size, dma_handle, gfp, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - - return memory; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 710f60e380e0..e6ca63ac4c6c 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -137,39 +137,6 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - void *cpu_addr; - - BUG_ON(!dma_ops); - - cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - - return cpu_addr; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_map_ops *dma_ops = get_dma_ops(dev); diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 9d395961e713..c29c9c7d81e8 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -56,35 +56,4 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == DMA_ERROR_CODE; } -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - - BUG_ON(!ops); - - cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - - return cpu_addr; -} - -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); 
- - BUG_ON(!ops); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - #endif /* _ASM_S390_DMA_MAPPING_H */ diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index b437f2c780b8..3c78059e66ff 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -9,7 +9,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } -#include #include static inline int dma_supported(struct device *dev, u64 mask) @@ -53,42 +52,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == 0; } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) - return memory; - if (!ops->alloc) - return NULL; - - memory = ops->alloc(dev, size, dma_handle, gfp, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - - return memory; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; - - debug_dma_free_coherent(dev, size, vaddr, dma_handle); - if (ops->free) - ops->free(dev, size, vaddr, dma_handle, attrs); -} - /* arch/sh/mm/consistent.c */ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 7e064c68c5ec..a8c678494ce7 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -41,32 +41,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - - cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - return cpu_addr; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { debug_dma_mapping_error(dev, dma_addr); diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 1eae359d8315..4aba10e49310 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -116,34 +116,7 @@ dma_set_mask(struct device *dev, u64 mask) return 0; } -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - void *cpu_addr; - - cpu_addr = dma_ops->alloc(dev, size, 
dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - - return cpu_addr; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) #define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) /* diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 366460a81796..5294d03e59de 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -18,8 +18,6 @@ #include #include -#include - #include #include @@ -82,28 +80,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - return dma_ops->alloc(dev, size, dma_handle, flag, attrs); -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 1f5b7287d1ad..f9b1b6cc48b6 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -12,7 +12,6 @@ #include #include #include -#include #include #ifdef CONFIG_ISA @@ -41,6 +40,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #endif } +bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); +#define arch_dma_alloc_attrs arch_dma_alloc_attrs + #include /* Make sure we keep the same behaviour */ @@ -125,16 +127,4 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) return gfp; } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -void * -dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, struct dma_attrs *attrs); - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t bus, - struct dma_attrs *attrs); - #endif diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 353972c1946c..bd23971e8f1d 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -140,50 +140,19 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, free_pages((unsigned long)vaddr, get_order(size)); } -void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, struct dma_attrs *attrs) +bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) { - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - gfp &= ~(__GFP_DMA | 
__GFP_HIGHMEM | __GFP_DMA32); - - if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) - return memory; - - if (!dev) - dev = &x86_dma_fallback_dev; - - if (!is_device_dma_capable(dev)) - return NULL; - - if (!ops->alloc) - return NULL; - - memory = ops->alloc(dev, size, dma_handle, - dma_alloc_coherent_gfp_flags(dev, gfp), attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - - return memory; -} -EXPORT_SYMBOL(dma_alloc_attrs); - -void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t bus, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - WARN_ON(irqs_disabled()); /* for portability */ + *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); + *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; + if (!*dev) + *dev = &x86_dma_fallback_dev; + if (!is_device_dma_capable(*dev)) + return false; + return true; - debug_dma_free_coherent(dev, size, vaddr, bus); - if (ops->free) - ops->free(dev, size, vaddr, bus, attrs); } -EXPORT_SYMBOL(dma_free_attrs); +EXPORT_SYMBOL(arch_dma_alloc_attrs); /* * See for the iommu kernel diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index f01cb3044e50..bf24c908e5ff 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -34,37 +34,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) #define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - void *ret; - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - - ret = ops->alloc(dev, size, dma_handle, gfp, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, ret); - - return ret; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; - - ops->free(dev, size, vaddr, dma_handle, attrs); - debug_dma_free_coherent(dev, size, vaddr, dma_handle); -} static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 4c549323c605..da1029ef8159 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -311,9 +311,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ flags &= ~(__GFP_DMA | __GFP_HIGHMEM); - if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret)) - return ret; - /* On ARM this function returns an ioremap'ped virtual address for * which virt_to_phys doesn't return the corresponding physical * address. 
In fact on ARM virt_to_phys only works for kernel direct @@ -356,9 +353,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, phys_addr_t phys; u64 dma_mask = DMA_BIT_MASK(32); - if (dma_release_from_coherent(hwdev, order, vaddr)) - return; - if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 940d5ec122c9..56dd9ea2bc8c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -6,6 +6,7 @@ #include #include #include +#include static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, @@ -237,4 +238,61 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) +#ifndef arch_dma_alloc_attrs +#define arch_dma_alloc_attrs(dev, flag) (true) +#endif + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + + BUG_ON(!ops); + + if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) + return cpu_addr; + + if (!arch_dma_alloc_attrs(&dev, &flag)) + return NULL; + if (!ops->alloc) + return NULL; + + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!ops); + WARN_ON(irqs_disabled()); + + if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) + return; + + if (!ops->free) + return; + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); +} + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); +} + #endif -- cgit v1.2.3 From 1e8937526e2309d48fccd81bb30a590ac21a5516 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:42 -0700 Subject: dma-mapping: consolidate dma_{alloc,free}_noncoherent Most architectures do not support non-coherent allocations and either define dma_{alloc,free}_noncoherent to their coherent versions or stub them out. Openrisc uses dma_{alloc,free}_attrs to implement them, and only Mips implements them directly. This patch moves the Openrisc version to common code, and handles the DMA_ATTR_NON_CONSISTENT case in the mips dma_map_ops instance. Note that actual non-coherent allocations require a dma_cache_sync implementation, so if non-coherent allocations didn't work on an architecture before this patch they still won't work after it. [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 3 --- arch/arm/include/asm/dma-mapping.h | 21 ++++++--------------- arch/arm64/include/asm/dma-mapping.h | 14 -------------- arch/h8300/include/asm/dma-mapping.h | 3 --- arch/hexagon/include/asm/dma-mapping.h | 3 --- arch/ia64/include/asm/dma-mapping.h | 3 --- arch/microblaze/include/asm/dma-mapping.h | 3 --- arch/mips/include/asm/dma-mapping.h | 6 ------ arch/mips/mm/dma-default.c | 20 +++++++++++++++----- arch/openrisc/include/asm/dma-mapping.h | 20 -------------------- arch/powerpc/include/asm/dma-mapping.h | 3 --- arch/s390/include/asm/dma-mapping.h | 3 --- arch/sh/include/asm/dma-mapping.h | 3 --- arch/sparc/include/asm/dma-mapping.h | 3 --- arch/tile/include/asm/dma-mapping.h | 3 --- arch/unicore32/include/asm/dma-mapping.h | 3 --- arch/x86/include/asm/dma-mapping.h | 3 --- arch/xtensa/include/asm/dma-mapping.h | 3 --- include/asm-generic/dma-mapping-common.h | 18 ++++++++++++++++++ 19 files changed, 39 insertions(+), 99 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 9fef5bd59a82..0552bf097245 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -27,9 +27,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return get_dma_ops(dev)->set_dma_mask(dev, mask); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #define dma_cache_sync(dev, va, size, dir) ((void)0) #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bc404473f1ca..0b7787167b64 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -38,6 +38,12 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) dev->archdata.dma_ops = ops; } +/* + * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent + * implementations, we don't provide a dma_cache_sync function so drivers using + * this API are highlighted with build warnings. + */ + #include static inline int dma_set_mask(struct device *dev, u64 mask) @@ -175,21 +181,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == DMA_ERROR_CODE; } -/* - * Dummy noncoherent implementation. We don't provide a dma_cache_sync - * function so drivers using this API are highlighted with build warnings. - */ -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp) -{ - return NULL; -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle) -{ -} - extern int dma_supported(struct device *dev, u64 mask); extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 5e11b3f0fe3a..178e60b80922 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -118,19 +118,5 @@ static inline void dma_mark_clean(void *addr, size_t size) { } -/* - * There is no dma_cache_sync() implementation, so just return NULL here. 
- */ -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t flags) -{ - return NULL; -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle) -{ -} - #endif /* __KERNEL__ */ #endif /* __ASM_DMA_MAPPING_H */ diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 826aa9b519b7..72465ce59453 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -20,9 +20,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return 0; } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index c20d3caa7dad..58d2d8f1544a 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -34,9 +34,6 @@ extern int bad_dma_address; extern struct dma_map_ops *dma_ops; -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline struct dma_map_ops *get_dma_ops(struct device *dev) { if (unlikely(dev == NULL)) diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index d36f83cc226a..a925ff03c964 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -23,9 +23,6 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #define get_dma_ops(dev) platform_dma_get_ops(dev) #include diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 801dbe215a8c..bc81625d486f 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -98,9 +98,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return (dma_addr == DMA_ERROR_CODE); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index b197595134ba..709b2ba79cc3 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -64,10 +64,4 @@ dma_set_mask(struct device *dev, u64 mask) extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - -void dma_free_noncoherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 6c0fd13fa8e8..a914dc1cb6d1 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -112,7 +112,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) return gfp | 
dma_flag; } -void *dma_alloc_noncoherent(struct device *dev, size_t size, +static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t gfp) { void *ret; @@ -128,7 +128,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, return ret; } -EXPORT_SYMBOL(dma_alloc_noncoherent); static void *mips_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) @@ -137,6 +136,13 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, struct page *page = NULL; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + /* + * XXX: seems like the coherent and non-coherent implementations could + * be consolidated. + */ + if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) + return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); + gfp = massage_gfp_flags(dev, gfp); if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) @@ -161,13 +167,12 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, } -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) +static void mips_dma_free_noncoherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); free_pages((unsigned long) vaddr, get_order(size)); } -EXPORT_SYMBOL(dma_free_noncoherent); static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) @@ -176,6 +181,11 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = NULL; + if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { + mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); + return; + } + plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); if (!plat_device_is_coherent(dev) && !hw_coherentio) diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index a81d6f68e9c8..57722528ea4d 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -37,26 +37,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ - struct dma_attrs attrs; - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - - return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ - struct dma_attrs attrs; - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - - dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); -} - static inline int dma_supported(struct device *dev, u64 dma_mask) { /* Support 32 bit DMA mask exclusively */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e6ca63ac4c6c..7971b421c677 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -177,9 +177,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) return daddr - get_dma_offset(dev); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #define ARCH_HAS_DMA_MMAP_COHERENT static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git 
a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index c29c9c7d81e8..b729efeb9ad8 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -25,9 +25,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, { } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #include static inline int dma_supported(struct device *dev, u64 mask) diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 3c78059e66ff..2c3fa2ccbe9b 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -38,9 +38,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index a8c678494ce7..2564edcb9728 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -9,9 +9,6 @@ int dma_supported(struct device *dev, u64 mask); -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir) { diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 4aba10e49310..e982dfa5d2f4 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -116,9 +116,6 @@ dma_set_mask(struct device *dev, u64 mask) return 0; } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) - /* * dma_alloc_noncoherent() is #defined to return coherent memory, * so there's no need to do any flushing here. 
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 5294d03e59de..636e942940a0 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -80,9 +80,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index f9b1b6cc48b6..7e47e4d6e69c 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -56,9 +56,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return (dma_addr == DMA_ERROR_CODE); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index bf24c908e5ff..0a19581375da 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -32,9 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 56dd9ea2bc8c..ec321dd98f93 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -295,4 +295,22 @@ static inline void dma_free_coherent(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); } +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); +} + +static inline void dma_free_noncoherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); +} + #endif -- cgit v1.2.3 From efa21e432c7b3c8ae976039d614a017799b6e874 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:46 -0700 Subject: dma-mapping: consolidate dma_mapping_error Currently there are three valid implementations of dma_mapping_error: (1) call ->mapping_error (2) check for a hardcoded error code (3) always return 0 This patch provides a common implementation that calls ->mapping_error if present, then checks for DMA_ERROR_CODE if defined or otherwise returns 0. [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H.
Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 5 ----- arch/arm/include/asm/dma-mapping.h | 9 --------- arch/arm64/include/asm/dma-mapping.h | 7 ------- arch/h8300/include/asm/dma-mapping.h | 5 ----- arch/hexagon/include/asm/dma-mapping.h | 11 +---------- arch/ia64/include/asm/dma-mapping.h | 7 ------- arch/microblaze/include/asm/dma-mapping.h | 11 ----------- arch/mips/include/asm/dma-mapping.h | 8 -------- arch/openrisc/include/asm/dma-mapping.h | 5 ----- arch/powerpc/include/asm/dma-mapping.h | 17 ++--------------- arch/s390/include/asm/dma-mapping.h | 10 ---------- arch/sh/include/asm/dma-mapping.h | 13 ++----------- arch/sparc/include/asm/dma-mapping.h | 6 ------ arch/tile/include/asm/dma-mapping.h | 7 ------- arch/unicore32/include/asm/dma-mapping.h | 10 ---------- arch/x86/include/asm/dma-mapping.h | 11 ----------- arch/xtensa/include/asm/dma-mapping.h | 9 --------- include/asm-generic/dma-mapping-common.h | 14 ++++++++++++++ 18 files changed, 19 insertions(+), 146 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 0552bf097245..80ac3e835efe 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,11 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return get_dma_ops(dev)->mapping_error(dev, dma_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { return get_dma_ops(dev)->dma_supported(dev, mask); diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 0b7787167b64..9bef3c541c39 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -172,15 +172,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) { } -/* - * DMA errors are defined by all-bits-set in the DMA address. 
- */ -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - return dma_addr == DMA_ERROR_CODE; -} - extern int dma_supported(struct device *dev, u64 mask); extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 178e60b80922..f45f444b7a66 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -84,13 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) return (phys_addr_t)dev_addr; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - debug_dma_mapping_error(dev, dev_addr); - return ops->mapping_error(dev, dev_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 72465ce59453..5eef05382fff 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -20,9 +20,4 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return 0; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - #endif diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 58d2d8f1544a..e66119290eca 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -31,6 +31,7 @@ struct device; extern int bad_dma_address; +#define DMA_ERROR_CODE bad_dma_address extern struct dma_map_ops *dma_ops; @@ -57,14 +58,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - - return (dma_addr == bad_dma_address); -} - #endif diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index a925ff03c964..27b713d0edbc 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -27,13 +27,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, #include -static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - debug_dma_mapping_error(dev, daddr); - return ops->mapping_error(dev, daddr); -} - static inline int dma_supported(struct device *dev, u64 mask) { struct dma_map_ops *ops = platform_dma_get_ops(dev); diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index bc81625d486f..e5b843839263 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -87,17 +87,6 @@ static inline void __dma_sync(unsigned long paddr, } } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (ops->mapping_error) - return ops->mapping_error(dev, dma_addr); - - return (dma_addr == DMA_ERROR_CODE); -} - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/include/asm/dma-mapping.h 
b/arch/mips/include/asm/dma-mapping.h index 709b2ba79cc3..158bb36bdcb4 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -37,14 +37,6 @@ static inline int dma_supported(struct device *dev, u64 mask) return ops->dma_supported(dev, mask); } -static inline int dma_mapping_error(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, mask); - return ops->mapping_error(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 57722528ea4d..7dfe9d50856e 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -43,11 +43,6 @@ static inline int dma_supported(struct device *dev, u64 dma_mask) return dma_mask == DMA_BIT_MASK(32); } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - static inline int dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 7971b421c677..712d5afc055a 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -18,7 +18,9 @@ #include #include +#ifdef CONFIG_PPC64 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) +#endif /* Some dma direct funcs must be visible for use in other dma_ops */ extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, @@ -137,21 +139,6 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - -#ifdef CONFIG_PPC64 - return (dma_addr == DMA_ERROR_CODE); -#else - return 0; -#endif -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { #ifdef CONFIG_SWIOTLB diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index b729efeb9ad8..3c293291319b 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -43,14 +43,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - return dma_addr == DMA_ERROR_CODE; -} - #endif /* _ASM_S390_DMA_MAPPING_H */ diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 2c3fa2ccbe9b..98308c497162 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -9,6 +9,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } +#define DMA_ERROR_CODE 0 + #include static inline int dma_supported(struct device *dev, u64 mask) @@ -38,17 +40,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); -static inline int dma_mapping_error(struct device 
*dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (ops->mapping_error) - return ops->mapping_error(dev, dma_addr); - - return dma_addr == 0; -} - /* arch/sh/mm/consistent.c */ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 2564edcb9728..5069d137453b 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -38,12 +38,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - return (dma_addr == DMA_ERROR_CODE); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { #ifdef CONFIG_PCI diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index e982dfa5d2f4..f8f7a05023bf 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -74,13 +74,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int -dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - return get_dma_ops(dev)->mapping_error(dev, dma_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 636e942940a0..175d7e3f7b0a 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -38,16 +38,6 @@ static inline int dma_supported(struct device *dev, u64 mask) return dma_ops->dma_supported(dev, mask); } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - - return 0; -} - #include static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 7e47e4d6e69c..bbca62e3e43f 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -45,17 +45,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #include -/* Make sure we keep the same behaviour */ -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - debug_dma_mapping_error(dev, dma_addr); - if (ops->mapping_error) - return ops->mapping_error(dev, dma_addr); - - return (dma_addr == DMA_ERROR_CODE); -} - extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 0a19581375da..21925bfdaff7 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -32,15 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int -dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - return ops->mapping_error(dev, dma_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { diff --git 
a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index ec321dd98f93..cdaa24193d4c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -313,4 +313,18 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size, dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + debug_dma_mapping_error(dev, dma_addr); + + if (get_dma_ops(dev)->mapping_error) + return get_dma_ops(dev)->mapping_error(dev, dma_addr); + +#ifdef DMA_ERROR_CODE + return dma_addr == DMA_ERROR_CODE; +#else + return 0; +#endif +} + #endif -- cgit v1.2.3 From ee196371d5cb1942ebdccc16bdce389812aa265e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:49 -0700 Subject: dma-mapping: consolidate dma_supported Most architectures just call into ->dma_supported, but some also return 1 if the method is not present, or 0 if no dma ops are present (although that should never happeb). Consolidate this more broad version into common code. Also fix h8300 which inorrectly always returned 0, which would have been a problem if it's dma_set_mask implementation wasn't a similarly buggy noop. As a few architectures have much more elaborate implementations, we still allow for arch overrides. [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 5 ----- arch/arm/include/asm/dma-mapping.h | 6 +++--- arch/arm64/include/asm/dma-mapping.h | 6 ------ arch/h8300/include/asm/dma-mapping.h | 5 ----- arch/hexagon/include/asm/dma-mapping.h | 1 + arch/ia64/include/asm/dma-mapping.h | 6 ------ arch/microblaze/include/asm/dma-mapping.h | 13 +------------ arch/mips/include/asm/dma-mapping.h | 6 ------ arch/openrisc/include/asm/dma-mapping.h | 5 +++-- arch/powerpc/include/asm/dma-mapping.h | 11 ----------- arch/s390/include/asm/dma-mapping.h | 9 --------- arch/sh/include/asm/dma-mapping.h | 10 ---------- arch/sparc/include/asm/dma-mapping.h | 1 + arch/tile/include/asm/dma-mapping.h | 6 ------ arch/unicore32/include/asm/dma-mapping.h | 10 ---------- arch/x86/include/asm/dma-mapping.h | 4 +++- arch/xtensa/include/asm/dma-mapping.h | 6 ------ include/asm-generic/dma-mapping-common.h | 13 +++++++++++++ 18 files changed, 25 insertions(+), 98 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 80ac3e835efe..9d763e535c5a 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,11 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { return get_dma_ops(dev)->set_dma_mask(dev, mask); diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 9bef3c541c39..2f9c731691c0 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -38,12 +38,14 @@ 
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) dev->archdata.dma_ops = ops; } +#define HAVE_ARCH_DMA_SUPPORTED 1 +extern int dma_supported(struct device *dev, u64 mask); + /* * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent * implementations, we don't provide a dma_cache_sync function so drivers using * this API are highlighted with build warnings. */ - #include static inline int dma_set_mask(struct device *dev, u64 mask) @@ -172,8 +174,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) { } -extern int dma_supported(struct device *dev, u64 mask); - extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); /** diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index f45f444b7a66..f519a58c55ae 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -84,12 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) return (phys_addr_t)dev_addr; } -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - return ops->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { if (!dev->dma_mask || !dma_supported(dev, mask)) diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 5eef05382fff..48d652eb1b5f 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -10,11 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - return 0; -} - static inline int dma_set_mask(struct device *dev, u64 mask) { return 0; diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index e66119290eca..36e8de710b32 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -43,6 +43,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } +#define HAVE_ARCH_DMA_SUPPORTED 1 extern int dma_supported(struct device *dev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 27b713d0edbc..7982caa7c5e7 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -27,12 +27,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->dma_supported(dev, mask); -} - static inline int dma_set_mask (struct device *dev, u64 mask) { diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index e5b843839263..3b453c503a43 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -44,16 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &dma_direct_ops; } -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (unlikely(!ops)) - return 0; - if (!ops->dma_supported) - return 1; - return ops->dma_supported(dev, mask); -} +#include static inline int 
dma_set_mask(struct device *dev, u64 dma_mask) { @@ -69,8 +60,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -#include - static inline void __dma_sync(unsigned long paddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 158bb36bdcb4..8bf8ec30a4b2 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -31,12 +31,6 @@ static inline void dma_mark_clean(void *addr, size_t size) {} #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - return ops->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 7dfe9d50856e..8fc08b883477 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -35,14 +35,15 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &or1k_dma_map_ops; } -#include - +#define HAVE_ARCH_DMA_SUPPORTED 1 static inline int dma_supported(struct device *dev, u64 dma_mask) { /* Support 32 bit DMA mask exclusively */ return dma_mask == DMA_BIT_MASK(32); } +#include + static inline int dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 712d5afc055a..dd43e0c6f219 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -124,17 +124,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (unlikely(dma_ops == NULL)) - return 0; - if (dma_ops->dma_supported == NULL) - return 1; - return dma_ops->dma_supported(dev, mask); -} - extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 3c293291319b..1f42489797da 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -27,15 +27,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (dma_ops->dma_supported == NULL) - return 1; - return dma_ops->dma_supported(dev, mask); -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 98308c497162..088f6e5f1a92 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -13,16 +13,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (ops->dma_supported) - return ops->dma_supported(dev, mask); - - return 1; -} - static inline int dma_set_mask(struct device *dev, u64 mask) { struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 
5069d137453b..184651bb0b46 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -7,6 +7,7 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) +#define HAVE_ARCH_DMA_SUPPORTED 1 int dma_supported(struct device *dev, u64 mask); static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index f8f7a05023bf..559ed4a60077 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -74,12 +74,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int -dma_supported(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 175d7e3f7b0a..21231c14182c 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -28,16 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &swiotlb_dma_map_ops; } -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (unlikely(dma_ops == NULL)) - return 0; - - return dma_ops->dma_supported(dev, mask); -} - #include static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index bbca62e3e43f..b1fbf582048b 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -43,9 +43,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #define arch_dma_alloc_attrs arch_dma_alloc_attrs +#define HAVE_ARCH_DMA_SUPPORTED 1 +extern int dma_supported(struct device *hwdev, u64 mask); + #include -extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 21925bfdaff7..329abc7211e9 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -32,12 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int -dma_supported(struct device *dev, u64 mask) -{ - return 1; -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index cdaa24193d4c..67fa6bcd644c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -327,4 +327,17 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #endif } +#ifndef HAVE_ARCH_DMA_SUPPORTED +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (!ops) + return 0; + if (!ops->dma_supported) + return 1; + return ops->dma_supported(dev, mask); +} +#endif + #endif -- cgit v1.2.3 From 452e06af1f0149b01201f94264d452cd7a95db7a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:53 -0700 Subject: dma-mapping: consolidate dma_set_mask Almost everyone implements dma_set_mask the same way, although 
some time that's hidden in ->set_dma_mask methods. This patch consolidates those into a common implementation that either calls ->set_dma_mask if present or otherwise uses the default implementation. Some architectures used to only call ->set_dma_mask after the initial checks, and those instance have been fixed to do the full work. h8300 implemented dma_set_mask bogusly as a no-ops and has been fixed. Unfortunately some architectures overload unrelated semantics like changing the dma_ops into it so we still need to allow for an architecture override for now. [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 5 ----- arch/alpha/kernel/pci-noop.c | 10 ---------- arch/alpha/kernel/pci_iommu.c | 11 ----------- arch/arm/include/asm/dma-mapping.h | 5 ----- arch/arm64/include/asm/dma-mapping.h | 9 --------- arch/h8300/include/asm/dma-mapping.h | 5 ----- arch/hexagon/include/asm/dma-mapping.h | 1 - arch/hexagon/kernel/dma.c | 11 ----------- arch/ia64/include/asm/dma-mapping.h | 9 --------- arch/microblaze/include/asm/dma-mapping.h | 14 -------------- arch/mips/include/asm/dma-mapping.h | 16 ---------------- arch/mips/loongson64/common/dma-swiotlb.c | 3 +++ arch/openrisc/include/asm/dma-mapping.h | 9 --------- arch/powerpc/include/asm/dma-mapping.h | 4 +++- arch/s390/include/asm/dma-mapping.h | 2 -- arch/s390/pci/pci_dma.c | 10 ---------- arch/sh/include/asm/dma-mapping.h | 14 -------------- arch/sparc/include/asm/dma-mapping.h | 4 +++- arch/tile/include/asm/dma-mapping.h | 6 ++++-- arch/unicore32/include/asm/dma-mapping.h | 10 ---------- arch/x86/include/asm/dma-mapping.h | 2 -- arch/x86/kernel/pci-dma.c | 11 ----------- arch/xtensa/include/asm/dma-mapping.h | 11 ----------- include/asm-generic/dma-mapping-common.h | 15 +++++++++++++++ 24 files changed, 28 insertions(+), 169 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 9d763e535c5a..72a8ca7796d9 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,11 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->set_dma_mask(dev, mask); -} - #define dma_cache_sync(dev, va, size, dir) ((void)0) #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index df24b76f9246..2b1f4a1e9272 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -166,15 +166,6 @@ static int alpha_noop_supported(struct device *dev, u64 mask) return mask < 0x00ffffffUL ? 
0 : 1; } -static int alpha_noop_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - return 0; -} - struct dma_map_ops alpha_noop_ops = { .alloc = alpha_noop_alloc_coherent, .free = alpha_noop_free_coherent, @@ -182,7 +173,6 @@ struct dma_map_ops alpha_noop_ops = { .map_sg = alpha_noop_map_sg, .mapping_error = alpha_noop_mapping_error, .dma_supported = alpha_noop_supported, - .set_dma_mask = alpha_noop_set_mask, }; struct dma_map_ops *dma_ops = &alpha_noop_ops; diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index eddee7720343..8969bf2dfe3a 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -939,16 +939,6 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == 0; } -static int alpha_pci_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || - !pci_dma_supported(alpha_gendev_to_pci(dev), mask)) - return -EIO; - - *dev->dma_mask = mask; - return 0; -} - struct dma_map_ops alpha_pci_ops = { .alloc = alpha_pci_alloc_coherent, .free = alpha_pci_free_coherent, @@ -958,7 +948,6 @@ struct dma_map_ops alpha_pci_ops = { .unmap_sg = alpha_pci_unmap_sg, .mapping_error = alpha_pci_mapping_error, .dma_supported = alpha_pci_supported, - .set_dma_mask = alpha_pci_set_mask, }; struct dma_map_ops *dma_ops = &alpha_pci_ops; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 2f9c731691c0..ccb3aa64640d 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -48,11 +48,6 @@ extern int dma_supported(struct device *dev, u64 mask); */ #include -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->set_dma_mask(dev, mask); -} - #ifdef __arch_page_to_dma #error Please update to __arch_pfn_to_dma #endif diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index f519a58c55ae..cfdb34bedbcd 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -84,15 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) return (phys_addr_t)dev_addr; } -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - *dev->dma_mask = mask; - - return 0; -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 48d652eb1b5f..d9b5b806afe6 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -10,9 +10,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - return 0; -} - #endif diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 36e8de710b32..268fde8a4575 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -45,7 +45,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #define HAVE_ARCH_DMA_SUPPORTED 1 extern int dma_supported(struct device *dev, u64 mask); -extern int dma_set_mask(struct device *dev, u64 mask); extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction 
direction); diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index b74f9bae31a3..9e3ddf792bd3 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -44,17 +44,6 @@ int dma_supported(struct device *dev, u64 mask) } EXPORT_SYMBOL(dma_supported); -int dma_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - - return 0; -} -EXPORT_SYMBOL(dma_set_mask); - static struct gen_pool *coherent_pool; diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 7982caa7c5e7..9beccf8010bd 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -27,15 +27,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, #include -static inline int -dma_set_mask (struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - *dev->dma_mask = mask; - return 0; -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 3b453c503a43..24b12970c9cf 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -46,20 +46,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (unlikely(ops == NULL)) - return -EIO; - if (ops->set_dma_mask) - return ops->set_dma_mask(dev, dma_mask); - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - *dev->dma_mask = dma_mask; - return 0; -} - static inline void __dma_sync(unsigned long paddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 8bf8ec30a4b2..e604f760c4a0 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -31,22 +31,6 @@ static inline void dma_mark_clean(void *addr, size_t size) {} #include -static inline int -dma_set_mask(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if(!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - if (ops->set_dma_mask) - return ops->set_dma_mask(dev, mask); - - *dev->dma_mask = mask; - - return 0; -} - extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index ef9da3b5c543..4ffa6fc81c8f 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -85,6 +85,9 @@ static void loongson_dma_sync_sg_for_device(struct device *dev, static int loongson_dma_set_mask(struct device *dev, u64 mask) { + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) { *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits); return -EIO; diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 8fc08b883477..413bfcf86384 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -44,13 +44,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask) #include -static inline int dma_set_mask(struct device *dev, u64 dma_mask) 
-{ - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} #endif /* __ASM_OPENRISC_DMA_MAPPING_H */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index dd43e0c6f219..7f522c021dc3 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -122,9 +122,11 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) /* this will be removed soon */ #define flush_write_buffers() +#define HAVE_ARCH_DMA_SET_MASK 1 +extern int dma_set_mask(struct device *dev, u64 dma_mask); + #include -extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 1f42489797da..b3fd54d93dd2 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -18,8 +18,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &s390_dma_ops; } -extern int dma_set_mask(struct device *dev, u64 mask); - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 42b76580c8b8..37505b8b4093 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -262,16 +262,6 @@ out: spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); } -int dma_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - return 0; -} -EXPORT_SYMBOL_GPL(dma_set_mask); - static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 088f6e5f1a92..a3745a3fe029 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -13,20 +13,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - if (ops->set_dma_mask) - return ops->set_dma_mask(dev, mask); - - *dev->dma_mask = mask; - - return 0; -} - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 184651bb0b46..a21da597b0b5 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -37,7 +37,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } -#include +#define HAVE_ARCH_DMA_SET_MASK 1 static inline int dma_set_mask(struct device *dev, u64 mask) { @@ -52,4 +52,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return -EINVAL; } +#include + #endif diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 559ed4a60077..96ac6cce4a32 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -59,8 +59,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) static inline void dma_mark_clean(void *addr, size_t size) {} -#include - static inline void set_dma_ops(struct device 
*dev, struct dma_map_ops *ops) { dev->archdata.dma_ops = ops; @@ -74,6 +72,10 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } +#define HAVE_ARCH_DMA_SET_MASK 1 + +#include + static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 21231c14182c..8140e053ccd3 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -50,16 +50,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) static inline void dma_mark_clean(void *addr, size_t size) {} -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index b1fbf582048b..953b7263f844 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -48,8 +48,6 @@ extern int dma_supported(struct device *hwdev, u64 mask); #include -extern int dma_set_mask(struct device *dev, u64 mask); - extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, struct dma_attrs *attrs); diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index bd23971e8f1d..84b8ef82a159 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -58,17 +58,6 @@ EXPORT_SYMBOL(x86_dma_fallback_dev); /* Number of entries preallocated for DMA-API debugging */ #define PREALLOC_DMA_DEBUG_ENTRIES 65536 -int dma_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - - return 0; -} -EXPORT_SYMBOL(dma_set_mask); - void __init pci_iommu_alloc(void) { struct iommu_table_entry *p; diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 329abc7211e9..4427f38b634e 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -32,17 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int -dma_set_mask(struct device *dev, u64 mask) -{ - if(!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - - return 0; -} - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 67fa6bcd644c..b1bc954eccf3 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -340,4 +340,19 @@ static inline int dma_supported(struct device *dev, u64 mask) } #endif +#ifndef HAVE_ARCH_DMA_SET_MASK +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, mask); + + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + *dev->dma_mask = mask; + return 0; +} +#endif + #endif -- cgit v1.2.3 From 716ff1921a86c637b8875c7bb312fc6755fa9300 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 11 Sep 2015 08:17:39 +0100 Subject: ARM: domains: thread_info.h no longer needs asm/domains.h As of 
1eef5d2f1b46 ("ARM: domains: switch to keeping domain value in register") we no longer need to include asm/domains.h into asm/thread_info.h. Remove it. Tested-by: Robert Jarzmik Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 0a0aec410d8c..ae02e68b61fc 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -25,7 +25,6 @@ struct task_struct; #include -#include typedef unsigned long mm_segment_t; -- cgit v1.2.3 From 6e8f580d1fcc18e290713984c379cb97131c015a Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 11 Sep 2015 08:34:52 +0100 Subject: ARM: domains: add memory dependencies to get_domain/set_domain We need to have memory dependencies on get_domain/set_domain to avoid the compiler over-optimising these inline assembly instructions. Loads/stores must not be reordered across a set_domain(), so introduce a compiler barrier for that assembly. The value of get_domain() must not be cached across a set_domain(), but we still want to allow the compiler to optimise it away. Introduce a dependency on current_thread_info()->cpu_domain to avoid this; the new memory clobber in set_domain() should therefore cause the compiler to re-load this. The other advantage of using this is that we should have its address in the register set already, or very soon after at most call sites. Tested-by: Robert Jarzmik Signed-off-by: Russell King --- arch/arm/include/asm/domain.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index e878129f2fee..fc8ba1663601 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -12,6 +12,7 @@ #ifndef __ASSEMBLY__ #include +#include #endif /* @@ -89,7 +90,8 @@ static inline unsigned int get_domain(void) asm( "mrc p15, 0, %0, c3, c0 @ get domain" - : "=r" (domain)); + : "=r" (domain) + : "m" (current_thread_info()->cpu_domain)); return domain; } @@ -98,7 +100,7 @@ static inline void set_domain(unsigned val) { asm volatile( "mcr p15, 0, %0, c3, c0 @ set domain" - : : "r" (val)); + : : "r" (val) : "memory"); isb(); } -- cgit v1.2.3 From a4a5a7379e4ca03c192b732d61e446994eb67bbc Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Fri, 11 Sep 2015 17:12:27 +0100 Subject: ARM: 8431/1: fix alignment of __bug_table section entries On old ARM chips, unaligned accesses to memory are not trapped and fixed up. On module load, symbols are relocated, and the relocation of __bug_table symbols is done on a u32 basis. Yet the section is not aligned to a multiple-of-4 address, but only to a multiple of 2.
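Laid out as a C struct, each entry in that section corresponds to the generic bug_entry; a simplified, illustrative form for the 32-bit, verbose, non-relative-pointer configuration (not code added by this patch) is:

/* One __bug_table entry as emitted by the ARM BUG() macro below
 * (".word 1b, 2b" followed by ".hword line, 0"): 12 bytes per entry.
 * The module relocation code patches bug_addr and file as 32-bit
 * words, so if the section start is only 2-byte aligned those word
 * accesses can land on a 2-byte-aligned address and fault on CPUs
 * without unaligned-access fixup, as in the pxa Oops that follows.
 */
struct bug_entry {
	unsigned long	bug_addr;	/* address of the BUG() site ("1b") */
	const char	*file;		/* source file name string ("2b") */
	unsigned short	line;		/* source line number */
	unsigned short	flags;
};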
This triggers an Oops on pxa architecture, where address 0xbf0021ea is the first relocation in the __bug_table section : apply_relocate(): pxa3xx_nand: section 13 reloc 0 sym '' Unable to handle kernel paging request at virtual address bf0021ea pgd = e1cd0000 [bf0021ea] *pgd=c1cce851, *pte=c1cde04f, *ppte=c1cde01f Internal error: Oops: 23 [#1] ARM Modules linked in: CPU: 0 PID: 606 Comm: insmod Not tainted 4.2.0-rc8-next-20150828-cm-x300+ #887 Hardware name: CM-X300 module task: e1c68700 ti: e1c3e000 task.ti: e1c3e000 PC is at apply_relocate+0x2f4/0x3d4 LR is at 0xbf0021ea pc : [] lr : [] psr: 80000013 sp : e1c3fe30 ip : 60000013 fp : e49e8c60 r10: e49e8fa8 r9 : 00000000 r8 : e49e7c58 r7 : e49e8c38 r6 : e49e8a58 r5 : e49e8920 r4 : e49e8918 r3 : bf0021ea r2 : bf007034 r1 : 00000000 r0 : bf000000 Flags: Nzcv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none Control: 0000397f Table: c1cd0018 DAC: 00000051 Process insmod (pid: 606, stack limit = 0xe1c3e198) [] (apply_relocate) from [] (load_module+0x1248/0x1f5c) [] (load_module) from [] (SyS_init_module+0xe4/0x170) [] (SyS_init_module) from [] (ret_fast_syscall+0x0/0x38) Fix this by ensuring entries in __bug_table are all aligned to at least of multiple of 4. This transforms a module section __bug_table as : - [12] __bug_table PROGBITS 00000000 002232 000018 00 A 0 0 1 + [12] __bug_table PROGBITS 00000000 002232 000018 00 A 0 0 4 Signed-off-by: Robert Jarzmik Reviewed-by: Dave Martin Signed-off-by: Russell King --- arch/arm/include/asm/bug.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index b274bde24905..e7335a92144e 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -40,6 +40,7 @@ do { \ "2:\t.asciz " #__file "\n" \ ".popsection\n" \ ".pushsection __bug_table,\"a\"\n" \ + ".align 2\n" \ "3:\t.word 1b, 2b\n" \ "\t.hword " #__line ", 0\n" \ ".popsection"); \ -- cgit v1.2.3 From 62bea5bff486644ecf363fe8a1a2f6f32c614a49 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 15 Sep 2015 18:27:57 +0200 Subject: KVM: add halt_attempted_poll to VCPU stats This new statistic can help diagnosing VCPUs that, for any reason, trigger bad behavior of halt_poll_ns autotuning. For example, say halt_poll_ns = 480000, and wakeups are spaced exactly like 479us, 481us, 479us, 481us. Then KVM always fails polling and wastes 10+20+40+80+160+320+480 = 1110 microseconds out of every 479+481+479+481+479+481+479 = 3359 microseconds. The VCPU then is consuming about 30% more CPU than it would use without polling. This would show as an abnormally high number of attempted polling compared to the successful polls. 
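The new counter is exported next to the existing halt-polling statistics, so the pattern described above (many attempted polls, few successful ones) can be read straight from KVM's aggregate debugfs files. A small userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and using the stat names from the debugfs_entries[] additions below:

#include <stdio.h>

/* Read one aggregate KVM statistic from debugfs; returns -1 on error. */
static long long read_kvm_stat(const char *name)
{
	char path[128];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/debug/kvm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	long long attempted = read_kvm_stat("halt_attempted_poll");
	long long successful = read_kvm_stat("halt_successful_poll");

	if (attempted < 0 || successful < 0)
		return 1;
	printf("attempted=%lld successful=%lld (%.1f%% of polls succeeded)\n",
	       attempted, successful,
	       attempted ? 100.0 * successful / attempted : 0.0);
	return 0;
}

In the 479us/481us example above nearly every poll fails, so halt_attempted_poll climbs while halt_successful_poll barely moves, which is exactly the signature this statistic is meant to expose.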
Acked-by: Christian Borntraeger Signed-off-by: Paolo Bonzini --- arch/arm/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_host.h | 1 + arch/mips/include/asm/kvm_host.h | 1 + arch/mips/kvm/mips.c | 1 + arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/book3s.c | 1 + arch/powerpc/kvm/booke.c | 1 + arch/s390/include/asm/kvm_host.h | 1 + arch/s390/kvm/kvm-s390.c | 1 + arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/x86.c | 1 + virt/kvm/kvm_main.c | 1 + 12 files changed, 12 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index dcba0fa5176e..687ddeba3611 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -148,6 +148,7 @@ struct kvm_vm_stat { struct kvm_vcpu_stat { u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; }; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 415938dc45cf..486594583cc6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -195,6 +195,7 @@ struct kvm_vm_stat { struct kvm_vcpu_stat { u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; }; diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index e8c8d9d0c45f..3a54dbca9f7e 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -128,6 +128,7 @@ struct kvm_vcpu_stat { u32 msa_disabled_exits; u32 flush_dcache_exits; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; }; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index cd4c129ce743..49ff3bfc007e 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -55,6 +55,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, {NULL} }; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 98eebbf66340..195886a583ba 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -108,6 +108,7 @@ struct kvm_vcpu_stat { u32 dec_exits; u32 ext_intr_exits; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; u32 dbell_exits; u32 gdbell_exits; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index d75bf325f54a..cf009167d208 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -53,6 +53,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "ext_intr", VCPU_STAT(ext_intr_exits) }, { "queue_intr", VCPU_STAT(queue_intr) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "pf_storage", VCPU_STAT(pf_storage) }, { "sp_storage", VCPU_STAT(sp_storage) }, diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index ae458f0fd061..fd5875179e5c 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "dec", VCPU_STAT(dec_exits) }, { "ext_intr", VCPU_STAT(ext_intr_exits) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, { 
"halt_wakeup", VCPU_STAT(halt_wakeup) }, { "doorbell", VCPU_STAT(dbell_exits) }, { "guest doorbell", VCPU_STAT(gdbell_exits) }, diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3d012e071647..6ce4a0b7e8da 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -210,6 +210,7 @@ struct kvm_vcpu_stat { u32 exit_validity; u32 exit_instruction; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; u32 instruction_lctl; u32 instruction_lctlg; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c91eb941b444..2f807ab1725e 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, { "instruction_lctl", VCPU_STAT(instruction_lctl) }, diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c12e845f59e6..349f80a82b82 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -711,6 +711,7 @@ struct kvm_vcpu_stat { u32 nmi_window_exits; u32 halt_exits; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; u32 request_irq_exits; u32 irq_exits; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a60bdbccff51..6bbb0dfb99d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -149,6 +149,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9af68db73c6a..04146a2e1d81 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2004,6 +2004,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) if (vcpu->halt_poll_ns) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); + ++vcpu->stat.halt_attempted_poll; do { /* * This sets KVM_REQ_UNHALT if an interrupt -- cgit v1.2.3 From bd0b9ac405e1794d72533c3d487aa65b6b955a0c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 14 Sep 2015 10:42:37 +0200 Subject: genirq: Remove irq argument from irq flow handlers Most interrupt flow handlers do not use the irq argument. Those few which use it can retrieve the irq number from the irq descriptor. Remove the argument. Search and replace was done with coccinelle and some extra helper scripts around it. Thanks to Julia for her help! 
Signed-off-by: Thomas Gleixner Cc: Julia Lawall Cc: Jiang Liu --- arch/alpha/kernel/irq.c | 2 +- arch/arc/kernel/mcip.c | 2 +- arch/arm/common/it8152.c | 2 +- arch/arm/common/locomo.c | 2 +- arch/arm/common/sa1111.c | 6 ++---- arch/arm/include/asm/hardware/it8152.h | 2 +- arch/arm/include/asm/mach/irq.h | 4 ++-- arch/arm/mach-dove/irq.c | 6 +++--- arch/arm/mach-footbridge/isa-irq.c | 5 ++--- arch/arm/mach-gemini/gpio.c | 2 +- arch/arm/mach-imx/3ds_debugboard.c | 2 +- arch/arm/mach-imx/mach-mx31ads.c | 2 +- arch/arm/mach-iop13xx/msi.c | 2 +- arch/arm/mach-lpc32xx/irq.c | 4 ++-- arch/arm/mach-netx/generic.c | 3 +-- arch/arm/mach-omap1/fpga.c | 2 +- arch/arm/mach-omap2/prm_common.c | 2 +- arch/arm/mach-pxa/balloon3.c | 2 +- arch/arm/mach-pxa/cm-x2xx-pci.c | 5 ++--- arch/arm/mach-pxa/lpd270.c | 2 +- arch/arm/mach-pxa/pcm990-baseboard.c | 2 +- arch/arm/mach-pxa/viper.c | 2 +- arch/arm/mach-pxa/zeus.c | 2 +- arch/arm/mach-rpc/ecard.c | 3 +-- arch/arm/mach-s3c24xx/bast-irq.c | 4 +--- arch/arm/mach-s3c64xx/common.c | 8 ++++---- arch/arm/mach-sa1100/neponset.c | 2 +- arch/arm/plat-orion/gpio.c | 2 +- arch/avr32/mach-at32ap/extint.c | 2 +- arch/avr32/mach-at32ap/pio.c | 2 +- arch/blackfin/include/asm/irq_handler.h | 4 ++-- arch/blackfin/kernel/irqchip.c | 2 +- arch/blackfin/mach-bf537/ints-priority.c | 10 +++------ arch/blackfin/mach-common/ints-priority.c | 5 ++--- arch/c6x/platforms/megamod-pic.c | 2 +- arch/m68k/amiga/amiints.c | 8 ++++---- arch/m68k/coldfire/intc-5272.c | 6 ++---- arch/m68k/include/asm/irq.h | 3 +-- arch/m68k/include/asm/mac_via.h | 2 +- arch/m68k/mac/baboon.c | 2 +- arch/m68k/mac/oss.c | 4 ++-- arch/m68k/mac/psc.c | 2 +- arch/m68k/mac/via.c | 6 +++--- arch/metag/kernel/irq.c | 4 +--- arch/mips/alchemy/common/irq.c | 4 ++-- arch/mips/alchemy/devboards/bcsr.c | 2 +- arch/mips/ath25/ar2315.c | 2 +- arch/mips/ath25/ar5312.c | 2 +- arch/mips/ath79/irq.c | 8 ++++---- arch/mips/cavium-octeon/octeon-irq.c | 2 +- arch/mips/include/asm/netlogic/common.h | 4 ++-- arch/mips/jz4740/gpio.c | 2 +- arch/mips/netlogic/common/smp.c | 4 ++-- arch/mips/pci/pci-ar2315.c | 2 +- arch/mips/pci/pci-ar71xx.c | 2 +- arch/mips/pci/pci-ar724x.c | 2 +- arch/mips/pci/pci-rt3883.c | 2 +- arch/mips/ralink/irq.c | 2 +- arch/powerpc/include/asm/qe_ic.h | 23 +++++++++------------ arch/powerpc/include/asm/tsi108_pci.h | 2 +- arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 3 +-- arch/powerpc/platforms/52xx/media5200.c | 2 +- arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 2 +- arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 2 +- arch/powerpc/platforms/85xx/common.c | 2 +- arch/powerpc/platforms/85xx/mpc85xx_cds.c | 6 ++---- arch/powerpc/platforms/85xx/mpc85xx_ds.c | 2 +- arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 2 +- arch/powerpc/platforms/86xx/pic.c | 2 +- arch/powerpc/platforms/8xx/m8xx_setup.c | 2 +- arch/powerpc/platforms/cell/axon_msi.c | 2 +- arch/powerpc/platforms/cell/interrupt.c | 2 +- arch/powerpc/platforms/cell/spider-pic.c | 2 +- arch/powerpc/platforms/chrp/setup.c | 2 +- arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 3 +-- arch/powerpc/platforms/embedded6xx/mvme5100.c | 2 +- arch/powerpc/platforms/pseries/setup.c | 2 +- arch/powerpc/sysdev/ge/ge_pic.c | 2 +- arch/powerpc/sysdev/ge/ge_pic.h | 2 -- arch/powerpc/sysdev/mpic.c | 2 +- arch/powerpc/sysdev/qe_lib/qe_ic.c | 4 ++-- arch/powerpc/sysdev/tsi108_pci.c | 2 +- arch/powerpc/sysdev/uic.c | 2 +- arch/powerpc/sysdev/xilinx_intc.c | 2 +- arch/sh/boards/mach-se/7343/irq.c | 2 +- arch/sh/boards/mach-se/7722/irq.c | 2 +- 
arch/sh/boards/mach-se/7724/irq.c | 2 +- arch/sh/boards/mach-x3proto/gpio.c | 2 +- arch/sh/cchips/hd6446x/hd64461.c | 2 +- arch/sparc/kernel/leon_kernel.c | 2 +- arch/sparc/kernel/leon_pci_grpci1.c | 2 +- arch/sparc/kernel/leon_pci_grpci2.c | 2 +- arch/tile/kernel/pci_gx.c | 5 ++--- arch/unicore32/kernel/irq.c | 2 +- arch/x86/kernel/irq_32.c | 19 +++++++---------- arch/x86/kernel/irq_64.c | 2 +- arch/x86/lguest/boot.c | 2 +- drivers/dma/ipu/ipu_irq.c | 2 +- drivers/gpio/gpio-altera.c | 6 ++---- drivers/gpio/gpio-bcm-kona.c | 2 +- drivers/gpio/gpio-brcmstb.c | 2 +- drivers/gpio/gpio-davinci.c | 3 +-- drivers/gpio/gpio-dwapb.c | 2 +- drivers/gpio/gpio-ep93xx.c | 5 ++--- drivers/gpio/gpio-intel-mid.c | 2 +- drivers/gpio/gpio-lynxpoint.c | 2 +- drivers/gpio/gpio-mpc8xxx.c | 2 +- drivers/gpio/gpio-msic.c | 2 +- drivers/gpio/gpio-msm-v2.c | 2 +- drivers/gpio/gpio-mvebu.c | 2 +- drivers/gpio/gpio-mxc.c | 4 ++-- drivers/gpio/gpio-mxs.c | 2 +- drivers/gpio/gpio-omap.c | 2 +- drivers/gpio/gpio-pl061.c | 2 +- drivers/gpio/gpio-pxa.c | 2 +- drivers/gpio/gpio-sa1100.c | 3 +-- drivers/gpio/gpio-tegra.c | 2 +- drivers/gpio/gpio-timberdale.c | 2 +- drivers/gpio/gpio-tz1090.c | 4 ++-- drivers/gpio/gpio-vf610.c | 2 +- drivers/gpio/gpio-zx.c | 2 +- drivers/gpio/gpio-zynq.c | 2 +- drivers/gpu/ipu-v3/ipu-common.c | 4 ++-- drivers/irqchip/exynos-combiner.c | 6 ++---- drivers/irqchip/irq-armada-370-xp.c | 3 +-- drivers/irqchip/irq-bcm2835.c | 4 ++-- drivers/irqchip/irq-bcm7038-l1.c | 2 +- drivers/irqchip/irq-bcm7120-l2.c | 2 +- drivers/irqchip/irq-brcmstb-l2.c | 7 +++---- drivers/irqchip/irq-dw-apb-ictl.c | 2 +- drivers/irqchip/irq-gic.c | 4 ++-- drivers/irqchip/irq-i8259.c | 2 +- drivers/irqchip/irq-imgpdc.c | 4 ++-- drivers/irqchip/irq-keystone.c | 2 +- drivers/irqchip/irq-metag-ext.c | 2 +- drivers/irqchip/irq-metag.c | 2 +- drivers/irqchip/irq-mips-gic.c | 2 +- drivers/irqchip/irq-mmp.c | 2 +- drivers/irqchip/irq-orion.c | 2 +- drivers/irqchip/irq-s3c24xx.c | 2 +- drivers/irqchip/irq-sunxi-nmi.c | 2 +- drivers/irqchip/irq-tb10x.c | 2 +- drivers/irqchip/irq-versatile-fpga.c | 8 ++++---- drivers/irqchip/irq-vic.c | 2 +- drivers/irqchip/spear-shirq.c | 2 +- drivers/mfd/asic3.c | 2 +- drivers/mfd/ezx-pcap.c | 2 +- drivers/mfd/htc-egpio.c | 2 +- drivers/mfd/jz4740-adc.c | 2 +- drivers/mfd/pm8921-core.c | 2 +- drivers/mfd/t7l66xb.c | 2 +- drivers/mfd/tc6393xb.c | 3 +-- drivers/mfd/ucb1x00-core.c | 2 +- drivers/pci/host/pci-keystone.c | 5 ++--- drivers/pci/host/pci-xgene-msi.c | 2 +- drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c | 2 +- drivers/pinctrl/intel/pinctrl-baytrail.c | 2 +- drivers/pinctrl/intel/pinctrl-cherryview.c | 2 +- drivers/pinctrl/intel/pinctrl-intel.c | 2 +- drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 2 +- drivers/pinctrl/nomadik/pinctrl-nomadik.c | 4 ++-- drivers/pinctrl/pinctrl-adi2.c | 3 +-- drivers/pinctrl/pinctrl-amd.c | 6 +++--- drivers/pinctrl/pinctrl-at91.c | 2 +- drivers/pinctrl/pinctrl-coh901.c | 2 +- drivers/pinctrl/pinctrl-pistachio.c | 6 ++---- drivers/pinctrl/pinctrl-rockchip.c | 2 +- drivers/pinctrl/pinctrl-single.c | 2 +- drivers/pinctrl/pinctrl-st.c | 4 ++-- drivers/pinctrl/qcom/pinctrl-msm.c | 5 ++--- drivers/pinctrl/samsung/pinctrl-exynos.c | 4 ++-- drivers/pinctrl/samsung/pinctrl-s3c24xx.c | 8 ++++---- drivers/pinctrl/samsung/pinctrl-s3c64xx.c | 10 ++++----- drivers/pinctrl/sirf/pinctrl-atlas7.c | 4 ++-- drivers/pinctrl/sirf/pinctrl-sirf.c | 4 ++-- drivers/pinctrl/spear/pinctrl-plgpio.c | 2 +- drivers/pinctrl/sunxi/pinctrl-sunxi.c | 2 +- drivers/sh/intc/core.c | 2 +- 
drivers/sh/intc/virq.c | 4 ++-- drivers/soc/dove/pmu.c | 4 ++-- drivers/spmi/spmi-pmic-arb.c | 2 +- include/linux/irq.h | 16 +++++++-------- include/linux/irqdesc.h | 4 ++-- include/linux/irqhandler.h | 2 +- kernel/irq/chip.c | 27 ++++++++----------------- kernel/irq/handle.c | 4 +++- kernel/irq/irqdesc.c | 2 +- kernel/irq/resend.c | 2 +- 188 files changed, 281 insertions(+), 340 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2804648c8ff4..2d6efcff3bf3 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,6 +117,6 @@ handle_irq(int irq) } irq_enter(); - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); irq_exit(); } diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index d9e44b62df05..4ffd1855f1bd 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -252,7 +252,7 @@ static struct irq_chip idu_irq_chip = { static int idu_first_irq; -static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc) +static void idu_cascade_isr(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); unsigned int core_irq = irq_desc_get_irq(desc); diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 96dabcb6c621..996aed3b4eee 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -95,7 +95,7 @@ void it8152_init_irq(void) } } -void it8152_irq_demux(unsigned int irq, struct irq_desc *desc) +void it8152_irq_demux(struct irq_desc *desc) { int bits_pd, bits_lp, bits_ld; int i; diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index 304adea4bc52..0e97b4b871f9 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -138,7 +138,7 @@ static struct locomo_dev_info locomo_devices[] = { }, }; -static void locomo_handler(unsigned int __irq, struct irq_desc *desc) +static void locomo_handler(struct irq_desc *desc) { struct locomo *lchip = irq_desc_get_chip_data(desc); int req, i; diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 4f290250fa93..3d224941b541 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -196,10 +196,8 @@ static struct sa1111_dev_info sa1111_devices[] = { * active IRQs causes the interrupt output to pulse, the upper levels * will call us again if there are more interrupts to process. 
*/ -static void -sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void sa1111_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); unsigned int stat0, stat1, i; struct sa1111 *sachip = irq_desc_get_handler_data(desc); void __iomem *mapbase = sachip->base + SA1111_INTC; @@ -214,7 +212,7 @@ sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc) sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); if (stat0 == 0 && stat1 == 0) { - do_bad_IRQ(irq, desc); + do_bad_IRQ(desc); return; } diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index d36a73d7c0e8..076777ff3daa 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -106,7 +106,7 @@ extern void __iomem *it8152_base_address; struct pci_dev; struct pci_sys_data; -extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc); +extern void it8152_irq_demux(struct irq_desc *desc); extern void it8152_init_irq(void); extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 2092ee1e1300..de4634b51456 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -23,10 +23,10 @@ extern int show_fiq_list(struct seq_file *, int); /* * This is for easy migration, but should be changed in the source */ -#define do_bad_IRQ(irq,desc) \ +#define do_bad_IRQ(desc) \ do { \ raw_spin_lock(&desc->lock); \ - handle_bad_irq(irq, desc); \ + handle_bad_irq(desc); \ raw_spin_unlock(&desc->lock); \ } while(0) diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index 305d7c6242bb..bfb3703357c5 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -69,14 +69,14 @@ static struct irq_chip pmu_irq_chip = { .irq_ack = pmu_irq_ack, }; -static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void pmu_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); unsigned long cause = readl(PMU_INTERRUPT_CAUSE); + unsigned int irq; cause &= readl(PMU_INTERRUPT_MASK); if (cause == 0) { - do_bad_IRQ(irq, desc); + do_bad_IRQ(desc); return; } diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c index fcd79bc3a3e1..c01fca11b224 100644 --- a/arch/arm/mach-footbridge/isa-irq.c +++ b/arch/arm/mach-footbridge/isa-irq.c @@ -87,13 +87,12 @@ static struct irq_chip isa_hi_chip = { .irq_unmask = isa_unmask_pic_hi_irq, }; -static void -isa_irq_handler(unsigned int irq, struct irq_desc *desc) +static void isa_irq_handler(struct irq_desc *desc) { unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE; if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) { - do_bad_IRQ(isa_irq, desc); + do_bad_IRQ(desc); return; } diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c index 220333ed741d..2478d9f4d92d 100644 --- a/arch/arm/mach-gemini/gpio.c +++ b/arch/arm/mach-gemini/gpio.c @@ -126,7 +126,7 @@ static int gpio_set_irq_type(struct irq_data *d, unsigned int type) return 0; } -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { unsigned int port = (unsigned int)irq_desc_get_handler_data(desc); unsigned int gpio_irq_no, irq_stat; diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c index 45903be6e7b3..16496a071ecb 100644 --- 
a/arch/arm/mach-imx/3ds_debugboard.c +++ b/arch/arm/mach-imx/3ds_debugboard.c @@ -85,7 +85,7 @@ static struct platform_device smsc_lan9217_device = { .resource = smsc911x_resources, }; -static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc) +static void mxc_expio_irq_handler(struct irq_desc *desc) { u32 imr_val; u32 int_valid; diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index 2c0853560bd2..2b147e4bf9c9 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c @@ -154,7 +154,7 @@ static inline void mxc_init_imx_uart(void) imx31_add_imx_uart0(&uart_pdata); } -static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc) +static void mx31ads_expio_irq_handler(struct irq_desc *desc) { u32 imr_val; u32 int_valid; diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c index 9f89e76dfbb9..f6235b28578c 100644 --- a/arch/arm/mach-iop13xx/msi.c +++ b/arch/arm/mach-iop13xx/msi.c @@ -91,7 +91,7 @@ static void (*write_imipr[])(u32) = { write_imipr_3, }; -static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc) +static void iop13xx_msi_handler(struct irq_desc *desc) { int i, j; unsigned long status; diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index cce4cef12b6e..2ae431e8bc1b 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c @@ -370,7 +370,7 @@ static struct irq_chip lpc32xx_irq_chip = { .irq_set_wake = lpc32xx_irq_wake }; -static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc) +static void lpc32xx_sic1_handler(struct irq_desc *desc) { unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE)); @@ -383,7 +383,7 @@ static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc) } } -static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc) +static void lpc32xx_sic2_handler(struct irq_desc *desc) { unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE)); diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 6373e2bff203..842302df99c1 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c @@ -69,8 +69,7 @@ static struct platform_device *devices[] __initdata = { #define DEBUG_IRQ(fmt...) while (0) {} #endif -static void -netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc) +static void netx_hif_demux_handler(struct irq_desc *desc) { unsigned int irq = NETX_IRQ_HIF_CHAINED(0); unsigned int stat; diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c index dfec671b1639..39e20d0ead08 100644 --- a/arch/arm/mach-omap1/fpga.c +++ b/arch/arm/mach-omap1/fpga.c @@ -87,7 +87,7 @@ static void fpga_mask_ack_irq(struct irq_data *d) fpga_ack_irq(d); } -static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc) +static void innovator_fpga_IRQ_demux(struct irq_desc *desc) { u32 stat; int fpga_irq; diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 257e98c26618..3fc2cbe52113 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -102,7 +102,7 @@ static void omap_prcm_events_filter_priority(unsigned long *events, * dispatched accordingly. Clearing of the wakeup events should be * done by the SoC specific individual handlers. 
*/ -static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc) +static void omap_prcm_irq_handler(struct irq_desc *desc) { unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index 70366b35d299..a3ebb517cca1 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c @@ -496,7 +496,7 @@ static struct irq_chip balloon3_irq_chip = { .irq_unmask = balloon3_unmask_irq, }; -static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void balloon3_irq_handler(struct irq_desc *desc) { unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) & balloon3_irq_enabled; diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c index 1fa79f1f832d..3221ae15bef7 100644 --- a/arch/arm/mach-pxa/cm-x2xx-pci.c +++ b/arch/arm/mach-pxa/cm-x2xx-pci.c @@ -29,13 +29,12 @@ void __iomem *it8152_base_address; static int cmx2xx_it8152_irq_gpio; -static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void cmx2xx_it8152_irq_demux(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); /* clear our parent irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); - it8152_irq_demux(irq, desc); + it8152_irq_demux(desc); } void __cmx2xx_pci_init_irq(int irq_gpio) diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index b070167deef2..4823d972e647 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -120,7 +120,7 @@ static struct irq_chip lpd270_irq_chip = { .irq_unmask = lpd270_unmask_irq, }; -static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void lpd270_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 9a0c8affdadb..d8319b54299a 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -284,7 +284,7 @@ static struct irq_chip pcm990_irq_chip = { .irq_unmask = pcm990_unmask_irq, }; -static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void pcm990_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 4841d6cefe76..8ab26370107e 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -276,7 +276,7 @@ static inline unsigned long viper_irq_pending(void) viper_irq_enabled_mask; } -static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void viper_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 6f94dd7b4dee..30e62a3f0701 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -105,7 +105,7 @@ static inline unsigned long zeus_irq_pending(void) return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask; } -static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void zeus_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index f726d4c4e6dd..dc67a7fb3831 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -551,8 +551,7 @@ static void ecard_check_lockup(struct irq_desc *desc) } } -static void -ecard_irq_handler(unsigned int irq, 
struct irq_desc *desc) +static void ecard_irq_handler(struct irq_desc *desc) { ecard_t *ec; int called = 0; diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c index ced1ab86ac83..2bb08961e934 100644 --- a/arch/arm/mach-s3c24xx/bast-irq.c +++ b/arch/arm/mach-s3c24xx/bast-irq.c @@ -100,9 +100,7 @@ static struct irq_chip bast_pc104_chip = { .irq_ack = bast_pc104_maskack }; -static void -bast_irq_pc104_demux(unsigned int irq, - struct irq_desc *desc) +static void bast_irq_pc104_demux(struct irq_desc *desc) { unsigned int stat; unsigned int irqno; diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index fd63ecfb2f81..ddb30b8434c5 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -388,22 +388,22 @@ static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end) } } -static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint0_3(struct irq_desc *desc) { s3c_irq_demux_eint(0, 3); } -static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint4_11(struct irq_desc *desc) { s3c_irq_demux_eint(4, 11); } -static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint12_19(struct irq_desc *desc) { s3c_irq_demux_eint(12, 19); } -static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint20_27(struct irq_desc *desc) { s3c_irq_demux_eint(20, 27); } diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 6d237b4f7a8e..8411985af9ff 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -166,7 +166,7 @@ static struct sa1100_port_fns neponset_port_fns = { * ensure that the IRQ signal is deasserted before returning. This * is rather unfortunate. 
*/ -static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc) +static void neponset_irq_handler(struct irq_desc *desc) { struct neponset_drvdata *d = irq_desc_get_handler_data(desc); unsigned int irr; diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 79c33eca09a3..7bd22d8e5b11 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -407,7 +407,7 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) return 0; } -static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc); u32 cause, type; diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index d51ff8f1c541..96cabad68489 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c @@ -144,7 +144,7 @@ static struct irq_chip eic_chip = { .irq_set_type = eic_set_irq_type, }; -static void demux_eic_irq(unsigned int irq, struct irq_desc *desc) +static void demux_eic_irq(struct irq_desc *desc) { struct eic *eic = irq_desc_get_handler_data(desc); unsigned long status, pending; diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index 157a5e0e789f..4f61378c3453 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c @@ -281,7 +281,7 @@ static struct irq_chip gpio_irqchip = { .irq_set_type = gpio_irq_type, }; -static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { struct pio_device *pio = irq_desc_get_chip_data(desc); unsigned gpio_irq; diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h index 4b2a992794d7..d2f90c72378e 100644 --- a/arch/blackfin/include/asm/irq_handler.h +++ b/arch/blackfin/include/asm/irq_handler.h @@ -60,7 +60,7 @@ extern void bfin_internal_mask_irq(unsigned int irq); extern void bfin_internal_unmask_irq(unsigned int irq); struct irq_desc; -extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *); -extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *); +extern void bfin_demux_mac_status_irq(struct irq_desc *); +extern void bfin_demux_gpio_irq(struct irq_desc *); #endif diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index 0ba25764b8c0..052cde5ed2e4 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -107,7 +107,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) * than crashing, do something sensible. 
*/ if (irq >= NR_IRQS) - handle_bad_irq(irq, &bad_irq_desc); + handle_bad_irq(&bad_irq_desc); else generic_handle_irq(irq); diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c index 14b2f74554dc..a48baae4384d 100644 --- a/arch/blackfin/mach-bf537/ints-priority.c +++ b/arch/blackfin/mach-bf537/ints-priority.c @@ -89,8 +89,7 @@ static struct irq_chip bf537_generic_error_irqchip = { .irq_unmask = bf537_generic_error_unmask_irq, }; -static void bf537_demux_error_irq(unsigned int int_err_irq, - struct irq_desc *inta_desc) +static void bf537_demux_error_irq(struct irq_desc *inta_desc) { int irq = 0; @@ -182,15 +181,12 @@ static struct irq_chip bf537_mac_rx_irqchip = { .irq_unmask = bf537_mac_rx_unmask_irq, }; -static void bf537_demux_mac_rx_irq(unsigned int __int_irq, - struct irq_desc *desc) +static void bf537_demux_mac_rx_irq(struct irq_desc *desc) { - unsigned int int_irq = irq_desc_get_irq(desc); - if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) bfin_handle_irq(IRQ_MAC_RX); else - bfin_demux_gpio_irq(int_irq, desc); + bfin_demux_gpio_irq(desc); } #endif diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index a6d1b03cdf36..e8d4d748d0fd 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -656,8 +656,7 @@ static struct irq_chip bfin_mac_status_irqchip = { .irq_set_wake = bfin_mac_status_set_wake, }; -void bfin_demux_mac_status_irq(unsigned int int_err_irq, - struct irq_desc *inta_desc) +void bfin_demux_mac_status_irq(struct irq_desc *inta_desc) { int i, irq = 0; u32 status = bfin_read_EMAC_SYSTAT(); @@ -825,7 +824,7 @@ static void bfin_demux_gpio_block(unsigned int irq) } } -void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc) +void bfin_demux_gpio_irq(struct irq_desc *desc) { unsigned int inta_irq = irq_desc_get_irq(desc); unsigned int irq; diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c index d487698e978a..ddcb45d7dfa7 100644 --- a/arch/c6x/platforms/megamod-pic.c +++ b/arch/c6x/platforms/megamod-pic.c @@ -93,7 +93,7 @@ static struct irq_chip megamod_chip = { .irq_unmask = unmask_megamod, }; -static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc) +static void megamod_irq_cascade(struct irq_desc *desc) { struct megamod_cascade_data *cascade; struct megamod_pic *pic; diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c index 47b5f90002ab..7ff739e94896 100644 --- a/arch/m68k/amiga/amiints.c +++ b/arch/m68k/amiga/amiints.c @@ -46,7 +46,7 @@ static struct irq_chip amiga_irq_chip = { * The builtin Amiga hardware interrupt handlers. 
*/ -static void ami_int1(unsigned int irq, struct irq_desc *desc) +static void ami_int1(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; @@ -69,7 +69,7 @@ static void ami_int1(unsigned int irq, struct irq_desc *desc) } } -static void ami_int3(unsigned int irq, struct irq_desc *desc) +static void ami_int3(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; @@ -92,7 +92,7 @@ static void ami_int3(unsigned int irq, struct irq_desc *desc) } } -static void ami_int4(unsigned int irq, struct irq_desc *desc) +static void ami_int4(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; @@ -121,7 +121,7 @@ static void ami_int4(unsigned int irq, struct irq_desc *desc) } } -static void ami_int5(unsigned int irq, struct irq_desc *desc) +static void ami_int5(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; diff --git a/arch/m68k/coldfire/intc-5272.c b/arch/m68k/coldfire/intc-5272.c index 47371de60427..b0a19e207a63 100644 --- a/arch/m68k/coldfire/intc-5272.c +++ b/arch/m68k/coldfire/intc-5272.c @@ -143,12 +143,10 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type) * We need to be careful with the masking/acking due to the side effects * of masking an interrupt. */ -static void intc_external_irq(unsigned int __irq, struct irq_desc *desc) +static void intc_external_irq(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); - irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); - handle_simple_irq(irq, desc); + handle_simple_irq(desc); } static struct irq_chip intc_irq_chip = { diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h index 81ca118d58af..a644f4a53b94 100644 --- a/arch/m68k/include/asm/irq.h +++ b/arch/m68k/include/asm/irq.h @@ -64,8 +64,7 @@ extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *)); extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt); extern void m68k_setup_irq_controller(struct irq_chip *, - void (*handle)(unsigned int irq, - struct irq_desc *desc), + void (*handle)(struct irq_desc *desc), unsigned int irq, unsigned int cnt); extern unsigned int irq_canonicalize(unsigned int irq); diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index fe3fc9ae1b69..53c632c85b03 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -261,7 +261,7 @@ extern void via_irq_enable(int); extern void via_irq_disable(int); extern void via_nubus_irq_startup(int irq); extern void via_nubus_irq_shutdown(int irq); -extern void via1_irq(unsigned int irq, struct irq_desc *desc); +extern void via1_irq(struct irq_desc *desc); extern void via1_set_head(int); extern int via2_scsi_drq_pending(void); diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c index 3fe0e43d44f6..f6f7d42713ec 100644 --- a/arch/m68k/mac/baboon.c +++ b/arch/m68k/mac/baboon.c @@ -45,7 +45,7 @@ void __init baboon_init(void) * Baboon interrupt handler. This works a lot like a VIA. */ -static void baboon_irq(unsigned int irq, struct irq_desc *desc) +static void baboon_irq(struct irq_desc *desc) { int irq_bit, irq_num; unsigned char events; diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 191610d97689..55d6592783f5 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c @@ -63,7 +63,7 @@ void __init oss_nubus_init(void) * Handle miscellaneous OSS interrupts. 
*/ -static void oss_irq(unsigned int __irq, struct irq_desc *desc) +static void oss_irq(struct irq_desc *desc) { int events = oss->irq_pending & (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM); @@ -99,7 +99,7 @@ static void oss_irq(unsigned int __irq, struct irq_desc *desc) * Unlike the VIA/RBV this is on its own autovector interrupt level. */ -static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc) +static void oss_nubus_irq(struct irq_desc *desc) { int events, irq_bit, i; diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 3b9e302e7a37..cd38f29955c8 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c @@ -113,7 +113,7 @@ void __init psc_init(void) * PSC interrupt handler. It's a lot like the VIA interrupt handler. */ -static void psc_irq(unsigned int __irq, struct irq_desc *desc) +static void psc_irq(struct irq_desc *desc) { unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc); unsigned int irq = irq_desc_get_irq(desc); diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index e198dec868e4..ce56e04386e7 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -446,7 +446,7 @@ void via_nubus_irq_shutdown(int irq) * via6522.c :-), disable/pending masks added. */ -void via1_irq(unsigned int irq, struct irq_desc *desc) +void via1_irq(struct irq_desc *desc) { int irq_num; unsigned char irq_bit, events; @@ -467,7 +467,7 @@ void via1_irq(unsigned int irq, struct irq_desc *desc) } while (events >= irq_bit); } -static void via2_irq(unsigned int irq, struct irq_desc *desc) +static void via2_irq(struct irq_desc *desc) { int irq_num; unsigned char irq_bit, events; @@ -493,7 +493,7 @@ static void via2_irq(unsigned int irq, struct irq_desc *desc) * VIA2 dispatcher as a fast interrupt handler. */ -void via_nubus_irq(unsigned int irq, struct irq_desc *desc) +static void via_nubus_irq(struct irq_desc *desc) { int slot_irq; unsigned char slot_bit, events; diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c index a336094a7a6c..3074b64793e6 100644 --- a/arch/metag/kernel/irq.c +++ b/arch/metag/kernel/irq.c @@ -94,13 +94,11 @@ void do_IRQ(int irq, struct pt_regs *regs) "MOV D0.5,%0\n" "MOV D1Ar1,%1\n" "MOV D1RtP,%2\n" - "MOV D0Ar2,%3\n" "SWAP A0StP,D0.5\n" "SWAP PC,D1RtP\n" "MOV A0StP,D0.5\n" : - : "r" (isp), "r" (irq), "r" (desc->handle_irq), - "r" (desc) + : "r" (isp), "r" (desc), "r" (desc->handle_irq) : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", "D0.5" diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c index 4c496c50edf6..da9f9220048f 100644 --- a/arch/mips/alchemy/common/irq.c +++ b/arch/mips/alchemy/common/irq.c @@ -851,7 +851,7 @@ static struct syscore_ops alchemy_gpic_pmops = { /* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */ #define DISP(name, base, addr) \ -static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d) \ +static void au1000_##name##_dispatch(struct irq_desc *d) \ { \ unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \ if (likely(r)) \ @@ -865,7 +865,7 @@ DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT) DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT) DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT) -static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d) +static void alchemy_gpic_dispatch(struct irq_desc *d) { int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC); generic_handle_irq(ALCHEMY_GPIC_INT_BASE + 
i); diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c index 324ad72d7c36..faeddf119fd4 100644 --- a/arch/mips/alchemy/devboards/bcsr.c +++ b/arch/mips/alchemy/devboards/bcsr.c @@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(bcsr_mod); /* * DB1200/PB1200 CPLD IRQ muxer */ -static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d) +static void bcsr_csc_handler(struct irq_desc *d) { unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); struct irq_chip *chip = irq_desc_get_chip(d); diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c index ec9a371f1e62..8da996142d6a 100644 --- a/arch/mips/ath25/ar2315.c +++ b/arch/mips/ath25/ar2315.c @@ -69,7 +69,7 @@ static struct irqaction ar2315_ahb_err_interrupt = { .name = "ar2315-ahb-error", }; -static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc) +static void ar2315_misc_irq_handler(struct irq_desc *desc) { u32 pending = ar2315_rst_reg_read(AR2315_ISR) & ar2315_rst_reg_read(AR2315_IMR); diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c index e63e38fa4880..acd55a9cffe3 100644 --- a/arch/mips/ath25/ar5312.c +++ b/arch/mips/ath25/ar5312.c @@ -73,7 +73,7 @@ static struct irqaction ar5312_ahb_err_interrupt = { .name = "ar5312-ahb-error", }; -static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc) +static void ar5312_misc_irq_handler(struct irq_desc *desc) { u32 pending = ar5312_rst_reg_read(AR5312_ISR) & ar5312_rst_reg_read(AR5312_IMR); diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 807132b838b2..15ecb4831e12 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c @@ -26,7 +26,7 @@ #include "common.h" #include "machtypes.h" -static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ath79_misc_irq_handler(struct irq_desc *desc) { void __iomem *base = ath79_reset_base; u32 pending; @@ -119,7 +119,7 @@ static void __init ath79_misc_irq_init(void) irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler); } -static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void ar934x_ip2_irq_dispatch(struct irq_desc *desc) { u32 status; @@ -148,7 +148,7 @@ static void ar934x_ip2_irq_init(void) irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); } -static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void qca955x_ip2_irq_dispatch(struct irq_desc *desc) { u32 status; @@ -171,7 +171,7 @@ static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) } } -static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void qca955x_ip3_irq_dispatch(struct irq_desc *desc) { u32 status; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index f26c3c661cca..0352bc8d56b3 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2221,7 +2221,7 @@ static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) if (irqd_get_trigger_type(irq_data) & IRQ_TYPE_EDGE_BOTH) cvmx_write_csr(host_data->raw_reg, 1ull << i); - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); } } diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index 2a4c128277e4..be52c2125d71 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -57,8 +57,8 @@ #include struct irq_desc; -void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc 
*desc); -void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); +void nlm_smp_function_ipi_handler(struct irq_desc *desc); +void nlm_smp_resched_ipi_handler(struct irq_desc *desc); void nlm_smp_irq_init(int hwcpuid); void nlm_boot_secondary_cpus(void); int nlm_wakeup_secondary_cpus(void); diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index 6cd69fdaa1c5..a74e181058b0 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -291,7 +291,7 @@ static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int i writel(mask, reg); } -static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc) +static void jz_gpio_irq_demux_handler(struct irq_desc *desc) { uint32_t flag; unsigned int gpio_irq; diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index 0136b4f9c9cd..10d86d54880a 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -82,7 +82,7 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) } /* IRQ_IPI_SMP_FUNCTION Handler */ -void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc) +void nlm_smp_function_ipi_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); clear_c0_eimr(irq); @@ -92,7 +92,7 @@ void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc) } /* IRQ_IPI_SMP_RESCHEDULE handler */ -void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc) +void nlm_smp_resched_ipi_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); clear_c0_eimr(irq); diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index f8d0acb4f973..b4fa6413c4e5 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -318,7 +318,7 @@ static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc) return 0; } -static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc) +static void ar2315_pci_irq_handler(struct irq_desc *desc) { struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc); u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) & diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index ad35a5e6a56c..7db963deec73 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c @@ -226,7 +226,7 @@ static struct pci_ops ar71xx_pci_ops = { .write = ar71xx_pci_write_config, }; -static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ar71xx_pci_irq_handler(struct irq_desc *desc) { struct ar71xx_pci_controller *apc; void __iomem *base = ath79_reset_base; diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 907d11dd921b..2013dad700df 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -225,7 +225,7 @@ static struct pci_ops ar724x_pci_ops = { .write = ar724x_pci_write, }; -static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ar724x_pci_irq_handler(struct irq_desc *desc) { struct ar724x_pci_controller *apc; void __iomem *base; diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 53c8efaf1572..ed6732f9aa87 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -129,7 +129,7 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc, rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); } -static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void rt3883_pci_irq_handler(struct irq_desc *desc) 
{ struct rt3883_pci_controller *rpc; u32 pending; diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 8c624a8b9ea2..4cf77f358395 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -96,7 +96,7 @@ unsigned int get_c0_compare_int(void) return CP0_LEGACY_COMPARE_IRQ; } -static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ralink_intc_irq_handler(struct irq_desc *desc) { u32 pending = rt_intc_r32(INTC_REG_STATUS0); diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h index 25784cc959a0..1e155ca6d33c 100644 --- a/arch/powerpc/include/asm/qe_ic.h +++ b/arch/powerpc/include/asm/qe_ic.h @@ -59,14 +59,14 @@ enum qe_ic_grp_id { #ifdef CONFIG_QUICC_ENGINE void qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(unsigned int irq, struct irq_desc *desc), - void (*high_handler)(unsigned int irq, struct irq_desc *desc)); + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)); unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); #else static inline void qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(unsigned int irq, struct irq_desc *desc), - void (*high_handler)(unsigned int irq, struct irq_desc *desc)) + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)) {} static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) { return 0; } @@ -78,8 +78,7 @@ void qe_ic_set_highest_priority(unsigned int virq, int high); int qe_ic_set_priority(unsigned int virq, unsigned int priority); int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); -static inline void qe_ic_cascade_low_ipic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); @@ -88,8 +87,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq, generic_handle_irq(cascade_irq); } -static inline void qe_ic_cascade_high_ipic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); @@ -98,8 +96,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq, generic_handle_irq(cascade_irq); } -static inline void qe_ic_cascade_low_mpic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); @@ -111,8 +108,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq, chip->irq_eoi(&desc->irq_data); } -static inline void qe_ic_cascade_high_mpic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); @@ -124,8 +120,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq, chip->irq_eoi(&desc->irq_data); } -static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq; diff --git 
a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h index 5653d7cc3e24..ae59d5b672b0 100644 --- a/arch/powerpc/include/asm/tsi108_pci.h +++ b/arch/powerpc/include/asm/tsi108_pci.h @@ -39,7 +39,7 @@ extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary); extern void tsi108_pci_int_init(struct device_node *node); -extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc); +extern void tsi108_irq_cascade(struct irq_desc *desc); extern void tsi108_clear_pci_cfg_error(void); #endif /* _ASM_POWERPC_TSI108_PCI_H */ diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index cf8287487157..0035d146df73 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -104,8 +104,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp, return irq_linear_revmap(cpld_pic_host, cpld_irq); } -static void -cpld_pic_cascade(unsigned int __irq, struct irq_desc *desc) +static void cpld_pic_cascade(struct irq_desc *desc) { unsigned int irq; diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 32cae33c4266..8fb95480fd73 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -80,7 +80,7 @@ static struct irq_chip media5200_irq_chip = { .irq_mask_ack = media5200_irq_mask, }; -void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) +static void media5200_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); int sub_virq, val; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 63016621aff8..78ac19aefa4d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -191,7 +191,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = { .irq_set_type = mpc52xx_gpt_irq_set_type, }; -void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) +static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc) { struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc); int sub_virq; diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 74861a7fb807..60e89fc9c753 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -78,7 +78,7 @@ static struct irq_chip pq2ads_pci_ic = { .irq_disable = pq2ads_pci_mask_irq }; -static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) +static void pq2ads_pci_irq_demux(struct irq_desc *desc) { struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); u32 stat, mask, pend; diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index 7bfb9b184dd4..23791de7b688 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c @@ -49,7 +49,7 @@ int __init mpc85xx_common_publish_devices(void) return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL); } #ifdef CONFIG_CPM2 -static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) +static void cpm2_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 13a8d1a3d55c..5ac70de3e48a 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ 
-192,10 +192,8 @@ void mpc85xx_cds_fixup_bus(struct pci_bus *bus) } #ifdef CONFIG_PPC_I8259 -static void mpc85xx_8259_cascade_handler(unsigned int __irq, - struct irq_desc *desc) +static void mpc85xx_8259_cascade_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); unsigned int cascade_irq = i8259_irq(); if (cascade_irq != NO_IRQ) @@ -203,7 +201,7 @@ static void mpc85xx_8259_cascade_handler(unsigned int __irq, generic_handle_irq(cascade_irq); /* check for any interrupts from the shared IRQ line */ - handle_fasteoi_irq(irq, desc); + handle_fasteoi_irq(desc); } static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id) diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index ffdf02121a7c..f858306dba6a 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -46,7 +46,7 @@ #endif #ifdef CONFIG_PPC_I8259 -static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void mpc85xx_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index d78fda892e8b..b02d6a5bb035 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -91,7 +91,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) (irq_hw_number_t)i); } -void socrates_fpga_pic_cascade(unsigned int __irq, struct irq_desc *desc) +static void socrates_fpga_pic_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int irq = irq_desc_get_irq(desc); diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index d5b98c0f958a..845defa1fd19 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c @@ -17,7 +17,7 @@ #include #ifdef CONFIG_PPC_I8259 -static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void mpc86xx_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index d3037747031d..c289fc77b4ba 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -214,7 +214,7 @@ void mpc8xx_restart(char *cmd) panic("Restart failed\n"); } -static void cpm_cascade(unsigned int irq, struct irq_desc *desc) +static void cpm_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq = cpm_get_irq(); diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 306888acb737..e0e68a1c0d3c 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -93,7 +93,7 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) dcr_write(msic->dcr_host, dcr_n, val); } -static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) +static void axon_msi_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct axon_msic *msic = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 6558e7e8ee50..9f609fc8d331 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ 
b/arch/powerpc/platforms/cell/interrupt.c @@ -99,7 +99,7 @@ static void iic_ioexc_eoi(struct irq_data *d) { } -static void iic_ioexc_cascade(unsigned int __irq, struct irq_desc *desc) +static void iic_ioexc_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct cbe_iic_regs __iomem *node_iic = diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 1f72f4ab6353..9d27de62dc62 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -199,7 +199,7 @@ static const struct irq_domain_ops spider_host_ops = { .xlate = spider_host_xlate, }; -static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) +static void spider_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct spider_pic *pic = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 15ebc4e8a151..987d1b8d68e3 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -363,7 +363,7 @@ void __init chrp_setup_arch(void) if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); } -static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void chrp_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 9dd154d6f89a..9b7975706bfc 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -120,8 +120,7 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h) return irq_linear_revmap(h, irq); } -static void hlwd_pic_irq_cascade(unsigned int cascade_virq, - struct irq_desc *desc) +static void hlwd_pic_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_domain *irq_domain = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 1613303177e6..8f65aa3747f5 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -42,7 +42,7 @@ static phys_addr_t pci_membase; static u_char *restart; -static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void mvme5100_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 39a74fad3e04..9a83eb71b030 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -111,7 +111,7 @@ static void __init fwnmi_init(void) fwnmi_active = 1; } -static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void pseries_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c index 2bcb78bb3a15..d57b77573068 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.c +++ b/arch/powerpc/sysdev/ge/ge_pic.c @@ -91,7 +91,7 @@ static int gef_pic_cascade_irq; * should be masked out. 
*/ -void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) +static void gef_pic_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq; diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h index 908dbd9826b6..5bf7e4b81e36 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.h +++ b/arch/powerpc/sysdev/ge/ge_pic.h @@ -1,8 +1,6 @@ #ifndef __GEF_PIC_H__ #define __GEF_PIC_H__ - -void gef_pic_cascade(unsigned int, struct irq_desc *); unsigned int gef_pic_get_irq(void); void gef_pic_init(struct device_node *); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 97a8ae8f94dd..537e5db85a06 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -1181,7 +1181,7 @@ static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, } /* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ -static void mpic_cascade(unsigned int irq, struct irq_desc *desc) +static void mpic_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct mpic *mpic = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 47b352e4bc74..fbcc1f855a7f 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -311,8 +311,8 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) } void __init qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(unsigned int irq, struct irq_desc *desc), - void (*high_handler)(unsigned int irq, struct irq_desc *desc)) + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)) { struct qe_ic *qe_ic; struct resource res; diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 57b54476e747..379de955aae3 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -428,7 +428,7 @@ void __init tsi108_pci_int_init(struct device_node *node) init_pci_source(); } -void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) +void tsi108_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = get_pci_source(); diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index d77345338671..6893d8f236df 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -194,7 +194,7 @@ static const struct irq_domain_ops uic_host_ops = { .xlate = irq_domain_xlate_twocell, }; -void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) +static void uic_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_data *idata = irq_desc_get_irq_data(desc); diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 43b8b275bc5c..0f52d7955796 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c @@ -222,7 +222,7 @@ int xilinx_intc_get_irq(void) /* * Support code for cascading to 8259 interrupt controllers */ -static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) +static void xilinx_i8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c index 6f97a8f0d0d6..6129aef6db76 100644 --- a/arch/sh/boards/mach-se/7343/irq.c +++ b/arch/sh/boards/mach-se/7343/irq.c @@ -29,7 +29,7 @@ static void 
__iomem *se7343_irq_regs; struct irq_domain *se7343_irq_domain; -static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc) +static void se7343_irq_demux(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_chip *chip = irq_data_get_irq_chip(data); diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c index 60aebd14ccf8..24c74a88290c 100644 --- a/arch/sh/boards/mach-se/7722/irq.c +++ b/arch/sh/boards/mach-se/7722/irq.c @@ -28,7 +28,7 @@ static void __iomem *se7722_irq_regs; struct irq_domain *se7722_irq_domain; -static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) +static void se7722_irq_demux(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_chip *chip = irq_data_get_irq_chip(data); diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c index 9f2033898652..64e681e66c57 100644 --- a/arch/sh/boards/mach-se/7724/irq.c +++ b/arch/sh/boards/mach-se/7724/irq.c @@ -92,7 +92,7 @@ static struct irq_chip se7724_irq_chip __read_mostly = { .irq_unmask = enable_se7724_irq, }; -static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void se7724_irq_demux(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct fpga_irq set = get_fpga_irq(irq); diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c index 24555c364d5b..1fb2cbee25f2 100644 --- a/arch/sh/boards/mach-x3proto/gpio.c +++ b/arch/sh/boards/mach-x3proto/gpio.c @@ -60,7 +60,7 @@ static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) return virq; } -static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void x3proto_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_chip *chip = irq_data_get_irq_chip(data); diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c index e9735616bdc8..8180092502f7 100644 --- a/arch/sh/cchips/hd6446x/hd64461.c +++ b/arch/sh/cchips/hd6446x/hd64461.c @@ -56,7 +56,7 @@ static struct irq_chip hd64461_irq_chip = { .irq_unmask = hd64461_unmask_irq, }; -static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc) +static void hd64461_irq_demux(struct irq_desc *desc) { unsigned short intv = __raw_readw(HD64461_NIRR); unsigned int ext_irq = HD64461_IRQBASE; diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 0299f052a2ef..42efcf85f721 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -53,7 +53,7 @@ static inline unsigned int leon_eirq_get(int cpu) } /* Handle one or multiple IRQs from the extended interrupt controller */ -static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) +static void leon_handle_ext_irq(struct irq_desc *desc) { unsigned int eirq; struct irq_bucket *p; diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 3382f7b3eeef..1e77128a8f88 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -357,7 +357,7 @@ static struct irq_chip grpci1_irq = { }; /* Handle one or multiple IRQs from the PCI core */ -static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc) +static void grpci1_pci_flow_irq(struct irq_desc *desc) { struct grpci1_priv *priv = grpci1priv; int i, ack = 0; diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index 
814fb1729b12..f727c4de1316 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -498,7 +498,7 @@ static struct irq_chip grpci2_irq = { }; /* Handle one or multiple IRQs from the PCI core */ -static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc) +static void grpci2_pci_flow_irq(struct irq_desc *desc) { struct grpci2_priv *priv = grpci2priv; int i, ack = 0; diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index b3f73fd764a3..4c017d0d2de8 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -304,17 +304,16 @@ static struct irq_chip tilegx_legacy_irq_chip = { * to Linux which just calls handle_level_irq() after clearing the * MAC INTx Assert status bit associated with this interrupt. */ -static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc) +static void trio_handle_level_irq(struct irq_desc *desc) { struct pci_controller *controller = irq_desc_get_handler_data(desc); gxio_trio_context_t *trio_context = controller->trio; uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); - unsigned int irq = irq_desc_get_irq(desc); int mac = controller->mac; unsigned int reg_offset; uint64_t level_mask; - handle_level_irq(irq, desc); + handle_level_irq(desc); /* * Clear the INTx Level status, otherwise future interrupts are diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c index c53729d92e8d..eb1fd0030359 100644 --- a/arch/unicore32/kernel/irq.c +++ b/arch/unicore32/kernel/irq.c @@ -112,7 +112,7 @@ static struct irq_chip puv3_low_gpio_chip = { * irq_controller_lock held, and IRQs disabled. Decode the IRQ * and call the handler. */ -static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc) +static void puv3_gpio_handler(struct irq_desc *desc) { unsigned int mask, irq; diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index c80cf6699678..38da8f29a9c8 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -68,11 +68,10 @@ static inline void *current_stack(void) return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); } -static inline int -execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) +static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) { struct irq_stack *curstk, *irqstk; - u32 *isp, *prev_esp, arg1, arg2; + u32 *isp, *prev_esp, arg1; curstk = (struct irq_stack *) current_stack(); irqstk = __this_cpu_read(hardirq_stack); @@ -98,8 +97,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) asm volatile("xchgl %%ebx,%%esp \n" "call *%%edi \n" "movl %%ebx,%%esp \n" - : "=a" (arg1), "=d" (arg2), "=b" (isp) - : "0" (irq), "1" (desc), "2" (isp), + : "=a" (arg1), "=b" (isp) + : "0" (desc), "1" (isp), "D" (desc->handle_irq) : "memory", "cc", "ecx"); return 1; @@ -150,19 +149,15 @@ void do_softirq_own_stack(void) bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) { - unsigned int irq; - int overflow; - - overflow = check_stack_overflow(); + int overflow = check_stack_overflow(); if (IS_ERR_OR_NULL(desc)) return false; - irq = irq_desc_get_irq(desc); - if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) { if (unlikely(overflow)) print_stack_overflow(); - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); } return true; diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index ff16ccb918f2..c767cf2bc80a 100644 --- a/arch/x86/kernel/irq_64.c +++ 
b/arch/x86/kernel/irq_64.c @@ -75,6 +75,6 @@ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) if (unlikely(IS_ERR_OR_NULL(desc))) return false; - generic_handle_irq_desc(irq_desc_get_irq(desc), desc); + generic_handle_irq_desc(desc); return true; } diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 161804de124a..a0d09f6c6533 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1015,7 +1015,7 @@ static struct clock_event_device lguest_clockevent = { * This is the Guest timer interrupt handler (hardware interrupt 0). We just * call the clockevent infrastructure and it does whatever needs doing. */ -static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) +static void lguest_time_irq(struct irq_desc *desc) { unsigned long flags; diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 4768a829253a..2bf37e68ad0f 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -266,7 +266,7 @@ int ipu_irq_unmap(unsigned int source) } /* Chained IRQ handler for IPU function and error interrupt */ -static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void ipu_irq_handler(struct irq_desc *desc) { struct ipu *ipu = irq_desc_get_handler_data(desc); u32 status; diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 9b7e0b3db387..1b44941574fa 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -201,8 +201,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc, return 0; } -static void altera_gpio_irq_edge_handler(unsigned int irq, - struct irq_desc *desc) +static void altera_gpio_irq_edge_handler(struct irq_desc *desc) { struct altera_gpio_chip *altera_gc; struct irq_chip *chip; @@ -231,8 +230,7 @@ static void altera_gpio_irq_edge_handler(unsigned int irq, } -static void altera_gpio_irq_leveL_high_handler(unsigned int irq, - struct irq_desc *desc) +static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) { struct altera_gpio_chip *altera_gc; struct irq_chip *chip; diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 31b90ac15204..33a1f9779b86 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -433,7 +433,7 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type) return 0; } -static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void bcm_kona_gpio_irq_handler(struct irq_desc *desc) { void __iomem *reg_base; int bit, bank_id; diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 9ea86d2ac054..4c64627c6bb5 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -236,7 +236,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank) } /* Each UPG GIO block has one IRQ for all banks */ -static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void brcmstb_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 94b0ab709721..5e715388803d 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -326,8 +326,7 @@ static struct irq_chip gpio_irqchip = { .flags = IRQCHIP_SET_TYPE_MASKED, }; -static void -gpio_irq_handler(unsigned __irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { unsigned int irq = 
irq_desc_get_irq(desc); struct davinci_gpio_regs __iomem *g; diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index c5be4b9b8baf..fcd5b0acfc72 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -147,7 +147,7 @@ static u32 dwapb_do_irq(struct dwapb_gpio *gpio) return ret; } -static void dwapb_irq_handler(u32 irq, struct irq_desc *desc) +static void dwapb_irq_handler(struct irq_desc *desc) { struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 9d90366ea259..3e3947b35c83 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -78,7 +78,7 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable) EP93XX_GPIO_REG(int_debounce_register_offset[port])); } -static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) { unsigned char status; int i; @@ -100,8 +100,7 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) } } -static void ep93xx_gpio_f_irq_handler(unsigned int __irq, - struct irq_desc *desc) +static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc) { /* * map discontiguous hw irq range to continuous sw irq range: diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index aa28c65eb6b4..70097472b02c 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c @@ -301,7 +301,7 @@ static const struct pci_device_id intel_gpio_ids[] = { }; MODULE_DEVICE_TABLE(pci, intel_gpio_ids); -static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc) +static void intel_mid_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct intel_mid_gpio *priv = to_intel_gpio_priv(gc); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 153af464c7a7..127c37b380ae 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -234,7 +234,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip, return 0; } -static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc) +static void lp_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 8ef7a12de983..48ef368347ab 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -194,7 +194,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) return -ENXIO; } -static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) +static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 7bcfb87a5fa6..22523aae8abe 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c @@ -232,7 +232,7 @@ static struct irq_chip msic_irqchip = { .irq_bus_sync_unlock = msic_bus_sync_unlock, }; -static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void msic_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct msic_gpio *mg = irq_data_get_irq_handler_data(data); diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c 
index d2012cfb5571..4b4222145f10 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c @@ -305,7 +305,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) * which have been set as summary IRQ lines and which are triggered, * and to call their interrupt handlers. */ -static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) +static void msm_summary_irq_handler(struct irq_desc *desc) { unsigned long i; struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index b396bf3bf294..df418b81456d 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -458,7 +458,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type) return 0; } -static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void mvebu_gpio_irq_handler(struct irq_desc *desc) { struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index b752b560126e..5e3235a73bf9 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -272,7 +272,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) } /* MX1 and MX3 has one interrupt *per* gpio port */ -static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mx3_gpio_irq_handler(struct irq_desc *desc) { u32 irq_stat; struct mxc_gpio_port *port = irq_desc_get_handler_data(desc); @@ -288,7 +288,7 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) } /* MX2 has one interrupt *for all* gpio ports */ -static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mx2_gpio_irq_handler(struct irq_desc *desc) { u32 irq_msk, irq_stat; struct mxc_gpio_port *port; diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index b7f383eb18d9..b7763f078b23 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -154,7 +154,7 @@ static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio) } /* MXS has one interrupt *per* gpio port */ -static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mxs_gpio_irq_handler(struct irq_desc *desc) { u32 irq_stat; struct mxs_gpio_port *port = irq_desc_get_handler_data(desc); diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 2ae0d47e9554..9df014c0e3e4 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -709,7 +709,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) * line's interrupt handler has been run, we may miss some nested * interrupts. 
*/ -static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void omap_gpio_irq_handler(struct irq_desc *desc) { void __iomem *isr_reg = NULL; u32 isr; diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 04756130437f..229ef653e0f8 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -187,7 +187,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger) return 0; } -static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) +static void pl061_irq_handler(struct irq_desc *desc) { unsigned long pending; int offset; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 55a11de3d5b7..df2ce550f309 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -401,7 +401,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) +static void pxa_gpio_demux_handler(struct irq_desc *desc) { struct pxa_gpio_chip *c; int loop, gpio, gpio_base, n; diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 67bd2f5d89e8..990fa9023e22 100644 --- a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c @@ -172,8 +172,7 @@ static struct irq_domain *sa1100_gpio_irqdomain; * irq_controller_lock held, and IRQs disabled. Decode the IRQ * and call the handler. */ -static void -sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc) +static void sa1100_gpio_handler(struct irq_desc *desc) { unsigned int irq, mask; diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 9b14aafb576d..027e5f47dd28 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -266,7 +266,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio); } -static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void tegra_gpio_irq_handler(struct irq_desc *desc) { int port; int pin; diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index 5a492054589f..30653e6319e9 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -192,7 +192,7 @@ out: return ret; } -static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) +static void timbgpio_irq(struct irq_desc *desc) { struct timbgpio *tgpio = irq_desc_get_handler_data(desc); struct irq_data *data = irq_desc_get_irq_data(desc); diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c index bbac92ae4c32..87bb1b1eee8d 100644 --- a/drivers/gpio/gpio-tz1090.c +++ b/drivers/gpio/gpio-tz1090.c @@ -375,7 +375,7 @@ static int gpio_set_irq_wake(struct irq_data *data, unsigned int on) #define gpio_set_irq_wake NULL #endif -static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void tz1090_gpio_irq_handler(struct irq_desc *desc) { irq_hw_number_t hw; unsigned int irq_stat, irq_no; @@ -400,7 +400,7 @@ static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) == IRQ_TYPE_EDGE_BOTH) tz1090_gpio_irq_next_edge(bank, hw); - generic_handle_irq_desc(irq_no, child_desc); + generic_handle_irq_desc(child_desc); } } diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 7a6640b51911..069f9e4b7daa 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -120,7 +120,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, return pinctrl_gpio_direction_output(chip->base + gpio); } -static void 
vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void vf610_gpio_irq_handler(struct irq_desc *desc) { struct vf610_gpio_port *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c index 12ee1969298c..4b8a26910705 100644 --- a/drivers/gpio/gpio-zx.c +++ b/drivers/gpio/gpio-zx.c @@ -177,7 +177,7 @@ static int zx_irq_type(struct irq_data *d, unsigned trigger) return 0; } -static void zx_irq_handler(unsigned irq, struct irq_desc *desc) +static void zx_irq_handler(struct irq_desc *desc) { unsigned long pending; int offset; diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 27348e7cb705..1d1a5865ede9 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -514,7 +514,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio, * application for that pin. * Note: A bug is reported if no handler is set for the gpio pin. */ -static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) +static void zynq_gpio_irqhandler(struct irq_desc *desc) { u32 int_sts, int_enb; unsigned int bank_num; diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 243f99a80253..649d03be2f38 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -912,7 +912,7 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs) } } -static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ipu_irq_handler(struct irq_desc *desc) { struct ipu_soc *ipu = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -925,7 +925,7 @@ static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ipu_err_irq_handler(struct irq_desc *desc) { struct ipu_soc *ipu = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index e9c6f2a5b52d..94ddc96f2f7d 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c @@ -65,12 +65,10 @@ static void combiner_unmask_irq(struct irq_data *data) __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); } -static void combiner_handle_cascade_irq(unsigned int __irq, - struct irq_desc *desc) +static void combiner_handle_cascade_irq(struct irq_desc *desc) { struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int irq = irq_desc_get_irq(desc); unsigned int cascade_irq, combiner_irq; unsigned long status; @@ -88,7 +86,7 @@ static void combiner_handle_cascade_irq(unsigned int __irq, cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); if (unlikely(!cascade_irq)) - handle_bad_irq(irq, desc); + handle_bad_irq(desc); else generic_handle_irq(cascade_irq); diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 39b72da0c143..693b9fb879bd 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -447,8 +447,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained) static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} #endif -static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq, - struct irq_desc *desc) +static void 
armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long irqmap, irqn, irqsrc, cpuid; diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index ed4ca9deca70..56c9cf488e6c 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -96,7 +96,7 @@ struct armctrl_ic { static struct armctrl_ic intc __read_mostly; static void __exception_irq_entry bcm2835_handle_irq( struct pt_regs *regs); -static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc); +static void bcm2836_chained_handle_irq(struct irq_desc *desc); static void armctrl_mask_irq(struct irq_data *d) { @@ -245,7 +245,7 @@ static void __exception_irq_entry bcm2835_handle_irq( handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); } -static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc) +static void bcm2836_chained_handle_irq(struct irq_desc *desc) { u32 hwirq; diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 409bdc6366c2..0fea985ef1dc 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -115,7 +115,7 @@ static inline void l1_writel(u32 val, void __iomem *reg) writel(val, reg); } -static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc) +static void bcm7038_l1_irq_handle(struct irq_desc *desc) { struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc); struct bcm7038_l1_cpu *cpu; diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index d3f976913a6f..61b18ab33ad9 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -56,7 +56,7 @@ struct bcm7120_l2_intc_data { const __be32 *map_mask_prop; }; -static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) +static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc) { struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc); struct bcm7120_l2_intc_data *b = data->b; diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index aedda06191eb..65cd341f331a 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -49,13 +49,12 @@ struct brcmstb_l2_intc_data { u32 saved_mask; /* for suspend/resume */ }; -static void brcmstb_l2_intc_irq_handle(unsigned int __irq, - struct irq_desc *desc) +static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) { struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int irq = irq_desc_get_irq(desc); + unsigned int irq; u32 status; chained_irq_enter(chip, desc); @@ -65,7 +64,7 @@ static void brcmstb_l2_intc_irq_handle(unsigned int __irq, if (status == 0) { raw_spin_lock(&desc->lock); - handle_bad_irq(irq, desc); + handle_bad_irq(desc); raw_spin_unlock(&desc->lock); goto out; } diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c index efd95d9955e7..052f266364c0 100644 --- a/drivers/irqchip/irq-dw-apb-ictl.c +++ b/drivers/irqchip/irq-dw-apb-ictl.c @@ -26,7 +26,7 @@ #define APB_INT_FINALSTATUS_H 0x34 #define APB_INT_BASE_OFFSET 0x04 -static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc) +static void dw_apb_ictl_handler(struct irq_desc *desc) { struct irq_domain *d = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git 
a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 9bccdd295769..00bb7c05a55e 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -341,7 +341,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) } while (1); } -static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) +static void gic_handle_cascade_irq(struct irq_desc *desc) { struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -360,7 +360,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); if (unlikely(gic_irq < 32 || gic_irq > 1020)) - handle_bad_irq(cascade_irq, desc); + handle_bad_irq(desc); else generic_handle_irq(cascade_irq); diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 4836102ba312..e484fd255321 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -352,7 +352,7 @@ void __init init_i8259_irqs(void) __init_i8259_irqs(NULL); } -static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc) +static void i8259_irq_dispatch(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); int hwirq = i8259_irq(); diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c index 841604b81004..c02d29c9dc05 100644 --- a/drivers/irqchip/irq-imgpdc.c +++ b/drivers/irqchip/irq-imgpdc.c @@ -218,7 +218,7 @@ static int pdc_irq_set_wake(struct irq_data *data, unsigned int on) return 0; } -static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc) +static void pdc_intc_perip_isr(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct pdc_intc_priv *priv; @@ -240,7 +240,7 @@ found: generic_handle_irq(irq_no); } -static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc) +static void pdc_intc_syswake_isr(struct irq_desc *desc) { struct pdc_intc_priv *priv; unsigned int syswake, irq_no; diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index c1517267b5db..7b784b692f3c 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -83,7 +83,7 @@ static void keystone_irq_ack(struct irq_data *d) /* nothing to do here */ } -static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc) +static void keystone_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c index 5f4c52928d16..8c38b3d92e1c 100644 --- a/drivers/irqchip/irq-metag-ext.c +++ b/drivers/irqchip/irq-metag-ext.c @@ -446,7 +446,7 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type) * Whilst using TR2 to detect external interrupts is a software convention it is * (hopefully) unlikely to change. */ -static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc) +static void meta_intc_irq_demux(struct irq_desc *desc) { struct meta_intc_priv *priv = &meta_intc_priv; irq_hw_number_t hw; diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c index 3d23ce3edb5c..a5f053bd2f44 100644 --- a/drivers/irqchip/irq-metag.c +++ b/drivers/irqchip/irq-metag.c @@ -220,7 +220,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data, * occurred. It is this function's job to demux this irq and * figure out exactly which trigger needs servicing. 
*/ -static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc) +static void metag_internal_irq_demux(struct irq_desc *desc) { struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); irq_hw_number_t hw; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 1764bcf8ee6b..af2f16bb8a94 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -546,7 +546,7 @@ static void __gic_irq_dispatch(void) gic_handle_shared_int(false); } -static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void gic_irq_dispatch(struct irq_desc *desc) { gic_handle_local_int(true); gic_handle_shared_int(true); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 781ed6e71dbb..ea6e3a96f3b3 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -129,7 +129,7 @@ struct irq_chip icu_irq_chip = { .irq_unmask = icu_unmask_irq, }; -static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void icu_mux_irq_demux(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_domain *domain; diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c index 5ea999a724b5..be4c5a8c9659 100644 --- a/drivers/irqchip/irq-orion.c +++ b/drivers/irqchip/irq-orion.c @@ -106,7 +106,7 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init); #define ORION_BRIDGE_IRQ_CAUSE 0x00 #define ORION_BRIDGE_IRQ_MASK 0x04 -static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) +static void orion_bridge_irq_handler(struct irq_desc *desc) { struct irq_domain *d = irq_desc_get_handler_data(desc); diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c index 506d9f20ca51..e47572a9bbb2 100644 --- a/drivers/irqchip/irq-s3c24xx.c +++ b/drivers/irqchip/irq-s3c24xx.c @@ -298,7 +298,7 @@ static struct irq_chip s3c_irq_eint0t4 = { .irq_set_type = s3c_irqext0_type, }; -static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void s3c_irq_demux(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 772a82cacbf7..c143dd58410c 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -58,7 +58,7 @@ static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) return irq_reg_readl(gc, off); } -static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) +static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index 331829661366..848d782a2a3b 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c @@ -97,7 +97,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) return IRQ_SET_MASK_OK; } -static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc) +static void tb10x_irq_cascade(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); unsigned int irq = irq_desc_get_irq(desc); diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 16123f688768..1b1c63e8d249 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -65,19 +65,19 @@ 
static void fpga_irq_unmask(struct irq_data *d) writel(mask, f->base + IRQ_ENABLE_SET); } -static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc) +static void fpga_irq_handle(struct irq_desc *desc) { struct fpga_irq_data *f = irq_desc_get_handler_data(desc); - unsigned int irq = irq_desc_get_irq(desc); u32 status = readl(f->base + IRQ_STATUS); if (status == 0) { - do_bad_IRQ(irq, desc); + do_bad_IRQ(desc); return; } do { - irq = ffs(status) - 1; + unsigned int irq = ffs(status) - 1; + status &= ~(1 << irq); generic_handle_irq(irq_find_mapping(f->domain, irq)); } while (status); diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 03846dff4212..cb85504c2ff1 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -225,7 +225,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) return handled; } -static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) +static void vic_handle_irq_cascaded(struct irq_desc *desc) { u32 stat, hwirq; struct irq_chip *host_chip = irq_desc_get_chip(desc); diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 4cbd9c5dc1e6..c838c9286ea5 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -182,7 +182,7 @@ static struct spear_shirq *spear320_shirq_blocks[] = { &spear320_shirq_intrcomm_ras, }; -static void shirq_handler(unsigned __irq, struct irq_desc *desc) +static void shirq_handler(struct irq_desc *desc) { struct spear_shirq *shirq = irq_desc_get_handler_data(desc); u32 pend; diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 4b54128bc78e..a726f01e3b02 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -138,7 +138,7 @@ static void asic3_irq_flip_edge(struct asic3 *asic, spin_unlock_irqrestore(&asic->lock, flags); } -static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) +static void asic3_irq_demux(struct irq_desc *desc) { struct asic3 *asic = irq_desc_get_handler_data(desc); struct irq_data *data = irq_desc_get_irq_data(desc); diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index a76eb6ef47a0..b279205659a4 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -205,7 +205,7 @@ static void pcap_isr_work(struct work_struct *work) } while (gpio_get_value(pdata->gpio)); } -static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pcap_irq_handler(struct irq_desc *desc) { struct pcap_chip *pcap = irq_desc_get_handler_data(desc); diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c index 9131cdcdc64a..6ccaf90d98fd 100644 --- a/drivers/mfd/htc-egpio.c +++ b/drivers/mfd/htc-egpio.c @@ -98,7 +98,7 @@ static struct irq_chip egpio_muxed_chip = { .irq_unmask = egpio_unmask, }; -static void egpio_handler(unsigned int irq, struct irq_desc *desc) +static void egpio_handler(struct irq_desc *desc) { struct egpio_info *ei = irq_desc_get_handler_data(desc); int irqpin; diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 5bb49f08955d..798e44306382 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c @@ -65,7 +65,7 @@ struct jz4740_adc { spinlock_t lock; }; -static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) +static void jz4740_adc_irq_demux(struct irq_desc *desc) { struct irq_chip_generic *gc = irq_desc_get_handler_data(desc); uint8_t status; diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c index 59502d02cd15..1b7ec0870c2a 100644 --- a/drivers/mfd/pm8921-core.c +++ 
b/drivers/mfd/pm8921-core.c @@ -156,7 +156,7 @@ static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master) return ret; } -static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pm8xxx_irq_handler(struct irq_desc *desc) { struct pm_irq_chip *chip = irq_desc_get_handler_data(desc); struct irq_chip *irq_chip = irq_desc_get_chip(desc); diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 16fc1adc4fa3..94bd89cb1f06 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -185,7 +185,7 @@ static struct mfd_cell t7l66xb_cells[] = { /*--------------------------------------------------------------------------*/ /* Handle the T7L66XB interrupt mux */ -static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) +static void t7l66xb_irq(struct irq_desc *desc) { struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc); unsigned int isr; diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 775b9aca871a..8c84a513016b 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c @@ -522,8 +522,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base) /*--------------------------------------------------------------------------*/ -static void -tc6393xb_irq(unsigned int irq, struct irq_desc *desc) +static void tc6393xb_irq(struct irq_desc *desc) { struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc); unsigned int isr; diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 9a2302129711..f691d7ecad52 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c @@ -282,7 +282,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb) * SIBCLK to talk to the chip. We leave the clock running until * we have finished processing all interrupts from the chip. */ -static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc) +static void ucb1x00_irq(struct irq_desc *desc) { struct ucb1x00 *ucb = irq_desc_get_handler_data(desc); unsigned int isr, i; diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 81253e70b1c5..0aa81bd3de12 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -110,7 +110,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) return -EINVAL; } -static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void ks_pcie_msi_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); @@ -138,8 +138,7 @@ static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) * Traverse through pending legacy interrupts and invoke handler for each. Also * takes care of interrupt controller level mask/ack operation. 
*/ -static void ks_pcie_legacy_irq_handler(unsigned int __irq, - struct irq_desc *desc) +static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index 996327cfa1e1..e491681daf22 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -295,7 +295,7 @@ static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) return 0; } -static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) +static void xgene_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct xgene_msi_group *msi_groups; diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c index 7d9482bf8252..1ca783098e47 100644 --- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c @@ -143,7 +143,7 @@ static inline bool cygnus_get_bit(struct cygnus_gpio *chip, unsigned int reg, return !!(readl(chip->base + offset) & BIT(shift)); } -static void cygnus_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void cygnus_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct cygnus_gpio *chip = to_cygnus_gpio(gc); diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index dac4865f3203..f79ea430f651 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -425,7 +425,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) } } -static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void byt_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 2d5d3ddc36e5..270c127e03ea 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1414,7 +1414,7 @@ static struct irq_chip chv_gpio_irqchip = { .flags = IRQCHIP_SKIP_SET_WAKE, }; -static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void chv_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index bb377c110541..54848b8decef 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -836,7 +836,7 @@ static void intel_gpio_community_irq_handler(struct gpio_chip *gc, } } -static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void intel_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 7726c6caaf83..1b22f96ba839 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1190,7 +1190,7 @@ mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index) } } -static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc) +static void 
mtk_eint_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 352ede13a9e9..96cf03908e93 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -860,7 +860,7 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status) chained_irq_exit(host_chip, desc); } -static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void nmk_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); @@ -873,7 +873,7 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) __nmk_gpio_irq_handler(desc, status); } -static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc) +static void nmk_gpio_latent_irq_handler(struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c index a5976ebc4482..f6be68518c87 100644 --- a/drivers/pinctrl/pinctrl-adi2.c +++ b/drivers/pinctrl/pinctrl-adi2.c @@ -530,8 +530,7 @@ static inline void preflow_handler(struct irq_desc *desc) static inline void preflow_handler(struct irq_desc *desc) { } #endif -static void adi_gpio_handle_pint_irq(unsigned int inta_irq, - struct irq_desc *desc) +static void adi_gpio_handle_pint_irq(struct irq_desc *desc) { u32 request; u32 level_mask, hwirq; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 5e86bb8ca80e..3318f1d6193c 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -492,15 +492,15 @@ static struct irq_chip amd_gpio_irqchip = { .irq_set_type = amd_gpio_irq_set_type, }; -static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void amd_gpio_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); u32 i; u32 off; u32 reg; u32 pin_reg; u64 reg64; int handled = 0; + unsigned int irq; unsigned long flags; struct irq_chip *chip = irq_desc_get_chip(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -541,7 +541,7 @@ static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) } if (handled == 0) - handle_bad_irq(irq, desc); + handle_bad_irq(desc); spin_lock_irqsave(&gpio_dev->lock, flags); reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index bae0012ee356..b0fde0f385e6 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1585,7 +1585,7 @@ static struct irq_chip gpio_irqchip = { .irq_set_wake = gpio_irq_set_wake, }; -static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c index 3731cc67a88b..9c9b88934bcc 100644 --- a/drivers/pinctrl/pinctrl-coh901.c +++ b/drivers/pinctrl/pinctrl-coh901.c @@ -519,7 +519,7 @@ static struct irq_chip u300_gpio_irqchip = { .irq_set_type = u300_gpio_irq_type, }; -static void 
u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc) +static void u300_gpio_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_chip *parent_chip = irq_desc_get_chip(desc); diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index f22d585d9300..952b1c623887 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1310,13 +1310,11 @@ static int pistachio_gpio_irq_set_type(struct irq_data *data, unsigned int type) return 0; } -static void pistachio_gpio_irq_handler(unsigned int __irq, - struct irq_desc *desc) +static void pistachio_gpio_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct pistachio_gpio_bank *bank = gc_to_bank(gc); - struct irq_chip *chip = irq_get_chip(irq); + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long pending; unsigned int pin; diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index c5246c05f70c..88bb707e107a 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -1475,7 +1475,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = { * Interrupt handling */ -static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void rockchip_irq_demux(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index bf548c2a7a9d..ef04b962c3d5 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1679,7 +1679,7 @@ static irqreturn_t pcs_irq_handler(int irq, void *d) * Use this if you have a separate interrupt for each * pinctrl-single instance. 
*/ -static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc) +static void pcs_irq_chain_handler(struct irq_desc *desc) { struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); struct irq_chip *chip; diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index f8338d2e6b6b..389526e704fb 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1460,7 +1460,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank) } } -static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void st_gpio_irq_handler(struct irq_desc *desc) { /* interrupt dedicated per bank */ struct irq_chip *chip = irq_desc_get_chip(desc); @@ -1472,7 +1472,7 @@ static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc) +static void st_gpio_irqmux_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct st_pinctrl *info = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 492cdd51dc5c..a0c7407c1cac 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -765,9 +765,8 @@ static struct irq_chip msm_gpio_irq_chip = { .irq_set_wake = msm_gpio_irq_set_wake, }; -static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void msm_gpio_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); const struct msm_pingroup *g; struct msm_pinctrl *pctrl = to_msm_pinctrl(gc); @@ -795,7 +794,7 @@ static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) /* No interrupts were flagged */ if (handled == 0) - handle_bad_irq(irq, desc); + handle_bad_irq(desc); chained_irq_exit(chip, desc); } diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 5f45caaef46d..71ccf6a90b22 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -419,7 +419,7 @@ static const struct of_device_id exynos_wkup_irq_ids[] = { }; /* interrupt handler for wakeup interrupts 0..15 */ -static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc) +static void exynos_irq_eint0_15(struct irq_desc *desc) { struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc); struct samsung_pin_bank *bank = eintd->bank; @@ -451,7 +451,7 @@ static inline void exynos_irq_demux_eint(unsigned long pend, } /* interrupt handler for wakeup interrupt 16 */ -static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) +static void exynos_irq_demux_eint16_31(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 019844d479bb..b3cd9ae3f4a2 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -240,7 +240,7 @@ static struct irq_chip s3c2410_eint0_3_chip = { .irq_set_type = s3c24xx_eint_type, }; -static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c2410_demux_eint0_3(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); @@ -295,7 
+295,7 @@ static struct irq_chip s3c2412_eint0_3_chip = { .irq_set_type = s3c24xx_eint_type, }; -static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c2412_demux_eint0_3(struct irq_desc *desc) { struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); struct irq_data *data = irq_desc_get_irq_data(desc); @@ -388,12 +388,12 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc, chained_irq_exit(chip, desc); } -static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc) +static void s3c24xx_demux_eint4_7(struct irq_desc *desc) { s3c24xx_demux_eint(desc, 0, 0xf0); } -static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc) +static void s3c24xx_demux_eint8_23(struct irq_desc *desc) { s3c24xx_demux_eint(desc, 8, 0xffff00); } diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index f5ea40a69711..43407ab248f5 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -407,7 +407,7 @@ static const struct irq_domain_ops s3c64xx_gpio_irqd_ops = { .xlate = irq_domain_xlate_twocell, }; -static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_eint_gpio_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc); @@ -631,22 +631,22 @@ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range) chained_irq_exit(chip, desc); } -static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint0_3(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xf); } -static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint4_11(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xff0); } -static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint12_19(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xff000); } -static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint20_27(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xff00000); } diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c index 9df0c5f25824..0d24d9e4b70c 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c @@ -4489,7 +4489,7 @@ static struct irq_chip atlas7_gpio_irq_chip = { .irq_set_type = atlas7_gpio_irq_type, }; -static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) +static void atlas7_gpio_handle_irq(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc); @@ -4512,7 +4512,7 @@ static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) if (!status) { pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n", __func__, gc->label, status); - handle_bad_irq(irq, desc); + handle_bad_irq(desc); return; } diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index f8bd9fb52033..2a8d69725de8 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c @@ -545,7 +545,7 @@ static struct irq_chip sirfsoc_irq_chip = { .irq_set_type = sirfsoc_gpio_irq_type, }; -static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) +static void 
sirfsoc_gpio_handle_irq(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -570,7 +570,7 @@ static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) printk(KERN_WARNING "%s: gpio id %d status %#x no interrupt is flagged\n", __func__, bank->id, status); - handle_bad_irq(irq, desc); + handle_bad_irq(desc); return; } diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c index ae8f29fb5536..1f0af250dbb5 100644 --- a/drivers/pinctrl/spear/pinctrl-plgpio.c +++ b/drivers/pinctrl/spear/pinctrl-plgpio.c @@ -356,7 +356,7 @@ static struct irq_chip plgpio_irqchip = { .irq_set_type = plgpio_irq_set_type, }; -static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void plgpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct plgpio *plgpio = container_of(gc, struct plgpio, chip); diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 31af97d89272..38e0c7bdd2ac 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -740,7 +740,7 @@ static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = { .xlate = sunxi_pinctrl_irq_of_xlate, }; -static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc) +static void sunxi_pinctrl_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 043419dcee92..8e72bcbd3d6d 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -65,7 +65,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level) raw_spin_unlock_irqrestore(&intc_big_lock, flags); } -static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) +static void intc_redirect_irq(struct irq_desc *desc) { generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc)); } diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index bafc51c6f0ba..e7899624aa0b 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c @@ -109,7 +109,7 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq) return 0; } -static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) +static void intc_virq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_data *data = irq_desc_get_irq_data(desc); @@ -127,7 +127,7 @@ static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) handle = (unsigned long)irq_desc_get_handler_data(vdesc); addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) - generic_handle_irq_desc(entry->irq, vdesc); + generic_handle_irq_desc(vdesc); } } diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c index 6bc13f99489a..052aecf29893 100644 --- a/drivers/soc/dove/pmu.c +++ b/drivers/soc/dove/pmu.c @@ -222,7 +222,7 @@ static void __pmu_domain_register(struct pmu_domain *domain, } /* PMU IRQ controller */ -static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pmu_irq_handler(struct irq_desc *desc) { struct pmu_data *pmu = irq_desc_get_handler_data(desc); struct irq_chip_generic *gc = pmu->irq_gc; @@ -232,7 +232,7 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) u32 done = ~0; if (stat == 0) { - handle_bad_irq(irq_desc_get_irq(desc), desc); + handle_bad_irq(desc); return; 
} diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index bdfb3c84c3cb..4a3cf9ba152f 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -451,7 +451,7 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid) } } -static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc) +static void pmic_arb_chained_irq(struct irq_desc *desc) { struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/include/linux/irq.h b/include/linux/irq.h index 4913c32db942..11bf09288ddb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -475,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq) * Built-in IRQ handlers for various IRQ types, * callable via desc->handle_irq() */ -extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_level_irq(struct irq_desc *desc); +extern void handle_fasteoi_irq(struct irq_desc *desc); +extern void handle_edge_irq(struct irq_desc *desc); +extern void handle_edge_eoi_irq(struct irq_desc *desc); +extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_percpu_irq(struct irq_desc *desc); +extern void handle_percpu_devid_irq(struct irq_desc *desc); +extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index fbb4d5afc32b..a587a33363c7 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -135,9 +135,9 @@ static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) * Architectures call this to let the generic IRQ layer * handle an interrupt. */ -static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) +static inline void generic_handle_irq_desc(struct irq_desc *desc) { - desc->handle_irq(irq, desc); + desc->handle_irq(desc); } int generic_handle_irq(unsigned int irq); diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 62d543004197..661bed0ed1f3 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h @@ -8,7 +8,7 @@ struct irq_desc; struct irq_data; -typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); +typedef void (*irq_flow_handler_t)(struct irq_desc *desc); typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 8c55d545558f..e28169dd1c36 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -372,7 +372,6 @@ static bool irq_may_run(struct irq_desc *desc) /** * handle_simple_irq - Simple and software-decoded IRQs. 
- * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Simple interrupts are either sent from a demultiplexing interrupt @@ -382,8 +381,7 @@ static bool irq_may_run(struct irq_desc *desc) * Note: The caller is expected to handle the ack, clear, mask and * unmask issues if necessary. */ -void -handle_simple_irq(unsigned int irq, struct irq_desc *desc) +void handle_simple_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); @@ -425,7 +423,6 @@ static void cond_unmask_irq(struct irq_desc *desc) /** * handle_level_irq - Level type irq handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Level type interrupts are active as long as the hardware line has @@ -433,8 +430,7 @@ static void cond_unmask_irq(struct irq_desc *desc) * it after the associated handler has acknowledged the device, so the * interrupt line is back to inactive. */ -void -handle_level_irq(unsigned int irq, struct irq_desc *desc) +void handle_level_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); mask_ack_irq(desc); @@ -496,7 +492,6 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) /** * handle_fasteoi_irq - irq handler for transparent controllers - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Only a single callback will be issued to the chip: an ->eoi() @@ -504,8 +499,7 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) * for modern forms of interrupt handlers, which handle the flow * details in hardware, transparently. */ -void -handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) +void handle_fasteoi_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; @@ -546,7 +540,6 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq); /** * handle_edge_irq - edge type IRQ handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Interrupt occures on the falling and/or rising edge of a hardware @@ -560,8 +553,7 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq); * the handler was running. If all pending interrupts are handled, the * loop is left. */ -void -handle_edge_irq(unsigned int irq, struct irq_desc *desc) +void handle_edge_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); @@ -618,13 +610,12 @@ EXPORT_SYMBOL(handle_edge_irq); #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER /** * handle_edge_eoi_irq - edge eoi type IRQ handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Similar as the above handle_edge_irq, but using eoi and w/o the * mask/unmask logic. */ -void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) +void handle_edge_eoi_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -665,13 +656,11 @@ out_eoi: /** * handle_percpu_irq - Per CPU local irq handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements */ -void -handle_percpu_irq(unsigned int irq, struct irq_desc *desc) +void handle_percpu_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -688,7 +677,6 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) /** * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements. 
Same as @@ -698,11 +686,12 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) * contain the real device id for the cpu on which this handler is * called */ -void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) +void handle_percpu_devid_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; void *dev_id = raw_cpu_ptr(action->percpu_dev_id); + unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; kstat_incr_irqs_this_cpu(desc); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index b6eeea8a80c5..de41a68fc038 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -27,8 +27,10 @@ * * Handles spurious and unhandled IRQ's. It also prints a debugmessage. */ -void handle_bad_irq(unsigned int irq, struct irq_desc *desc) +void handle_bad_irq(struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); + print_irq_desc(irq, desc); kstat_incr_irqs_this_cpu(desc); ack_bad_irq(irq); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 596669436f7a..239e2ae2c947 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -347,7 +347,7 @@ int generic_handle_irq(unsigned int irq) if (!desc) return -EINVAL; - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); return 0; } EXPORT_SYMBOL_GPL(generic_handle_irq); diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index dd95f44f99b2..b86886beee4f 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -38,7 +38,7 @@ static void resend_irqs(unsigned long arg) clear_bit(irq, irqs_resend); desc = irq_to_desc(irq); local_irq_disable(); - desc->handle_irq(irq, desc); + desc->handle_irq(desc); local_irq_enable(); } } -- cgit v1.2.3 From eb811129ed9ea50ef2dfe8a83ddde6a16d1eb8d4 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Sat, 29 Aug 2015 18:01:24 -0500 Subject: ARM: Remove unused set_irq_flags Now that all users of set_irq_flags and custom flags are converted to genirq functions, the ARM specific set_irq_flags can be removed.
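[Editor's note: the following sketch is an editorial illustration, not part of the original patch.] With set_irq_flags() and the ARM-private IRQF_* flags gone, drivers mark their descriptors with the generic irq_modify_status() / irq_{set,clear}_status_flags() helpers instead. Derived from the implementation removed below, the common conversions work out to roughly:

	#include <linux/irq.h>

	/* old: set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); */
	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0);

	/* old: set_irq_flags(irq, IRQF_VALID);  (valid, but not probeable) */
	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);

	/* old: set_irq_flags(irq, 0);  (mark the descriptor unusable again) */
	irq_modify_status(irq, IRQ_NOAUTOEN, IRQ_NOREQUEST | IRQ_NOPROBE);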
Signed-off-by: Rob Herring Tested-by: Kevin Hilman Cc: linux-arm-kernel@lists.infradead.org Cc: Russell King Signed-off-by: Thomas Gleixner --- arch/arm/include/asm/hw_irq.h | 6 ------ arch/arm/kernel/irq.c | 20 -------------------- 2 files changed, 26 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index af79da40af2a..9beb92914f4d 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h @@ -11,12 +11,6 @@ static inline void ack_bad_irq(int irq) pr_crit("unexpected IRQ trap at vector %02x\n", irq); } -void set_irq_flags(unsigned int irq, unsigned int flags); - -#define IRQF_VALID (1 << 0) -#define IRQF_PROBE (1 << 1) -#define IRQF_NOAUTOEN (1 << 2) - #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) #endif diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 5ff4826cb154..2766183e69df 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -79,26 +79,6 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs) handle_IRQ(irq, regs); } -void set_irq_flags(unsigned int irq, unsigned int iflags) -{ - unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; - - if (irq >= nr_irqs) { - pr_err("Trying to set irq flags for IRQ%d\n", irq); - return; - } - - if (iflags & IRQF_VALID) - clr |= IRQ_NOREQUEST; - if (iflags & IRQF_PROBE) - clr |= IRQ_NOPROBE; - if (!(iflags & IRQF_NOAUTOEN)) - clr |= IRQ_NOAUTOEN; - /* Order is clear bits in "clr" then set bits in "set" */ - irq_modify_status(irq, clr, set & ~clr); -} -EXPORT_SYMBOL_GPL(set_irq_flags); - void __init init_IRQ(void) { int ret; -- cgit v1.2.3 From ef748917b529847277f07c98c55e1c0ce416449f Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 2 Sep 2015 14:31:21 +0800 Subject: arm/arm64: KVM: Remove 'config KVM_ARM_MAX_VCPUS' This patch removes the KVM_ARM_MAX_VCPUS config option and, like other architectures, simply picks the maximum value allowed by the hardware, for the following reasons: 1) from a distribution point of view, the option has to be set to the maximum allowed value anyway, because a distribution kernel needs to cover all kinds of virtualization applications and support most SoCs; 2) using a bigger value doesn't introduce extra memory consumption, and the Kconfig help text isn't accurate anyway, because the kvm_vcpu structure isn't allocated until a VCPU-creation request is sent from QEMU; 3) the main effect is that the vcpus[] field in 'struct kvm' becomes a bit bigger (sizeof(void *) per vcpu) and needs more cache lines to hold the structure, but 'struct kvm' is a generic struct and already works this way on other architectures. Also, the world-switch frequency is usually low; for example, it is ~2000 when running a kernel-build load in a VM on an APM X-Gene KVM host, so the effect is very small and no difference could be observed in my tests at all.
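[Editor's note: a rough sanity check of point 3, using the limits introduced below and assuming 8-byte pointers on arm64 and 4-byte pointers on 32-bit ARM.] On arm64 the new ceiling is KVM_MAX_VCPUS = VGIC_V3_MAX_CPUS = 255 versus the old Kconfig default of 4, so vcpus[] grows by (255 - 4) * sizeof(void *) = 251 * 8 bytes, i.e. roughly 2 KB per VM; on 32-bit ARM the ceiling is VGIC_V2_MAX_CPUS = 8, so the growth is only (8 - 4) * 4 = 16 bytes.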
Cc: Dann Frazier Signed-off-by: Ming Lei Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 8 ++------ arch/arm/kvm/Kconfig | 11 ----------- arch/arm64/include/asm/kvm_host.h | 8 ++------ arch/arm64/kvm/Kconfig | 11 ----------- include/kvm/arm_vgic.h | 6 +----- virt/kvm/arm/vgic-v3.c | 2 +- 6 files changed, 6 insertions(+), 40 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index dcba0fa5176e..c8c226a19dbe 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -29,12 +29,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#if defined(CONFIG_KVM_ARM_MAX_VCPUS) -#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS -#else -#define KVM_MAX_VCPUS 0 -#endif - #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -44,6 +38,8 @@ #include +#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS + u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index bfb915d05665..210eccadb69a 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig @@ -45,15 +45,4 @@ config KVM_ARM_HOST ---help--- Provides host support for ARM processors. -config KVM_ARM_MAX_VCPUS - int "Number maximum supported virtual CPUs per VM" - depends on KVM_ARM_HOST - default 4 - help - Static number of max supported virtual CPUs per VM. - - If you choose a high number, the vcpu structures will be quite - large, so only choose a reasonable number that you expect to - actually use. - endif # VIRTUALIZATION diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 415938dc45cf..3fb58ea944eb 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,12 +30,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#if defined(CONFIG_KVM_ARM_MAX_VCPUS) -#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS -#else -#define KVM_MAX_VCPUS 0 -#endif - #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -43,6 +37,8 @@ #include #include +#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS + #define KVM_VCPU_MAX_FEATURES 3 int __attribute_const__ kvm_target_cpu(void); diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index bfffe8f4bd53..5c7e920e4861 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -41,15 +41,4 @@ config KVM_ARM_HOST ---help--- Provides host support for ARM processors. -config KVM_ARM_MAX_VCPUS - int "Number maximum supported virtual CPUs per VM" - depends on KVM_ARM_HOST - default 4 - help - Static number of max supported virtual CPUs per VM. - - If you choose a high number, the vcpu structures will be quite - large, so only choose a reasonable number that you expect to - actually use. - endif # VIRTUALIZATION diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d901f1a47be6..4e14dac282bb 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -35,11 +35,7 @@ #define VGIC_V3_MAX_LRS 16 #define VGIC_MAX_IRQS 1024 #define VGIC_V2_MAX_CPUS 8 - -/* Sanity checks... 
*/ -#if (KVM_MAX_VCPUS > 255) -#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now -#endif +#define VGIC_V3_MAX_CPUS 255 #if (VGIC_NR_IRQS_LEGACY & 31) #error "VGIC_NR_IRQS must be a multiple of 32" diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index afbf925b00f4..7dd5d62f10a1 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -288,7 +288,7 @@ int vgic_v3_probe(struct device_node *vgic_node, vgic->vctrl_base = NULL; vgic->type = VGIC_V3; - vgic->max_gic_vcpus = KVM_MAX_VCPUS; + vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS; kvm_info("%s@%llx IRQ%d\n", vgic_node->name, vcpu_res.start, vgic->maint_irq); -- cgit v1.2.3 From 208473c1f3ac3eccec097021eec3890f5e20fcc7 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 17 Sep 2015 14:13:44 +0100 Subject: ARM: wire up new syscalls Wire up the new userfaultfd and membarrier syscalls for ARM. Signed-off-by: Russell King --- arch/arm/include/asm/unistd.h | 2 +- arch/arm/include/uapi/asm/unistd.h | 2 ++ arch/arm/kernel/calls.S | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 32640c431a08..7cba573c2cc9 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -19,7 +19,7 @@ * This may need to be greater than __NR_last_syscall+1 in order to * account for the padding in the syscall table */ -#define __NR_syscalls (388) +#define __NR_syscalls (392) /* * *NOTE*: This is a ghost syscall private to the kernel. Only the diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 0c3f5a0dafd3..7a2a32a1d5a8 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -414,6 +414,8 @@ #define __NR_memfd_create (__NR_SYSCALL_BASE+385) #define __NR_bpf (__NR_SYSCALL_BASE+386) #define __NR_execveat (__NR_SYSCALL_BASE+387) +#define __NR_userfaultfd (__NR_SYSCALL_BASE+388) +#define __NR_membarrier (__NR_SYSCALL_BASE+389) /* * The following SWIs are ARM private. diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 05745eb838c5..fde6c88d560c 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -397,6 +397,8 @@ /* 385 */ CALL(sys_memfd_create) CALL(sys_bpf) CALL(sys_execveat) + CALL(sys_userfaultfd) + CALL(sys_membarrier) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted -- cgit v1.2.3 From 920552b213e3dc832a874b4e7ba29ecddbab31bc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 18 Sep 2015 12:34:53 +0200 Subject: KVM: disable halt_poll_ns as default for s390x We observed some performance degradation on s390x with dynamic halt polling. Until we can provide a proper fix, let's enable halt_poll_ns as default only for supported architectures. Architectures are now free to set their own halt_poll_ns default value. 
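[Editor's note: an editorial usage remark, not part of the original patch.] Whatever per-architecture default is chosen, the knob remains runtime-tunable: halt_poll_ns is registered via module_param(..., S_IRUGO | S_IWUSR), so on a typical system it can still be read or overridden through /sys/module/kvm/parameters/halt_poll_ns, and writing 0 disables halt polling entirely, which is effectively what the s390x default below does.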
Signed-off-by: David Hildenbrand Signed-off-by: Paolo Bonzini --- arch/arm/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_host.h | 1 + arch/mips/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/kvm_host.h | 1 + arch/s390/include/asm/kvm_host.h | 1 + arch/x86/include/asm/kvm_host.h | 1 + virt/kvm/kvm_main.c | 4 ++-- 7 files changed, 8 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 3df1e975f72a..c4072d9f32c7 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -33,6 +33,7 @@ #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HAVE_ONE_REG +#define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_VCPU_MAX_FEATURES 2 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4562459456a6..ed039688c221 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -33,6 +33,7 @@ #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_HALT_POLL_NS_DEFAULT 500000 #include #include diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 3a54dbca9f7e..5a1a882e0a75 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -61,6 +61,7 @@ #define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_HALT_POLL_NS_DEFAULT 500000 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 195886a583ba..827a38d7a9db 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -44,6 +44,7 @@ #ifdef CONFIG_KVM_MMIO #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #endif +#define KVM_HALT_POLL_NS_DEFAULT 500000 /* These values are internal and can be increased later */ #define KVM_NR_IRQCHIPS 1 diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 6ce4a0b7e8da..8ced426091e1 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -35,6 +35,7 @@ */ #define KVM_NR_IRQCHIPS 1 #define KVM_IRQCHIP_NUM_PINS 4096 +#define KVM_HALT_POLL_NS_DEFAULT 0 #define SIGP_CTRL_C 0x80 #define SIGP_CTRL_SCN_MASK 0x3f diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 349f80a82b82..2beee0382088 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -40,6 +40,7 @@ #define KVM_PIO_PAGE_OFFSET 1 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 +#define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 04146a2e1d81..8db1d9361993 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -66,8 +66,8 @@ MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); -/* halt polling only reduces halt latency by 5-7 us, 500us is enough */ -static unsigned int halt_poll_ns = 500000; +/* Architectures should define their poll value according to the halt latency */ +static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); /* Default doubles per-vcpu halt_poll_ns. */ -- cgit v1.2.3