From 3543d7ddd55fe12c37e8a9db846216c51846015b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 23 Jan 2020 14:51:12 +0000 Subject: arm64: dts: fast models: Fix FVP PCI interrupt-map property The interrupt map for the FVP's PCI node is missing the parent-unit-address cells for each of the INTx entries, leading to the kernel code failing to parse the entries correctly. Add the missing zero cells, which are pretty useless as far as the GIC is concerned, but which the spec requires. This allows INTx to be usable on the model, and VFIO to work correctly. Fixes: fa083b99eb28 ("arm64: dts: fast models: Add DTS fo Base RevC FVP") Signed-off-by: Marc Zyngier Signed-off-by: Sudeep Holla --- arch/arm64/boot/dts/arm/fvp-base-revc.dts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts index 62ab0d54ff71..335fff762451 100644 --- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts +++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts @@ -161,10 +161,10 @@ bus-range = <0x0 0x1>; reg = <0x0 0x40000000 0x0 0x10000000>; ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>; - interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; msi-map = <0x0 &its 0x0 0x10000>; iommu-map = <0x0 &smmu 0x0 0x10000>; -- cgit v1.2.3 From f166795871be4a6a679a5f61ac7130b3c0b21cab Mon Sep 17 00:00:00 2001 From: Nicolas Saenz Julienne Date: Fri, 24 Jan 2020 22:08:33 +0100 Subject: arm64: defconfig: Set bcm2835-dma as built-in With the introduction of 738987a1d6f1 ("mmc: bcm2835: Use dma_request_chan() instead dma_request_slave_channel()") sdhost-bcm2835 now waits for its DMA channel to be available when defined in the device-tree (it would previously default to PIO). Although this is the right behaviour, the MMC host is needed for booting, so this makes sure the DMA channel shows up in time. Fixes: 738987a1d6f1 ("mmc: bcm2835: Use dma_request_chan() instead dma_request_slave_channel()") Signed-off-by: Nicolas Saenz Julienne Signed-off-by: Florian Fainelli --- arch/arm64/configs/defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 0f212889c931..b598bd7b7d62 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -681,7 +681,7 @@ CONFIG_RTC_DRV_SNVS=m CONFIG_RTC_DRV_IMX_SC=m CONFIG_RTC_DRV_XGENE=y CONFIG_DMADEVICES=y -CONFIG_DMA_BCM2835=m +CONFIG_DMA_BCM2835=y CONFIG_DMA_SUN6I=m CONFIG_FSL_EDMA=y CONFIG_IMX_SDMA=y -- cgit v1.2.3 From 4a453ccf87d507e8c9f156f95e708cec0e70ffed Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Sun, 29 Dec 2019 11:17:06 +0000 Subject: arm64: defconfig: Enable CONFIG_SUN8I_THERMAL Many sunxi-based boards need CONFIG_SUN8I_THERMAL for thermal support.
Signed-off-by: Yangtao Li Signed-off-by: Maxime Ripard --- arch/arm64/configs/defconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 0f212889c931..db873d8e03e9 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -452,6 +452,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y CONFIG_QORIQ_THERMAL=m +CONFIG_SUN8I_THERMAL=y CONFIG_ROCKCHIP_THERMAL=m CONFIG_RCAR_THERMAL=y CONFIG_RCAR_GEN3_THERMAL=y -- cgit v1.2.3 From 03c6bf4644287601bf10d0ed9f6137c1854d3e23 Mon Sep 17 00:00:00 2001 From: Jagan Teki Date: Tue, 31 Dec 2019 12:25:08 +0530 Subject: arm64: defconfig: Enable DRM_SUN6I_DSI Allwinner MIPI-DSI support is now available for ARM64 Allwinner SoCs like the A64, so let's build it as a module. Signed-off-by: Jagan Teki Signed-off-by: Maxime Ripard --- arch/arm64/configs/defconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index db873d8e03e9..4631a1190719 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -548,6 +548,7 @@ CONFIG_ROCKCHIP_DW_MIPI_DSI=y CONFIG_ROCKCHIP_INNO_HDMI=y CONFIG_DRM_RCAR_DU=m CONFIG_DRM_SUN4I=m +CONFIG_DRM_SUN6I_DSI=m CONFIG_DRM_SUN8I_DW_HDMI=m CONFIG_DRM_SUN8I_MIXER=m CONFIG_DRM_MSM=m -- cgit v1.2.3 From 5cb7a1113f94cec20ff16d3981b94b7fdd8d73fa Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 27 Jan 2020 15:19:35 +0530 Subject: arm64: Drop do_el0_ia_bp_hardening() & do_sp_pc_abort() declarations There is a redundant do_sp_pc_abort() declaration in exceptions.h which can be removed. Also, do_el0_ia_bp_hardening() has already been dropped by commit bfe298745afc ("arm64: entry-common: don't touch daif before bp-hardening") and hence does not need a declaration any more. This should not introduce any functional change.
Cc: Catalin Marinas Cc: Will Deacon Cc: James Morse Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Acked-by: Mark Rutland Signed-off-by: Anshuman Khandual Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index b87c6e276ab1..7a6e81ca23a8 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -33,7 +33,6 @@ static inline u32 disr_to_esr(u64 disr) asmlinkage void enter_from_user_mode(void); void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); -void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs); asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr); void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, @@ -47,7 +46,4 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); void do_cp15instr(unsigned int esr, struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); -void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr, - struct pt_regs *regs); - #endif /* __ASM_EXCEPTION_H */ -- cgit v1.2.3 From 2c614c1194f2803750c14b751871bd168dcc8054 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Fri, 24 Jan 2020 16:51:27 +0100 Subject: arm64: use shared sysctl constants Use shared sysctl variables for zero and one constants, as in commit eec4844fae7c ("proc/sysctl: add shared variables for range check") Fixes: 63f0c6037965 ("arm64: Introduce prctl() options to control the tagged user addresses ABI") Signed-off-by: Matteo Croce Signed-off-by: Will Deacon --- arch/arm64/kernel/process.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index bbb0f0c145f6..a480b6760808 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -608,8 +608,6 @@ long get_tagged_addr_ctrl(void) * only prevents the tagged address ABI enabling via prctl() and does not * disable it for tasks that already opted in to the relaxed ABI. */ -static int zero; -static int one = 1; static struct ctl_table tagged_addr_sysctl_table[] = { { @@ -618,8 +616,8 @@ static struct ctl_table tagged_addr_sysctl_table[] = { .data = &tagged_addr_disabled, .maxlen = sizeof(int), .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, { } }; -- cgit v1.2.3 From fca3d33d8ad61eb53eca3ee4cac476d1e31b9008 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 6 Feb 2020 10:42:58 +0000 Subject: arm64: ssbs: Fix context-switch when SSBS is present on all CPUs When all CPUs in the system implement the SSBS extension, the SSBS field in PSTATE is the definitive indication of the mitigation state. Further, when the CPUs implement the SSBS manipulation instructions (advertised to userspace via an HWCAP), EL0 can toggle the SSBS field directly and so we cannot rely on any shadow state such as TIF_SSBD at all. Avoid forcing the SSBS field in context-switch on such a system, and simply rely on the PSTATE register instead. 
Cc: Cc: Catalin Marinas Cc: Srinivas Ramana Fixes: cbdf8a189a66 ("arm64: Force SSBS on context switch") Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/kernel/process.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index a480b6760808..00626057a384 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next) if (unlikely(next->flags & PF_KTHREAD)) return; + /* + * If all CPUs implement the SSBS extension, then we just need to + * context-switch the PSTATE field. + */ + if (cpu_have_feature(cpu_feature(SSBS))) + return; + /* If the mitigation is enabled, then we leave SSBS clear. */ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) || test_tsk_thread_flag(next, TIF_SSBD)) -- cgit v1.2.3 From 345d52c184dc7de98cff63f1bfa6f90e9db19809 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Thu, 23 Jan 2020 15:20:51 -0500 Subject: arm64/spinlock: fix a -Wunused-function warning The commit f5bfdc8e3947 ("locking/osq: Use optimized spinning loop for arm64") introduced a warning from Clang because vcpu_is_preempted() is compiled away, kernel/locking/osq_lock.c:25:19: warning: unused function 'node_cpu' [-Wunused-function] static inline int node_cpu(struct optimistic_spin_node *node) ^ 1 warning generated. Fix it by converting vcpu_is_preempted() to a static inline function. Fixes: f5bfdc8e3947 ("locking/osq: Use optimized spinning loop for arm64") Acked-by: Waiman Long Signed-off-by: Qian Cai Signed-off-by: Will Deacon --- arch/arm64/include/asm/spinlock.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 102404dc1e13..9083d6992603 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -18,6 +18,10 @@ * See: * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net */ -#define vcpu_is_preempted(cpu) false +#define vcpu_is_preempted vcpu_is_preempted +static inline bool vcpu_is_preempted(int cpu) +{ + return false; +} #endif /* __ASM_SPINLOCK_H */ -- cgit v1.2.3 From 74a44bed8d93782affb707a33469bda7052b4207 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 10 Feb 2020 19:21:01 +0000 Subject: arm64: Fix CONFIG_ARCH_RANDOM=n build The entire asm/archrandom.h header is generically included via linux/archrandom.h only when CONFIG_ARCH_RANDOM is already set, so the stub definitions of __arm64_rndr() and __early_cpu_has_rndr() are only visible to KASLR if it explicitly includes the arch-internal header. Acked-by: Mark Brown Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/kernel/kaslr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 53b8a4ee64ff..91a83104c6e8 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -11,6 +11,7 @@ #include #include +#include <asm/archrandom.h> #include #include #include -- cgit v1.2.3 From d91771848f0ae2eec250a9345926a1a3558fa943 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 12 Feb 2020 11:09:34 +0100 Subject: arm64: time: Replace <linux/clk-provider.h> by <linux/of_clk.h> The arm64 time code is not a clock provider, and just needs to call of_clk_init(). Hence it can include <linux/of_clk.h> instead of <linux/clk-provider.h>.
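For reference, a minimal sketch of the only clk API the arm64 time code still needs, assuming the usual of_clk_init() prototype from <linux/of_clk.h>; the real time.c does more setup than shown here:

#include <linux/of_clk.h>

void __init time_init(void)
{
	/*
	 * Scan the DT and register all clock providers; NULL means
	 * "use the default provider matches". of_clk_init() is declared
	 * in <linux/of_clk.h>, so <linux/clk-provider.h> is not needed.
	 */
	of_clk_init(NULL);
}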
Reviewed-by: Stephen Boyd Signed-off-by: Geert Uytterhoeven Signed-off-by: Will Deacon --- arch/arm64/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index 73f06d4b3aae..eebbc8d7123e 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -23,7 +23,7 @@ #include #include #include -#include <linux/clk-provider.h> +#include <linux/of_clk.h> #include #include -- cgit v1.2.3 From b3f15ec3d809ccf2e171ca4e272a220d3c1a3e05 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 10 Feb 2020 11:47:57 +0000 Subject: kvm: arm/arm64: Fold VHE entry/exit work into kvm_vcpu_run_vhe() With VHE, running a vCPU always requires the sequence: 1. kvm_arm_vhe_guest_enter(); 2. kvm_vcpu_run_vhe(); 3. kvm_arm_vhe_guest_exit() ... and as we invoke this from the shared arm/arm64 KVM code, 32-bit arm has to provide stubs for all three functions. To simplify the common code, and make it easier to make further modifications to the arm64-specific portions in the near future, let's fold kvm_arm_vhe_guest_enter() and kvm_arm_vhe_guest_exit() into kvm_vcpu_run_vhe(). The 32-bit stubs for kvm_arm_vhe_guest_enter() and kvm_arm_vhe_guest_exit() are removed, as they are no longer used. The 32-bit stub for kvm_vcpu_run_vhe() is left as-is. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200210114757.2889-1-mark.rutland@arm.com --- arch/arm/include/asm/kvm_host.h | 3 --- arch/arm64/include/asm/kvm_host.h | 32 -------------------------------- arch/arm64/kvm/hyp/switch.c | 39 +++++++++++++++++++++++++++++++++++++-- virt/kvm/arm/arm.c | 2 -- 4 files changed, 37 insertions(+), 39 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index bd2233805d99..cbd26ae95e7e 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -394,9 +394,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} -static inline void kvm_arm_vhe_guest_enter(void) {} -static inline void kvm_arm_vhe_guest_exit(void) {} - #define KVM_BP_HARDEN_UNKNOWN -1 #define KVM_BP_HARDEN_WA_NEEDED 0 #define KVM_BP_HARDEN_NOT_REQUIRED 1 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f6a77ddab956..d740ec00ecd3 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -628,38 +628,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u32 clr) {} #endif -static inline void kvm_arm_vhe_guest_enter(void) -{ - local_daif_mask(); - - /* - * Having IRQs masked via PMR when entering the guest means the GIC - * will not signal the CPU of interrupts of lower priority, and the - * only way to get out will be via guest exceptions. - * Naturally, we want to avoid this. - * - * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a - * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU. - */ - pmr_sync(); -} - -static inline void kvm_arm_vhe_guest_exit(void) -{ - /* - * local_daif_restore() takes care to properly restore PSTATE.DAIF - * and the GIC PMR if the host is using IRQ priorities.
- */ - local_daif_restore(DAIF_PROCCTX_NOIRQ); - - /* - * When we exit from the guest we change a number of CPU configuration - * parameters, such as traps. Make sure these changes take effect - * before running the host or additional guests. - */ - isb(); -} - #define KVM_BP_HARDEN_UNKNOWN -1 #define KVM_BP_HARDEN_WA_NEEDED 0 #define KVM_BP_HARDEN_NOT_REQUIRED 1 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 72fbbd86eb5e..457067706b75 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -617,7 +617,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) } /* Switch to the guest for VHE systems running in EL2 */ -int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) +static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; @@ -670,7 +670,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) return exit_code; } -NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); +NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe); + +int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) +{ + int ret; + + local_daif_mask(); + + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + * + * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a + * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU. + */ + pmr_sync(); + + ret = __kvm_vcpu_run_vhe(vcpu); + + /* + * local_daif_restore() takes care to properly restore PSTATE.DAIF + * and the GIC PMR if the host is using IRQ priorities. + */ + local_daif_restore(DAIF_PROCCTX_NOIRQ); + + /* + * When we exit from the guest we change a number of CPU configuration + * parameters, such as traps. Make sure these changes take effect + * before running the host or additional guests. + */ + isb(); + + return ret; +} /* Switch to the guest for legacy non-VHE systems */ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index efda376ab3c5..560d6f258297 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -797,9 +797,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) guest_enter_irqoff(); if (has_vhe()) { - kvm_arm_vhe_guest_enter(); ret = kvm_vcpu_run_vhe(vcpu); - kvm_arm_vhe_guest_exit(); } else { ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu); } -- cgit v1.2.3 From dd1f6308b28edf0452dd5dc7877992903ec61e69 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Tue, 18 Feb 2020 16:49:06 +0000 Subject: arm64: lse: Fix LSE atomics with LLVM Commit e0d5896bd356 ("arm64: lse: fix LSE atomics with LLVM's integrated assembler") broke the build when clang is used in conjunction with the binutils assembler ("-no-integrated-as").
This happens because __LSE_PREAMBLE is defined as ".arch armv8-a+lse", which overrides the version of the CPU architecture passed via the "-march" parameter to gas: $ aarch64-none-linux-gnu-as -EL -I ./arch/arm64/include -I ./arch/arm64/include/generated -I ./include -I ./include -I ./arch/arm64/include/uapi -I ./arch/arm64/include/generated/uapi -I ./include/uapi -I ./include/generated/uapi -I ./init -I ./init -march=armv8.3-a -o init/do_mounts.o /tmp/do_mounts-d7992a.s /tmp/do_mounts-d7992a.s: Assembler messages: /tmp/do_mounts-d7992a.s:1959: Error: selected processor does not support `autiasp' /tmp/do_mounts-d7992a.s:2021: Error: selected processor does not support `paciasp' /tmp/do_mounts-d7992a.s:2157: Error: selected processor does not support `autiasp' /tmp/do_mounts-d7992a.s:2175: Error: selected processor does not support `paciasp' /tmp/do_mounts-d7992a.s:2494: Error: selected processor does not support `autiasp' Fix the issue by replacing ".arch armv8-a+lse" with ".arch_extension lse". Sami confirms that the clang integrated assembler does now support the '.arch_extension' directive, so this change will be fine even for LTO builds in future. Fixes: e0d5896bd356cd ("arm64: lse: fix LSE atomics with LLVM's integrated assembler") Cc: Catalin Marinas Cc: Will Deacon Reported-by: Amit Kachhap Tested-by: Sami Tolvanen Signed-off-by: Vincenzo Frascino Signed-off-by: Will Deacon --- arch/arm64/include/asm/lse.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index d429f7701c36..5d10051c3e62 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -6,7 +6,7 @@ #ifdef CONFIG_ARM64_LSE_ATOMICS -#define __LSE_PREAMBLE ".arch armv8-a+lse\n" +#define __LSE_PREAMBLE ".arch_extension lse\n" #include #include -- cgit v1.2.3 From 26c4b4758fce8f0ae744335e1762213be29db441 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 17 Feb 2020 16:15:03 -0300 Subject: arm64: dts: imx8qxp-mek: Remove nonexistent Ethernet PHY There is only one Ethernet port and one Ethernet PHY on imx8qxp-mek. Remove the nonexistent ethphy1 node. This fixes a run-time warning: mdio_bus 5b040000.ethernet-1: MDIO device at address 1 is missing. Fixes: fdea904e85e1 ("arm64: dts: imx: add imx8qxp mek support") Signed-off-by: Fabio Estevam Reviewed-by: Leonard Crestez Signed-off-by: Shawn Guo --- arch/arm64/boot/dts/freescale/imx8qxp-mek.dts | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts index d3d26cca7d52..13460a360c6a 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts +++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts @@ -52,11 +52,6 @@ compatible = "ethernet-phy-ieee802.3-c22"; reg = <0>; }; - - ethphy1: ethernet-phy@1 { - compatible = "ethernet-phy-ieee802.3-c22"; - reg = <1>; - }; }; }; -- cgit v1.2.3 From d0022c0ef29b78bcbe8a5c5894bd2307143afce1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 19 Feb 2020 10:19:13 +0000 Subject: arm64: memory: Add missing brackets to untagged_addr() macro Add brackets around the evaluation of the 'addr' parameter to the untagged_addr() macro so that the cast to 'u64' applies to the result of the expression.
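As an illustration of the precedence pitfall being fixed here (a hypothetical macro pair, not the kernel's):

#define UNSAFE_CAST(addr)	((u64)addr)	/* cast binds to the first token only */
#define SAFE_CAST(addr)	((u64)(addr))	/* cast applies to the whole argument */

/*
 * With a conditional expression as the argument:
 *   UNSAFE_CAST(a ? a : b) expands to ((u64)a ? a : b)   - the cast becomes the condition
 *   SAFE_CAST(a ? a : b)   expands to ((u64)(a ? a : b)) - the cast applies to the result
 */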
Cc: Fixes: 597399d0cb91 ("arm64: tags: Preserve tags for addresses translated via TTBR1") Reported-by: Linus Torvalds Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index a4f9ca5479b0..4d94676e5a8b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void) ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) #define untagged_addr(addr) ({ \ - u64 __addr = (__force u64)addr; \ + u64 __addr = (__force u64)(addr); \ __addr &= __untagged_addr(__addr); \ (__force __typeof__(addr))__addr; \ }) -- cgit v1.2.3 From d5888c8e55861789a8d9dca73d8a2a279d25822d Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 17 Feb 2020 11:17:41 +0100 Subject: arm64: defconfig: Replace ARCH_R8A7796 by ARCH_R8A77960 CONFIG_ARCH_R8A7796 was replaced by CONFIG_ARCH_R8A77960, cfr. commits 39e57e14d7eaf818 ("soc: renesas: Add ARCH_R8A77960 for existing R-Car M3-W") and 24240845c87185fe ("soc: renesas: Remove ARCH_R8A7796"). Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20200217101741.3758-1-geert+renesas@glider.be --- arch/arm64/configs/defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 0f212889c931..51b8b8555a6e 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -771,7 +771,7 @@ CONFIG_ARCH_R8A774A1=y CONFIG_ARCH_R8A774B1=y CONFIG_ARCH_R8A774C0=y CONFIG_ARCH_R8A7795=y -CONFIG_ARCH_R8A7796=y +CONFIG_ARCH_R8A77960=y CONFIG_ARCH_R8A77961=y CONFIG_ARCH_R8A77965=y CONFIG_ARCH_R8A77970=y -- cgit v1.2.3 From 5c37f1ae1c335800d16b207cb578009c695dcd39 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 20 Feb 2020 16:58:37 +0000 Subject: KVM: arm64: Ask the compiler to __always_inline functions used at HYP On non VHE CPUs, KVM's __hyp_text contains code run at EL2 while the rest of the kernel runs at EL1. This code lives in its own section with start and end markers so we can map it to EL2. The compiler may decide not to inline static-inline functions from the header file. It may also decide not to put these out-of-line functions in the same section, meaning they aren't mapped when called at EL2. Clang-9 does exactly this with __kern_hyp_va() and a few others when x18 is reserved for the shadow call stack. Add the additional __always_ hint to all the static-inlines that are called from a hyp file. Signed-off-by: James Morse Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200220165839.256881-2-james.morse@arm.com ---- kvm_get_hyp_vector() pulls in all the regular per-cpu accessors and this_cpu_has_cap(), fortunately it's only called for VHE.
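To make the failure mode concrete, a simplified sketch with hypothetical helper names: a plain static inline may be emitted as an out-of-line copy in the regular .text section, which is not mapped at EL2, whereas __always_inline keeps the body inside its __hyp_text caller:

static inline unsigned long helper(unsigned long v)	/* copy may land in .text */
{
	return v ^ 1UL;
}

static __always_inline unsigned long hyp_helper(unsigned long v)	/* always inlined */
{
	return v ^ 1UL;
}

static unsigned long __hyp_text run_at_el2(unsigned long v)
{
	return hyp_helper(v);	/* no out-of-line call can escape __hyp_text */
}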
--- arch/arm64/include/asm/arch_gicv3.h | 2 +- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/kvm_emulate.h | 48 ++++++++++++++++++------------------ arch/arm64/include/asm/kvm_mmu.h | 3 ++- arch/arm64/include/asm/virt.h | 2 +- 5 files changed, 29 insertions(+), 28 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 89e4c8b79349..07597028bb00 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq) isb(); } -static inline void gic_write_dir(u32 irq) +static __always_inline void gic_write_dir(u32 irq) { write_sysreg_s(irq, SYS_ICC_DIR_EL1); isb(); diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 4261d55e8506..0e6d03c7e368 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -581,7 +581,7 @@ static inline bool system_supports_sve(void) cpus_have_const_cap(ARM64_SVE); } -static inline bool system_supports_cnp(void) +static __always_inline bool system_supports_cnp(void) { return IS_ENABLED(CONFIG_ARM64_CNP) && cpus_have_const_cap(ARM64_HAS_CNP); diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 688c63412cc2..f658dda12364 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu); void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); -static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) +static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) { return !(vcpu->arch.hcr_el2 & HCR_RW); } @@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr) vcpu->arch.vsesr_el2 = vsesr; } -static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; } @@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long *__vcpu_elr_el1(vcpu) = v; } -static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate; } -static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) +static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) { return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); } -static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) return kvm_condition_valid32(vcpu); @@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on * AArch32 with banked registers. */ -static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, +static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, u8 reg_num) { return (reg_num == 31) ? 
0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num]; } -static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, +static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, unsigned long val) { if (reg_num != 31) @@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) return mode != PSR_MODE_EL0t; } -static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) +static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.esr_el2; } -static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); @@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) return -1; } -static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.far_el2; } -static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) +static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) { return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; } @@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK; } -static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); } @@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF); } -static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) { return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } -static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) || kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ @@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); } -static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) +static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) { return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); } /* This one is not specific to Data Abort */ -static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL); } -static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) { return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); } @@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } -static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu 
*vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; } -static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE; } -static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) { switch (kvm_vcpu_trap_get_fault(vcpu)) { case FSC_SEA: @@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) } } -static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); return ESR_ELx_SYS64_ISS_RT(esr); @@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, return data; /* Leave LE untouched */ } -static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) +static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) { if (vcpu_mode_is_32bit(vcpu)) kvm_skip_instr32(vcpu, is_wide_instr); @@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) * Skip an instruction which has been emulated at hyp while most guest sysregs * are live. */ -static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) +static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) { *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 53d846f1bfe7..785762860c63 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void kvm_compute_layout(void); -static inline unsigned long __kern_hyp_va(unsigned long v) +static __always_inline unsigned long __kern_hyp_va(unsigned long v) { asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" "ror %0, %0, #1\n" @@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, extern void *__kvm_bp_vect_base; extern int __kvm_harden_el2_vector_slot; +/* This is only called on a VHE system */ static inline void *kvm_get_hyp_vector(void) { struct bp_hardening_data *data = arm64_get_bp_hardening_data(); diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 0958ed6191aa..61fd26752adc 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void) return read_sysreg(CurrentEL) == CurrentEL_EL2; } -static inline bool has_vhe(void) +static __always_inline bool has_vhe(void) { if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN)) return true; -- cgit v1.2.3 From 8c2d146ee7a2e0782eea4dd70fddc1c837140136 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 20 Feb 2020 16:58:38 +0000 Subject: KVM: arm64: Define our own swab32() to avoid a uapi static inline KVM uses swab32() when mediating GIC MMIO accesses if the GICV is badly aligned, and the host and guest differ in endianness. arm64 doesn't provide a __arch_swab32(), so __fswab32() is always backed by the macro implementation that the compiler reduces to a single instruction. 
But the static inline causes problems for KVM: if the compiler chooses not to inline this function, it may not be located in the __hyp_text section where __vgic_v2_perform_cpuif_access() needs it. Create our own __kvm_swab32() macro that calls ___constant_swab32() directly. This way we know it will always be inlined. Signed-off-by: James Morse Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200220165839.256881-3-james.morse@arm.com --- arch/arm64/include/asm/kvm_hyp.h | 7 +++++++ arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 97f21cc66657..5fde137b5150 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -47,6 +47,13 @@ #define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1) #define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1) +/* + * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the + * static inline can allow the compiler to out-of-line this. KVM always wants + * the macro version as its always inlined. + */ +#define __kvm_swab32(x) ___constant_swab32(x) + int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu); void __vgic_v3_save_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 29ee1feba4eb..4f3a087e36d5 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) u32 data = vcpu_get_reg(vcpu, rd); if (__is_be(vcpu)) { /* guest pre-swabbed data, undo this for writel() */ - data = swab32(data); + data = __kvm_swab32(data); } writel_relaxed(data, addr); } else { u32 data = readl_relaxed(addr); if (__is_be(vcpu)) { /* guest expects swabbed data */ - data = swab32(data); + data = __kvm_swab32(data); } vcpu_set_reg(vcpu, rd, data); } -- cgit v1.2.3 From e43f1331e2ef913b8c566920c9af75e0ccdd1d3f Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 20 Feb 2020 16:58:39 +0000 Subject: arm64: Ask the compiler to __always_inline functions used by KVM at HYP KVM uses some of the static-inline helpers like icache_is_vipt() from its HYP code. This assumes the function is inlined so that the code is mapped to EL2. The compiler may decide not to inline these, and the out-of-line version may not be in the __hyp_text section. Add the additional __always_ hint to these static-inlines that are used by KVM.
Signed-off-by: James Morse Signed-off-by: Marc Zyngier Acked-by: Will Deacon Link: https://lore.kernel.org/r/20200220165839.256881-4-james.morse@arm.com --- arch/arm64/include/asm/cache.h | 2 +- arch/arm64/include/asm/cacheflush.h | 2 +- arch/arm64/include/asm/cpufeature.h | 8 ++++---- arch/arm64/include/asm/io.h | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 806e9dc2a852..a4d1b5f771f6 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void) return test_bit(ICACHEF_ALIASING, &__icache_flags); } -static inline int icache_is_vpipt(void) +static __always_inline int icache_is_vpipt(void) { return test_bit(ICACHEF_VPIPT, &__icache_flags); } diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 665c78e0665a..e6cca3d4acf7 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); -static inline void __flush_icache_all(void) +static __always_inline void __flush_icache_all(void) { if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) return; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 0e6d03c7e368..be078699ac4b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field) return cpuid_feature_extract_signed_field_width(features, field, 4); } -static inline unsigned int __attribute_const__ +static __always_inline unsigned int __attribute_const__ cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) { return (u64)(features << (64 - width - field)) >> (64 - width); } -static inline unsigned int __attribute_const__ +static __always_inline unsigned int __attribute_const__ cpuid_feature_extract_unsigned_field(u64 features, int field) { return cpuid_feature_extract_unsigned_field_width(features, field, 4); @@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void) return val == 0x1; } -static inline bool system_supports_fpsimd(void) +static __always_inline bool system_supports_fpsimd(void) { return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); } @@ -575,7 +575,7 @@ static inline bool system_uses_ttbr0_pan(void) !cpus_have_const_cap(ARM64_HAS_PAN); } -static inline bool system_supports_sve(void) +static __always_inline bool system_supports_sve(void) { return IS_ENABLED(CONFIG_ARM64_SVE) && cpus_have_const_cap(ARM64_SVE); diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 4e531f57147d..6facd1308e7c 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr) } #define __raw_writel __raw_writel -static inline void __raw_writel(u32 val, volatile void __iomem *addr) +static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr)); } @@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) } #define __raw_readl __raw_readl -static inline u32 __raw_readl(const volatile void __iomem *addr) +static __always_inline u32 __raw_readl(const volatile void __iomem 
*addr) { u32 val; asm volatile(ALTERNATIVE("ldr %w0, [%1]", -- cgit v1.2.3 From 5bea1336ed2c939328999c64de28792e8dc0699b Mon Sep 17 00:00:00 2001 From: Guillaume La Roque Date: Fri, 17 Jan 2020 14:34:23 +0100 Subject: arm64: dts: meson-sm1-sei610: add missing interrupt-names Add the missing "host-wakeup" interrupt name. Fixes: 30388cc07572 ("arm64: dts: meson-sm1-sei610: add gpio bluetooth interrupt") Signed-off-by: Guillaume La Roque Acked-by: Neil Armstrong Link: https://lore.kernel.org/r/20200117133423.22602-1-glaroque@baylibre.com Signed-off-by: Kevin Hilman --- arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index a8bb3fa9fec9..cb1b48f5b8b1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -593,6 +593,7 @@ compatible = "brcm,bcm43438-bt"; interrupt-parent = <&gpio_intc>; interrupts = <95 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host-wakeup"; shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; max-speed = <2000000>; clocks = <&wifi32k>; -- cgit v1.2.3 From 146033562e7e5d1c9aae9653986806664995f1d5 Mon Sep 17 00:00:00 2001 From: Christian Hewitt Date: Thu, 20 Feb 2020 19:33:10 +0400 Subject: arm64: dts: meson: fix gxm-khadas-vim2 wifi before: [6.418252] brcmfmac: F1 signature read @0x18000000=0x17224356 [6.435663] brcmfmac: brcmf_fw_alloc_request: using brcm/brcmfmac4356-sdio for chip BCM4356/2 [6.551259] brcmfmac: brcmf_sdiod_ramrw: membytes transfer failed [6.551275] brcmfmac: brcmf_sdio_verifymemory: error -84 on reading 2048 membytes at 0x00184000 [6.551352] brcmfmac: brcmf_sdio_download_firmware: dongle image file download failed after: [6.657165] brcmfmac: F1 signature read @0x18000000=0x17224356 [6.660807] brcmfmac: brcmf_fw_alloc_request: using brcm/brcmfmac4356-sdio for chip BCM4356/2 [6.918643] brcmfmac: brcmf_fw_alloc_request: using brcm/brcmfmac4356-sdio for chip BCM4356/2 [6.918734] brcmfmac: brcmf_c_process_clm_blob: no clm_blob available (err=-2), device may have limited channels available [6.922724] brcmfmac: brcmf_c_preinit_dcmds: Firmware: BCM4356/2 wl0: Jun 16 2015 14:25:06 version 7.35.184.r1 (TOB) (r559293) FWID 01-b22ae69c Fixes: adc52bf7ef16 ("arm64: dts: meson: fix mmc v2 chips max frequencies") Suggested-by: Art Nikpal Signed-off-by: Christian Hewitt Signed-off-by: Kevin Hilman Link: https://lore.kernel.org/r/1582212790-11402-1-git-send-email-christianshewitt@gmail.com --- arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index f82f25c1a5f9..d5dc12878dfe 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -327,7 +327,7 @@ #size-cells = <0>; bus-width = <4>; - max-frequency = <50000000>; + max-frequency = <60000000>; non-removable; disable-wp; -- cgit v1.2.3 From 9abd515a6e4a5c58c6eb4d04110430325eb5f5ac Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 27 Feb 2020 09:34:47 +0100 Subject: arm64: context: Fix ASID limit in boot messages Since commit f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI is not in use"), the NUM_USER_ASIDS macro doesn't correspond to the effective number of ASIDs when KPTI is enabled.
Get an accurate number of available ASIDs in an arch_initcall, once we've discovered all CPUs' capabilities and know if we still need to halve the ASID space for KPTI. Fixes: f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI is not in use") Reviewed-by: Vladimir Murzin Signed-off-by: Jean-Philippe Brucker Signed-off-by: Will Deacon --- arch/arm64/mm/context.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 8ef73e89d514..d89bb22589f6 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -260,14 +260,26 @@ asmlinkage void post_ttbr_update_workaround(void) CONFIG_CAVIUM_ERRATUM_27456)); } -static int asids_init(void) +static int asids_update_limit(void) { - asid_bits = get_cpu_asid_bits(); + unsigned long num_available_asids = NUM_USER_ASIDS; + + if (arm64_kernel_unmapped_at_el0()) + num_available_asids /= 2; /* * Expect allocation after rollover to fail if we don't have at least * one more ASID than CPUs. ASID #0 is reserved for init_mm. */ - WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()); + WARN_ON(num_available_asids - 1 <= num_possible_cpus()); + pr_info("ASID allocator initialised with %lu entries\n", + num_available_asids); + return 0; +} +arch_initcall(asids_update_limit); + +static int asids_init(void) +{ + asid_bits = get_cpu_asid_bits(); atomic64_set(&asid_generation, ASID_FIRST_VERSION); asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), GFP_KERNEL); @@ -282,8 +294,6 @@ static int asids_init(void) */ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) set_kpti_asid_bits(); - - pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); return 0; } early_initcall(asids_init); -- cgit v1.2.3 From 8c867387160e89c9ffd12459f38e56844312a7a7 Mon Sep 17 00:00:00 2001 From: Ley Foon Tan Date: Thu, 27 Feb 2020 04:20:14 +0800 Subject: arm64: dts: socfpga: agilex: Fix gmac compatible Fix the gmac compatible string to "altr,socfpga-stmmac-a10-s10". The gmac for Agilex should use the same compatible as Stratix 10.
Fixes: 4b36daf9ada3 ("arm64: dts: agilex: Add initial support for Intel's Agilex SoCFPGA") Cc: stable@vger.kernel.org Signed-off-by: Ley Foon Tan Signed-off-by: Dinh Nguyen --- arch/arm64/boot/dts/intel/socfpga_agilex.dtsi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index e1d357eaad7c..d8c44d3ca15a 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -102,7 +102,7 @@ }; gmac0: ethernet@ff800000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff800000 0x2000>; interrupts = <0 90 4>; interrupt-names = "macirq"; @@ -118,7 +118,7 @@ }; gmac1: ethernet@ff802000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff802000 0x2000>; interrupts = <0 91 4>; interrupt-names = "macirq"; @@ -134,7 +134,7 @@ }; gmac2: ethernet@ff804000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff804000 0x2000>; interrupts = <0 92 4>; interrupt-names = "macirq"; -- cgit v1.2.3 From b54d3900862374e1bb2846e6b39d79c896c0b200 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Wed, 4 Mar 2020 18:04:26 +0200 Subject: arm64: dts: ls1043a: FMan erratum A050385 The LS1043A SoC is affected by the A050385 erratum, which states that FMAN DMA reads or writes under heavy traffic load may cause a FMAN internal resource leak, thus stopping further packet processing. Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi index 6082ae022136..d237162a8744 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi @@ -20,6 +20,8 @@ }; &fman0 { + fsl,erratum-a050385; + /* these aliases provide the FMan ports mapping */ enet0: ethernet@e0000 { }; -- cgit v1.2.3 From d0bab0c39e32d39a8c5cddca72e5b4a3059fe050 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 11 Mar 2020 17:12:44 +0000 Subject: arm64: smp: fix smp_send_stop() behaviour On a system with only one CPU online, when another CPU panics while starting up, smp_send_stop() will fail to send any STOP message to the other already online core, resulting in a system still responsive and alive at the end of the panic procedure. [ 186.700083] CPU3: shutdown [ 187.075462] CPU2: shutdown [ 187.162869] CPU1: shutdown [ 188.689998] ------------[ cut here ]------------ [ 188.691645] kernel BUG at arch/arm64/kernel/cpufeature.c:886!
[ 188.692079] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP [ 188.692444] Modules linked in: [ 188.693031] CPU: 3 PID: 0 Comm: swapper/3 Not tainted 5.6.0-rc4-00001-g338d25c35a98 #104 [ 188.693175] Hardware name: Foundation-v8A (DT) [ 188.693492] pstate: 200001c5 (nzCv dAIF -PAN -UAO) [ 188.694183] pc : has_cpuid_feature+0xf0/0x348 [ 188.694311] lr : verify_local_elf_hwcaps+0x84/0xe8 [ 188.694410] sp : ffff800011b1bf60 [ 188.694536] x29: ffff800011b1bf60 x28: 0000000000000000 [ 188.694707] x27: 0000000000000000 x26: 0000000000000000 [ 188.694801] x25: 0000000000000000 x24: ffff80001189a25c [ 188.694905] x23: 0000000000000000 x22: 0000000000000000 [ 188.694996] x21: ffff8000114aa018 x20: ffff800011156a38 [ 188.695089] x19: ffff800010c944a0 x18: 0000000000000004 [ 188.695187] x17: 0000000000000000 x16: 0000000000000000 [ 188.695280] x15: 0000249dbde5431e x14: 0262cbe497efa1fa [ 188.695371] x13: 0000000000000002 x12: 0000000000002592 [ 188.695472] x11: 0000000000000080 x10: 00400032b5503510 [ 188.695572] x9 : 0000000000000000 x8 : ffff800010c80204 [ 188.695659] x7 : 00000000410fd0f0 x6 : 0000000000000001 [ 188.695750] x5 : 00000000410fd0f0 x4 : 0000000000000000 [ 188.695836] x3 : 0000000000000000 x2 : ffff8000100939d8 [ 188.695919] x1 : 0000000000180420 x0 : 0000000000180480 [ 188.696253] Call trace: [ 188.696410] has_cpuid_feature+0xf0/0x348 [ 188.696504] verify_local_elf_hwcaps+0x84/0xe8 [ 188.696591] check_local_cpu_capabilities+0x44/0x128 [ 188.696666] secondary_start_kernel+0xf4/0x188 [ 188.697150] Code: 52805001 72a00301 6b01001f 54000ec0 (d4210000) [ 188.698639] ---[ end trace 3f12ca47652f7b72 ]--- [ 188.699160] Kernel panic - not syncing: Attempted to kill the idle task! [ 188.699546] Kernel Offset: disabled [ 188.699828] CPU features: 0x00004,20c02008 [ 188.700012] Memory Limit: none [ 188.700538] ---[ end Kernel panic - not syncing: Attempted to kill the idle task! ]--- [root@arch ~]# echo Helo Helo [root@arch ~]# cat /proc/cpuinfo | grep proce processor : 0 Make smp_send_stop() account also for the online status of the calling CPU while evaluating how many CPUs are effectively online: this way, the right number of STOPs is sent, so enforcing a proper freeze of the system at the end of panic even under the above conditions. Fixes: 08e875c16a16c ("arm64: SMP support") Reported-by: Dave Martin Acked-by: Mark Rutland Signed-off-by: Cristian Marussi Signed-off-by: Will Deacon --- arch/arm64/kernel/smp.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe..e4dc241c5a8e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -958,11 +958,22 @@ void tick_broadcast(const struct cpumask *mask) } #endif +/* + * The number of CPUs online, not counting this CPU (which may not be + * fully online and so not counted in num_online_cpus()). 
+ */ +static inline unsigned int num_other_online_cpus(void) +{ + unsigned int this_cpu_online = cpu_online(smp_processor_id()); + + return num_online_cpus() - this_cpu_online; +} + void smp_send_stop(void) { unsigned long timeout; - if (num_online_cpus() > 1) { + if (num_other_online_cpus()) { cpumask_t mask; cpumask_copy(&mask, cpu_online_mask); @@ -975,10 +986,10 @@ void smp_send_stop(void) /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && timeout--) + while (num_other_online_cpus() && timeout--) udelay(1); - if (num_online_cpus() > 1) + if (num_other_online_cpus()) pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", cpumask_pr_args(cpu_online_mask)); -- cgit v1.2.3 From f50b7dacccbab2b9e3ef18f52a6dcc18ed2050b9 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 11 Mar 2020 17:12:45 +0000 Subject: arm64: smp: fix crash_smp_send_stop() behaviour On a system configured to trigger a crash_kexec() reboot, when only one CPU is online and another CPU panics while starting up, crash_smp_send_stop() will fail to send any STOP message to the other already online core, resulting in a failure to freeze and registers not being properly saved. Moreover, even if the proper messages are sent (the case with more than two CPUs), it will similarly fail to account for the booting CPU when executing the final stop wait-loop, potentially resulting in some CPUs not being waited on for shutdown before rebooting. A tangible effect of this behaviour can be observed when, after a panic with kexec enabled and loaded, on the following reboot triggered by kexec, the cpu that could not be successfully stopped fails to come back online: [ 362.291022] ------------[ cut here ]------------ [ 362.291525] kernel BUG at arch/arm64/kernel/cpufeature.c:886! [ 362.292023] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP [ 362.292400] Modules linked in: [ 362.292970] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.6.0-rc4-00003-gc780b890948a #105 [ 362.293136] Hardware name: Foundation-v8A (DT) [ 362.293382] pstate: 200001c5 (nzCv dAIF -PAN -UAO) [ 362.294063] pc : has_cpuid_feature+0xf0/0x348 [ 362.294177] lr : verify_local_elf_hwcaps+0x84/0xe8 [ 362.294280] sp : ffff800011b1bf60 [ 362.294362] x29: ffff800011b1bf60 x28: 0000000000000000 [ 362.294534] x27: 0000000000000000 x26: 0000000000000000 [ 362.294631] x25: 0000000000000000 x24: ffff80001189a25c [ 362.294718] x23: 0000000000000000 x22: 0000000000000000 [ 362.294803] x21: ffff8000114aa018 x20: ffff800011156a00 [ 362.294897] x19: ffff800010c944a0 x18: 0000000000000004 [ 362.294987] x17: 0000000000000000 x16: 0000000000000000 [ 362.295073] x15: 00004e53b831ae3c x14: 00004e53b831ae3c [ 362.295165] x13: 0000000000000384 x12: 0000000000000000 [ 362.295251] x11: 0000000000000000 x10: 00400032b5503510 [ 362.295334] x9 : 0000000000000000 x8 : ffff800010c7e204 [ 362.295426] x7 : 00000000410fd0f0 x6 : 0000000000000001 [ 362.295508] x5 : 00000000410fd0f0 x4 : 0000000000000000 [ 362.295592] x3 : 0000000000000000 x2 : ffff8000100939d8 [ 362.295683] x1 : 0000000000180420 x0 : 0000000000180480 [ 362.296011] Call trace: [ 362.296257] has_cpuid_feature+0xf0/0x348 [ 362.296350] verify_local_elf_hwcaps+0x84/0xe8 [ 362.296424] check_local_cpu_capabilities+0x44/0x128 [ 362.296497] secondary_start_kernel+0xf4/0x188 [ 362.296998] Code: 52805001 72a00301 6b01001f 54000ec0 (d4210000) [ 362.298652] SMP: stopping secondary CPUs [ 362.300615] Starting crashdump kernel... [ 362.301168] Bye!
[ 0.000000] Booting Linux on physical CPU 0x0000000003 [0x410fd0f0] [ 0.000000] Linux version 5.6.0-rc4-00003-gc780b890948a (crimar01@e120937-lin) (gcc version 8.3.0 (GNU Toolchain for the A-profile Architecture 8.3-2019.03 (arm-rel-8.36))) #105 SMP PREEMPT Fri Mar 6 17:00:42 GMT 2020 [ 0.000000] Machine model: Foundation-v8A [ 0.000000] earlycon: pl11 at MMIO 0x000000001c090000 (options '') [ 0.000000] printk: bootconsole [pl11] enabled ..... [ 0.138024] rcu: Hierarchical SRCU implementation. [ 0.153472] its@2f020000: unable to locate ITS domain [ 0.154078] its@2f020000: Unable to locate ITS domain [ 0.157541] EFI services will not be available. [ 0.175395] smp: Bringing up secondary CPUs ... [ 0.209182] psci: failed to boot CPU1 (-22) [ 0.209377] CPU1: failed to boot: -22 [ 0.274598] Detected PIPT I-cache on CPU2 [ 0.278707] GICv3: CPU2: found redistributor 1 region 0:0x000000002f120000 [ 0.285212] CPU2: Booted secondary processor 0x0000000001 [0x410fd0f0] [ 0.369053] Detected PIPT I-cache on CPU3 [ 0.372947] GICv3: CPU3: found redistributor 2 region 0:0x000000002f140000 [ 0.378664] CPU3: Booted secondary processor 0x0000000002 [0x410fd0f0] [ 0.401707] smp: Brought up 1 node, 3 CPUs [ 0.404057] SMP: Total of 3 processors activated. Make crash_smp_send_stop() account also for the online status of the calling CPU while evaluating how many CPUs are effectively online: this way the right number of STOPs is sent and all other stopped-cores's registers are properly saved. Fixes: 78fd584cdec05 ("arm64: kdump: implement machine_crash_shutdown()") Acked-by: Mark Rutland Signed-off-by: Cristian Marussi Signed-off-by: Will Deacon --- arch/arm64/kernel/smp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index e4dc241c5a8e..5407bf5d98ac 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -1012,7 +1012,11 @@ void crash_smp_send_stop(void) cpus_stopped = 1; - if (num_online_cpus() == 1) { + /* + * If this cpu is the only one alive at this point in time, online or + * not, there are no stop messages to be sent around, so just back out. + */ + if (num_other_online_cpus() == 0) { sdei_mask_local_cpu(); return; } @@ -1020,7 +1024,7 @@ void crash_smp_send_stop(void) cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); pr_crit("SMP: stopping secondary CPUs\n"); smp_cross_call(&mask, IPI_CPU_CRASH_STOP); -- cgit v1.2.3 From c83557859eaa1286330a4d3d2e1ea0c0988c4604 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 18 Mar 2020 20:38:29 +0000 Subject: arm64: kpti: Fix "kpti=off" when KASLR is enabled Enabling KASLR forces the use of non-global page-table entries for kernel mappings, as this is a decision that we have to make very early on before mapping the kernel proper. When used in conjunction with the "kpti=off" command-line option, it is possible to use non-global kernel mappings but with the kpti trampoline disabled. Since commit 09e3c22a86f6 ("arm64: Use a variable to store non-global mappings decision"), arm64_kernel_unmapped_at_el0() reflects only the use of non-global mappings and does not take into account whether the kpti trampoline is enabled. 
This breaks context switching of the TPIDRRO_EL0 register for 64-bit tasks, where the clearing of the register is deferred to the ret-to-user code, but it also breaks the ARM SPE PMU driver which helpfully recommends passing "kpti=off" on the command line! Report whether or not KPTI is actually enabled in arm64_kernel_unmapped_at_el0() and check the 'arm64_use_ng_mappings' global variable directly when determining the protection flags for kernel mappings. Cc: Mark Brown Reported-by: Hongbo Yao Tested-by: Hongbo Yao Fixes: 09e3c22a86f6 ("arm64: Use a variable to store non-global mappings decision") Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 4 +--- arch/arm64/include/asm/pgtable-prot.h | 6 ++++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index e4d862420bb4..d79ce6df9e12 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -29,11 +29,9 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -extern bool arm64_use_ng_mappings; - static inline bool arm64_kernel_unmapped_at_el0(void) { - return arm64_use_ng_mappings; + return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); } typedef void (*bp_hardening_cb_t)(void); diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 6f87839f0249..1305e28225fc 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -23,11 +23,13 @@ #include +extern bool arm64_use_ng_mappings; + #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) -#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) +#define PTE_MAYBE_NG (arm64_use_ng_mappings ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_use_ng_mappings ? PMD_SECT_NG : 0) #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) -- cgit v1.2.3 From 3568b88944fef28db3ee989b957da49ffc627ede Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Thu, 19 Mar 2020 14:11:38 +0000 Subject: arm64: compat: Fix syscall number of compat_clock_getres The syscall number of compat_clock_getres was erroneously set to 247 (__NR_io_cancel!) instead of 264. This causes the vDSO fallback of clock_getres() to land on the wrong syscall for compat tasks. Fix the numbering. Cc: Fixes: 53c489e1dfeb6 ("arm64: compat: Add missing syscall numbers") Acked-by: Catalin Marinas Reviewed-by: Nick Desaulniers Signed-off-by: Vincenzo Frascino Signed-off-by: Will Deacon --- arch/arm64/include/asm/unistd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 1dd22da1c3a9..803039d504de 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -25,8 +25,8 @@ #define __NR_compat_gettimeofday 78 #define __NR_compat_sigreturn 119 #define __NR_compat_rt_sigreturn 173 -#define __NR_compat_clock_getres 247 #define __NR_compat_clock_gettime 263 +#define __NR_compat_clock_getres 264 #define __NR_compat_clock_gettime64 403 #define __NR_compat_clock_getres_time64 406 -- cgit v1.2.3