From 8bbe9fee5848371d4af101be445303cac8d880c5 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 17 May 2023 22:30:33 +1000
Subject: powerpc/64s: Fix native_hpte_remove() to be irq-safe

Lockdep warns that the use of the hpte_lock in native_hpte_remove() is
not safe against an IRQ coming in:

  ================================
  WARNING: inconsistent lock state
  6.4.0-rc2-g0c54f4d30ecc #1 Not tainted
  --------------------------------
  inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
  qemu-system-ppc/93865 [HC0[0]:SC0[0]:HE1:SE1] takes:
  c0000000021f5180 (hpte_lock){+.?.}-{0:0}, at: native_lock_hpte+0x8/0xd0
  {IN-SOFTIRQ-W} state was registered at:
    lock_acquire+0x134/0x3f0
    native_lock_hpte+0x44/0xd0
    native_hpte_insert+0xd4/0x2a0
    __hash_page_64K+0x218/0x4f0
    hash_page_mm+0x464/0x840
    do_hash_fault+0x11c/0x260
    data_access_common_virt+0x210/0x220
    __ip_select_ident+0x140/0x150
    ...
    net_rx_action+0x3bc/0x440
    __do_softirq+0x180/0x534
    ...
    sys_sendmmsg+0x34/0x50
    system_call_exception+0x128/0x320
    system_call_common+0x160/0x2e4
  ...
  Possible unsafe locking scenario:

        CPU0
        ----
    lock(hpte_lock);
    lock(hpte_lock);

   *** DEADLOCK ***
  ...
  Call Trace:
    dump_stack_lvl+0x98/0xe0 (unreliable)
    print_usage_bug.part.0+0x250/0x278
    mark_lock+0xc9c/0xd30
    __lock_acquire+0x440/0x1ca0
    lock_acquire+0x134/0x3f0
    native_lock_hpte+0x44/0xd0
    native_hpte_remove+0xb0/0x190
    kvmppc_mmu_map_page+0x650/0x698 [kvm_pr]
    kvmppc_handle_pagefault+0x534/0x6e8 [kvm_pr]
    kvmppc_handle_exit_pr+0x6d8/0xe90 [kvm_pr]
    after_sprg3_load+0x80/0x90 [kvm_pr]
    kvmppc_vcpu_run_pr+0x108/0x270 [kvm_pr]
    kvmppc_vcpu_run+0x34/0x48 [kvm]
    kvm_arch_vcpu_ioctl_run+0x340/0x470 [kvm]
    kvm_vcpu_ioctl+0x338/0x8b8 [kvm]
    sys_ioctl+0x7c4/0x13e0
    system_call_exception+0x128/0x320
    system_call_common+0x160/0x2e4

I suspect kvm_pr is the only caller that doesn't already have IRQs
disabled, which is why this hasn't been reported previously.

Fix it by disabling IRQs in native_hpte_remove().

Fixes: 35159b5717fa ("powerpc/64s: make HPTE lock and native_tlbie_lock irq-safe")
Cc: stable@vger.kernel.org # v6.1+
Signed-off-by: Michael Ellerman
Link: https://msgid.link/20230517123033.18430-1-mpe@ellerman.id.au
---
 arch/powerpc/mm/book3s64/hash_native.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 9342e79870df..430d1d935a7c 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
+	unsigned long hpte_v, flags;
 	struct hash_pte *hptep;
 	int i;
 	int slot_offset;
-	unsigned long hpte_v;
+
+	local_irq_save(flags);
 
 	DBG_LOW("    remove(group=%lx)\n", hpte_group);
 
@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
 		slot_offset &= 0x7;
 	}
 
-	if (i == HPTES_PER_GROUP)
-		return -1;
+	if (i == HPTES_PER_GROUP) {
+		i = -1;
+		goto out;
+	}
 
 	/* Invalidate the hpte. NOTE: this also unlocks it */
 	release_hpte_lock();
 	hptep->v = 0;
-
+out:
+	local_irq_restore(flags);
 	return i;
 }
--
cgit v1.2.3
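The shape of the fix is easier to see without the diff markers. Below is a
condensed sketch of the patched native_hpte_remove() flow; it is an
illustration only, and find_removable_slot_locked() is a hypothetical
stand-in for the existing random-slot search loop, not a real kernel helper.

  /*
   * Sketch only. find_removable_slot_locked() (hypothetical) stands in for
   * the search loop, which returns with the chosen HPTE's lock held, or a
   * negative value (with no lock held) if every entry in the group is bolted.
   */
  static long native_hpte_remove(unsigned long hpte_group)
  {
  	unsigned long flags;
  	long i;

  	/*
  	 * hpte_lock is also taken from the hash fault path, which can run
  	 * from softirq context, so it may only be taken with local
  	 * interrupts disabled.
  	 */
  	local_irq_save(flags);

  	i = find_removable_slot_locked(hpte_group);
  	if (i < 0)
  		goto out;

  	/*
  	 * In the real function the HPTE is invalidated here, which also
  	 * drops the per-HPTE lock.
  	 */
  	release_hpte_lock();
  out:
  	local_irq_restore(flags);
  	return i;
  }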
From 5bcedc5931e7bd6928a2d8207078d4cb476b3b55 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 17 May 2023 17:49:45 +1000
Subject: powerpc/security: Fix Speculation_Store_Bypass reporting on Power10

Nageswara reported that /proc/self/status was showing "vulnerable" for
the Speculation_Store_Bypass feature on Power10, eg:

  $ grep Speculation_Store_Bypass: /proc/self/status
  Speculation_Store_Bypass:       vulnerable

But at the same time the sysfs files, and lscpu, were showing "Not
affected".

This turns out to simply be a bug in the reporting of the
Speculation_Store_Bypass, aka. PR_SPEC_STORE_BYPASS, case.

When SEC_FTR_STF_BARRIER was added, so that firmware could communicate
the vulnerability was not present, the code in ssb_prctl_get() was not
updated to check the new flag.

So add the check for SEC_FTR_STF_BARRIER being disabled. Rather than
adding the new check to the existing if block and expanding the comment
to cover both cases, rewrite the three cases to be separate so they can
be commented separately for clarity.

Fixes: 84ed26fd00c5 ("powerpc/security: Add a security feature for STF barrier")
Cc: stable@vger.kernel.org # v5.14+
Reported-by: Nageswara R Sastry
Tested-by: Nageswara R Sastry
Reviewed-by: Russell Currey
Signed-off-by: Michael Ellerman
Link: https://msgid.link/20230517074945.53188-1-mpe@ellerman.id.au
---
 arch/powerpc/kernel/security.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 206475e3e0b4..4856e1a5161c 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
 
 static int ssb_prctl_get(struct task_struct *task)
 {
+	/*
+	 * The STF_BARRIER feature is on by default, so if it's off that means
+	 * firmware has explicitly said the CPU is not vulnerable via either
+	 * the hypercall or device tree.
+	 */
+	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
+		return PR_SPEC_NOT_AFFECTED;
+
+	/*
+	 * If the system's CPU has no known barrier (see setup_stf_barrier())
+	 * then assume that the CPU is not vulnerable.
+	 */
 	if (stf_enabled_flush_types == STF_BARRIER_NONE)
-		/*
-		 * We don't have an explicit signal from firmware that we're
-		 * vulnerable or not, we only have certain CPU revisions that
-		 * are known to be vulnerable.
-		 *
-		 * We assume that if we're on another CPU, where the barrier is
-		 * NONE, then we are not vulnerable.
-		 */
 		return PR_SPEC_NOT_AFFECTED;
-	else
-		/*
-		 * If we do have a barrier type then we are vulnerable. The
-		 * barrier is not a global or per-process mitigation, so the
-		 * only value we can report here is PR_SPEC_ENABLE, which
-		 * appears as "vulnerable" in /proc.
-		 */
-		return PR_SPEC_ENABLE;
-
-	return -EINVAL;
+
+	/*
+	 * Otherwise the CPU is vulnerable. The barrier is not a global or
+	 * per-process mitigation, so the only value that can be reported here
+	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
+	 */
+	return PR_SPEC_ENABLE;
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
--
cgit v1.2.3
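The Speculation_Store_Bypass line in /proc/<pid>/status and the
PR_GET_SPECULATION_CTRL prctl are both answered by ssb_prctl_get(), so the
fix can also be observed from userspace. The small program below is ordinary
Linux prctl() API usage, not part of the patch:

  #include <stdio.h>
  #include <sys/prctl.h>
  #include <linux/prctl.h>

  int main(void)
  {
  	/* Query the store-bypass speculation state for this task. */
  	int r = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

  	if (r < 0) {
  		perror("prctl(PR_GET_SPECULATION_CTRL)");
  		return 1;
  	}

  	if (r == PR_SPEC_NOT_AFFECTED)
  		printf("not affected\n");   /* what a fixed kernel reports on Power10 */
  	else if (r & PR_SPEC_ENABLE)
  		printf("vulnerable\n");     /* the value misreported before this fix */
  	else
  		printf("other state: 0x%x\n", r);

  	return 0;
  }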
From cf65b12c17b4910d099d78f6ed6919ec040ecdbc Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Thu, 22 Jun 2023 21:24:51 +1000
Subject: powerpc/64e: Fix objtool warnings in exceptions-64e.S

Since commit aec0ba7472a7 ("powerpc/64: Use -mprofile-kernel for big
endian ELFv2 kernels"), this file is checked by objtool. Fix warnings
such as:

  arch/powerpc/kernel/idle_64e.o: warning: objtool: .text+0x20: unannotated intra-function call
  arch/powerpc/kernel/exceptions-64e.o: warning: objtool: .text+0x218: unannotated intra-function call

Signed-off-by: Michael Ellerman
Link: https://msgid.link/20230622112451.735268-1-mpe@ellerman.id.au
---
 arch/powerpc/kernel/exceptions-64e.S | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 3f86091e68b3..7ab4c8c0f1ab 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -5,6 +5,7 @@
  * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
  */

+#include
 #include
 #include
 #include
@@ -66,7 +67,7 @@
 #define SPECIAL_EXC_LOAD(reg, name)			\
 	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

-special_reg_save:
+SYM_CODE_START_LOCAL(special_reg_save)
 	/*
 	 * We only need (or have stack space) to save this stuff if
 	 * we interrupted the kernel.
@@ -131,8 +132,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	SPECIAL_EXC_STORE(r10,CSRR1)

 	blr
+SYM_CODE_END(special_reg_save)

-ret_from_level_except:
+SYM_CODE_START_LOCAL(ret_from_level_except)
 	ld	r3,_MSR(r1)
 	andi.	r3,r3,MSR_PR
 	beq	1f
@@ -206,6 +208,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mtxer	r11

 	blr
+SYM_CODE_END(ret_from_level_except)

 .macro ret_from_level srr0 srr1 paca_ex scratch
 	bl	ret_from_level_except
@@ -232,13 +235,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mfspr	r13,\scratch
 .endm

-ret_from_crit_except:
+SYM_CODE_START_LOCAL(ret_from_crit_except)
 	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
 	rfci
+SYM_CODE_END(ret_from_crit_except)

-ret_from_mc_except:
+SYM_CODE_START_LOCAL(ret_from_mc_except)
 	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
 	rfmci
+SYM_CODE_END(ret_from_mc_except)

 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)	\
@@ -978,20 +983,22 @@ masked_interrupt_book3e_0x2c0:
  * r14 and r15 containing the fault address and error code, with the
  * original values stashed away in the PACA
  */
-storage_fault_common:
+SYM_CODE_START_LOCAL(storage_fault_common)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	do_page_fault
 	b	interrupt_return
+SYM_CODE_END(storage_fault_common)

 /*
  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
  * continues here.
  */
-alignment_more:
+SYM_CODE_START_LOCAL(alignment_more)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	alignment_exception
 	REST_NVGPRS(r1)
 	b	interrupt_return
+SYM_CODE_END(alignment_more)

 /*
  * Trampolines used when spotting a bad kernel stack pointer in
@@ -1030,8 +1037,7 @@ BAD_STACK_TRAMPOLINE(0xe00)
 BAD_STACK_TRAMPOLINE(0xf00)
 BAD_STACK_TRAMPOLINE(0xf20)

-	.globl	bad_stack_book3e
-bad_stack_book3e:
+_GLOBAL(bad_stack_book3e)
 	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
 	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
 	ld	r1,PACAEMERGSP(r13)
@@ -1285,8 +1291,7 @@ have_hes:
  * ever takes any parameters, the SCOM code must also be updated to
  * provide them.
  */
-	.globl	a2_tlbinit_code_start
-a2_tlbinit_code_start:
+_GLOBAL(a2_tlbinit_code_start)
 	ori	r11,r3,MAS0_WQ_ALLWAYS
 	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
@@ -1479,8 +1484,7 @@ _GLOBAL(book3e_secondary_thread_init)
 	mflr	r28
 	b	3b

-	.globl init_core_book3e
-init_core_book3e:
+_GLOBAL(init_core_book3e)
 	/* Establish the interrupt vector base */
 	tovirt(r2,r2)
 	LOAD_REG_ADDR(r3, interrupt_base_book3e)
@@ -1488,7 +1492,7 @@ init_core_book3e:
 	sync
 	blr

-init_thread_book3e:
+SYM_CODE_START_LOCAL(init_thread_book3e)
 	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
 	mtspr	SPRN_EPCR,r3
@@ -1502,6 +1506,7 @@ init_thread_book3e:
 	mtspr	SPRN_TSR,r3

 	blr
+SYM_CODE_END(init_thread_book3e)

 _GLOBAL(__setup_base_ivors)
 	SET_IVOR(0, 0x020)	/* Critical Input */
--
cgit v1.2.3
From cf53564b11cef5cdfafc548b172345c9aa753f89 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Thu, 6 Jul 2023 07:54:05 +0530
Subject: powerpc/mm/book3s64/hash/4k: Add pmd_same callback for 4K page size

With commit 0d940a9b270b ("mm/pgtable: allow pte_offset_map[_lock]() to
fail") the kernel is now using pmd_same to compare pmd values that are
pointing to a level 4 page table page. Move the functions out of
#ifdef CONFIG_TRANSPARENT_HUGEPAGE and add a variant that can work with
both 4K and 64K page size.

  kernel BUG at arch/powerpc/include/asm/book3s/64/hash-4k.h:141!
  Oops: Exception in kernel mode, sig: 5 [#1]
  LE PAGE_SIZE=4K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
  .....
  NIP [c00000000048aee0] __pte_offset_map_lock+0xf0/0x164
  LR [c00000000048ae78] __pte_offset_map_lock+0x88/0x164
  Call Trace:
    0xc0003f000009a340 (unreliable)
    __handle_mm_fault+0x1340/0x1980
    handle_mm_fault+0xbc/0x380
    __get_user_pages+0x320/0x550
    get_user_pages_remote+0x13c/0x520
    get_arg_page+0x80/0x1d0
    copy_string_kernel+0xc8/0x250
    kernel_execve+0x11c/0x270
    run_init_process+0xe4/0x10c
    kernel_init+0xbc/0x1a0
    ret_from_kernel_user_thread+0x14/0x1c

Reported-by: Michael Ellerman
Signed-off-by: "Aneesh Kumar K.V"
Acked-by: Hugh Dickins
Signed-off-by: Michael Ellerman
Link: https://msgid.link/20230706022405.798157-1-aneesh.kumar@linux.ibm.com
---
 arch/powerpc/include/asm/book3s/64/hash-4k.h  | 6 ------
 arch/powerpc/include/asm/book3s/64/hash-64k.h | 5 -----
 arch/powerpc/include/asm/book3s/64/hash.h     | 5 +++++
 3 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b6ac4f86c87b..6472b08fa1b0 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -136,12 +136,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 	return 0;
 }

-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	BUG();
-	return 0;
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	BUG();
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 338e62fbea0b..0bf6fd0bf42a 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -263,11 +263,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 		  (_PAGE_PTE | H_PAGE_THP_HUGE));
 }

-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 17e7a778c856..d4a19e6547ac 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -132,6 +132,11 @@ static inline int get_region_id(unsigned long ea)
 	return region_id;
 }

+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
 #define hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
 #define hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
 static inline int hash__p4d_bad(p4d_t p4d)
--
cgit v1.2.3
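For reference, the comparison the relocated hash__pmd_same() performs is a
plain masked equality check: two pmd values are treated as the same if they
differ only in the software HPTE-tracking bits, which the hash MMU code can
update behind a page-table walker's back. The standalone program below
illustrates the idea with a made-up mask value standing in for
_PAGE_HPTEFLAGS, and it omits the big-endian pmd_raw()/cpu_to_be64() handling
of the real code:

  #include <stdio.h>
  #include <stdint.h>

  /* Hypothetical mask; stands in for _PAGE_HPTEFLAGS, not the real value. */
  #define HPTE_SW_BITS 0x0000000000000c10ULL

  /* Same idea as hash__pmd_same(): ignore the HPTE-tracking bits. */
  static int pmd_same_masked(uint64_t a, uint64_t b)
  {
  	return ((a ^ b) & ~HPTE_SW_BITS) == 0;
  }

  int main(void)
  {
  	uint64_t pmd = 0x00000000deadb000ULL;

  	/* Flipping a bit inside the mask still compares as "same" (prints 1). */
  	printf("%d\n", pmd_same_masked(pmd, pmd | 0x400));

  	/* Changing the page-table address bits does not (prints 0). */
  	printf("%d\n", pmd_same_masked(pmd, pmd + 0x1000));

  	return 0;
  }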