path: root/arch/powerpc/kernel
author	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-19 14:13:33 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-19 14:13:33 +0100
commit	5f6e430f931d245da838db3e10e918681207029b (patch)
tree	6adc54a582652ae470ce95d9205f798568339ff0	/arch/powerpc/kernel
parent	Merge tag 'mm-nonmm-stable-2022-12-17-20-32' of git://git.kernel.org/pub/scm/... (diff)
parent	powerpc/code-patching: Fix oops with DEBUG_VM enabled (diff)
Merge tag 'powerpc-6.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:

 - Add powerpc qspinlock implementation optimised for large system
   scalability and paravirt. See the merge message for more details

 - Enable objtool to be built on powerpc to generate mcount locations

 - Use a temporary mm for code patching with the Radix MMU, so the
   writable mapping is restricted to the patching CPU

 - Add an option to build the 64-bit big-endian kernel with the ELFv2 ABI

 - Sanitise user registers on interrupt entry on 64-bit Book3S

 - Many other small features and fixes

Thanks to Aboorva Devarajan, Angel Iglesias, Benjamin Gray, Bjorn Helgaas, Bo Liu, Chen Lifu, Christoph Hellwig, Christophe JAILLET, Christophe Leroy, Christopher M. Riedl, Colin Ian King, Deming Wang, Disha Goel, Dmitry Torokhov, Finn Thain, Geert Uytterhoeven, Gustavo A. R. Silva, Haowen Bai, Joel Stanley, Jordan Niethe, Julia Lawall, Kajol Jain, Laurent Dufour, Li zeming, Miaoqian Lin, Michael Jeanson, Nathan Lynch, Naveen N. Rao, Nayna Jain, Nicholas Miehlbradt, Nicholas Piggin, Pali Rohár, Randy Dunlap, Rohan McLure, Russell Currey, Sathvika Vasireddy, Shaomin Deng, Stephen Kitt, Stephen Rothwell, Thomas Weißschuh, Tiezhu Yang, Uwe Kleine-König, Xie Shaowen, Xiu Jianfeng, XueBing Chen, Yang Yingliang, Zhang Jiaming, ruanjinjie, Jessica Yu, and Wolfram Sang.

* tag 'powerpc-6.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (181 commits)
  powerpc/code-patching: Fix oops with DEBUG_VM enabled
  powerpc/qspinlock: Fix 32-bit build
  powerpc/prom: Fix 32-bit build
  powerpc/rtas: mandate RTAS syscall filtering
  powerpc/rtas: define pr_fmt and convert printk call sites
  powerpc/rtas: clean up includes
  powerpc/rtas: clean up rtas_error_log_max initialization
  powerpc/pseries/eeh: use correct API for error log size
  powerpc/rtas: avoid scheduling in rtas_os_term()
  powerpc/rtas: avoid device tree lookups in rtas_os_term()
  powerpc/rtasd: use correct OF API for event scan rate
  powerpc/rtas: document rtas_call()
  powerpc/pseries: unregister VPA when hot unplugging a CPU
  powerpc/pseries: reset the RCU watchdogs after a LPM
  powerpc: Take in account addition CPU node when building kexec FDT
  powerpc: export the CPU node count
  powerpc/cpuidle: Set CPUIDLE_FLAG_POLLING for snooze state
  powerpc/dts/fsl: Fix pca954x i2c-mux node names
  cxl: Remove unnecessary cxl_pci_window_alignment()
  selftests/powerpc: Fix resource leaks
  ...
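The "sanitise user registers" item above shows up throughout the diff below as SANITIZE_GPRS()/SANITIZE_NVGPRS()/SANITIZE_RESTORE_NVGPRS() and HANDLER_RESTORE_NVGPRS() uses gated by CONFIG_INTERRUPT_SANITIZE_REGISTERS. As a hedged sketch only (the macro body below is assumed for illustration and is not the kernel's actual definition in the powerpc asm headers), the enabled case amounts to zeroing a range of saved GPRs on interrupt entry:

/*
 * Illustrative sketch, not the kernel's real macro: with
 * CONFIG_INTERRUPT_SANITIZE_REGISTERS enabled, zero GPRs start..end so
 * user-controlled register contents cannot feed speculative execution
 * in the kernel; with it disabled the macro expands to nothing and the
 * interrupt entry path is unchanged.
 */
#ifdef CONFIG_INTERRUPT_SANITIZE_REGISTERS
.macro SANITIZE_GPRS start, end
	.Lreg = \start
	.rept (\end - \start + 1)
	li	.Lreg, 0	/* load immediate 0 into the current GPR */
	.Lreg = .Lreg + 1
	.endr
.endm
#else
.macro SANITIZE_GPRS start, end
.endm
#endif

On the exit side, the hunks in exceptions-64s.S and interrupt_64.S pair this with SANITIZE_RESTORE_NVGPRS() and replace several unconditional REST_NVGPRS(r1) calls with HANDLER_RESTORE_NVGPRS(), presumably so non-volatile registers are reloaded on the common exit path when sanitisation is enabled rather than in each individual handler.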
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	34
-rw-r--r--	arch/powerpc/kernel/cpu_setup_6xx.S	26
-rw-r--r--	arch/powerpc/kernel/cpu_setup_e500.S	8
-rw-r--r--	arch/powerpc/kernel/entry_32.S	23
-rw-r--r--	arch/powerpc/kernel/entry_64.S	2
-rw-r--r--	arch/powerpc/kernel/exceptions-64e.S	48
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	120
-rw-r--r--	arch/powerpc/kernel/head_32.h	4
-rw-r--r--	arch/powerpc/kernel/head_40x.S	7
-rw-r--r--	arch/powerpc/kernel/head_44x.S	6
-rw-r--r--	arch/powerpc/kernel/head_64.S	13
-rw-r--r--	arch/powerpc/kernel/head_85xx.S	13
-rw-r--r--	arch/powerpc/kernel/head_8xx.S	7
-rw-r--r--	arch/powerpc/kernel/head_book3s_32.S	33
-rw-r--r--	arch/powerpc/kernel/head_booke.h	4
-rw-r--r--	arch/powerpc/kernel/hw_breakpoint.c	2
-rw-r--r--	arch/powerpc/kernel/interrupt_64.S	54
-rw-r--r--	arch/powerpc/kernel/irq.c	4
-rw-r--r--	arch/powerpc/kernel/kgdb.c	2
-rw-r--r--	arch/powerpc/kernel/kprobes.c	14
-rw-r--r--	arch/powerpc/kernel/misc_32.S	2
-rw-r--r--	arch/powerpc/kernel/misc_64.S	8
-rw-r--r--	arch/powerpc/kernel/module_64.c	10
-rw-r--r--	arch/powerpc/kernel/optprobes.c	2
-rw-r--r--	arch/powerpc/kernel/optprobes_head.S	4
-rw-r--r--	arch/powerpc/kernel/ppc_save_regs.S	57
-rw-r--r--	arch/powerpc/kernel/process.c	97
-rw-r--r--	arch/powerpc/kernel/prom.c	4
-rw-r--r--	arch/powerpc/kernel/rtas.c	192
-rw-r--r--	arch/powerpc/kernel/rtasd.c	7
-rw-r--r--	arch/powerpc/kernel/smp.c	2
-rw-r--r--	arch/powerpc/kernel/stacktrace.c	10
-rw-r--r--	arch/powerpc/kernel/swsusp_32.S	5
-rw-r--r--	arch/powerpc/kernel/time.c	23
-rw-r--r--	arch/powerpc/kernel/tm.S	8
-rw-r--r--	arch/powerpc/kernel/trace/ftrace_mprofile.S	2
-rw-r--r--	arch/powerpc/kernel/vdso/Makefile	2
-rw-r--r--	arch/powerpc/kernel/vector.S	4
38 files changed, 498 insertions, 365 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4ce2a4aa3985..d24a59a98c0c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -72,7 +72,7 @@
#endif
#define STACK_PT_REGS_OFFSET(sym, val) \
- DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val))
+ DEFINE(sym, STACK_INT_FRAME_REGS + offsetof(struct pt_regs, val))
int main(void)
{
@@ -167,9 +167,8 @@ int main(void)
OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr);
OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state.fpr);
- /* Local pt_regs on stack for Transactional Memory funcs. */
- DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
- sizeof(struct pt_regs) + 16);
+ /* Local pt_regs on stack in int frame form, plus 16 bytes for TM */
+ DEFINE(TM_FRAME_SIZE, STACK_INT_FRAME_SIZE + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
@@ -261,7 +260,7 @@ int main(void)
/* Interrupt register frame */
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
- DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
+ DEFINE(SWITCH_FRAME_SIZE, STACK_SWITCH_FRAME_SIZE);
STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
@@ -418,21 +417,18 @@ int main(void)
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets);
OFFSET(KVM_SDR1, kvm, arch.sdr1);
OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
- OFFSET(KVM_RADIX, kvm, arch.radix);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
- OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested);
OFFSET(VCPU_CPU, kvm_vcpu, cpu);
OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
#endif
@@ -449,16 +445,12 @@ int main(void)
OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0);
OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0);
- OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1);
- OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
- OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
- OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
@@ -486,8 +478,6 @@ int main(void)
OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr);
OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop);
OFFSET(VCPU_WORT, kvm_vcpu, arch.wort);
- OFFSET(VCPU_TID, kvm_vcpu, arch.tid);
- OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr);
OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr);
OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map);
OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest);
@@ -582,8 +572,6 @@ int main(void)
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
- HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
- HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
@@ -594,9 +582,6 @@ int main(void)
HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]);
HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]);
HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]);
- HSTATE_FIELD(HSTATE_MMCR3, host_mmcr[7]);
- HSTATE_FIELD(HSTATE_SIER2, host_mmcr[8]);
- HSTATE_FIELD(HSTATE_SIER3, host_mmcr[9]);
HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]);
HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]);
HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]);
@@ -672,17 +657,6 @@ int main(void)
OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
#endif
-#ifdef CONFIG_KVM_XICS
- DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu,
- arch.xive_saved_state));
- DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
- arch.xive_cam_word));
- DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
- DEFINE(VCPU_XIVE_ESC_ON, offsetof(struct kvm_vcpu, arch.xive_esc_on));
- DEFINE(VCPU_XIVE_ESC_RADDR, offsetof(struct kvm_vcpu, arch.xive_esc_raddr));
- DEFINE(VCPU_XIVE_ESC_VADDR, offsetof(struct kvm_vcpu, arch.xive_esc_vaddr));
-#endif
-
#ifdef CONFIG_KVM_EXIT_TIMING
OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f8b5ff64b604..f29ce3dd6140 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -4,6 +4,8 @@
* Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*/
+#include <linux/linkage.h>
+
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
@@ -81,7 +83,7 @@ _GLOBAL(__setup_cpu_745x)
blr
/* Enable caches for 603's, 604, 750 & 7400 */
-setup_common_caches:
+SYM_FUNC_START_LOCAL(setup_common_caches)
mfspr r11,SPRN_HID0
andi. r0,r11,HID0_DCE
ori r11,r11,HID0_ICE|HID0_DCE
@@ -95,11 +97,12 @@ setup_common_caches:
sync
isync
blr
+SYM_FUNC_END(setup_common_caches)
/* 604, 604e, 604ev, ...
* Enable superscalar execution & branch history table
*/
-setup_604_hid0:
+SYM_FUNC_START_LOCAL(setup_604_hid0)
mfspr r11,SPRN_HID0
ori r11,r11,HID0_SIED|HID0_BHTE
ori r8,r11,HID0_BTCD
@@ -110,6 +113,7 @@ setup_604_hid0:
sync
isync
blr
+SYM_FUNC_END(setup_604_hid0)
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
* erratas we work around here.
@@ -125,13 +129,14 @@ setup_604_hid0:
* needed once we have applied workaround #5 (though it's
* not set by Apple's firmware at least).
*/
-setup_7400_workarounds:
+SYM_FUNC_START_LOCAL(setup_7400_workarounds)
mfpvr r3
rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0207
ble 1f
blr
-setup_7410_workarounds:
+SYM_FUNC_END(setup_7400_workarounds)
+SYM_FUNC_START_LOCAL(setup_7410_workarounds)
mfpvr r3
rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0100
@@ -151,6 +156,7 @@ setup_7410_workarounds:
sync
isync
blr
+SYM_FUNC_END(setup_7410_workarounds)
/* 740/750/7400/7410
* Enable Store Gathering (SGE), Address Broadcast (ABE),
@@ -158,7 +164,7 @@ setup_7410_workarounds:
* Dynamic Power Management (DPM), Speculative (SPD)
* Clear Instruction cache throttling (ICTC)
*/
-setup_750_7400_hid0:
+SYM_FUNC_START_LOCAL(setup_750_7400_hid0)
mfspr r11,SPRN_HID0
ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
oris r11,r11,HID0_DPM@h
@@ -177,12 +183,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
sync
isync
blr
+SYM_FUNC_END(setup_750_7400_hid0)
/* 750cx specific
* Looks like we have to disable NAP feature for some PLL settings...
* (waiting for confirmation)
*/
-setup_750cx:
+SYM_FUNC_START_LOCAL(setup_750cx)
mfspr r10, SPRN_HID1
rlwinm r10,r10,4,28,31
cmpwi cr0,r10,7
@@ -196,11 +203,13 @@ setup_750cx:
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r4)
blr
+SYM_FUNC_END(setup_750cx)
/* 750fx specific
*/
-setup_750fx:
+SYM_FUNC_START_LOCAL(setup_750fx)
blr
+SYM_FUNC_END(setup_750fx)
/* MPC 745x
* Enable Store Gathering (SGE), Branch Folding (FOLD)
@@ -212,7 +221,7 @@ setup_750fx:
* Clear Instruction cache throttling (ICTC)
* Enable L2 HW prefetch
*/
-setup_745x_specifics:
+SYM_FUNC_START_LOCAL(setup_745x_specifics)
/* We check for the presence of an L3 cache setup by
* the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier
@@ -270,6 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
sync
isync
blr
+SYM_FUNC_END(setup_745x_specifics)
/*
* Initialize the FPU registers. This is needed to work around an errata
diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S
index 2ab25161b0ad..077cfccc3461 100644
--- a/arch/powerpc/kernel/cpu_setup_e500.S
+++ b/arch/powerpc/kernel/cpu_setup_e500.S
@@ -8,6 +8,8 @@
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
*/
+#include <linux/linkage.h>
+
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
@@ -274,7 +276,7 @@ _GLOBAL(flush_dcache_L1)
blr
-has_L2_cache:
+SYM_FUNC_START_LOCAL(has_L2_cache)
/* skip L2 cache on P2040/P2040E as they have no L2 cache */
mfspr r3, SPRN_SVR
/* shift right by 8 bits and clear E bit of SVR */
@@ -290,9 +292,10 @@ has_L2_cache:
1:
li r3, 0
blr
+SYM_FUNC_END(has_L2_cache)
/* flush backside L2 cache */
-flush_backside_L2_cache:
+SYM_FUNC_START_LOCAL(flush_backside_L2_cache)
mflr r10
bl has_L2_cache
mtlr r10
@@ -313,6 +316,7 @@ flush_backside_L2_cache:
bne 1b
2:
blr
+SYM_FUNC_END(flush_backside_L2_cache)
_GLOBAL(cpu_down_flush_e500v2)
mflr r0
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3fc7c9886bb7..5604c9a1ac22 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -18,6 +18,8 @@
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
+#include <linux/linkage.h>
+
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -74,17 +76,18 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
- .globl __kuep_lock
-__kuep_lock:
+SYM_FUNC_START(__kuep_lock)
lwz r9, THREAD+THSR0(r2)
update_user_segments_by_4 r9, r10, r11, r12
blr
+SYM_FUNC_END(__kuep_lock)
-__kuep_unlock:
+SYM_FUNC_START_LOCAL(__kuep_unlock)
lwz r9, THREAD+THSR0(r2)
rlwinm r9,r9,0,~SR_NX
update_user_segments_by_4 r9, r10, r11, r12
blr
+SYM_FUNC_END(__kuep_unlock)
.macro kuep_lock
bl __kuep_lock
@@ -114,7 +117,7 @@ transfer_to_syscall:
addi r12,r12,STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r1)
li r2, INTERRUPT_SYSCALL
- stw r12,8(r1)
+ stw r12,STACK_INT_FRAME_MARKER(r1)
stw r2,_TRAP(r1)
SAVE_GPR(0, r1)
SAVE_GPRS(3, 8, r1)
@@ -123,12 +126,12 @@ transfer_to_syscall:
kuep_lock
/* Calling convention has r3 = regs, r4 = orig r0 */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
mr r4,r0
bl system_call_exception
ret_from_syscall:
- addi r4,r1,STACK_FRAME_OVERHEAD
+ addi r4,r1,STACK_INT_FRAME_REGS
li r5,0
bl syscall_exit_prepare
#ifdef CONFIG_PPC_47x
@@ -215,9 +218,9 @@ ret_from_kernel_thread:
* in arch/ppc/kernel/process.c
*/
_GLOBAL(_switch)
- stwu r1,-INT_FRAME_SIZE(r1)
+ stwu r1,-SWITCH_FRAME_SIZE(r1)
mflr r0
- stw r0,INT_FRAME_SIZE+4(r1)
+ stw r0,SWITCH_FRAME_SIZE+4(r1)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
@@ -248,7 +251,7 @@ _GLOBAL(_switch)
lwz r4,_NIP(r1) /* Return to _switch caller in new task */
mtlr r4
- addi r1,r1,INT_FRAME_SIZE
+ addi r1,r1,SWITCH_FRAME_SIZE
blr
.globl fast_exception_return
@@ -293,7 +296,7 @@ _ASM_NOKPROBE_SYMBOL(fast_exception_return)
.globl interrupt_return
interrupt_return:
lwz r4,_MSR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
andi. r0,r4,MSR_PR
beq .Lkernel_interrupt_return
bl interrupt_exit_user_prepare
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3e2e37e6ecab..1bf1121e17f1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -14,6 +14,7 @@
* code, and exception/interrupt return code for PowerPC.
*/
+#include <linux/objtool.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
@@ -73,6 +74,7 @@ flush_branch_caches:
// Flush the link stack
.rept 64
+ ANNOTATE_INTRA_FUNCTION_CALL
bl .+4
.endr
b 1f
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 2f68fb2ee4fc..3f86091e68b3 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -358,7 +358,6 @@ ret_from_mc_except:
std r14,PACA_EXMC+EX_R14(r13); \
std r15,PACA_EXMC+EX_R15(r13)
-
/* Core exception code for all exceptions except TLB misses. */
#define EXCEPTION_COMMON_LVL(n, scratch, excf) \
exc_##n##_common: \
@@ -391,10 +390,11 @@ exc_##n##_common: \
std r10,_CCR(r1); /* store orig CR in stackframe */ \
std r9,GPR1(r1); /* store stack frame back link */ \
std r11,SOFTE(r1); /* and save it to stackframe */ \
- std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
+ std r12,STACK_INT_FRAME_MARKER(r1); /* mark the frame */ \
std r3,_TRAP(r1); /* set trap number */ \
std r0,RESULT(r1); /* clear regs->result */ \
- SAVE_NVGPRS(r1);
+ SAVE_NVGPRS(r1); \
+ SANITIZE_NVGPRS(); /* minimise speculation influence */
#define EXCEPTION_COMMON(n) \
EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
@@ -455,7 +455,7 @@ exc_##n##_bad_stack: \
EXCEPTION_COMMON(trapnum) \
ack(r8); \
CHECK_NAPPING(); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
+ addi r3,r1,STACK_INT_FRAME_REGS; \
bl hdlr; \
b interrupt_return
@@ -504,7 +504,7 @@ __end_interrupts:
EXCEPTION_COMMON_CRIT(0x100)
bl special_reg_save
CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_nmi_exception
b ret_from_crit_except
@@ -515,7 +515,7 @@ __end_interrupts:
EXCEPTION_COMMON_MC(0x000)
bl special_reg_save
CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl machine_check_exception
b ret_from_mc_except
@@ -570,7 +570,7 @@ __end_interrupts:
std r14,_ESR(r1)
ld r14,PACA_EXGEN+EX_R14(r13)
EXCEPTION_COMMON(0x700)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl program_check_exception
REST_NVGPRS(r1)
b interrupt_return
@@ -586,7 +586,7 @@ __end_interrupts:
beq- 1f
bl load_up_fpu
b fast_interrupt_return
-1: addi r3,r1,STACK_FRAME_OVERHEAD
+1: addi r3,r1,STACK_INT_FRAME_REGS
bl kernel_fp_unavailable_exception
b interrupt_return
@@ -606,7 +606,7 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl altivec_unavailable_exception
b interrupt_return
@@ -616,7 +616,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
BOOKE_INTERRUPT_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
bl altivec_assist_exception
@@ -643,7 +643,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
EXCEPTION_COMMON_CRIT(0x9f0)
bl special_reg_save
CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_BOOKE_WDT
bl WatchdogException
#else
@@ -664,7 +664,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0xf20)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_exception
b interrupt_return
@@ -731,7 +731,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
ld r14,PACA_EXCRIT+EX_R14(r13)
ld r15,PACA_EXCRIT+EX_R15(r13)
EXCEPTION_COMMON_CRIT(0xd00)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl DebugException
REST_NVGPRS(r1)
b interrupt_return
@@ -802,7 +802,7 @@ kernel_dbg_exc:
ld r14,PACA_EXDBG+EX_R14(r13)
ld r15,PACA_EXDBG+EX_R15(r13)
EXCEPTION_COMMON_DBG(0xd08)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl DebugException
REST_NVGPRS(r1)
b interrupt_return
@@ -812,7 +812,7 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x260)
CHECK_NAPPING()
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
/*
* XXX: Returning from performance_monitor_exception taken as a
* soft-NMI (Linux irqs disabled) may be risky to use interrupt_return
@@ -834,7 +834,7 @@ kernel_dbg_exc:
EXCEPTION_COMMON_CRIT(0x2a0)
bl special_reg_save
CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_nmi_exception
b ret_from_crit_except
@@ -846,7 +846,7 @@ kernel_dbg_exc:
GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x2c0)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_exception
b interrupt_return
@@ -857,7 +857,7 @@ kernel_dbg_exc:
EXCEPTION_COMMON_CRIT(0x2e0)
bl special_reg_save
CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_nmi_exception
b ret_from_crit_except
@@ -866,7 +866,7 @@ kernel_dbg_exc:
NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x310)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_exception
b interrupt_return
@@ -875,7 +875,7 @@ kernel_dbg_exc:
NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x320)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_exception
b interrupt_return
@@ -884,7 +884,7 @@ kernel_dbg_exc:
NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x340)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_exception
b interrupt_return
@@ -979,7 +979,7 @@ masked_interrupt_book3e_0x2c0:
* original values stashed away in the PACA
*/
storage_fault_common:
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl do_page_fault
b interrupt_return
@@ -988,7 +988,7 @@ storage_fault_common:
* continues here.
*/
alignment_more:
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl alignment_exception
REST_NVGPRS(r1)
b interrupt_return
@@ -1069,7 +1069,7 @@ bad_stack_book3e:
ZEROIZE_GPR(12)
std r12,0(r11)
LOAD_PACA_TOC()
-1: addi r3,r1,STACK_FRAME_OVERHEAD
+1: addi r3,r1,STACK_INT_FRAME_REGS
bl kernel_bad_stack
b 1b
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 651c36b056bd..6441a1ba57ac 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
*
*/
+#include <linux/linkage.h>
#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
@@ -111,6 +112,7 @@ name:
#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */
#define __ISTACK(name) .L_ISTACK_ ## name
#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */
+#define IMSR_R12 .L_IMSR_R12_\name\() /* Assumes MSR saved to r12 */
#define INT_DEFINE_BEGIN(n) \
.macro int_define_ ## n name
@@ -176,6 +178,9 @@ do_define_int n
.ifndef IKUAP
IKUAP=1
.endif
+ .ifndef IMSR_R12
+ IMSR_R12=0
+ .endif
.endm
/*
@@ -502,6 +507,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
std r10,0(r1) /* make stack chain pointer */
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
+ SANITIZE_GPR(0)
/* Mark our [H]SRRs valid for return */
li r10,1
@@ -544,8 +550,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r9,GPR11(r1)
std r10,GPR12(r1)
std r11,GPR13(r1)
+ .if !IMSR_R12
+ SANITIZE_GPRS(9, 12)
+ .else
+ SANITIZE_GPRS(9, 11)
+ .endif
SAVE_NVGPRS(r1)
+ SANITIZE_NVGPRS()
.if IDAR
.if IISIDE
@@ -577,8 +589,8 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r10,IAREA+EX_CTR(r13)
std r10,_CTR(r1)
- std r2,GPR2(r1) /* save r2 in stackframe */
- SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */
+ SAVE_GPRS(2, 8, r1) /* save r2 - r8 in stackframe */
+ SANITIZE_GPRS(2, 8)
mflr r9 /* Get LR, later save to stack */
LOAD_PACA_TOC() /* get kernel TOC into r2 */
std r9,_LINK(r1)
@@ -591,7 +603,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
li r10,0
LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
std r10,RESULT(r1) /* clear regs->result */
- std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
+ std r11,STACK_INT_FRAME_MARKER(r1) /* mark the frame */
.endm
/*
@@ -696,6 +708,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
mtlr r9
ld r9,_CCR(r1)
mtcr r9
+ SANITIZE_RESTORE_NVGPRS()
REST_GPRS(2, 13, r1)
REST_GPR(0, r1)
/* restore original r1. */
@@ -1061,7 +1074,7 @@ EXC_COMMON_BEGIN(system_reset_common)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY system_reset
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl system_reset_exception
/* Clear MSR_RI before setting SRR0 and SRR1. */
@@ -1208,7 +1221,7 @@ EXC_COMMON_BEGIN(machine_check_early_common)
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
BEGIN_FTR_SECTION
bl machine_check_early_boot
END_FTR_SECTION(0, 1) // nop out after boot
@@ -1298,7 +1311,7 @@ EXC_COMMON_BEGIN(machine_check_common)
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
GEN_COMMON machine_check
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl machine_check_exception_async
b interrupt_return_srr
@@ -1364,14 +1377,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
* This is the NMI version of the handler because we are called from
* the early handler which is a true NMI.
*/
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl machine_check_exception
/*
* We will not reach here. Even if we did, there is no way out.
* Call unrecoverable_exception and die.
*/
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unrecoverable_exception
b .
@@ -1422,7 +1435,7 @@ EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
GEN_COMMON data_access
ld r4,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
andis. r0,r4,DSISR_DABRMATCH@h
bne- 1f
#ifdef CONFIG_PPC_64S_HASH_MMU
@@ -1441,7 +1454,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* do_break() may have changed the NV GPRS while handling a breakpoint.
* If so, we need to restore them with their updated values.
*/
- REST_NVGPRS(r1)
+ HANDLER_RESTORE_NVGPRS()
b interrupt_return_srr
@@ -1479,7 +1492,7 @@ EXC_COMMON_BEGIN(data_access_slb_common)
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl do_slb_fault
cmpdi r3,0
bne- 1f
@@ -1493,7 +1506,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
li r3,-EFAULT
#endif
std r3,RESULT(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl do_bad_segment_interrupt
b interrupt_return_srr
@@ -1525,7 +1538,7 @@ EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
GEN_COMMON instruction_access
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
bl do_hash_fault
@@ -1567,7 +1580,7 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl do_slb_fault
cmpdi r3,0
bne- 1f
@@ -1581,7 +1594,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
li r3,-EFAULT
#endif
std r3,RESULT(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl do_bad_segment_interrupt
b interrupt_return_srr
@@ -1635,7 +1648,7 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl do_IRQ
BEGIN_FTR_SECTION
b interrupt_return_hsrr
@@ -1665,9 +1678,9 @@ EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
GEN_COMMON alignment
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl alignment_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr
@@ -1731,9 +1744,9 @@ EXC_COMMON_BEGIN(program_check_common)
__GEN_COMMON_BODY program_check
.Ldo_program_check:
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl program_check_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr
@@ -1751,6 +1764,7 @@ INT_DEFINE_BEGIN(fp_unavailable)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ IMSR_R12=1
INT_DEFINE_END(fp_unavailable)
EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
@@ -1762,7 +1776,7 @@ EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
GEN_COMMON fp_unavailable
bne 1f /* if from user, just load it up */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl kernel_fp_unavailable_exception
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
@@ -1780,7 +1794,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl fp_unavailable_tm
b interrupt_return_srr
#endif
@@ -1824,7 +1838,7 @@ EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl timer_interrupt
b interrupt_return_srr
@@ -1909,7 +1923,7 @@ EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
GEN_COMMON doorbell_super
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
@@ -2076,7 +2090,7 @@ EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl single_step_exception
b interrupt_return_srr
@@ -2110,7 +2124,7 @@ EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
GEN_COMMON h_data_storage
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
BEGIN_MMU_FTR_SECTION
bl do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
@@ -2139,7 +2153,7 @@ EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_exception
b interrupt_return_hsrr
@@ -2162,9 +2176,9 @@ EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
GEN_COMMON emulation_assist
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl emulation_assist_interrupt
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_hsrr
@@ -2222,7 +2236,7 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
__GEN_COMMON_BODY hmi_exception_early
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl hmi_exception_realmode
cmpdi cr0,r3,0
bne 1f
@@ -2240,7 +2254,7 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl handle_hmi_exception
b interrupt_return_hsrr
@@ -2274,7 +2288,7 @@ EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
GEN_COMMON h_doorbell
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
@@ -2310,7 +2324,7 @@ EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl do_IRQ
b interrupt_return_hsrr
@@ -2356,7 +2370,7 @@ EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
lbz r4,PACAIRQSOFTMASK(r13)
cmpdi r4,IRQS_ENABLED
bne 1f
@@ -2384,6 +2398,7 @@ INT_DEFINE_BEGIN(altivec_unavailable)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ IMSR_R12=1
INT_DEFINE_END(altivec_unavailable)
EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
@@ -2410,14 +2425,14 @@ BEGIN_FTR_SECTION
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl altivec_unavailable_tm
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl altivec_unavailable_exception
b interrupt_return_srr
@@ -2433,6 +2448,7 @@ INT_DEFINE_BEGIN(vsx_unavailable)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ IMSR_R12=1
INT_DEFINE_END(vsx_unavailable)
EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
@@ -2458,14 +2474,14 @@ BEGIN_FTR_SECTION
b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl vsx_unavailable_tm
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl vsx_unavailable_exception
b interrupt_return_srr
@@ -2492,9 +2508,9 @@ EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
GEN_COMMON facility_unavailable
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl facility_unavailable_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr
@@ -2520,9 +2536,10 @@ EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
GEN_COMMON h_facility_unavailable
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl facility_unavailable_exception
- REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
+ /* XXX Shouldn't be necessary in practice */
+ HANDLER_RESTORE_NVGPRS()
b interrupt_return_hsrr
@@ -2550,7 +2567,7 @@ EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
GEN_COMMON cbe_system_error
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl cbe_system_error_exception
b interrupt_return_hsrr
@@ -2581,7 +2598,7 @@ EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl instruction_breakpoint_exception
b interrupt_return_srr
@@ -2703,7 +2720,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unknown_exception
b interrupt_return_hsrr
@@ -2720,7 +2737,7 @@ EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
EXC_COMMON_BEGIN(cbe_maintenance_common)
GEN_COMMON cbe_maintenance
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl cbe_maintenance_exception
b interrupt_return_hsrr
@@ -2745,10 +2762,10 @@ EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
EXC_COMMON_BEGIN(altivec_assist_common)
GEN_COMMON altivec_assist
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_ALTIVEC
bl altivec_assist_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
#else
bl unknown_exception
#endif
@@ -2767,7 +2784,7 @@ EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
EXC_COMMON_BEGIN(cbe_thermal_common)
GEN_COMMON cbe_thermal
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl cbe_thermal_exception
b interrupt_return_hsrr
@@ -2800,7 +2817,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY soft_nmi
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl soft_nmi_interrupt
/* Clear MSR_RI before setting SRR0 and SRR1. */
@@ -3124,7 +3141,7 @@ _GLOBAL(enable_machine_check)
blr
/* MSR[RI] should be clear because this uses SRR[01] */
-disable_machine_check:
+SYM_FUNC_START_LOCAL(disable_machine_check)
mflr r0
bcl 20,31,$+4
0: mflr r3
@@ -3137,3 +3154,4 @@ disable_machine_check:
RFI_TO_KERNEL
1: mtlr r0
blr
+SYM_FUNC_END(disable_machine_check)
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index c3286260a7d1..f8e2911478a7 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -112,7 +112,7 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
stw r0,GPR0(r1)
lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
addi r10,r10,STACK_FRAME_REGS_MARKER@l
- stw r10,8(r1)
+ stw r10,STACK_INT_FRAME_MARKER(r1)
li r10, \trapno
stw r10,_TRAP(r1)
SAVE_GPRS(3, 8, r1)
@@ -127,7 +127,7 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
mfspr r10,SPRN_XER
addi r2, r2, -THREAD
stw r10,_XER(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
.endm
.macro prepare_transfer_to_handler
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 088f500896c7..3f68a1624646 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -28,6 +28,8 @@
#include <linux/init.h>
#include <linux/pgtable.h>
#include <linux/sizes.h>
+#include <linux/linkage.h>
+
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -602,7 +604,7 @@ start_here:
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
li r0,0
- stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+ stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
bl early_init /* We have to do this with MMU on */
@@ -662,7 +664,7 @@ start_here:
* kernel initialization. This maps the first 32 MBytes of memory 1:1
* virtual to physical and more importantly sets the cache mode.
*/
-initial_mmu:
+SYM_FUNC_START_LOCAL(initial_mmu)
tlbia /* Invalidate all TLB entries */
isync
@@ -711,6 +713,7 @@ initial_mmu:
mtspr SPRN_EVPR,r0
blr
+SYM_FUNC_END(initial_mmu)
_GLOBAL(abort)
mfspr r13,SPRN_DBCR0
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index f15cb9fdb692..63a85c16fef4 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -109,7 +109,7 @@ _GLOBAL(_start);
lis r1,init_thread_union@h
ori r1,r1,init_thread_union@l
li r0,0
- stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+ stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
bl early_init
@@ -1012,7 +1012,7 @@ _GLOBAL(start_secondary_47x)
*/
lis r1,temp_boot_stack@h
ori r1,r1,temp_boot_stack@l
- addi r1,r1,1024-STACK_FRAME_OVERHEAD
+ addi r1,r1,1024-STACK_FRAME_MIN_SIZE
li r0,0
stw r0,0(r1)
bl mmu_init_secondary
@@ -1025,7 +1025,7 @@ _GLOBAL(start_secondary_47x)
lwz r1,TASK_STACK(r2)
/* Current stack pointer */
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
li r0,0
stw r0,0(r1)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index dedcc6fe2263..7558ba4eb864 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -18,6 +18,7 @@
* variants.
*/
+#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/reg.h>
@@ -424,7 +425,7 @@ generic_secondary_common_init:
/* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
- subi r1,r1,STACK_FRAME_OVERHEAD
+ subi r1,r1,STACK_FRAME_MIN_SIZE
/* See if we need to call a cpu state restore handler */
LOAD_REG_ADDR(r23, cur_cpu_spec)
@@ -462,7 +463,7 @@ generic_secondary_common_init:
* Assumes we're mapped EA == RA if the MMU is on.
*/
#ifdef CONFIG_PPC_BOOK3S
-__mmu_off:
+SYM_FUNC_START_LOCAL(__mmu_off)
mfmsr r3
andi. r0,r3,MSR_IR|MSR_DR
beqlr
@@ -473,6 +474,7 @@ __mmu_off:
sync
rfid
b . /* prevent speculative execution */
+SYM_FUNC_END(__mmu_off)
#endif
@@ -780,7 +782,7 @@ _GLOBAL(pmac_secondary_start)
/* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
- subi r1,r1,STACK_FRAME_OVERHEAD
+ subi r1,r1,STACK_FRAME_MIN_SIZE
b __secondary_start
@@ -869,7 +871,7 @@ _GLOBAL(start_secondary_resume)
/*
* This subroutine clobbers r11 and r12
*/
-enable_64b_mode:
+SYM_FUNC_START_LOCAL(enable_64b_mode)
mfmsr r11 /* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E_64
oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
@@ -881,6 +883,7 @@ enable_64b_mode:
isync
#endif
blr
+SYM_FUNC_END(enable_64b_mode)
/*
* This puts the TOC pointer into r2, offset by 0x8000 (as expected
@@ -958,7 +961,7 @@ start_here_multiplatform:
LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
add r1,r3,r1
li r0,0
- stdu r0,-STACK_FRAME_OVERHEAD(r1)
+ stdu r0,-STACK_FRAME_MIN_SIZE(r1)
/*
* Do very early kernel initializations, including initial hash table
diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index 52c0ab416326..d438ca74e96c 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -29,6 +29,8 @@
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/pgtable.h>
+#include <linux/linkage.h>
+
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -229,7 +231,7 @@ set_ivor:
lis r1,init_thread_union@h
ori r1,r1,init_thread_union@l
li r0,0
- stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+ stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
#ifdef CONFIG_SMP
stw r24, TASK_CPU(r2)
@@ -885,7 +887,7 @@ KernelSPE:
* Translate the effec addr in r3 to phys addr. The phys addr will be put
* into r3(higher 32bit) and r4(lower 32bit)
*/
-get_phys_addr:
+SYM_FUNC_START_LOCAL(get_phys_addr)
mfmsr r8
mfspr r9,SPRN_PID
rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */
@@ -907,6 +909,7 @@ get_phys_addr:
mfspr r3,SPRN_MAS7
#endif
blr
+SYM_FUNC_END(get_phys_addr)
/*
* Global functions
@@ -972,10 +975,10 @@ _GLOBAL(__giveup_spe)
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
beq 1f
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lwz r4,_MSR-STACK_INT_FRAME_REGS(r5)
lis r3,MSR_SPE@h
andc r4,r4,r3 /* disable SPE for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ stw r4,_MSR-STACK_INT_FRAME_REGS(r5)
1:
blr
#endif /* CONFIG_SPE */
@@ -1044,7 +1047,7 @@ __secondary_start:
lwz r1,TASK_STACK(r2)
/* stack */
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
li r0,0
stw r0,0(r1)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 0b05f2be66b9..a79751e05781 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -18,6 +18,8 @@
#include <linux/magic.h>
#include <linux/pgtable.h>
#include <linux/sizes.h>
+#include <linux/linkage.h>
+
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -537,7 +539,7 @@ start_here:
ori r0, r0, STACK_END_MAGIC@l
stw r0, 0(r1)
li r0,0
- stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+ stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
lis r6, swapper_pg_dir@ha
tophys(r6,r6)
@@ -625,7 +627,7 @@ start_here:
* 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
* these mappings is mapped by page tables.
*/
-initial_mmu:
+SYM_FUNC_START_LOCAL(initial_mmu)
li r8, 0
mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
lis r10, MD_TWAM@h
@@ -686,6 +688,7 @@ initial_mmu:
#endif
mtspr SPRN_DER, r8
blr
+SYM_FUNC_END(initial_mmu)
_GLOBAL(mmu_pin_tlb)
lis r9, (1f - PAGE_OFFSET)@h
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 519b60695167..c51f28b5abc0 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -18,6 +18,8 @@
#include <linux/init.h>
#include <linux/pgtable.h>
+#include <linux/linkage.h>
+
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -840,7 +842,7 @@ __secondary_start:
lwz r1,TASK_STACK(r1)
/* stack */
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
li r0,0
tophys(r3,r1)
stw r0,0(r3)
@@ -877,7 +879,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
* Load stuff into the MMU. Intended to be called with
* IR=0 and DR=0.
*/
-early_hash_table:
+SYM_FUNC_START_LOCAL(early_hash_table)
sync /* Force all PTE updates to finish */
isync
tlbia /* Clear all TLB entries */
@@ -888,8 +890,9 @@ early_hash_table:
ori r6, r6, 3 /* 256kB table */
mtspr SPRN_SDR1, r6
blr
+SYM_FUNC_END(early_hash_table)
-load_up_mmu:
+SYM_FUNC_START_LOCAL(load_up_mmu)
sync /* Force all PTE updates to finish */
isync
tlbia /* Clear all TLB entries */
@@ -918,6 +921,7 @@ BEGIN_MMU_FTR_SECTION
LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
+SYM_FUNC_END(load_up_mmu)
_GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
@@ -966,7 +970,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
li r0,0
- stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+ stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
/*
* Do early platform-specific initialization,
* and set up the MMU.
@@ -1028,7 +1032,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
* this makes sure it's done.
* -- Cort
*/
-clear_bats:
+SYM_FUNC_START_LOCAL(clear_bats)
li r10,0
mtspr SPRN_DBAT0U,r10
@@ -1072,6 +1076,7 @@ BEGIN_MMU_FTR_SECTION
mtspr SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
+SYM_FUNC_END(clear_bats)
_GLOBAL(update_bats)
lis r4, 1f@h
@@ -1108,15 +1113,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
mtspr SPRN_SRR1, r6
rfi
-flush_tlbs:
+SYM_FUNC_START_LOCAL(flush_tlbs)
lis r10, 0x40
1: addic. r10, r10, -0x1000
tlbie r10
bgt 1b
sync
blr
+SYM_FUNC_END(flush_tlbs)
-mmu_off:
+SYM_FUNC_START_LOCAL(mmu_off)
addi r4, r3, __after_mmu_off - _start
mfmsr r3
andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
@@ -1128,9 +1134,10 @@ mmu_off:
mtspr SPRN_SRR1,r3
sync
rfi
+SYM_FUNC_END(mmu_off)
/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
-initial_bats:
+SYM_FUNC_START_LOCAL(initial_bats)
lis r11,PAGE_OFFSET@h
tophys(r8,r11)
#ifdef CONFIG_SMP
@@ -1146,9 +1153,10 @@ initial_bats:
mtspr SPRN_IBAT0U,r11
isync
blr
+SYM_FUNC_END(initial_bats)
#ifdef CONFIG_BOOTX_TEXT
-setup_disp_bat:
+SYM_FUNC_START_LOCAL(setup_disp_bat)
/*
* setup the display bat prepared for us in prom.c
*/
@@ -1164,10 +1172,11 @@ setup_disp_bat:
mtspr SPRN_DBAT3L,r8
mtspr SPRN_DBAT3U,r11
blr
+SYM_FUNC_END(setup_disp_bat)
#endif /* CONFIG_BOOTX_TEXT */
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-setup_cpm_bat:
+SYM_FUNC_START_LOCAL(setup_cpm_bat)
lis r8, 0xf000
ori r8, r8, 0x002a
mtspr SPRN_DBAT1L, r8
@@ -1177,10 +1186,11 @@ setup_cpm_bat:
mtspr SPRN_DBAT1U, r11
blr
+SYM_FUNC_END(setup_cpm_bat)
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
-setup_usbgecko_bat:
+SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
lis r8, 0x0c00
@@ -1199,6 +1209,7 @@ setup_usbgecko_bat:
mtspr SPRN_DBAT1L, r8
mtspr SPRN_DBAT1U, r11
blr
+SYM_FUNC_END(setup_usbgecko_bat)
#endif
.data
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 1cb9d0f7cbf2..37d43c172676 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -84,7 +84,7 @@ END_BTB_FLUSH_SECTION
stw r0,GPR0(r1)
lis r10, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
addi r10, r10, STACK_FRAME_REGS_MARKER@l
- stw r10, 8(r1)
+ stw r10, STACK_INT_FRAME_MARKER(r1)
li r10, \trapno
stw r10,_TRAP(r1)
SAVE_GPRS(3, 8, r1)
@@ -99,7 +99,7 @@ END_BTB_FLUSH_SECTION
mfspr r10,SPRN_XER
addi r2, r2, -THREAD
stw r10,_XER(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
.endm
.macro prepare_transfer_to_handler
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 8db1a15d7acb..e1b4e70c8fd0 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -646,7 +646,7 @@ int hw_breakpoint_handler(struct die_args *args)
ppc_inst_t instr = ppc_inst(0);
int type = 0;
int size = 0;
- unsigned long ea;
+ unsigned long ea = 0;
/* Disable breakpoints during exception handling */
hw_breakpoint_disable();
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index a019ed6fc839..fccc34489add 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -77,11 +77,11 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
std r11,_TRAP(r1)
std r12,_CCR(r1)
std r3,ORIG_GPR3(r1)
+ LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
+ std r11,STACK_INT_FRAME_MARKER(r1) /* "regs" marker */
/* Calling convention has r3 = regs, r4 = orig r0 */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
mr r4,r0
- LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
- std r11,-16(r3) /* "regshere" marker */
BEGIN_FTR_SECTION
HMT_MEDIUM
@@ -96,10 +96,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
* but this is the best we can do.
*/
+ /*
+ * Zero user registers to prevent influencing speculative execution
+ * state of kernel code.
+ */
+ SANITIZE_SYSCALL_GPRS()
bl system_call_exception
.Lsyscall_vectored_\name\()_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
+ addi r4,r1,STACK_INT_FRAME_REGS
li r5,1 /* scv */
bl syscall_exit_prepare
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
@@ -124,6 +129,7 @@ BEGIN_FTR_SECTION
HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ SANITIZE_RESTORE_NVGPRS()
cmpdi r3,0
bne .Lsyscall_vectored_\name\()_restore_regs
@@ -159,7 +165,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r4,_LINK(r1)
ld r5,_XER(r1)
- REST_NVGPRS(r1)
+ HANDLER_RESTORE_NVGPRS()
REST_GPR(0, r1)
mtcr r2
mtctr r3
@@ -176,7 +182,7 @@ _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
ld r1,PACA_EXIT_SAVE_R1(r13)
LOAD_PACA_TOC()
ld r3,RESULT(r1)
- addi r4,r1,STACK_FRAME_OVERHEAD
+ addi r4,r1,STACK_INT_FRAME_REGS
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl syscall_exit_restart
@@ -250,11 +256,11 @@ END_BTB_FLUSH_SECTION
std r11,_TRAP(r1)
std r12,_CCR(r1)
std r3,ORIG_GPR3(r1)
+ LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
+ std r11,STACK_INT_FRAME_MARKER(r1) /* "regs" marker */
/* Calling convention has r3 = regs, r4 = orig r0 */
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
mr r4,r0
- LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
- std r11,-16(r3) /* "regshere" marker */
#ifdef CONFIG_PPC_BOOK3S
li r11,1
@@ -275,10 +281,15 @@ END_BTB_FLUSH_SECTION
wrteei 1
#endif
+ /*
+ * Zero user registers to prevent influencing speculative execution
+ * state of kernel code.
+ */
+ SANITIZE_SYSCALL_GPRS()
bl system_call_exception
.Lsyscall_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
+ addi r4,r1,STACK_INT_FRAME_REGS
li r5,0 /* !scv */
bl syscall_exit_prepare
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
@@ -315,6 +326,7 @@ BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+ SANITIZE_RESTORE_NVGPRS()
cmpdi r3,0
bne .Lsyscall_restore_regs
/* Zero volatile regs that may contain sensitive kernel data */
@@ -342,7 +354,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
.Lsyscall_restore_regs:
ld r3,_CTR(r1)
ld r4,_XER(r1)
- REST_NVGPRS(r1)
+ HANDLER_RESTORE_NVGPRS()
mtctr r3
mtspr SPRN_XER,r4
REST_GPR(0, r1)
@@ -357,7 +369,7 @@ _ASM_NOKPROBE_SYMBOL(syscall_restart)
ld r1,PACA_EXIT_SAVE_R1(r13)
LOAD_PACA_TOC()
ld r3,RESULT(r1)
- addi r4,r1,STACK_FRAME_OVERHEAD
+ addi r4,r1,STACK_INT_FRAME_REGS
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl syscall_exit_restart
@@ -388,7 +400,7 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
andi. r0,r5,MSR_RI
li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
bne+ .Lfast_kernel_interrupt_return_srr
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl unrecoverable_exception
b . /* should not get here */
#else
@@ -406,11 +418,13 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
beq interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl interrupt_exit_user_prepare
+#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
cmpdi r3,0
bne- .Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
+#endif
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
@@ -424,6 +438,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
.Lfast_user_interrupt_return_\srr\():
+ SANITIZE_RESTORE_NVGPRS()
#ifdef CONFIG_PPC_BOOK3S
.ifc \srr,srr
lbz r4,PACASRR_VALID(r13)
@@ -493,9 +508,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
b . /* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:
+#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
.Lrestore_nvgprs_\srr\():
REST_NVGPRS(r1)
b .Lrestore_nvgprs_\srr\()_cont
+#endif
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
@@ -503,7 +520,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
GET_PACA(r13)
ld r1,PACA_EXIT_SAVE_R1(r13)
LOAD_PACA_TOC()
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl interrupt_exit_user_restart
@@ -518,7 +535,7 @@ RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr
.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
bl interrupt_exit_kernel_prepare
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
@@ -585,6 +602,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS
.Lfast_kernel_interrupt_return_\srr\():
+ SANITIZE_RESTORE_NVGPRS()
cmpdi cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
.ifc \srr,srr
@@ -637,7 +655,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
* Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
* the reliable stack unwinder later on. Clear it.
*/
- std r0,STACK_FRAME_OVERHEAD-16(r1)
+ std r0,STACK_INT_FRAME_MARKER(r1)
REST_GPRS(2, 5, r1)
@@ -684,7 +702,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
GET_PACA(r13)
ld r1,PACA_EXIT_SAVE_R1(r13)
LOAD_PACA_TOC()
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl interrupt_exit_kernel_restart
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9ede61a5a469..c5b9ce887483 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -210,7 +210,7 @@ static __always_inline void call_do_softirq(const void *sp)
PPC_LL " %%r1, 0(%%r1) ;"
: // Outputs
: // Inputs
- [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
[callee] "i" (__do_softirq)
: // Clobbers
"lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
@@ -264,7 +264,7 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
: // Outputs
"+r" (r3)
: // Inputs
- [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
[callee] "i" (__do_irq)
: // Clobbers
"lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 1a1e9995dae3..ebe4d1645ca1 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -191,7 +191,7 @@ static int kgdb_break_match(struct pt_regs *regs)
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
- STACK_FRAME_OVERHEAD);
+ STACK_INT_FRAME_REGS);
unsigned long *ptr = gdb_regs;
int reg;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7a89de302609..b20ee72e873a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -157,9 +157,7 @@ int arch_prepare_kprobe(struct kprobe *p)
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
- preempt_disable();
prev = get_kprobe(p->addr - 1);
- preempt_enable_no_resched();
/*
* When prev is a ftrace-based kprobe, we don't have an insn, and it
@@ -370,7 +368,7 @@ int kprobe_handler(struct pt_regs *regs)
if (ret > 0) {
restore_previous_kprobe(kcb);
- preempt_enable_no_resched();
+ preempt_enable();
return 1;
}
}
@@ -383,7 +381,7 @@ int kprobe_handler(struct pt_regs *regs)
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler changed execution path, so skip ss setup */
reset_current_kprobe();
- preempt_enable_no_resched();
+ preempt_enable();
return 1;
}
@@ -396,7 +394,7 @@ int kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_HIT_SSDONE;
reset_current_kprobe();
- preempt_enable_no_resched();
+ preempt_enable();
return 1;
}
}
@@ -405,7 +403,7 @@ int kprobe_handler(struct pt_regs *regs)
return 1;
no_kprobe:
- preempt_enable_no_resched();
+ preempt_enable();
return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);
@@ -491,7 +489,7 @@ int kprobe_post_handler(struct pt_regs *regs)
}
reset_current_kprobe();
out:
- preempt_enable_no_resched();
+ preempt_enable();
/*
* if somebody else is singlestepping across a probe point, msr
@@ -530,7 +528,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
- preempt_enable_no_resched();
+ preempt_enable();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index e5127b19fec2..daf8f87d2372 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -382,7 +382,7 @@ EXPORT_SYMBOL(__bswapdi2)
_GLOBAL(start_secondary_resume)
/* Reset stack */
rlwinm r1, r1, 0, 0, 31 - THREAD_SHIFT
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
li r3,0
stw r3,0(r1) /* Zero the stack frame pointer */
bl start_secondary
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 36184cada00b..c39c07a4c06e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -9,6 +9,7 @@
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*/
+#include <linux/linkage.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
@@ -353,7 +354,7 @@ _GLOBAL(kexec_smp_wait)
*
* don't overwrite r3 here, it is live for kexec_wait above.
*/
-real_mode: /* assume normal blr return */
+SYM_FUNC_START_LOCAL(real_mode) /* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E_64
/* Create an identity mapping. */
b kexec_create_tlb
@@ -370,6 +371,7 @@ real_mode: /* assume normal blr return */
mtspr SPRN_SRR0,r11
rfid
#endif
+SYM_FUNC_END(real_mode)
/*
* kexec_sequence(newstack, start, image, control, clear_all(),
@@ -384,7 +386,7 @@ _GLOBAL(kexec_sequence)
std r0,16(r1)
/* switch stacks to newstack -- &kexec_stack.stack */
- stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
+ stdu r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r3)
mr r1,r3
li r0,0
@@ -401,7 +403,7 @@ _GLOBAL(kexec_sequence)
std r26,-48(r1)
std r25,-56(r1)
- stdu r1,-STACK_FRAME_OVERHEAD-64(r1)
+ stdu r1,-STACK_FRAME_MIN_SIZE-64(r1)
/* save args into preserved regs */
mr r31,r3 /* newstack (both) */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 7e45dc98df8a..ff045644f13f 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -31,6 +31,16 @@
this, and makes other things simpler. Anton?
--RR. */
+bool module_elf_check_arch(Elf_Ehdr *hdr)
+{
+ unsigned long abi_level = hdr->e_flags & 0x3;
+
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ return abi_level == 2;
+ else
+ return abi_level < 2;
+}
+
#ifdef CONFIG_PPC64_ELF_ABI_V2
static func_desc_t func_desc(unsigned long addr)
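
[ Illustrative user-space check, not part of the patch: the new
  module_elf_check_arch() compares the low two bits of e_flags against
  the kernel's own ABI. The same field can be inspected on a .ko built
  for the matching endianness: ]

#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	Elf64_Ehdr ehdr;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&ehdr, sizeof(ehdr), 1, f) != 1)
		return 1;
	/* bits 0-1 of e_flags carry the ELF ABI level on ppc64 */
	printf("ABI level %u (2 = ELFv2, 0/1 = ELFv1)\n", ehdr.e_flags & 3);
	fclose(f);
	return 0;
}
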
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 3b1c2236cbee..004fae2044a3 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -112,7 +112,7 @@ static void optimized_callback(struct optimized_kprobe *op,
__this_cpu_write(current_kprobe, NULL);
}
- preempt_enable_no_resched();
+ preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S
index cd4e7bc32609..35932f45fb4e 100644
--- a/arch/powerpc/kernel/optprobes_head.S
+++ b/arch/powerpc/kernel/optprobes_head.S
@@ -85,7 +85,7 @@ optprobe_template_op_address:
TEMPLATE_FOR_IMM_LOAD_INSNS
/* 2. pt_regs pointer in r4 */
- addi r4,r1,STACK_FRAME_OVERHEAD
+ addi r4,r1,STACK_INT_FRAME_REGS
.global optprobe_template_call_handler
optprobe_template_call_handler:
@@ -96,7 +96,7 @@ optprobe_template_call_handler:
* Parameters for instruction emulation:
* 1. Pass SP in register r3.
*/
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
.global optprobe_template_insn
optprobe_template_insn:
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 2d4d21bb46a9..49813f982468 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -21,60 +21,33 @@
* different ABIs, though).
*/
_GLOBAL(ppc_save_regs)
- PPC_STL r0,0*SZL(r3)
+ /* This allows stack frame accessor macros and offsets to be used */
+ subi r3,r3,STACK_INT_FRAME_REGS
+ PPC_STL r0,GPR0(r3)
#ifdef CONFIG_PPC32
- stmw r2, 2*SZL(r3)
+ stmw r2,GPR2(r3)
#else
- PPC_STL r2,2*SZL(r3)
- PPC_STL r3,3*SZL(r3)
- PPC_STL r4,4*SZL(r3)
- PPC_STL r5,5*SZL(r3)
- PPC_STL r6,6*SZL(r3)
- PPC_STL r7,7*SZL(r3)
- PPC_STL r8,8*SZL(r3)
- PPC_STL r9,9*SZL(r3)
- PPC_STL r10,10*SZL(r3)
- PPC_STL r11,11*SZL(r3)
- PPC_STL r12,12*SZL(r3)
- PPC_STL r13,13*SZL(r3)
- PPC_STL r14,14*SZL(r3)
- PPC_STL r15,15*SZL(r3)
- PPC_STL r16,16*SZL(r3)
- PPC_STL r17,17*SZL(r3)
- PPC_STL r18,18*SZL(r3)
- PPC_STL r19,19*SZL(r3)
- PPC_STL r20,20*SZL(r3)
- PPC_STL r21,21*SZL(r3)
- PPC_STL r22,22*SZL(r3)
- PPC_STL r23,23*SZL(r3)
- PPC_STL r24,24*SZL(r3)
- PPC_STL r25,25*SZL(r3)
- PPC_STL r26,26*SZL(r3)
- PPC_STL r27,27*SZL(r3)
- PPC_STL r28,28*SZL(r3)
- PPC_STL r29,29*SZL(r3)
- PPC_STL r30,30*SZL(r3)
- PPC_STL r31,31*SZL(r3)
+ SAVE_GPRS(2, 31, r3)
lbz r0,PACAIRQSOFTMASK(r13)
- PPC_STL r0,SOFTE-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,SOFTE(r3)
#endif
/* go up one stack frame for SP */
PPC_LL r4,0(r1)
- PPC_STL r4,1*SZL(r3)
+ PPC_STL r4,GPR1(r3)
/* get caller's LR */
PPC_LL r0,LRSAVE(r4)
- PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_LINK(r3)
mflr r0
- PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_NIP(r3)
mfmsr r0
- PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_MSR(r3)
mfctr r0
- PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_CTR(r3)
mfxer r0
- PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_XER(r3)
mfcr r0
- PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_CCR(r3)
li r0,0
- PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
- PPC_STL r0,ORIG_GPR3-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_TRAP(r3)
+ PPC_STL r0,ORIG_GPR3(r3)
blr
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index fcf604370c66..c22cc234672f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -862,10 +862,8 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
return 0;
}
-void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
+static void set_hw_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
- memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
-
if (dawr_enabled())
// Power8 or later
set_dawr(nr, brk);
@@ -879,6 +877,12 @@ void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
WARN_ON_ONCE(1);
}
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
+{
+ memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
+ set_hw_breakpoint(nr, brk);
+}
+
/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
@@ -891,6 +895,34 @@ bool ppc_breakpoint_available(void)
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
+/* Disable the breakpoint in hardware without touching current_brk[] */
+void suspend_breakpoints(void)
+{
+ struct arch_hw_breakpoint brk = {0};
+ int i;
+
+ if (!ppc_breakpoint_available())
+ return;
+
+ for (i = 0; i < nr_wp_slots(); i++)
+ set_hw_breakpoint(i, &brk);
+}
+
+/*
+ * Re-enable breakpoints suspended by suspend_breakpoints() in hardware
+ * from current_brk[]
+ */
+void restore_breakpoints(void)
+{
+ int i;
+
+ if (!ppc_breakpoint_available())
+ return;
+
+ for (i = 0; i < nr_wp_slots(); i++)
+ set_hw_breakpoint(i, this_cpu_ptr(&current_brk[i]));
+}
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_enabled(struct task_struct *tsk)
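
[ A minimal sketch of the intended pairing; the caller is hypothetical
  and not part of the patch: ]

static void example_no_watchpoint_section(void)
{
	suspend_breakpoints();	/* hardware disarmed, current_brk[] untouched */
	/* ... work that must not raise DAWR/DABR exceptions ... */
	restore_breakpoints();	/* re-arm from current_brk[] */
}
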
@@ -1359,7 +1391,7 @@ static void show_instructions(struct pt_regs *regs)
unsigned long nip = regs->nip;
unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
- printk("Instruction dump:");
+ printk("Code: ");
/*
* If we were executing with the MMU off for instructions, adjust pc
@@ -1373,9 +1405,6 @@ static void show_instructions(struct pt_regs *regs)
for (i = 0; i < NR_INSN_TO_PRINT; i++) {
int instr;
- if (!(i % 8))
- pr_cont("\n");
-
if (!__kernel_text_address(pc) ||
get_kernel_nofault(instr, (const void *)pc)) {
pr_cont("XXXXXXXX ");
@@ -1726,13 +1755,17 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
klp_init_thread_info(p);
+ /* Create initial stack frame. */
+ sp -= STACK_USER_INT_FRAME_SIZE;
+ *(unsigned long *)(sp + STACK_INT_FRAME_MARKER) = STACK_FRAME_REGS_MARKER;
+
/* Copy registers */
- sp -= sizeof(struct pt_regs);
- childregs = (struct pt_regs *) sp;
+ childregs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS);
if (unlikely(args->fn)) {
/* kernel thread */
+ ((unsigned long *)sp)[0] = 0;
memset(childregs, 0, sizeof(struct pt_regs));
- childregs->gpr[1] = sp + sizeof(struct pt_regs);
+ childregs->gpr[1] = sp + STACK_USER_INT_FRAME_SIZE;
/* function */
if (args->fn)
childregs->gpr[14] = ppc_function_entry((void *)args->fn);
@@ -1750,6 +1783,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
*childregs = *regs;
if (usp)
childregs->gpr[1] = usp;
+ ((unsigned long *)sp)[0] = childregs->gpr[1];
p->thread.regs = childregs;
/* 64s sets this in ret_from_fork */
if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
@@ -1767,7 +1801,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
f = ret_from_fork;
}
childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
- sp -= STACK_FRAME_OVERHEAD;
/*
* The way this works is that at some point in the future
@@ -1777,11 +1810,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
* do some house keeping and then return from the fork or clone
* system call, using the stack frame created above.
*/
- ((unsigned long *)sp)[0] = 0;
- sp -= sizeof(struct pt_regs);
- kregs = (struct pt_regs *) sp;
- sp -= STACK_FRAME_OVERHEAD;
+ ((unsigned long *)sp)[STACK_FRAME_LR_SAVE] = (unsigned long)f;
+ sp -= STACK_SWITCH_FRAME_SIZE;
+ ((unsigned long *)sp)[0] = sp + STACK_SWITCH_FRAME_SIZE;
+ kregs = (struct pt_regs *)(sp + STACK_SWITCH_FRAME_REGS);
p->thread.ksp = sp;
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
for (i = 0; i < nr_wp_slots(); i++)
p->thread.ptrace_bps[i] = NULL;
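
[ Sketch derived from the hunks above, not text from the patch: the
  child stack copy_thread() now builds, with sp meaning the final
  p->thread.ksp and the macro names taken from the code. ]

/*
 * sp + 0                        back chain -> sp + STACK_SWITCH_FRAME_SIZE
 * sp + STACK_SWITCH_FRAME_REGS  kregs, the pt_regs used across _switch()
 * sp + STACK_SWITCH_FRAME_SIZE  interrupt frame; its word 0 is 0 for a
 *                               kernel thread or the user SP otherwise,
 *                               and its STACK_FRAME_LR_SAVE slot holds f
 *    ... + STACK_INT_FRAME_MARKER     STACK_FRAME_REGS_MARKER ("regs")
 *    ... + STACK_INT_FRAME_REGS       childregs
 *    ... + STACK_USER_INT_FRAME_SIZE  where the frame construction started
 */
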
@@ -2123,9 +2157,12 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
return 0;
}
-
-int validate_sp(unsigned long sp, struct task_struct *p,
- unsigned long nbytes)
+/*
+ * validate the stack frame of a particular minimum size, used for when we are
+ * looking at a certain object in the stack beyond the minimum.
+ */
+int validate_sp_size(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes)
{
unsigned long stack_page = (unsigned long)task_stack_page(p);
@@ -2141,7 +2178,10 @@ int validate_sp(unsigned long sp, struct task_struct *p,
return valid_emergency_stack(sp, p, nbytes);
}
-EXPORT_SYMBOL(validate_sp);
+int validate_sp(unsigned long sp, struct task_struct *p)
+{
+ return validate_sp_size(sp, p, STACK_FRAME_MIN_SIZE);
+}
static unsigned long ___get_wchan(struct task_struct *p)
{
@@ -2149,13 +2189,12 @@ static unsigned long ___get_wchan(struct task_struct *p)
int count = 0;
sp = p->thread.ksp;
- if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
+ if (!validate_sp(sp, p))
return 0;
do {
sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
- if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
- task_is_running(p))
+ if (!validate_sp(sp, p) || task_is_running(p))
return 0;
if (count > 0) {
ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
@@ -2209,7 +2248,7 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
lr = 0;
printk("%sCall Trace:\n", loglvl);
do {
- if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
+ if (!validate_sp(sp, tsk))
break;
stack = (unsigned long *) sp;
@@ -2230,12 +2269,16 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
/*
* See if this is an exception frame.
- * We look for the "regshere" marker in the current frame.
+ * We look for the "regs" marker in the current frame.
+ *
+ * STACK_SWITCH_FRAME_SIZE being the smallest frame that
+ * could hold a pt_regs, if that does not fit then it can't
+ * have regs.
*/
- if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
- && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ if (validate_sp_size(sp, tsk, STACK_SWITCH_FRAME_SIZE)
+ && stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
struct pt_regs *regs = (struct pt_regs *)
- (sp + STACK_FRAME_OVERHEAD);
+ (sp + STACK_INT_FRAME_REGS);
lr = regs->link;
printk("%s--- interrupt: %lx at %pS\n",
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 1eed87d954ba..4f1c920aa13e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -72,6 +72,7 @@ int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
+unsigned int boot_cpu_node_count __ro_after_init;
#endif
static phys_addr_t first_memblock_size;
static int __initdata boot_cpu_count;
@@ -335,6 +336,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
+ if (IS_ENABLED(CONFIG_PPC64))
+ boot_cpu_node_count++;
+
/* Get physical cpuid */
intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index e847f9b1c5b9..deded51a7978 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -7,43 +7,35 @@
* Copyright (C) 2001 IBM.
*/
-#include <linux/stdarg.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/init.h>
+#define pr_fmt(fmt) "rtas: " fmt
+
#include <linux/capability.h>
#include <linux/delay.h>
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/completion.h>
-#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/memblock.h>
-#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <linux/reboot.h>
+#include <linux/sched.h>
#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stdarg.h>
#include <linux/syscalls.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <asm/delay.h>
+#include <asm/firmware.h>
#include <asm/interrupt.h>
-#include <asm/rtas.h>
-#include <asm/hvcall.h>
#include <asm/machdep.h>
-#include <asm/firmware.h>
+#include <asm/mmu.h>
#include <asm/page.h>
-#include <asm/param.h>
-#include <asm/delay.h>
-#include <linux/uaccess.h>
-#include <asm/udbg.h>
-#include <asm/syscalls.h>
-#include <asm/smp.h>
-#include <linux/atomic.h>
+#include <asm/rtas.h>
#include <asm/time.h>
-#include <asm/mmu.h>
-#include <asm/topology.h>
+#include <asm/udbg.h>
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
@@ -353,6 +345,9 @@ int rtas_service_present(const char *service)
EXPORT_SYMBOL(rtas_service_present);
#ifdef CONFIG_RTAS_ERROR_LOGGING
+
+static u32 rtas_error_log_max __ro_after_init = RTAS_ERROR_LOG_MAX;
+
/*
* Return the firmware-specified size of the error log buffer
* for all rtas calls that require an error buffer argument.
@@ -360,21 +355,30 @@ EXPORT_SYMBOL(rtas_service_present);
*/
int rtas_get_error_log_max(void)
{
- static int rtas_error_log_max;
- if (rtas_error_log_max)
- return rtas_error_log_max;
-
- rtas_error_log_max = rtas_token ("rtas-error-log-max");
- if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
- (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
- printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
- rtas_error_log_max);
- rtas_error_log_max = RTAS_ERROR_LOG_MAX;
- }
return rtas_error_log_max;
}
EXPORT_SYMBOL(rtas_get_error_log_max);
+static void __init init_error_log_max(void)
+{
+ static const char propname[] __initconst = "rtas-error-log-max";
+ u32 max;
+
+ if (of_property_read_u32(rtas.dev, propname, &max)) {
+ pr_warn("%s not found, using default of %u\n",
+ propname, RTAS_ERROR_LOG_MAX);
+ max = RTAS_ERROR_LOG_MAX;
+ }
+
+ if (max > RTAS_ERROR_LOG_MAX) {
+ pr_warn("%s = %u, clamping max error log size to %u\n",
+ propname, max, RTAS_ERROR_LOG_MAX);
+ max = RTAS_ERROR_LOG_MAX;
+ }
+
+ rtas_error_log_max = max;
+}
+
static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
static int rtas_last_error_token;
@@ -432,6 +436,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x) NULL
#define get_errorlog_buffer() NULL
+static void __init init_error_log_max(void) {}
#endif
@@ -467,6 +472,64 @@ void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
static int ibm_open_errinjct_token;
static int ibm_errinjct_token;
+/**
+ * rtas_call() - Invoke an RTAS firmware function.
+ * @token: Identifies the function being invoked.
+ * @nargs: Number of input parameters. Does not include token.
+ * @nret: Number of output parameters, including the call status.
+ * @outputs: Array of @nret output words.
+ * @....: List of @nargs input parameters.
+ *
+ * Invokes the RTAS function indicated by @token, which the caller
+ * should obtain via rtas_token().
+ *
+ * The @nargs and @nret arguments must match the number of input and
+ * output parameters specified for the RTAS function.
+ *
+ * rtas_call() returns RTAS status codes, not conventional Linux errno
+ * values. Callers must translate any failure to an appropriate errno
+ * in syscall context. Most callers of RTAS functions that can return
+ * -2 or 990x should use rtas_busy_delay() to correctly handle those
+ * statuses before calling again.
+ *
+ * The return value descriptions are adapted from 7.2.8 [RTAS] Return
+ * Codes of the PAPR and CHRP specifications.
+ *
+ * Context: Process context preferably, interrupt context if
+ * necessary. Acquires an internal spinlock and may perform
+ * GFP_ATOMIC slab allocation in error path. Unsafe for NMI
+ * context.
+ * Return:
+ * * 0 - RTAS function call succeeded.
+ * * -1 - RTAS function encountered a hardware or
+ * platform error, or the token is invalid,
+ * or the function is restricted by kernel policy.
+ * * -2 - Specs say "A necessary hardware device was busy,
+ * and the requested function could not be
+ * performed. The operation should be retried at
+ * a later time." This is misleading, at least with
+ * respect to current RTAS implementations. What it
+ * usually means in practice is that the function
+ * could not be completed while meeting RTAS's
+ * deadline for returning control to the OS (250us
+ * for PAPR/PowerVM, typically), but the call may be
+ * immediately reattempted to resume work on it.
+ * * -3 - Parameter error.
+ * * -7 - Unexpected state change.
+ * * 9000...9899 - Vendor-specific success codes.
+ * * 9900...9905 - Advisory extended delay. Caller should try
+ * again after ~10^x ms has elapsed, where x is
+ * the last digit of the status [0-5]. Again going
+ * beyond the PAPR text, 990x on PowerVM indicates
+ * contention for RTAS-internal resources. Other
+ * RTAS call sequences in progress should be
+ * allowed to complete before reattempting the
+ * call.
+ * * -9000 - Multi-level isolation error.
+ * * -9999...-9004 - Vendor-specific error codes.
+ * * Additional negative values - Function-specific error.
+ * * Additional positive values - Function-specific success.
+ */
int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
va_list list;
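
[ Not part of the patch: a minimal caller following the pattern the
  kerneldoc above describes, assuming process context. "set-indicator"
  (3 inputs, 1 output) is just an example RTAS function. ]

static int example_set_indicator(int indicator, int index, int value)
{
	int token = rtas_token("set-indicator");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	do {
		rc = rtas_call(token, 3, 1, NULL, indicator, index, value);
	} while (rtas_busy_delay(rc));	/* handles -2/990x, sleeping as needed */

	return rc < 0 ? -EIO : 0;	/* map RTAS status to an errno */
}
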
@@ -657,8 +720,7 @@ static int rtas_error_rc(int rtas_rc)
rc = -ENODEV;
break;
default:
- printk(KERN_ERR "%s: unexpected RTAS error %d\n",
- __func__, rtas_rc);
+ pr_err("%s: unexpected error %d\n", __func__, rtas_rc);
rc = -ERANGE;
break;
}
@@ -862,8 +924,8 @@ void __noreturn rtas_restart(char *cmd)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_RESTART);
- printk("RTAS system-reboot returned %d\n",
- rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
+ pr_emerg("system-reboot returned %d\n",
+ rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
for (;;);
}
@@ -872,8 +934,8 @@ void rtas_power_off(void)
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_POWER_OFF);
/* allow power on only with power button press */
- printk("RTAS power-off returned %d\n",
- rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
+ pr_emerg("power-off returned %d\n",
+ rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
for (;;);
}
@@ -882,13 +944,14 @@ void __noreturn rtas_halt(void)
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_HALT);
/* allow power on only with power button press */
- printk("RTAS power-off returned %d\n",
- rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
+ pr_emerg("power-off returned %d\n",
+ rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
for (;;);
}
/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];
+static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE;
void rtas_os_term(char *str)
{
@@ -900,19 +963,23 @@ void rtas_os_term(char *str)
* this property may terminate the partition which we want to avoid
* since it interferes with panic_timeout.
*/
- if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
- RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
+ if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE)
return;
snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
+ /*
+ * Keep calling as long as RTAS returns a "try again" status,
+ * but don't use rtas_busy_delay(), which potentially
+ * schedules.
+ */
do {
- status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
+ status = rtas_call(ibm_os_term_token, 1, 1, NULL,
__pa(rtas_os_term_buf));
- } while (rtas_busy_delay(status));
+ } while (rtas_busy_delay_time(status));
if (status != 0)
- printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
+ pr_emerg("ibm,os-term call failed %d\n", status);
}
/**
@@ -983,8 +1050,6 @@ noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log
return NULL;
}
-#ifdef CONFIG_PPC_RTAS_FILTER
-
/*
* The sys_rtas syscall, as originally designed, allows root to pass
* arbitrary physical addresses to RTAS calls. A number of RTAS calls
@@ -1133,20 +1198,6 @@ static void __init rtas_syscall_filter_init(void)
rtas_filters[i].token = rtas_token(rtas_filters[i].name);
}
-#else
-
-static bool block_rtas_call(int token, int nargs,
- struct rtas_args *args)
-{
- return false;
-}
-
-static void __init rtas_syscall_filter_init(void)
-{
-}
-
-#endif /* CONFIG_PPC_RTAS_FILTER */
-
/* We assume to be passed big endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
@@ -1277,6 +1328,15 @@ void __init rtas_initialize(void)
no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
rtas.entry = no_entry ? rtas.base : entry;
+ init_error_log_max();
+
+ /*
+ * Discover these now to avoid device tree lookups in the
+ * panic path.
+ */
+ if (of_property_read_bool(rtas.dev, "ibm,extended-os-term"))
+ ibm_os_term_token = rtas_token("ibm,os-term");
+
/* If RTAS was found, allocate the RMO buffer for it and look for
* the stop-self token if any
*/
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 5270b450bbde..cc56ac6ba4b0 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
@@ -499,6 +500,8 @@ EXPORT_SYMBOL_GPL(rtas_cancel_event_scan);
static int __init rtas_event_scan_init(void)
{
+ int err;
+
if (!machine_is(pseries) && !machine_is(chrp))
return 0;
@@ -509,8 +512,8 @@ static int __init rtas_event_scan_init(void)
return -ENODEV;
}
- rtas_event_scan_rate = rtas_token("rtas-event-scan-rate");
- if (rtas_event_scan_rate == RTAS_UNKNOWN_SERVICE) {
+ err = of_property_read_u32(rtas.dev, "rtas-event-scan-rate", &rtas_event_scan_rate);
+ if (err) {
printk(KERN_ERR "rtasd: no rtas-event-scan-rate on system\n");
return -ENODEV;
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0da6e59161cd..6b90f10a6c81 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1249,7 +1249,7 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
#ifdef CONFIG_PPC64
paca_ptrs[cpu]->__current = idle;
paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
- THREAD_SIZE - STACK_FRAME_OVERHEAD;
+ THREAD_SIZE - STACK_FRAME_MIN_SIZE;
#endif
task_thread_info(idle)->cpu = cpu;
secondary_current = current_set[cpu] = idle;
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index a2443d61728e..5de8597eaab8 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -43,7 +43,7 @@ void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry,
unsigned long *stack = (unsigned long *) sp;
unsigned long newsp, ip;
- if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD))
+ if (!validate_sp(sp, task))
return;
newsp = stack[0];
@@ -77,7 +77,7 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
/*
* For user tasks, this is the SP value loaded on
* kernel entry, see "PACAKSAVE(r13)" in _switch() and
- * system_call_common()/EXCEPTION_PROLOG_COMMON().
+ * system_call_common().
*
* Likewise for non-swapper kernel threads,
* this also happens to be the top of the stack
@@ -88,13 +88,13 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
* an unreliable stack trace until it's been
* _switch()'ed to for the first time.
*/
- stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ stack_end -= STACK_USER_INT_FRAME_SIZE;
} else {
/*
* idle tasks have a custom stack layout,
* c.f. cpu_idle_thread_init().
*/
- stack_end -= STACK_FRAME_OVERHEAD;
+ stack_end -= STACK_FRAME_MIN_SIZE;
}
if (task == current)
@@ -136,7 +136,7 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
/* Mark stacktraces with exception frames as unreliable. */
if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
- stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index e0cbd63007f2..ffb79326483c 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
+#include <linux/linkage.h>
+
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
@@ -400,7 +402,7 @@ _ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)
/* FIXME:This construct is actually not useful since we don't shut
* down the instruction MMU, we could just flip back MSR-DR on.
*/
-turn_on_mmu:
+SYM_FUNC_START_LOCAL(turn_on_mmu)
mflr r4
mtsrr0 r4
mtsrr1 r3
@@ -408,4 +410,5 @@ turn_on_mmu:
isync
rfi
_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
+SYM_FUNC_END(turn_on_mmu)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a2ab397065c6..d68de3618741 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -130,7 +130,7 @@ unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
-EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
+EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime conversions */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
@@ -151,21 +151,6 @@ bool tb_invalid;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
- * Factor for converting from cputime_t (timebase ticks) to
- * microseconds. This is stored as 0.64 fixed-point binary fraction.
- */
-u64 __cputime_usec_factor;
-EXPORT_SYMBOL(__cputime_usec_factor);
-
-static void calc_cputime_factors(void)
-{
- struct div_result res;
-
- div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
- __cputime_usec_factor = res.result_low;
-}
-
-/*
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
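
[ For readers of the removal above, not part of the patch: the deleted
  factor was the 0.64 fixed-point value 2^64 * 1000000 / tb_ticks_per_sec
  (what the div128_by_32() call computed), so a timebase delta became
  microseconds roughly as below. The sketch uses a GCC 128-bit multiply
  rather than the kernel's own helpers. ]

static inline u64 tb_to_usecs_sketch(u64 ticks, u64 usec_factor)
{
	/* (ticks * factor) >> 64  ==  ticks * 1000000 / tb_ticks_per_sec */
	return (u64)(((unsigned __int128)ticks * usec_factor) >> 64);
}
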
@@ -369,10 +354,7 @@ void vtime_flush(struct task_struct *tsk)
acct->hardirq_time = 0;
acct->softirq_time = 0;
}
-
-#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-#define calc_cputime_factors()
-#endif
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
void __delay(unsigned long loops)
{
@@ -914,7 +896,6 @@ void __init time_init(void)
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
tb_ticks_per_sec = ppc_tb_freq;
tb_ticks_per_usec = ppc_tb_freq / 1000000;
- calc_cputime_factors();
/*
* Compute scale factor for sched_clock.
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 5a0f023a26e9..9feab5e0485b 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -117,7 +117,7 @@ _GLOBAL(tm_reclaim)
std r2, STK_GOT(r1)
stdu r1, -TM_FRAME_SIZE(r1)
- /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */
+ /* We've a struct pt_regs at [r1+STACK_INT_FRAME_REGS]. */
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)
@@ -222,7 +222,7 @@ _GLOBAL(tm_reclaim)
* Make r7 look like an exception frame so that we can use the neat
* GPRx(n) macros. r7 is NOT a pt_regs ptr!
*/
- subi r7, r7, STACK_FRAME_OVERHEAD
+ subi r7, r7, STACK_INT_FRAME_REGS
/* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
SAVE_GPR(0, r7) /* user r0 */
@@ -359,7 +359,7 @@ _GLOBAL(__tm_recheckpoint)
stdu r1, -TM_FRAME_SIZE(r1)
/*
- * We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
+ * We've a struct pt_regs at [r1+STACK_INT_FRAME_REGS].
* This is used for backing up the NVGPRs:
*/
SAVE_NVGPRS(r1)
@@ -379,7 +379,7 @@ _GLOBAL(__tm_recheckpoint)
* Make r7 look like an exception frame so that we can use the neat
* GPRx(n) macros. r7 is now NOT a pt_regs ptr!
*/
- subi r7, r7, STACK_FRAME_OVERHEAD
+ subi r7, r7, STACK_INT_FRAME_REGS
/* We need to setup MSR for FP/VMX/VSX register save instructions. */
mfmsr r6
diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S
index d031093bc436..ffb1db386849 100644
--- a/arch/powerpc/kernel/trace/ftrace_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S
@@ -110,7 +110,7 @@
.endif
/* Load &pt_regs in r6 for call below */
- addi r6, r1, STACK_FRAME_OVERHEAD
+ addi r6, r1, STACK_INT_FRAME_REGS
.endm
.macro ftrace_regs_exit allregs
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index a2e7b0ce5b19..6a977b0d8ffc 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -102,3 +102,5 @@ quiet_cmd_vdso64ld_and_check = VDSO64L $@
cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check)
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(VDSOCC) $(a_flags) $(CC64FLAGS) $(AS64FLAGS) -c -o $@ $<
+
+OBJECT_FILES_NON_STANDARD := y
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 5cf64740edb8..ffe5d90abe17 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
@@ -185,7 +186,7 @@ fphalf:
* Internal routine to enable floating point and set FPSCR to 0.
* Don't call it from C; it doesn't use the normal calling convention.
*/
-fpenable:
+SYM_FUNC_START_LOCAL(fpenable)
#ifdef CONFIG_PPC32
stwu r1,-64(r1)
#else
@@ -202,6 +203,7 @@ fpenable:
mffs fr31
MTFSF_L(fr1)
blr
+SYM_FUNC_END(fpenable)
fpdisable:
mtlr r12