From cd8a5673e9abb3fde0a1c25ee63a60fe1908c6f5 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Fri, 3 Mar 2006 17:11:40 +1100
Subject: powerpc: Fix might-sleep warning in program check exception handler

On 32-bit, the exception prolog for the program check exception doesn't
enable interrupts early on.  If it is an illegal instruction exception,
we read the instruction in order to emulate certain instructions, and
the get_user of the instruction triggers a WARN_ON since interrupts are
still disabled.  This adds a local_irq_enable() to enable interrupts
before reading the instruction.

Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/traps.c | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch')

diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 7509aa6474f2..98660aedeeb7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -814,6 +814,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		return;
 	}

+	local_irq_enable();
+
 	/* Try to emulate it if we should. */
 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
 		switch (emulate_instruction(regs)) {
-- cgit v1.2.3

From 76a0ee3d1633b035f4090ab591445ae7b087f129 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Fri, 3 Mar 2006 20:50:29 +1100
Subject: powerpc: Turn off verbose debug output in powermac platform functions

This is along the lines suggested by Chris Lumens but goes further in
that it leaves the DEBUG symbol undefined, making the DBG macro empty.

Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/powermac/pfunc_base.c | 5 +++++
 arch/powerpc/platforms/powermac/pfunc_core.c | 6 ++++++
 2 files changed, 11 insertions(+)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index 4ffd2a9832a0..9b7150f10414 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -9,7 +9,12 @@
 #include
 #include

+#undef DEBUG
+#ifdef DEBUG
 #define DBG(fmt...)	printk(fmt)
+#else
+#define DBG(fmt...)
+#endif

 static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs)
 {
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 356a739e52b2..4baa75b1d36f 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -20,7 +20,13 @@
 #define LOG_PARSE(fmt...)
 #define LOG_ERROR(fmt...)	printk(fmt)
 #define LOG_BLOB(t,b,c)
+
+#undef DEBUG
+#ifdef DEBUG
 #define DBG(fmt...)	printk(fmt)
+#else
+#define DBG(fmt...)
+#endif

 /* Command numbers */
 #define PMF_CMD_LIST	0
-- cgit v1.2.3

From 0c2aca88bdac4254a13466fb108733d243a118b6 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Fri, 3 Mar 2006 21:31:25 +1100
Subject: powerpc32: Fix timebase synchronization on 32-bit powermacs

The variable `timebase' used to transfer the current timebase value
from one cpu to the other in smp_core99_give/take_timebase was only an
unsigned long, i.e. 32 bits on 32-bit machines.  It needs to be 64 bits.
This makes it a u64, and fixes the issue reported by Kyle Moffett, that
the two cpus see wildly different values for the time of day.
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/powermac/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 0df2cdcd805c..6d64a9bf3474 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -435,7 +435,7 @@ struct smp_ops_t psurge_smp_ops = {
  */

 static void (*pmac_tb_freeze)(int freeze);
-static unsigned long timebase;
+static u64 timebase;
 static int tb_req;

 static void smp_core99_give_timebase(void)
-- cgit v1.2.3

From aa5cb02143123289bd37c30c0ad60339f8da0bad Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 1 Mar 2006 15:07:07 +1100
Subject: [PATCH] powerpc: Expose SMT and L1 icache snoop userland features

This patch makes userland aware of the icache snoop capability of the
POWER5 (and possibly others in the future) and of SMT capabilities.

Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/cputable.c | 9 ++++++---
 include/asm-powerpc/cputable.h | 2 ++
 2 files changed, 8 insertions(+), 3 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 10696456a4c6..e4e81374cb9a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -53,8 +53,10 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
 			 PPC_FEATURE_HAS_MMU)
 #define COMMON_USER_PPC64	(COMMON_USER | PPC_FEATURE_64)
 #define COMMON_USER_POWER4	(COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
-#define COMMON_USER_POWER5	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
-#define COMMON_USER_POWER5_PLUS	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
+#define COMMON_USER_POWER5	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\
+				 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+#define COMMON_USER_POWER5_PLUS	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
+				 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
 #define COMMON_USER_BOOKE	(PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
 				 PPC_FEATURE_BOOKE)

@@ -267,7 +269,8 @@ struct cpu_spec cpu_specs[] = {
 		.cpu_name		= "Cell Broadband Engine",
 		.cpu_features		= CPU_FTRS_CELL,
 		.cpu_user_features	= COMMON_USER_PPC64 |
-			PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP,
+			PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
+			PPC_FEATURE_SMT,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.cpu_setup		= __setup_cpu_be,
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 90d005bb4d1c..5638518968c3 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -20,6 +20,8 @@
 #define PPC_FEATURE_POWER5_PLUS		0x00020000
 #define PPC_FEATURE_CELL		0x00010000
 #define PPC_FEATURE_BOOKE		0x00008000
+#define PPC_FEATURE_SMT			0x00004000
+#define PPC_FEATURE_ICACHE_SNOOP	0x00002000

 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
-- cgit v1.2.3

From ab1b55e21f6977e420341727e9f4a50691057b5e Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 3 Mar 2006 10:35:40 +1100
Subject: [PATCH] powerpc: incorrect rmo_top handling in prom_init

On Thu, 2006-03-02 at 19:55 +0100, Olaf Hering wrote:
> My iBook1 has 2 memory regions in reg.  Depending on how I boot it
> (vmlinux+initrd) or zImage.initrd, it will not boot with current Linus
> tree.
> rmo_top should be 160MB instead of 32MB.

On logically-partitioned machines the first element of the reg property
in the memory node is defined to be the "RMO" region, i.e. the memory
that the processor can access in real mode.
On other machines the first element has no special meaning, so only
take it to be the RMO region on LPAR machines.

Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/prom_init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d34fe537400e..813c2cd194c2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -978,7 +978,7 @@ static void __init prom_init_mem(void)
 			if (size == 0)
 				continue;
 			prom_debug(" %x %x\n", base, size);
-			if (base == 0)
+			if (base == 0 && (RELOC(of_platform) & PLATFORM_LPAR))
 				RELOC(rmo_top) = size;
 			if ((base + size) > RELOC(ram_top))
 				RELOC(ram_top) = base + size;
-- cgit v1.2.3

From 1bd79336a426c5e4f3bab142407059ceb12cadf9 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Wed, 8 Mar 2006 13:24:22 +1100
Subject: powerpc: Fix various syscall/signal/swapcontext bugs

A careful reading of the recent changes to the system call entry/exit
paths revealed several problems, plus some things that could be
simplified and improved:

* 32-bit wasn't testing the _TIF_NOERROR bit in the syscall fast exit
  path, so it was only doing anything with it once it saw some other
  bit being set.  In other words, the noerror behaviour would apply to
  the next system call where we had to reschedule or deliver a signal,
  which is not necessarily the current system call.

* 32-bit wasn't doing the call to ptrace_notify in the syscall exit
  path when the _TIF_SINGLESTEP bit was set.

* _TIF_RESTOREALL was in both _TIF_USER_WORK_MASK and
  _TIF_PERSYSCALL_MASK, which is odd since _TIF_RESTOREALL is only set
  by system calls.  I took it out of _TIF_USER_WORK_MASK.

* On 64-bit, _TIF_RESTOREALL wasn't causing the non-volatile registers
  to be restored (unless perhaps a signal was delivered or the syscall
  was traced or single-stepped).  Thus the non-volatile registers
  weren't restored on exit from a signal handler.  We probably got away
  with it mostly because signal handlers written in C wouldn't alter
  the non-volatile registers.

* On 32-bit I simplified the code and made it more like 64-bit by
  making the syscall exit path jump to ret_from_except to handle
  preemption and signal delivery.

* 32-bit was calling do_signal unnecessarily when _TIF_RESTOREALL was
  set - but I think because of that 32-bit was actually restoring the
  non-volatile registers on exit from a signal handler.

* I changed the order of enabling interrupts and saving the
  non-volatile registers before calling do_syscall_trace_leave; now we
  enable interrupts first.
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/asm-offsets.c | 1 -
 arch/powerpc/kernel/entry_32.S | 95 ++++++++++-----------------------
 arch/powerpc/kernel/entry_64.S | 94 +++++++++----------------------
 arch/powerpc/kernel/ptrace.c | 5 +--
 arch/powerpc/kernel/signal_32.c | 19 ++------
 arch/powerpc/kernel/signal_64.c | 9 +---
 arch/powerpc/kernel/systbl.S | 2 +-
 arch/ppc/kernel/asm-offsets.c | 1 -
 arch/ppc/kernel/entry.S | 95 ++++++++++-----------------------
 include/asm-powerpc/thread_info.h | 8 +---
 10 files changed, 78 insertions(+), 251 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 840aad43a98b..c9a660e4c2db 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -92,7 +92,6 @@ int main(void)

 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-	DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame));
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 #ifdef CONFIG_PPC32
 	DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index f20a67261ec7..4827ca1ec89b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -227,7 +227,7 @@ ret_from_syscall:
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
 	li	r8,-_LAST_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
+	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmplw	0,r3,r8
 	blt+	syscall_exit_cont
@@ -287,8 +287,10 @@ syscall_dotrace:

 syscall_exit_work:
 	andi.	r0,r9,_TIF_RESTOREALL
-	bne-	2f
-	cmplw	0,r3,r8
+	beq+	0f
+	REST_NVGPRS(r1)
+	b	2f
+0:	cmplw	0,r3,r8
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
@@ -302,9 +304,7 @@ syscall_exit_work:
 2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 	beq	4f

-	/* Clear per-syscall TIF flags if any are set, but _leave_
-	   _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
-	   yet. */
+	/* Clear per-syscall TIF flags if any are set. */

 	li	r11,_TIF_PERSYSCALL_MASK
 	addi	r12,r12,TI_FLAGS
@@ -318,8 +318,13 @@ syscall_exit_work:
 	subi	r12,r12,TI_FLAGS

 4:	/* Anything which requires enabling interrupts? */
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
-	beq	7f
+	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+	beq	ret_from_except
+
+	/* Re-enable interrupts */
+	ori	r10,r10,MSR_EE
+	SYNC
+	MTMSRD(r10)

 	/* Save NVGPRS if they're not saved already */
 	lwz	r4,_TRAP(r1)
@@ -328,71 +333,11 @@ syscall_exit_work:
 	SAVE_NVGPRS(r1)
 	li	r4,0xc00
 	stw	r4,_TRAP(r1)
-
-	/* Re-enable interrupts */
-5:	ori	r10,r10,MSR_EE
-	SYNC
-	MTMSRD(r10)
-
-	andi.	r0,r9,_TIF_SAVE_NVGPRS
-	bne	save_user_nvgprs
-
-save_user_nvgprs_cont:
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	7f
-
+5:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_syscall_trace_leave
-	REST_NVGPRS(r1)
-
-6:	lwz	r3,GPR3(r1)
-	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
-	SYNC
-	MTMSRD(r10)	/* disable interrupts again */
-	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
-	lwz	r9,TI_FLAGS(r12)
-7:
-	andi.	r0,r9,_TIF_NEED_RESCHED
-	bne	8f
-	lwz	r5,_MSR(r1)
-	andi.	r5,r5,MSR_PR
-	beq	ret_from_except
-	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
-	beq	ret_from_except
-	b	do_user_signal
-8:
-	ori	r10,r10,MSR_EE
-	SYNC
-	MTMSRD(r10)	/* re-enable interrupts */
-	bl	schedule
-	b	6b
-
-save_user_nvgprs:
-	lwz	r8,TI_SIGFRAME(r12)
-
-.macro savewords start, end
- 1:	stw \start,4*(\start)(r8)
-	.section __ex_table,"a"
-	.align	2
-	.long	1b,save_user_nvgprs_fault
-	.previous
-	.if \end - \start
-	savewords "(\start+1)",\end
-	.endif
-.endm
-	savewords 14,31
-	b	save_user_nvgprs_cont
-
-
-save_user_nvgprs_fault:
-	li	r3,11	/* SIGSEGV */
-	lwz	r4,TI_TASK(r12)
-	bl	force_sigsegv
+	b	ret_from_except_full

-	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
-	lwz	r9,TI_FLAGS(r12)
-	b	save_user_nvgprs_cont
-
 #ifdef SHOW_SYSCALLS
 do_show_syscall:
 #ifdef SHOW_SYSCALLS_TASK
@@ -490,6 +435,14 @@ ppc_clone:
 	stw	r0,_TRAP(r1)	/* register set saved */
 	b	sys_clone

+	.globl	ppc_swapcontext
+ppc_swapcontext:
+	SAVE_NVGPRS(r1)
+	lwz	r0,_TRAP(r1)
+	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
+	stw	r0,_TRAP(r1)	/* register set saved */
+	b	sys_swapcontext
+
 /*
  * Top-level page fault handling.
  * This is in assembler because if do_page_fault tells us that
@@ -683,7 +636,7 @@ user_exc_return:	/* r10 contains MSR_KERNEL here */
 	/* Check current_thread_info()->flags */
 	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
 	lwz	r9,TI_FLAGS(r9)
-	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
+	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
 	bne	do_work

 restore_user:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 388f861b8ed1..24be0cf86d7f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -160,7 +160,7 @@ syscall_exit:
 	mtmsrd	r10,1
 	ld	r9,TI_FLAGS(r12)
 	li	r11,-_LAST_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK)
+	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmpld	r3,r11
 	ld	r5,_CCR(r1)
@@ -216,8 +216,10 @@ syscall_exit_work:
 	   If TIF_NOERROR is set, just save r3 as it is. */

 	andi.	r0,r9,_TIF_RESTOREALL
-	bne-	2f
-	cmpld	r3,r11	/* r10 is -LAST_ERRNO */
+	beq+	0f
+	REST_NVGPRS(r1)
+	b	2f
+0:	cmpld	r3,r11	/* r10 is -LAST_ERRNO */
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
@@ -229,9 +231,7 @@ syscall_exit_work:
 2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 	beq	4f

-	/* Clear per-syscall TIF flags if any are set, but _leave_
-	   _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
-	   yet. */
+	/* Clear per-syscall TIF flags if any are set. */

 	li	r11,_TIF_PERSYSCALL_MASK
 	addi	r12,r12,TI_FLAGS
@@ -240,10 +240,9 @@ syscall_exit_work:
 	stdcx.	r10,0,r12
 	bne-	3b
 	subi	r12,r12,TI_FLAGS
-
-4:	bl	.save_nvgprs
-	/* Anything else left to do? */
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
+
+4:	/* Anything else left to do? */
+	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 	beq	.ret_from_except_lite

 	/* Re-enable interrupts */
@@ -251,26 +250,10 @@ syscall_exit_work:
 	ori	r10,r10,MSR_EE
 	mtmsrd	r10,1

-	andi.	r0,r9,_TIF_SAVE_NVGPRS
-	bne	save_user_nvgprs
-
-	/* If tracing, re-enable interrupts and do it */
-save_user_nvgprs_cont:
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	5f
-
+	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_syscall_trace_leave
-	REST_NVGPRS(r1)
-	clrrdi	r12,r1,THREAD_SHIFT
-
-	/* Disable interrupts again and handle other work if any */
-5:	mfmsr	r10
-	rldicl	r10,r10,48,1
-	rotldi	r10,r10,16
-	mtmsrd	r10,1
-
-	b	.ret_from_except_lite
+	b	.ret_from_except

 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
@@ -282,51 +265,6 @@ _GLOBAL(save_nvgprs)
 	std	r0,_TRAP(r1)
 	blr

-
-save_user_nvgprs:
-	ld	r10,TI_SIGFRAME(r12)
-	andi.	r0,r9,_TIF_32BIT
-	beq-	save_user_nvgprs_64
-
-	/* 32-bit save to userspace */
-
-.macro savewords start, end
- 1:	stw \start,4*(\start)(r10)
-	.section __ex_table,"a"
-	.align	3
-	.llong	1b,save_user_nvgprs_fault
-	.previous
-	.if \end - \start
-	savewords "(\start+1)",\end
-	.endif
-.endm
-	savewords 14,31
-	b	save_user_nvgprs_cont
-
-save_user_nvgprs_64:
-	/* 64-bit save to userspace */
-
-.macro savelongs start, end
- 1:	std \start,8*(\start)(r10)
-	.section __ex_table,"a"
-	.align	3
-	.llong	1b,save_user_nvgprs_fault
-	.previous
-	.if \end - \start
-	savelongs "(\start+1)",\end
-	.endif
-.endm
-	savelongs 14,31
-	b	save_user_nvgprs_cont
-
-save_user_nvgprs_fault:
-	li	r3,11	/* SIGSEGV */
-	ld	r4,TI_TASK(r12)
-	bl	.force_sigsegv
-
-	clrrdi	r12,r1,THREAD_SHIFT
-	ld	r9,TI_FLAGS(r12)
-	b	save_user_nvgprs_cont

 /*
  * The sigsuspend and rt_sigsuspend system calls can call do_signal
@@ -352,6 +290,16 @@ _GLOBAL(ppc_clone)
 	bl	.sys_clone
 	b	syscall_exit

+_GLOBAL(ppc32_swapcontext)
+	bl	.save_nvgprs
+	bl	.compat_sys_swapcontext
+	b	syscall_exit
+
+_GLOBAL(ppc64_swapcontext)
+	bl	.save_nvgprs
+	bl	.sys_swapcontext
+	b	syscall_exit
+
 _GLOBAL(ret_from_fork)
 	bl	.schedule_tail
 	REST_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 400793c71304..bcb83574335b 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -561,10 +561,7 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 				    regs->result);

 	if ((test_thread_flag(TIF_SYSCALL_TRACE)
-#ifdef CONFIG_PPC64
-	     || test_thread_flag(TIF_SINGLESTEP)
-#endif
-	     )
+	     || test_thread_flag(TIF_SINGLESTEP))
 	    && (current->ptrace & PT_PTRACED))
 		do_syscall_trace();
 }
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index bd837b5dbf06..d7a4e814974d 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -151,10 +151,7 @@ static inline int save_general_regs(struct pt_regs *regs,
 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
 	int i;

-	if (!FULL_REGS(regs)) {
-		set_thread_flag(TIF_SAVE_NVGPRS);
-		current_thread_info()->nvgprs_frame = frame->mc_gregs;
-	}
+	WARN_ON(!FULL_REGS(regs));

 	for (i = 0; i <= PT_RESULT; i ++) {
 		if (i == 14 && !FULL_REGS(regs))
@@ -215,15 +212,7 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka,
 static inline int save_general_regs(struct pt_regs *regs,
 		struct mcontext __user *frame)
 {
-	if (!FULL_REGS(regs)) {
-		/* Zero out the unsaved GPRs to avoid information
-		   leak, and set TIF_SAVE_NVGPRS to ensure that the
-		   registers do actually get saved later. */
-		memset(&regs->gpr[14], 0, 18 * sizeof(unsigned long));
-		current_thread_info()->nvgprs_frame = &frame->mc_gregs;
-		set_thread_flag(TIF_SAVE_NVGPRS);
-	}
-
+	WARN_ON(!FULL_REGS(regs));
 	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
 }

@@ -826,8 +815,8 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
 }

 long sys_swapcontext(struct ucontext __user *old_ctx,
-		       struct ucontext __user *new_ctx,
-		       int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
+		     struct ucontext __user *new_ctx,
+		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
 {
 	unsigned char tmp;

diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 497a5d3df359..4324f8a8ba24 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -118,14 +118,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	err |= __put_user(0, &sc->v_regs);
 #endif /* CONFIG_ALTIVEC */
 	err |= __put_user(&sc->gp_regs, &sc->regs);
-	if (!FULL_REGS(regs)) {
-		/* Zero out the unsaved GPRs to avoid information
-		   leak, and set TIF_SAVE_NVGPRS to ensure that the
-		   registers do actually get saved later. */
-		memset(&regs->gpr[14], 0, 18 * sizeof(unsigned long));
-		set_thread_flag(TIF_SAVE_NVGPRS);
-		current_thread_info()->nvgprs_frame = &sc->gp_regs;
-	}
+	WARN_ON(!FULL_REGS(regs));
 	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
 	err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
 	err |= __put_user(signr, &sc->signal);
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 8a9f994ed917..1ad55f0466fd 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -288,7 +288,7 @@ COMPAT_SYS(clock_settime)
 COMPAT_SYS(clock_gettime)
 COMPAT_SYS(clock_getres)
 COMPAT_SYS(clock_nanosleep)
-COMPAT_SYS(swapcontext)
+SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
 COMPAT_SYS(tgkill)
 COMPAT_SYS(utimes)
 COMPAT_SYS(statfs64)
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index 7964bf660e92..77e4dc780f8c 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -131,7 +131,6 @@ main(void)
 	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
 	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));

-	DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame));
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index a48b950722a1..3a2815978488 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -227,7 +227,7 @@ ret_from_syscall:
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
 	li	r8,-_LAST_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
+	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmplw	0,r3,r8
 	blt+	syscall_exit_cont
@@ -287,8 +287,10 @@ syscall_dotrace:

 syscall_exit_work:
 	andi.	r0,r9,_TIF_RESTOREALL
-	bne-	2f
-	cmplw	0,r3,r8
+	beq+	0f
+	REST_NVGPRS(r1)
+	b	2f
+0:	cmplw	0,r3,r8
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
@@ -302,9 +304,7 @@ syscall_exit_work:
 2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 	beq	4f

-	/* Clear per-syscall TIF flags if any are set, but _leave_
-	   _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
-	   yet. */
+	/* Clear per-syscall TIF flags if any are set. */

 	li	r11,_TIF_PERSYSCALL_MASK
 	addi	r12,r12,TI_FLAGS
@@ -318,8 +318,13 @@ syscall_exit_work:
 	subi	r12,r12,TI_FLAGS

 4:	/* Anything which requires enabling interrupts? */
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
-	beq	7f
+	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+	beq	ret_from_except
+
+	/* Re-enable interrupts */
+	ori	r10,r10,MSR_EE
+	SYNC
+	MTMSRD(r10)

 	/* Save NVGPRS if they're not saved already */
 	lwz	r4,TRAP(r1)
@@ -328,71 +333,11 @@ syscall_exit_work:
 	SAVE_NVGPRS(r1)
 	li	r4,0xc00
 	stw	r4,TRAP(r1)
-
-	/* Re-enable interrupts */
-5:	ori	r10,r10,MSR_EE
-	SYNC
-	MTMSRD(r10)
-
-	andi.	r0,r9,_TIF_SAVE_NVGPRS
-	bne	save_user_nvgprs
-
-save_user_nvgprs_cont:
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	7f
-
+5:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_syscall_trace_leave
-	REST_NVGPRS(r1)
-
-6:	lwz	r3,GPR3(r1)
-	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
-	SYNC
-	MTMSRD(r10)	/* disable interrupts again */
-	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
-	lwz	r9,TI_FLAGS(r12)
-7:
-	andi.	r0,r9,_TIF_NEED_RESCHED
-	bne	8f
-	lwz	r5,_MSR(r1)
-	andi.	r5,r5,MSR_PR
-	beq	ret_from_except
-	andi.	r0,r9,_TIF_SIGPENDING
-	beq	ret_from_except
-	b	do_user_signal
-8:
-	ori	r10,r10,MSR_EE
-	SYNC
-	MTMSRD(r10)	/* re-enable interrupts */
-	bl	schedule
-	b	6b
-
-save_user_nvgprs:
-	lwz	r8,TI_SIGFRAME(r12)
-
-.macro savewords start, end
- 1:	stw \start,4*(\start)(r8)
-	.section __ex_table,"a"
-	.align	2
-	.long	1b,save_user_nvgprs_fault
-	.previous
-	.if \end - \start
-	savewords "(\start+1)",\end
-	.endif
-.endm
-	savewords 14,31
-	b	save_user_nvgprs_cont
-
-
-save_user_nvgprs_fault:
-	li	r3,11	/* SIGSEGV */
-	lwz	r4,TI_TASK(r12)
-	bl	force_sigsegv
+	b	ret_from_except_full

-	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
-	lwz	r9,TI_FLAGS(r12)
-	b	save_user_nvgprs_cont
-
 #ifdef SHOW_SYSCALLS
 do_show_syscall:
 #ifdef SHOW_SYSCALLS_TASK
@@ -490,6 +435,14 @@ ppc_clone:
 	stw	r0,TRAP(r1)	/* register set saved */
 	b	sys_clone

+	.globl	ppc_swapcontext
+ppc_swapcontext:
+	SAVE_NVGPRS(r1)
+	lwz	r0,TRAP(r1)
+	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
+	stw	r0,TRAP(r1)	/* register set saved */
+	b	sys_swapcontext
+
 /*
  * Top-level page fault handling.
  * This is in assembler because if do_page_fault tells us that
@@ -683,7 +636,7 @@ user_exc_return:	/* r10 contains MSR_KERNEL here */
 	/* Check current_thread_info()->flags */
 	rlwinm	r9,r1,0,0,18
 	lwz	r9,TI_FLAGS(r9)
-	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
+	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
 	bne	do_work

 restore_user:
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index 237fc2b72974..ffc7462d77ba 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -37,7 +37,6 @@ struct thread_info {
 	int		preempt_count;	/* 0 => preemptable,
 					   <0 => BUG */
 	struct restart_block restart_block;
-	void __user *nvgprs_frame;
 	/* low level flags - has atomic operations done on it */
 	unsigned long	flags ____cacheline_aligned_in_smp;
 };
@@ -120,7 +119,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		10
 #define TIF_SECCOMP		11	/* secure computing */
 #define TIF_RESTOREALL		12	/* Restore all regs (implies NOERROR) */
-#define TIF_SAVE_NVGPRS		13	/* Save r14-r31 in signal frame */
 #define TIF_NOERROR		14	/* Force successful syscall return */
 #define TIF_RESTORE_SIGMASK	15	/* Restore signal mask in do_signal */

@@ -137,15 +135,13 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SINGLESTEP		(1<