Diffstat (limited to 'arch')
62 files changed, 1098 insertions, 647 deletions
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index acca35aebe28..aeef960ff795 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -112,9 +112,6 @@ EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); #endif - /* crypto hash */ -EXPORT_SYMBOL(sha_transform); - /* gcc lib functions */ EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index d7ee0d4c072d..1a347f481e5e 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -197,7 +197,7 @@ void cpu_idle(void) cpu_relax(); } else { stop_critical_timings(); - if (cpuidle_call_idle()) + if (cpuidle_idle_call()) pm_idle(); start_critical_timings(); /* diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index 59ff42ddf0ae..cf73a7f742dd 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -12,7 +12,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ strchr.o strrchr.o \ testchangebit.o testclearbit.o testsetbit.o \ ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ - ucmpdi2.o lib1funcs.o div64.o sha1.o \ + ucmpdi2.o lib1funcs.o div64.o \ io-readsb.o io-writesb.o io-readsl.o io-writesl.o mmu-y := clear_user.o copy_page.o getuser.o putuser.o diff --git a/arch/arm/lib/sha1.S b/arch/arm/lib/sha1.S deleted file mode 100644 index eb0edb80d7b8..000000000000 --- a/arch/arm/lib/sha1.S +++ /dev/null @@ -1,211 +0,0 @@ -/* - * linux/arch/arm/lib/sha1.S - * - * SHA transform optimized for ARM - * - * Copyright: (C) 2005 by Nicolas Pitre <nico@fluxnic.net> - * Created: September 17, 2005 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * The reference implementation for this code is linux/lib/sha1.c - */ - -#include <linux/linkage.h> - - .text - - -/* - * void sha_transform(__u32 *digest, const char *in, __u32 *W) - * - * Note: the "in" ptr may be unaligned. - */ - -ENTRY(sha_transform) - - stmfd sp!, {r4 - r8, lr} - - @ for (i = 0; i < 16; i++) - @ W[i] = be32_to_cpu(in[i]); - -#ifdef __ARMEB__ - mov r4, r0 - mov r0, r2 - mov r2, #64 - bl memcpy - mov r2, r0 - mov r0, r4 -#else - mov r3, r2 - mov lr, #16 -1: ldrb r4, [r1], #1 - ldrb r5, [r1], #1 - ldrb r6, [r1], #1 - ldrb r7, [r1], #1 - subs lr, lr, #1 - orr r5, r5, r4, lsl #8 - orr r6, r6, r5, lsl #8 - orr r7, r7, r6, lsl #8 - str r7, [r3], #4 - bne 1b -#endif - - @ for (i = 0; i < 64; i++) - @ W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31); - - sub r3, r2, #4 - mov lr, #64 -2: ldr r4, [r3, #4]! - subs lr, lr, #1 - ldr r5, [r3, #8] - ldr r6, [r3, #32] - ldr r7, [r3, #52] - eor r4, r4, r5 - eor r4, r4, r6 - eor r4, r4, r7 - mov r4, r4, ror #31 - str r4, [r3, #64] - bne 2b - - /* - * The SHA functions are: - * - * f1(B,C,D) = (D ^ (B & (C ^ D))) - * f2(B,C,D) = (B ^ C ^ D) - * f3(B,C,D) = ((B & C) | (D & (B | C))) - * - * Then the sub-blocks are processed as follows: - * - * A' = ror(A, 27) + f(B,C,D) + E + K + *W++ - * B' = A - * C' = ror(B, 2) - * D' = C - * E' = D - * - * We therefore unroll each loop 5 times to avoid register shuffling. - * Also the ror for C (and also D and E which are successivelyderived - * from it) is applied in place to cut on an additional mov insn for - * each round. 
- */ - - .macro sha_f1, A, B, C, D, E - ldr r3, [r2], #4 - eor ip, \C, \D - add \E, r1, \E, ror #2 - and ip, \B, ip, ror #2 - add \E, \E, \A, ror #27 - eor ip, ip, \D, ror #2 - add \E, \E, r3 - add \E, \E, ip - .endm - - .macro sha_f2, A, B, C, D, E - ldr r3, [r2], #4 - add \E, r1, \E, ror #2 - eor ip, \B, \C, ror #2 - add \E, \E, \A, ror #27 - eor ip, ip, \D, ror #2 - add \E, \E, r3 - add \E, \E, ip - .endm - - .macro sha_f3, A, B, C, D, E - ldr r3, [r2], #4 - add \E, r1, \E, ror #2 - orr ip, \B, \C, ror #2 - add \E, \E, \A, ror #27 - and ip, ip, \D, ror #2 - add \E, \E, r3 - and r3, \B, \C, ror #2 - orr ip, ip, r3 - add \E, \E, ip - .endm - - ldmia r0, {r4 - r8} - - mov lr, #4 - ldr r1, .L_sha_K + 0 - - /* adjust initial values */ - mov r6, r6, ror #30 - mov r7, r7, ror #30 - mov r8, r8, ror #30 - -3: subs lr, lr, #1 - sha_f1 r4, r5, r6, r7, r8 - sha_f1 r8, r4, r5, r6, r7 - sha_f1 r7, r8, r4, r5, r6 - sha_f1 r6, r7, r8, r4, r5 - sha_f1 r5, r6, r7, r8, r4 - bne 3b - - ldr r1, .L_sha_K + 4 - mov lr, #4 - -4: subs lr, lr, #1 - sha_f2 r4, r5, r6, r7, r8 - sha_f2 r8, r4, r5, r6, r7 - sha_f2 r7, r8, r4, r5, r6 - sha_f2 r6, r7, r8, r4, r5 - sha_f2 r5, r6, r7, r8, r4 - bne 4b - - ldr r1, .L_sha_K + 8 - mov lr, #4 - -5: subs lr, lr, #1 - sha_f3 r4, r5, r6, r7, r8 - sha_f3 r8, r4, r5, r6, r7 - sha_f3 r7, r8, r4, r5, r6 - sha_f3 r6, r7, r8, r4, r5 - sha_f3 r5, r6, r7, r8, r4 - bne 5b - - ldr r1, .L_sha_K + 12 - mov lr, #4 - -6: subs lr, lr, #1 - sha_f2 r4, r5, r6, r7, r8 - sha_f2 r8, r4, r5, r6, r7 - sha_f2 r7, r8, r4, r5, r6 - sha_f2 r6, r7, r8, r4, r5 - sha_f2 r5, r6, r7, r8, r4 - bne 6b - - ldmia r0, {r1, r2, r3, ip, lr} - add r4, r1, r4 - add r5, r2, r5 - add r6, r3, r6, ror #2 - add r7, ip, r7, ror #2 - add r8, lr, r8, ror #2 - stmia r0, {r4 - r8} - - ldmfd sp!, {r4 - r8, pc} - -ENDPROC(sha_transform) - - .align 2 -.L_sha_K: - .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 - - -/* - * void sha_init(__u32 *buf) - */ - - .align 2 -.L_sha_initial_digest: - .word 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0 - -ENTRY(sha_init) - - str lr, [sp, #-4]! 
- adr r1, .L_sha_initial_digest - ldmia r1, {r1, r2, r3, ip, lr} - stmia r0, {r1, r2, r3, ip, lr} - ldr pc, [sp], #4 - -ENDPROC(sha_init) diff --git a/arch/arm/mach-omap2/cminst44xx.h b/arch/arm/mach-omap2/cminst44xx.h index f2ea6453ade0..a018a7327879 100644 --- a/arch/arm/mach-omap2/cminst44xx.h +++ b/arch/arm/mach-omap2/cminst44xx.h @@ -18,13 +18,36 @@ extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs); extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs); extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); -extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); + +# ifdef CONFIG_ARCH_OMAP4 +extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, + u16 clkctrl_offs); extern void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); +# else + +static inline int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, + u16 clkctrl_offs) +{ + return 0; +} + +static inline void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, + s16 cdoffs, u16 clkctrl_offs) +{ +} + +static inline void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs, + u16 clkctrl_offs) +{ +} + +# endif + /* * In an ideal world, we would not export these low-level functions, * but this will probably take some time to fix properly diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 2543342dbccb..daa056ed8738 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c @@ -48,14 +48,7 @@ void __init omap_pmic_init(int bus, u32 clkrate, omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); } -static struct twl4030_usb_data omap4_usb_pdata = { - .phy_init = omap4430_phy_init, - .phy_exit = omap4430_phy_exit, - .phy_power = omap4430_phy_power, - .phy_set_clock = omap4430_phy_set_clk, - .phy_suspend = omap4430_phy_suspend, -}; - +#if defined(CONFIG_ARCH_OMAP3) static struct twl4030_usb_data omap3_usb_pdata = { .usb_mode = T2_USB_MODE_ULPI, }; @@ -122,6 +115,45 @@ static struct regulator_init_data omap3_vpll2_idata = { .consumer_supplies = omap3_vpll2_supplies, }; +void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, + u32 pdata_flags, u32 regulators_flags) +{ + if (!pmic_data->irq_base) + pmic_data->irq_base = TWL4030_IRQ_BASE; + if (!pmic_data->irq_end) + pmic_data->irq_end = TWL4030_IRQ_END; + + /* Common platform data configurations */ + if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) + pmic_data->usb = &omap3_usb_pdata; + + if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci) + pmic_data->bci = &omap3_bci_pdata; + + if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc) + pmic_data->madc = &omap3_madc_pdata; + + if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio) + pmic_data->audio = &omap3_audio_pdata; + + /* Common regulator configurations */ + if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) + pmic_data->vdac = &omap3_vdac_idata; + + if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2) + pmic_data->vpll2 = &omap3_vpll2_idata; +} +#endif /* CONFIG_ARCH_OMAP3 */ + +#if defined(CONFIG_ARCH_OMAP4) +static struct twl4030_usb_data omap4_usb_pdata = { + .phy_init = omap4430_phy_init, + .phy_exit = omap4430_phy_exit, + .phy_power = omap4430_phy_power, + .phy_set_clock = omap4430_phy_set_clk, + 
.phy_suspend = omap4430_phy_suspend, +}; + static struct regulator_init_data omap4_vdac_idata = { .constraints = { .min_uV = 1800000, @@ -273,32 +305,4 @@ void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data, !pmic_data->clk32kg) pmic_data->clk32kg = &omap4_clk32kg_idata; } - -void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, - u32 pdata_flags, u32 regulators_flags) -{ - if (!pmic_data->irq_base) - pmic_data->irq_base = TWL4030_IRQ_BASE; - if (!pmic_data->irq_end) - pmic_data->irq_end = TWL4030_IRQ_END; - - /* Common platform data configurations */ - if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) - pmic_data->usb = &omap3_usb_pdata; - - if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci) - pmic_data->bci = &omap3_bci_pdata; - - if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc) - pmic_data->madc = &omap3_madc_pdata; - - if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio) - pmic_data->audio = &omap3_audio_pdata; - - /* Common regulator configurations */ - if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) - pmic_data->vdac = &omap3_vdac_idata; - - if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2) - pmic_data->vpll2 = &omap3_vpll2_idata; -} +#endif /* CONFIG_ARCH_OMAP4 */ diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 6fc03aff046c..c38d22e5e902 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -156,7 +156,7 @@ prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \ #define STUB_SET_VARIABLE(prefix, adjust_arg) \ static efi_status_t \ prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \ - unsigned long attr, unsigned long data_size, \ + u32 attr, unsigned long data_size, \ void *data) \ { \ struct ia64_fpreg fr[6]; \ diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index b1dc71f5534e..4054b31e0fa9 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -258,10 +258,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) -static __inline__ int +static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) { - int ret; + s64 ret; unsigned long flags; _atomic_spin_lock_irqsave(v, flags); diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index 67a33cc27ef2..2388bdb32832 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -5,11 +5,14 @@ #include <linux/futex.h> #include <linux/uaccess.h> +#include <asm/atomic.h> #include <asm/errno.h> static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { + unsigned long int flags; + u32 val; int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; @@ -18,21 +21,58 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr))) return -EFAULT; pagefault_disable(); + _atomic_spin_lock_irqsave(uaddr, flags); + switch (op) { case FUTEX_OP_SET: + /* *(int *)UADDR2 = OPARG; */ + ret = get_user(oldval, uaddr); + if (!ret) + ret = put_user(oparg, uaddr); + break; case FUTEX_OP_ADD: + /* *(int *)UADDR2 += OPARG; */ + ret = get_user(oldval, uaddr); + if (!ret) { + val = oldval + oparg; + ret = put_user(val, uaddr); + } + break; case FUTEX_OP_OR: + /* *(int *)UADDR2 |= OPARG; */ + ret = get_user(oldval, uaddr); + if (!ret) { + val = oldval | oparg; + ret = put_user(val, uaddr); + } + break; case FUTEX_OP_ANDN: + /* *(int *)UADDR2 &= ~OPARG; */ + ret = get_user(oldval, uaddr); + if (!ret) { + val = oldval & ~oparg; + ret = put_user(val, uaddr); + } + break; case FUTEX_OP_XOR: + /* *(int *)UADDR2 ^= OPARG; */ + ret = get_user(oldval, uaddr); + if (!ret) { + val = oldval ^ oparg; + ret = put_user(val, uaddr); + } + break; default: ret = -ENOSYS; } + _atomic_spin_unlock_irqrestore(uaddr, flags); + pagefault_enable(); if (!ret) { @@ -54,7 +94,9 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + int ret; u32 val; + unsigned long flags; /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... @@ -65,12 +107,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - if (get_user(val, uaddr)) - return -EFAULT; - if (val == oldval && put_user(newval, uaddr)) - return -EFAULT; + /* HPPA has no cmpxchg in hardware and therefore the + * best we can do here is use an array of locks. The + * lock selected is based on a hash of the userspace + * address. This should scale to a couple of CPUs. + */ + + _atomic_spin_lock_irqsave(uaddr, flags); + + ret = get_user(val, uaddr); + + if (!ret && val == oldval) + ret = put_user(newval, uaddr); + *uval = val; - return 0; + + _atomic_spin_unlock_irqrestore(uaddr, flags); + + return ret; } #endif /*__KERNEL__*/ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index 3392de3e7be0..d61de64f990a 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -821,8 +821,9 @@ #define __NR_open_by_handle_at (__NR_Linux + 326) #define __NR_syncfs (__NR_Linux + 327) #define __NR_setns (__NR_Linux + 328) +#define __NR_sendmmsg (__NR_Linux + 329) -#define __NR_Linux_syscalls (__NR_setns + 1) +#define __NR_Linux_syscalls (__NR_sendmmsg + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 34a4f5a2fffb..e66366fd2abc 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -427,6 +427,7 @@ ENTRY_COMP(open_by_handle_at) ENTRY_SAME(syncfs) ENTRY_SAME(setns) + ENTRY_COMP(sendmmsg) /* Nothing yet */ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0f98bbddade5..ed5cb5af5281 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -274,11 +274,11 @@ config MARCH_Z10 on older machines. config MARCH_Z196 - bool "IBM zEnterprise 196" + bool "IBM zEnterprise 114 and 196" help - Select this to enable optimizations for IBM zEnterprise 196 - (2817 series). The kernel will be slightly faster but will not work - on older machines. + Select this to enable optimizations for IBM zEnterprise 114 and 196 + (2818 and 2817 series). 
The kernel will be slightly faster but will + not work on older machines. endchoice diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 5e95d95450b3..97cc4403fabf 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -167,5 +167,6 @@ enum diag308_rc { }; extern int diag308(unsigned long subcode, void *addr); +extern void diag308_reset(void); #endif /* _ASM_S390_IPL_H */ diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index f26280d9e88d..e85c911aabf0 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -18,6 +18,7 @@ void system_call(void); void pgm_check_handler(void); void mcck_int_handler(void); void io_int_handler(void); +void psw_restart_int_handler(void); #ifdef CONFIG_32BIT @@ -150,7 +151,10 @@ struct _lowcore { */ __u32 ipib; /* 0x0e00 */ __u32 ipib_checksum; /* 0x0e04 */ - __u8 pad_0x0e08[0x0f00-0x0e08]; /* 0x0e08 */ + + /* 64 bit save area */ + __u64 save_area_64; /* 0x0e08 */ + __u8 pad_0x0e10[0x0f00-0x0e10]; /* 0x0e10 */ /* Extended facility list */ __u64 stfle_fac_list[32]; /* 0x0f00 */ @@ -286,7 +290,10 @@ struct _lowcore { */ __u64 ipib; /* 0x0e00 */ __u32 ipib_checksum; /* 0x0e08 */ - __u8 pad_0x0e0c[0x0f00-0x0e0c]; /* 0x0e0c */ + + /* 64 bit save area */ + __u64 save_area_64; /* 0x0e0c */ + __u8 pad_0x0e14[0x0f00-0x0e14]; /* 0x0e14 */ /* Extended facility list */ __u64 stfle_fac_list[32]; /* 0x0f00 */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 55dfcc8bdc0d..a4b6229e5d4b 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -119,14 +119,12 @@ struct stack_frame { * Do necessary setup to start up a new thread. */ #define start_thread(regs, new_psw, new_stackp) do { \ - set_fs(USER_DS); \ regs->psw.mask = psw_user_bits; \ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ regs->gprs[15] = new_stackp; \ } while (0) #define start_thread31(regs, new_psw, new_stackp) do { \ - set_fs(USER_DS); \ regs->psw.mask = psw_user32_bits; \ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ regs->gprs[15] = new_stackp; \ diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index d382629a0172..6582f69f2389 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -113,6 +113,7 @@ extern void pfault_fini(void); extern void cmma_init(void); extern int memcpy_real(void *, void *, size_t); +extern void copy_to_absolute_zero(void *dest, void *src, size_t count); #define finish_arch_switch(prev) do { \ set_fs(current->thread.mm_segment); \ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 05d8f38734ec..532fd4322156 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -27,12 +27,9 @@ int main(void) BLANK(); DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); BLANK(); - DEFINE(__THREAD_per_cause, - offsetof(struct task_struct, thread.per_event.cause)); - DEFINE(__THREAD_per_address, - offsetof(struct task_struct, thread.per_event.address)); - DEFINE(__THREAD_per_paid, - offsetof(struct task_struct, thread.per_event.paid)); + DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause)); + DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address)); + DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid)); BLANK(); DEFINE(__TI_task, offsetof(struct thread_info, task)); DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); @@ 
-142,6 +139,7 @@ int main(void) DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); + DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64)); #ifdef CONFIG_32BIT DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); #else /* CONFIG_32BIT */ diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index 209938c1dfc8..255435663bf8 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -76,6 +76,42 @@ s390_base_pgm_handler_fn: .quad 0 .previous +# +# Calls diag 308 subcode 1 and continues execution +# +# The following conditions must be ensured before calling this function: +# * Prefix register = 0 +# * Lowcore protection is disabled +# +ENTRY(diag308_reset) + larl %r4,.Lctlregs # Save control registers + stctg %c0,%c15,0(%r4) + larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0 + lghi %r3,0 + lg %r4,0(%r4) # Save PSW + sturg %r4,%r3 # Use sturg, because of large pages + lghi %r1,1 + diag %r1,%r1,0x308 +.Lrestart_part2: + lhi %r0,0 # Load r0 with zero + lhi %r1,2 # Use mode 2 = ESAME (dump) + sigp %r1,%r0,0x12 # Switch to ESAME mode + sam64 # Switch to 64 bit addressing mode + larl %r4,.Lctlregs # Restore control registers + lctlg %c0,%c15,0(%r4) + br %r14 +.align 16 +.Lrestart_psw: + .long 0x00080000,0x80000000 + .Lrestart_part2 + + .section .bss +.align 8 +.Lctlregs: + .rept 16 + .quad 0 + .endr + .previous + #else /* CONFIG_64BIT */ ENTRY(s390_base_mcck_handler) diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index eee999853a7c..a9a285b8c4ad 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -380,20 +380,13 @@ asmlinkage long sys32_sigreturn(void) goto badframe; if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - + set_current_blocked(&set); if (restore_sigregs32(regs, &frame->sregs)) goto badframe; if (restore_sigregs_gprs_high(regs, frame->gprs_high)) goto badframe; - return regs->gprs[2]; - badframe: force_sig(SIGSEGV, current); return 0; @@ -413,31 +406,22 @@ asmlinkage long sys32_rt_sigreturn(void) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - + set_current_blocked(&set); if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_sigregs_gprs_high(regs, frame->gprs_high)) goto badframe; - err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp); st.ss_sp = compat_ptr(ss_sp); err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size); err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags); if (err) goto badframe; - set_fs (KERNEL_DS); do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]); set_fs (old_fs); - return regs->gprs[2]; - badframe: force_sig(SIGSEGV, current); return 0; @@ -605,10 +589,10 @@ give_sigsegv: * OK, we're invoking a handler */ -int -handle_signal32(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) +int handle_signal32(unsigned long sig, struct k_sigaction *ka, +
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int ret; /* Set up the stack frame */ @@ -616,15 +600,12 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka, ret = setup_rt_frame32(sig, ka, info, oldset, regs); else ret = setup_frame32(sig, ka, oldset, regs); - - if (ret == 0) { - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked,sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - } - return ret; + if (ret) + return ret; + sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(&blocked, sig); + set_current_blocked(&blocked); + return 0; } diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 3eab7cfab07c..02ec8fe7d03f 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -849,6 +849,34 @@ restart_crash: restart_go: #endif +# +# PSW restart interrupt handler +# +ENTRY(psw_restart_int_handler) + st %r15,__LC_SAVE_AREA_64(%r0) # save r15 + basr %r15,0 +0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack + l %r15,0(%r15) + ahi %r15,-SP_SIZE # make room for pt_regs + stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack + mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack + mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 + basr %r14,0 +1: l %r14,.Ldo_restart-1b(%r14) + basr %r14,%r14 + + basr %r14,0 # load disabled wait PSW if +2: lpsw restart_psw_crash-2b(%r14) # do_restart returns + .align 4 +.Ldo_restart: + .long do_restart +.Lrestart_stack: + .long restart_stack + .align 8 +restart_psw_crash: + .long 0x000a0000,0x00000000 + restart_psw_crash + .section .kprobes.text, "ax" #ifdef CONFIG_CHECK_STACK diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 7a0fd426ca92..5f729d627cef 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -865,6 +865,26 @@ restart_crash: restart_go: #endif +# +# PSW restart interrupt handler +# +ENTRY(psw_restart_int_handler) + stg %r15,__LC_SAVE_AREA_64(%r0) # save r15 + larl %r15,restart_stack # load restart stack + lg %r15,0(%r15) + aghi %r15,-SP_SIZE # make room for pt_regs + stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack + mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack + mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 + brasl %r14,do_restart + + larl %r14,restart_psw_crash # load disabled wait PSW if + lpswe 0(%r14) # do_restart returns + .align 8 +restart_psw_crash: + .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash + .section .kprobes.text, "ax" #ifdef CONFIG_CHECK_STACK diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index a689070be287..04361d5a4279 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -45,11 +45,13 @@ * - halt * - power off * - reipl + * - restart */ #define ON_PANIC_STR "on_panic" #define ON_HALT_STR "on_halt" #define ON_POFF_STR "on_poff" #define ON_REIPL_STR "on_reboot" +#define ON_RESTART_STR "on_restart" struct shutdown_action; struct shutdown_trigger { @@ -1544,17 +1546,20 @@ static char vmcmd_on_reboot[128]; static char vmcmd_on_panic[128]; static char vmcmd_on_halt[128]; static char vmcmd_on_poff[128]; +static char vmcmd_on_restart[128]; DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot,
"%s\n", "%s\n", vmcmd_on_reboot); DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic); DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt); DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff); +DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart); static struct attribute *vmcmd_attrs[] = { &sys_vmcmd_on_reboot_attr.attr, &sys_vmcmd_on_panic_attr.attr, &sys_vmcmd_on_halt_attr.attr, &sys_vmcmd_on_poff_attr.attr, + &sys_vmcmd_on_restart_attr.attr, NULL, }; @@ -1576,6 +1581,8 @@ static void vmcmd_run(struct shutdown_trigger *trigger) cmd = vmcmd_on_halt; else if (strcmp(trigger->name, ON_POFF_STR) == 0) cmd = vmcmd_on_poff; + else if (strcmp(trigger->name, ON_RESTART_STR) == 0) + cmd = vmcmd_on_restart; else return; @@ -1707,6 +1714,34 @@ static void do_panic(void) stop_run(&on_panic_trigger); } +/* on restart */ + +static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR, + &reipl_action}; + +static ssize_t on_restart_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + return sprintf(page, "%s\n", on_restart_trigger.action->name); +} + +static ssize_t on_restart_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + return set_trigger(buf, &on_restart_trigger, len); +} + +static struct kobj_attribute on_restart_attr = + __ATTR(on_restart, 0644, on_restart_show, on_restart_store); + +void do_restart(void) +{ + smp_send_stop(); + on_restart_trigger.action->fn(&on_restart_trigger); + stop_run(&on_restart_trigger); +} + /* on halt */ static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; @@ -1783,7 +1818,9 @@ static void __init shutdown_triggers_init(void) if (sysfs_create_file(&shutdown_actions_kset->kobj, &on_poff_attr.attr)) goto fail; - + if (sysfs_create_file(&shutdown_actions_kset->kobj, + &on_restart_attr.attr)) + goto fail; return; fail: panic("shutdown_triggers_init failed\n"); @@ -1959,6 +1996,12 @@ static void do_reset_calls(void) { struct reset_call *reset; +#ifdef CONFIG_64BIT + if (diag308_set_works) { + diag308_reset(); + return; + } +#endif list_for_each_entry(reset, &rcall, list) reset->fn(); } diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 78eb7cfbd3d1..e690975403f4 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp 2000,2009 + * Copyright IBM Corp 2000,2011 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, * Denis Joseph Barrow, */ @@ -8,6 +8,64 @@ #include <asm/asm-offsets.h> # +# store_status +# +# Prerequisites to run this function: +# - Prefix register is set to zero +# - Original prefix register is stored in "dump_prefix_page" +# - Lowcore protection is off +# +ENTRY(store_status) + /* Save register one and load save area base */ + stg %r1,__LC_SAVE_AREA_64(%r0) + lghi %r1,SAVE_AREA_BASE + /* General purpose registers */ + stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + lg %r2,__LC_SAVE_AREA_64(%r0) + stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) + /* Control registers */ + stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Access registers */ + stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Floating point registers */ + std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std 
%f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Floating point control register */ + stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* CPU timer */ + stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Saved prefix register */ + larl %r2,dump_prefix_page + mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) + /* Clock comparator - seven bytes */ + larl %r2,.Lclkcmp + stckc 0(%r2) + mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) + /* Program status word */ + epsw %r2,%r3 + st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) + st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) + larl %r2,store_status + stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) + br %r14 +.align 8 +.Lclkcmp: .quad 0x0000000000000000 + +# # do_reipl_asm # Parameter: r2 = schid of reipl device # @@ -15,22 +73,7 @@ ENTRY(do_reipl_asm) basr %r13,0 .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) -.Lpg1: # do store status of all registers - - stg %r1,.Lregsave-.Lpg0(%r13) - lghi %r1,0x1000 - stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1) - lg %r0,.Lregsave-.Lpg0(%r13) - stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1) - stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1) - stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1) - lg %r10,.Ldump_pfx-.Lpg0(%r13) - mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10) - stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) - stckc .Lclkcmp-.Lpg0(%r13) - mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13) - stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) - stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) +.Lpg1: brasl %r14,store_status lctlg %c6,%c6,.Lall-.Lpg0(%r13) lgr %r1,%r2 @@ -67,10 +110,7 @@ ENTRY(do_reipl_asm) st %r14,.Ldispsw+12-.Lpg0(%r13) lpswe .Ldispsw-.Lpg0(%r13) .align 8 -.Lclkcmp: .quad 0x0000000000000000 .Lall: .quad 0x00000000ff000000 -.Ldump_pfx: .quad dump_prefix_page -.Lregsave: .quad 0x0000000000000000 .align 16 /* * These addresses have to be 31 bit otherwise diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0c35dee10b00..7b371c37061d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -346,7 +346,7 @@ setup_lowcore(void) lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.addr = - PSW_ADDR_AMODE | (unsigned long) restart_int_handler; + PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; if (user_mode != HOME_SPACE_MODE) lc->restart_psw.mask |= PSW_ASC_HOME; lc->external_new_psw.mask = psw_kernel_bits; @@ -529,6 +529,27 @@ static void __init setup_memory_end(void) memory_end = memory_size; } +void *restart_stack __attribute__((__section__(".data"))); + +/* + * Setup new PSW and allocate stack for PSW restart interrupt + */ +static void __init setup_restart_psw(void) +{ + psw_t psw; + + restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); + restart_stack += ASYNC_SIZE; + + /* + * 
Setup restart PSW for absolute zero lowcore. This is necesary + * if PSW restart is done on an offline CPU that has lowcore zero + */ + psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; + copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); +} + static void __init setup_memory(void) { @@ -731,6 +752,7 @@ static void __init setup_hwcaps(void) strcpy(elf_platform, "z10"); break; case 0x2817: + case 0x2818: strcpy(elf_platform, "z196"); break; } @@ -792,6 +814,7 @@ setup_arch(char **cmdline_p) setup_addressing_mode(); setup_memory(); setup_resources(); + setup_restart_psw(); setup_lowcore(); cpu_init(); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index abbb3c3c7aab..9a40e1cc5ec3 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -57,17 +57,15 @@ typedef struct */ SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask) { - mask &= _BLOCKABLE; - spin_lock_irq(&current->sighand->siglock); - current->saved_sigmask = current->blocked; - siginitset(&current->blocked, mask); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + sigset_t blocked; + current->saved_sigmask = current->blocked; + mask &= _BLOCKABLE; + siginitset(&blocked, mask); + set_current_blocked(&blocked); set_current_state(TASK_INTERRUPTIBLE); schedule(); - set_thread_flag(TIF_RESTORE_SIGMASK); - + set_restore_sigmask(); return -ERESTARTNOHAND; } @@ -172,18 +170,11 @@ SYSCALL_DEFINE0(sigreturn) goto badframe; if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - + set_current_blocked(&set); if (restore_sigregs(regs, &frame->sregs)) goto badframe; - return regs->gprs[2]; - badframe: force_sig(SIGSEGV, current); return 0; @@ -199,21 +190,14 @@ SYSCALL_DEFINE0(rt_sigreturn) goto badframe; if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - + set_current_blocked(&set); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]) == -EFAULT) goto badframe; return regs->gprs[2]; - badframe: force_sig(SIGSEGV, current); return 0; @@ -385,14 +369,11 @@ give_sigsegv: return -EFAULT; } -/* - * OK, we're invoking a handler - */ - -static int -handle_signal(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) +static int handle_signal(unsigned long sig, struct k_sigaction *ka, + siginfo_t *info, sigset_t *oldset, + struct pt_regs *regs) { + sigset_t blocked; int ret; /* Set up the stack frame */ @@ -400,17 +381,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); - - if (ret == 0) { - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked,sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - } - - return ret; + if (ret) + return ret; + sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(&blocked, sig); +
set_current_blocked(&blocked); + return 0; } /* diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a6d85c0a7f20..6ab16ac64d29 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -452,23 +452,27 @@ out: */ int __cpuinit start_secondary(void *cpuvoid) { - /* Setup the cpu */ cpu_init(); preempt_disable(); - /* Enable TOD clock interrupts on the secondary cpu. */ init_cpu_timer(); - /* Enable cpu timer interrupts on the secondary cpu. */ init_cpu_vtimer(); - /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); - /* call cpu notifiers */ notify_cpu_starting(smp_processor_id()); - /* Mark this cpu as online */ ipi_call_lock(); set_cpu_online(smp_processor_id(), true); ipi_call_unlock(); - /* Switch on interrupts */ + __ctl_clear_bit(0, 28); /* Disable lowcore protection */ + S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + S390_lowcore.restart_psw.addr = + PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; + __ctl_set_bit(0, 28); /* Enable lowcore protection */ + /* + * Wait until the cpu which brought this one up marked it + * active before enabling interrupts. + */ + while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) + cpu_relax(); local_irq_enable(); /* cpu_idle will call schedule for us */ cpu_idle(); @@ -507,7 +511,11 @@ static int __cpuinit smp_alloc_lowcore(int cpu) memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); lowcore->async_stack = async_stack + ASYNC_SIZE; lowcore->panic_stack = panic_stack + PAGE_SIZE; - + lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + lowcore->restart_psw.addr = + PSW_ADDR_AMODE | (unsigned long) restart_int_handler; + if (user_mode != HOME_SPACE_MODE) + lowcore->restart_psw.mask |= PSW_ASC_HOME; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { unsigned long save_area; diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 51e5cd9b906a..5dbbaa6e594c 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -85,3 +85,19 @@ int memcpy_real(void *dest, void *src, size_t count) arch_local_irq_restore(flags); return rc; } + +/* + * Copy memory to absolute zero + */ +void copy_to_absolute_zero(void *dest, void *src, size_t count) +{ + unsigned long cr0; + + BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore)); + preempt_disable(); + __ctl_store(cr0, 0, 0); + __ctl_clear_bit(0, 28); /* disable lowcore protection */ + memcpy_real(dest + store_prefix(), src, count); + __ctl_load(cr0, 0, 0); + preempt_enable(); +} diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 2adb23938a7f..4d1f2bce87b3 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -528,6 +528,7 @@ static inline void page_table_free_pgste(unsigned long *table) static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, unsigned long vmaddr) { + return NULL; } static inline void page_table_free_pgste(unsigned long *table) diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 3c45de1db716..32114e0941ae 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c @@ -101,7 +101,7 @@ void cpu_idle(void) local_irq_disable(); /* Don't trace irqs off for idle */ stop_critical_timings(); - if (cpuidle_call_idle()) + if (cpuidle_idle_call()) pm_idle(); /* * Sanity check to ensure that pm_idle() returns diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 3c93f08ce187..2c2e38821f60 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -16,3 +16,8 @@ header-y += traps.h 
header-y += uctx.h header-y += utrap.h header-y += watchdog.h + +generic-y += div64.h +generic-y += local64.h +generic-y += irq_regs.h +generic-y += local.h diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h index 325e295d60de..29011cc0e4be 100644 --- a/arch/sparc/include/asm/bitops_64.h +++ b/arch/sparc/include/asm/bitops_64.h @@ -26,61 +26,28 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr); #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -#include <asm-generic/bitops/ffz.h> -#include <asm-generic/bitops/__ffs.h> #include <asm-generic/bitops/fls.h> #include <asm-generic/bitops/__fls.h> #include <asm-generic/bitops/fls64.h> #ifdef __KERNEL__ +extern int ffs(int x); +extern unsigned long __ffs(unsigned long); + +#include <asm-generic/bitops/ffz.h> #include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/ffs.h> /* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ -#ifdef ULTRA_HAS_POPULATION_COUNT - -static inline unsigned int __arch_hweight64(unsigned long w) -{ - unsigned int res; - - __asm__ ("popc %1,%0" : "=r" (res) : "r" (w)); - return res; -} - -static inline unsigned int __arch_hweight32(unsigned int w) -{ - unsigned int res; - - __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff)); - return res; -} +extern unsigned long __arch_hweight64(__u64 w); +extern unsigned int __arch_hweight32(unsigned int w); +extern unsigned int __arch_hweight16(unsigned int w); +extern unsigned int __arch_hweight8(unsigned int w); -static inline unsigned int __arch_hweight16(unsigned int w) -{ - unsigned int res; - - __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff)); - return res; -} - -static inline unsigned int __arch_hweight8(unsigned int w) -{ - unsigned int res; - - __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff)); - return res; -} - -#else - -#include <asm-generic/bitops/arch_hweight.h> - -#endif #include <asm-generic/bitops/const_hweight.h> #include <asm-generic/bitops/lock.h> #endif /* __KERNEL__ */ diff --git a/arch/sparc/include/asm/div64.h b/arch/sparc/include/asm/div64.h deleted file mode 100644 index 6cd978cefb28..000000000000 --- a/arch/sparc/include/asm/div64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/div64.h> diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index 64f7a00b3747..7df8b7f544d4 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -59,15 +59,33 @@ #define R_SPARC_6 45 /* Bits present in AT_HWCAP, primarily for Sparc32. */ - -#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */ -#define HWCAP_SPARC_STBAR 2 -#define HWCAP_SPARC_SWAP 4 -#define HWCAP_SPARC_MULDIV 8 -#define HWCAP_SPARC_V9 16 -#define HWCAP_SPARC_ULTRA3 32 -#define HWCAP_SPARC_BLKINIT 64 -#define HWCAP_SPARC_N2 128 +#define HWCAP_SPARC_FLUSH 0x00000001 +#define HWCAP_SPARC_STBAR 0x00000002 +#define HWCAP_SPARC_SWAP 0x00000004 +#define HWCAP_SPARC_MULDIV 0x00000008 +#define HWCAP_SPARC_V9 0x00000010 +#define HWCAP_SPARC_ULTRA3 0x00000020 +#define HWCAP_SPARC_BLKINIT 0x00000040 +#define HWCAP_SPARC_N2 0x00000080 + +/* Solaris compatible AT_HWCAP bits. 
*/ +#define AV_SPARC_MUL32 0x00000100 /* 32x32 multiply is efficient */ +#define AV_SPARC_DIV32 0x00000200 /* 32x32 divide is efficient */ +#define AV_SPARC_FSMULD 0x00000400 /* 'fsmuld' is efficient */ +#define AV_SPARC_V8PLUS 0x00000800 /* v9 insn available to 32bit */ +#define AV_SPARC_POPC 0x00001000 /* 'popc' is efficient */ +#define AV_SPARC_VIS 0x00002000 /* VIS insns available */ +#define AV_SPARC_VIS2 0x00004000 /* VIS2 insns available */ +#define AV_SPARC_ASI_BLK_INIT 0x00008000 /* block init ASIs available */ +#define AV_SPARC_FMAF 0x00010000 /* fused multiply-add */ +#define AV_SPARC_VIS3 0x00020000 /* VIS3 insns available */ +#define AV_SPARC_HPC 0x00040000 /* HPC insns available */ +#define AV_SPARC_RANDOM 0x00080000 /* 'random' insn available */ +#define AV_SPARC_TRANS 0x00100000 /* transaction insns available */ +#define AV_SPARC_FJFMAU 0x00200000 /* unfused multiply-add */ +#define AV_SPARC_IMA 0x00400000 /* integer multiply-add */ +#define AV_SPARC_ASI_CACHE_SPARING \ + 0x00800000 /* cache sparing ASIs available */ #define CORE_DUMP_USE_REGSET @@ -162,33 +180,8 @@ typedef struct { #define ELF_ET_DYN_BASE 0x0000010000000000UL #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL - -/* This yields a mask that user programs can use to figure out what - instruction set this cpu supports. */ - -/* On Ultra, we support all of the v8 capabilities. */ -static inline unsigned int sparc64_elf_hwcap(void) -{ - unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | - HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | - HWCAP_SPARC_V9); - - if (tlb_type == cheetah || tlb_type == cheetah_plus) - cap |= HWCAP_SPARC_ULTRA3; - else if (tlb_type == hypervisor) { - if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || - sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) - cap |= HWCAP_SPARC_BLKINIT; - if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) - cap |= HWCAP_SPARC_N2; - } - - return cap; -} - -#define ELF_HWCAP sparc64_elf_hwcap() +extern unsigned long sparc64_elf_hwcap; +#define ELF_HWCAP sparc64_elf_hwcap /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 7a5f80df15d0..015a761eaa32 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -2927,6 +2927,13 @@ extern unsigned long sun4v_ncs_request(unsigned long request, #define HV_FAST_FIRE_GET_PERFREG 0x120 #define HV_FAST_FIRE_SET_PERFREG 0x121 +#define HV_FAST_REBOOT_DATA_SET 0x172 + +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_reboot_data_set(unsigned long ra, + unsigned long len); +#endif + /* Function numbers for HV_CORE_TRAP. 
*/ #define HV_CORE_SET_VER 0x00 #define HV_CORE_PUTCHAR 0x01 @@ -2940,11 +2947,17 @@ extern unsigned long sun4v_ncs_request(unsigned long request, #define HV_GRP_CORE 0x0001 #define HV_GRP_INTR 0x0002 #define HV_GRP_SOFT_STATE 0x0003 +#define HV_GRP_TM 0x0080 #define HV_GRP_PCI 0x0100 #define HV_GRP_LDOM 0x0101 #define HV_GRP_SVC_CHAN 0x0102 #define HV_GRP_NCS 0x0103 #define HV_GRP_RNG 0x0104 +#define HV_GRP_PBOOT 0x0105 +#define HV_GRP_TPM 0x0107 +#define HV_GRP_SDIO 0x0108 +#define HV_GRP_SDIO_ERR 0x0109 +#define HV_GRP_REBOOT_DATA 0x0110 #define HV_GRP_NIAG_PERF 0x0200 #define HV_GRP_FIRE_PERF 0x0201 #define HV_GRP_N2_CPU 0x0202 diff --git a/arch/sparc/include/asm/irq_regs.h b/arch/sparc/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/arch/sparc/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/arch/sparc/include/asm/local.h b/arch/sparc/include/asm/local.h deleted file mode 100644 index bc80815a435c..000000000000 --- a/arch/sparc/include/asm/local.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _SPARC_LOCAL_H -#define _SPARC_LOCAL_H - -#include <asm-generic/local.h> - -#endif diff --git a/arch/sparc/include/asm/local64.h b/arch/sparc/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/arch/sparc/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local64.h> diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 83c571d8c8a7..1a8afd1ad04f 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -133,29 +133,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; sub TSB, 0x8, TSB; \ TSB_STORE(TSB, TAG); -#define KTSB_LOAD_QUAD(TSB, REG) \ - ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; - -#define KTSB_STORE(ADDR, VAL) \ - stxa VAL, [ADDR] ASI_N; - -#define KTSB_LOCK_TAG(TSB, REG1, REG2) \ -99: lduwa [TSB] ASI_N, REG1; \ - sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ - andcc REG1, REG2, %g0; \ - bne,pn %icc, 99b; \ - nop; \ - casa [TSB] ASI_N, REG1, REG2;\ - cmp REG1, REG2; \ - bne,pn %icc, 99b; \ - nop; \ - -#define KTSB_WRITE(TSB, TTE, TAG) \ - add TSB, 0x8, TSB; \ - stxa TTE, [TSB] ASI_N; \ - sub TSB, 0x8, TSB; \ - stxa TAG, [TSB] ASI_N; - /* Do a kernel page table walk. Leaves physical PTE pointer in * REG1. Jumps to FAIL_LABEL on early page table walk termination. * VADDR will not be clobbered, but REG2 will. @@ -239,6 +216,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; (KERNEL_TSB_SIZE_BYTES / 16) #define KERNEL_TSB4M_NENTRIES 4096 +#define KTSB_PHYS_SHIFT 15 + /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries * and the found TTE will be left in REG1. REG3 and REG4 must @@ -247,13 +226,22 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; * VADDR and TAG will be preserved and not clobbered by this macro. 
*/ #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ - sethi %hi(swapper_tsb), REG1; \ +661: sethi %hi(swapper_tsb), REG1; \ or REG1, %lo(swapper_tsb), REG1; \ + .section .swapper_tsb_phys_patch, "ax"; \ + .word 661b; \ + .previous; \ +661: nop; \ + .section .tsb_ldquad_phys_patch, "ax"; \ + .word 661b; \ + sllx REG1, KTSB_PHYS_SHIFT, REG1; \ + sllx REG1, KTSB_PHYS_SHIFT, REG1; \ + .previous; \ srlx VADDR, PAGE_SHIFT, REG2; \ and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \ sllx REG2, 4, REG2; \ add REG1, REG2, REG2; \ - KTSB_LOAD_QUAD(REG2, REG3); \ + TSB_LOAD_QUAD(REG2, REG3); \ cmp REG3, TAG; \ be,a,pt %xcc, OK_LABEL; \ mov REG4, REG1; @@ -263,12 +251,21 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; * we can make use of that for the index computation. */ #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ - sethi %hi(swapper_4m_tsb), REG1; \ +661: sethi %hi(swapper_4m_tsb), REG1; \ or REG1, %lo(swapper_4m_tsb), REG1; \ + .section .swapper_4m_tsb_phys_patch, "ax"; \ + .word 661b; \ + .previous; \ +661: nop; \ + .section .tsb_ldquad_phys_patch, "ax"; \ + .word 661b; \ + sllx REG1, KTSB_PHYS_SHIFT, REG1; \ + sllx REG1, KTSB_PHYS_SHIFT, REG1; \ + .previous; \ and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \ sllx REG2, 4, REG2; \ add REG1, REG2, REG2; \ - KTSB_LOAD_QUAD(REG2, REG3); \ + TSB_LOAD_QUAD(REG2, REG3); \ cmp REG3, TAG; \ be,a,pt %xcc, OK_LABEL; \ mov REG4, REG1; diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 17cf290dc2bc..9810fd881058 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -396,6 +396,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) , cpu_data(0).clock_tick #endif ); + cpucap_info(m); #ifdef CONFIG_SMP smp_bogo(m); #endif diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index dd1342c0a3be..490e5418740d 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -15,12 +15,15 @@ #include <linux/reboot.h> #include <linux/cpu.h> +#include <asm/hypervisor.h> #include <asm/ldc.h> #include <asm/vio.h> #include <asm/mdesc.h> #include <asm/head.h> #include <asm/irq.h> +#include "kernel.h" + #define DRV_MODULE_NAME "ds" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "1.0" @@ -828,18 +831,32 @@ void ldom_set_var(const char *var, const char *value) } } +static char full_boot_str[256] __attribute__((aligned(32))); +static int reboot_data_supported; + void ldom_reboot(const char *boot_command) { /* Don't bother with any of this if the boot_command * is empty. 
*/ if (boot_command && strlen(boot_command)) { - char full_boot_str[256]; + unsigned long len; strcpy(full_boot_str, "boot "); strcpy(full_boot_str + strlen("boot "), boot_command); + len = strlen(full_boot_str); - ldom_set_var("reboot-command", full_boot_str); + if (reboot_data_supported) { + unsigned long ra = kimage_addr_to_ra(full_boot_str); + unsigned long hv_ret; + + hv_ret = sun4v_reboot_data_set(ra, len); + if (hv_ret != HV_EOK) + pr_err("SUN4V: Unable to set reboot data " + "hv_ret=%lu\n", hv_ret); + } else { + ldom_set_var("reboot-command", full_boot_str); + } } sun4v_mach_sir(); } @@ -1237,6 +1254,15 @@ static struct vio_driver ds_driver = { static int __init ds_init(void) { + unsigned long hv_ret, major, minor; + + hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); + if (hv_ret == HV_EOK) { + pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", + major, minor); + reboot_data_supported = 1; + } + kthread_run(ds_thread, NULL, "kldomd"); return vio_register_driver(&ds_driver); diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index d1f1361c4167..e27f8ea8656e 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -42,6 +42,20 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, extern void fpload(unsigned long *fpregs, unsigned long *fsr); #else /* CONFIG_SPARC32 */ +struct popc_3insn_patch_entry { + unsigned int addr; + unsigned int insns[3]; +}; +extern struct popc_3insn_patch_entry __popc_3insn_patch, + __popc_3insn_patch_end; + +struct popc_6insn_patch_entry { + unsigned int addr; + unsigned int insns[6]; +}; +extern struct popc_6insn_patch_entry __popc_6insn_patch, + __popc_6insn_patch_end; + extern void __init per_cpu_patch(void); extern void __init sun4v_patch(void); extern void __init boot_cpu_id_too_large(int cpu); diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index c752603a7c0d..0eac1b2fc53d 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -559,7 +559,7 @@ niagara2_patch: nop call niagara_patch_bzero nop - call niagara2_patch_pageops + call niagara_patch_pageops nop ba,a,pt %xcc, 80f diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index d306e648c33c..c2d055d8ba9e 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -28,11 +28,17 @@ static struct api_info api_table[] = { { .group = HV_GRP_CORE, .flags = FLAG_PRE_API }, { .group = HV_GRP_INTR, }, { .group = HV_GRP_SOFT_STATE, }, + { .group = HV_GRP_TM, }, { .group = HV_GRP_PCI, .flags = FLAG_PRE_API }, { .group = HV_GRP_LDOM, }, { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API }, { .group = HV_GRP_NCS, .flags = FLAG_PRE_API }, { .group = HV_GRP_RNG, }, + { .group = HV_GRP_PBOOT, }, + { .group = HV_GRP_TPM, }, + { .group = HV_GRP_SDIO, }, + { .group = HV_GRP_SDIO_ERR, }, + { .group = HV_GRP_REBOOT_DATA, }, { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, { .group = HV_GRP_FIRE_PERF, }, { .group = HV_GRP_N2_CPU, }, diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index 8a5f35ffb15e..58d60de4d65b 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -798,3 +798,10 @@ ENTRY(sun4v_niagara2_setperf) retl nop ENDPROC(sun4v_niagara2_setperf) + +ENTRY(sun4v_reboot_data_set) + mov HV_FAST_REBOOT_DATA_SET, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_reboot_data_set) diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h index 6f6544cfa0ef..fd6c36b1df74 100644 --- a/arch/sparc/kernel/kernel.h +++ 
b/arch/sparc/kernel/kernel.h @@ -4,12 +4,27 @@ #include <linux/interrupt.h> #include <asm/traps.h> +#include <asm/head.h> +#include <asm/io.h> /* cpu.c */ extern const char *sparc_pmu_type; extern unsigned int fsr_storage; extern int ncpus_probed; +#ifdef CONFIG_SPARC64 +/* setup_64.c */ +struct seq_file; +extern void cpucap_info(struct seq_file *); + +static inline unsigned long kimage_addr_to_ra(const char *p) +{ + unsigned long val = (unsigned long) p; + + return kern_base + (val - KERNBASE); +} +#endif + #ifdef CONFIG_SPARC32 /* cpu.c */ extern void cpu_probe(void); diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 1d361477d7d6..79f310364849 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -47,16 +47,16 @@ kvmap_itlb_tsb_miss: kvmap_itlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 mov 1, %g7 sllx %g7, TSB_TAG_INVALID_BIT, %g7 brgez,a,pn %g5, kvmap_itlb_longpath - KTSB_STORE(%g1, %g7) + TSB_STORE(%g1, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ @@ -102,9 +102,9 @@ kvmap_itlb_longpath: kvmap_itlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_itlb_load nop @@ -112,17 +112,17 @@ kvmap_itlb_obp: kvmap_dtlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_dtlb_load nop .align 32 kvmap_dtlb_tsb4m_load: - KTSB_LOCK_TAG(%g1, %g2, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_LOCK_TAG(%g1, %g2, %g7) + TSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_dtlb_load nop @@ -222,16 +222,16 @@ kvmap_linear_patch: kvmap_dtlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) /* Load and check PTE. 
*/ ldxa [%g5] ASI_PHYS_USE_EC, %g5 mov 1, %g7 sllx %g7, TSB_TAG_INVALID_BIT, %g7 brgez,a,pn %g5, kvmap_dtlb_longpath - KTSB_STORE(%g1, %g7) + TSB_STORE(%g1, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 42f28c7420e1..acaebb63c4fd 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -508,6 +508,8 @@ const char *mdesc_node_name(struct mdesc_handle *hp, u64 node) } EXPORT_SYMBOL(mdesc_node_name); +static u64 max_cpus = 64; + static void __init report_platform_properties(void) { struct mdesc_handle *hp = mdesc_grab(); @@ -543,8 +545,10 @@ static void __init report_platform_properties(void) if (v) printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v); v = mdesc_get_property(hp, pn, "max-cpus", NULL); - if (v) - printk("PLATFORM: max-cpus [%llu]\n", *v); + if (v) { + max_cpus = *v; + printk("PLATFORM: max-cpus [%llu]\n", max_cpus); + } #ifdef CONFIG_SMP { @@ -715,7 +719,7 @@ static void __cpuinit set_proc_ids(struct mdesc_handle *hp) } static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, - unsigned char def) + unsigned long def, unsigned long max) { u64 val; @@ -726,6 +730,9 @@ static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, if (!val || val >= 64) goto use_default; + if (val > max) + val = max; + *mask = ((1U << val) * 64U) - 1U; return; @@ -736,19 +743,28 @@ use_default: static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, struct trap_per_cpu *tb) { + static int printed; const u64 *val; val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL); - get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); + get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2)); val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL); - get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); + get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8); val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL); - get_one_mondo_bits(val, &tb->resum_qmask, 6); + get_one_mondo_bits(val, &tb->resum_qmask, 6, 7); val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL); - get_one_mondo_bits(val, &tb->nonresum_qmask, 2); + get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2); + if (!printed++) { + pr_info("SUN4V: Mondo queue sizes " + "[cpu(%u) dev(%u) r(%u) nr(%u)]\n", + tb->cpu_mondo_qmask + 1, + tb->dev_mondo_qmask + 1, + tb->resum_qmask + 1, + tb->nonresum_qmask + 1); + } } static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index c4dd0999da86..3e9daea1653d 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -29,6 +29,7 @@ #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/initrd.h> +#include <linux/module.h> #include <asm/system.h> #include <asm/io.h> @@ -46,6 +47,8 @@ #include <asm/mmu.h> #include <asm/ns87303.h> #include <asm/btext.h> +#include <asm/elf.h> +#include <asm/mdesc.h> #ifdef CONFIG_IP_PNP #include <net/ipconfig.h> @@ -269,6 +272,40 @@ void __init sun4v_patch(void) sun4v_hvapi_init(); } +static void __init popc_patch(void) +{ + struct popc_3insn_patch_entry *p3; + struct popc_6insn_patch_entry *p6; + + p3 = &__popc_3insn_patch; + while (p3 < &__popc_3insn_patch_end) { + unsigned long i, addr = p3->addr; + + for (i = 0; i < 3; i++) { + *(unsigned int *) (addr + (i * 4)) = p3->insns[i]; + wmb(); + 
__asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p3++; + } + + p6 = &__popc_6insn_patch; + while (p6 < &__popc_6insn_patch_end) { + unsigned long i, addr = p6->addr; + + for (i = 0; i < 6; i++) { + *(unsigned int *) (addr + (i * 4)) = p6->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p6++; + } +} + #ifdef CONFIG_SMP void __init boot_cpu_id_too_large(int cpu) { @@ -278,6 +315,154 @@ void __init boot_cpu_id_too_large(int cpu) } #endif +/* On Ultra, we support all of the v8 capabilities. */ +unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | + HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | + HWCAP_SPARC_V9); +EXPORT_SYMBOL(sparc64_elf_hwcap); + +static const char *hwcaps[] = { + "flush", "stbar", "swap", "muldiv", "v9", + "ultra3", "blkinit", "n2", + + /* These strings are as they appear in the machine description + * 'hwcap-list' property for cpu nodes. + */ + "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2", + "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau", + "ima", "cspare", +}; + +void cpucap_info(struct seq_file *m) +{ + unsigned long caps = sparc64_elf_hwcap; + int i, printed = 0; + + seq_puts(m, "cpucaps\t\t: "); + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + if (caps & bit) { + seq_printf(m, "%s%s", + printed ? "," : "", hwcaps[i]); + printed++; + } + } + seq_putc(m, '\n'); +} + +static void __init report_hwcaps(unsigned long caps) +{ + int i, printed = 0; + + printk(KERN_INFO "CPU CAPS: ["); + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + if (caps & bit) { + printk(KERN_CONT "%s%s", + printed ? "," : "", hwcaps[i]); + if (++printed == 8) { + printk(KERN_CONT "]\n"); + printk(KERN_INFO "CPU CAPS: ["); + printed = 0; + } + } + } + printk(KERN_CONT "]\n"); +} + +static unsigned long __init mdesc_cpu_hwcap_list(void) +{ + struct mdesc_handle *hp; + unsigned long caps = 0; + const char *prop; + int len; + u64 pn; + + hp = mdesc_grab(); + if (!hp) + return 0; + + pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu"); + if (pn == MDESC_NODE_NULL) + goto out; + + prop = mdesc_get_property(hp, pn, "hwcap-list", &len); + if (!prop) + goto out; + + while (len) { + int i, plen; + + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + + if (!strcmp(prop, hwcaps[i])) { + caps |= bit; + break; + } + } + + plen = strlen(prop) + 1; + prop += plen; + len -= plen; + } + +out: + mdesc_release(hp); + return caps; +} + +/* This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. 
+ */ +static void __init init_sparc64_elf_hwcap(void) +{ + unsigned long cap = sparc64_elf_hwcap; + unsigned long mdesc_caps; + + if (tlb_type == cheetah || tlb_type == cheetah_plus) + cap |= HWCAP_SPARC_ULTRA3; + else if (tlb_type == hypervisor) { + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + cap |= HWCAP_SPARC_BLKINIT; + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + cap |= HWCAP_SPARC_N2; + } + + cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS); + + mdesc_caps = mdesc_cpu_hwcap_list(); + if (!mdesc_caps) { + if (tlb_type == spitfire) + cap |= AV_SPARC_VIS; + if (tlb_type == cheetah || tlb_type == cheetah_plus) + cap |= AV_SPARC_VIS | AV_SPARC_VIS2; + if (tlb_type == cheetah_plus) + cap |= AV_SPARC_POPC; + if (tlb_type == hypervisor) { + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) + cap |= AV_SPARC_ASI_BLK_INIT; + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | + AV_SPARC_ASI_BLK_INIT | + AV_SPARC_POPC); + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | + AV_SPARC_FMAF); + } + } + sparc64_elf_hwcap = cap | mdesc_caps; + + report_hwcaps(sparc64_elf_hwcap); + + if (sparc64_elf_hwcap & AV_SPARC_POPC) + popc_patch(); +} + void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ @@ -337,6 +522,7 @@ void __init setup_arch(char **cmdline_p) init_cur_cpu_trap(current_thread_info()); paging_init(); + init_sparc64_elf_hwcap(); } extern int stop_a_enabled; diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c index 372ad59c4cba..83b47ab02d96 100644 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ b/arch/sparc/kernel/sparc_ksyms_64.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/bitops.h> #include <asm/system.h> #include <asm/cpudata.h> @@ -38,5 +39,15 @@ EXPORT_SYMBOL(sun4v_niagara_setperf); EXPORT_SYMBOL(sun4v_niagara2_getperf); EXPORT_SYMBOL(sun4v_niagara2_setperf); +/* from hweight.S */ +EXPORT_SYMBOL(__arch_hweight8); +EXPORT_SYMBOL(__arch_hweight16); +EXPORT_SYMBOL(__arch_hweight32); +EXPORT_SYMBOL(__arch_hweight64); + +/* from ffs_ffz.S */ +EXPORT_SYMBOL(ffs); +EXPORT_SYMBOL(__ffs); + /* Exporting a symbol from /init/main.c */ EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c index 8cdbe5946b43..c59af546f522 100644 --- a/arch/sparc/kernel/sstate.c +++ b/arch/sparc/kernel/sstate.c @@ -14,14 +14,9 @@ #include <asm/head.h> #include <asm/io.h> -static int hv_supports_soft_state; - -static unsigned long kimage_addr_to_ra(const char *p) -{ - unsigned long val = (unsigned long) p; +#include "kernel.h" - return kern_base + (val - KERNBASE); -} +static int hv_supports_soft_state; static void do_set_sstate(unsigned long state, const char *msg) { diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 35cff1673aa4..76e4ac1a13e1 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -22,6 +22,7 @@ #include <linux/bitops.h> #include <linux/perf_event.h> #include <linux/ratelimit.h> +#include <linux/bitops.h> #include <asm/fpumacro.h> enum direction { @@ -373,16 +374,11 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) } } -static char popc_helper[] = { -0, 1, 1, 2, 1, 2, 2, 3, -1, 2, 
2, 3, 2, 3, 3, 4, -}; - int handle_popc(u32 insn, struct pt_regs *regs) { - u64 value; - int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + int ret, rd = ((insn >> 25) & 0x1f); + u64 value; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (insn & 0x2000) { @@ -392,10 +388,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) maybe_flush_windows(0, insn & 0x1f, rd, from_kernel); value = fetch_reg(insn & 0x1f, regs); } - for (ret = 0, i = 0; i < 16; i++) { - ret += popc_helper[value & 0xf]; - value >>= 4; - } + ret = hweight64(value); if (rd < 16) { if (rd) regs->u_regs[rd] = ret; diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index c0220759003e..0e1605697b49 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -107,7 +107,26 @@ SECTIONS *(.sun4v_2insn_patch) __sun4v_2insn_patch_end = .; } - + .swapper_tsb_phys_patch : { + __swapper_tsb_phys_patch = .; + *(.swapper_tsb_phys_patch) + __swapper_tsb_phys_patch_end = .; + } + .swapper_4m_tsb_phys_patch : { + __swapper_4m_tsb_phys_patch = .; + *(.swapper_4m_tsb_phys_patch) + __swapper_4m_tsb_phys_patch_end = .; + } + .popc_3insn_patch : { + __popc_3insn_patch = .; + *(.popc_3insn_patch) + __popc_3insn_patch_end = .; + } + .popc_6insn_patch : { + __popc_6insn_patch = .; + *(.popc_6insn_patch) + __popc_6insn_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) . = ALIGN(PAGE_SIZE); diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 7f01b8fce8bc..a3fc4375a150 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -31,13 +31,13 @@ lib-$(CONFIG_SPARC64) += NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o lib-$(CONFIG_SPARC64) += NGpatch.o NGpage.o NGbzero.o lib-$(CONFIG_SPARC64) += NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o -lib-$(CONFIG_SPARC64) += NG2patch.o NG2page.o +lib-$(CONFIG_SPARC64) += NG2patch.o lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o -lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o +lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o obj-y += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o diff --git a/arch/sparc/lib/NG2page.S b/arch/sparc/lib/NG2page.S deleted file mode 100644 index 73b6b7c72cbf..000000000000 --- a/arch/sparc/lib/NG2page.S +++ /dev/null @@ -1,61 +0,0 @@ -/* NG2page.S: Niagara-2 optimized clear and copy page. - * - * Copyright (C) 2007 (davem@davemloft.net) - */ - -#include <asm/asi.h> -#include <asm/page.h> -#include <asm/visasm.h> - - .text - .align 32 - - /* This is heavily simplified from the sun4u variants - * because Niagara-2 does not have any D-cache aliasing issues. 
- */ -NG2copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ - prefetch [%o1 + 0x00], #one_read - prefetch [%o1 + 0x40], #one_read - VISEntryHalf - set PAGE_SIZE, %g7 - sub %o0, %o1, %g3 -1: stxa %g0, [%o1 + %g3] ASI_BLK_INIT_QUAD_LDD_P - subcc %g7, 64, %g7 - ldda [%o1] ASI_BLK_P, %f0 - stda %f0, [%o1 + %g3] ASI_BLK_P - add %o1, 64, %o1 - bne,pt %xcc, 1b - prefetch [%o1 + 0x40], #one_read - membar #Sync - VISExitHalf - retl - nop - -#define BRANCH_ALWAYS 0x10680000 -#define NOP 0x01000000 -#define NG_DO_PATCH(OLD, NEW) \ - sethi %hi(NEW), %g1; \ - or %g1, %lo(NEW), %g1; \ - sethi %hi(OLD), %g2; \ - or %g2, %lo(OLD), %g2; \ - sub %g1, %g2, %g1; \ - sethi %hi(BRANCH_ALWAYS), %g3; \ - sll %g1, 11, %g1; \ - srl %g1, 11 + 2, %g1; \ - or %g3, %lo(BRANCH_ALWAYS), %g3; \ - or %g3, %g1, %g3; \ - stw %g3, [%g2]; \ - sethi %hi(NOP), %g3; \ - or %g3, %lo(NOP), %g3; \ - stw %g3, [%g2 + 0x4]; \ - flush %g2; - - .globl niagara2_patch_pageops - .type niagara2_patch_pageops,#function -niagara2_patch_pageops: - NG_DO_PATCH(copy_user_page, NG2copy_user_page) - NG_DO_PATCH(_clear_page, NGclear_page) - NG_DO_PATCH(clear_user_page, NGclear_user_page) - retl - nop - .size niagara2_patch_pageops,.-niagara2_patch_pageops diff --git a/arch/sparc/lib/NGpage.S b/arch/sparc/lib/NGpage.S index 428920de05ba..b9e790b9c6b8 100644 --- a/arch/sparc/lib/NGpage.S +++ b/arch/sparc/lib/NGpage.S @@ -16,55 +16,91 @@ */ NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ - prefetch [%o1 + 0x00], #one_read - mov 8, %g1 - mov 16, %g2 - mov 24, %g3 + save %sp, -192, %sp + rd %asi, %g3 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi set PAGE_SIZE, %g7 + prefetch [%i1 + 0x00], #one_read + prefetch [%i1 + 0x40], #one_read -1: ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 - ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 - prefetch [%o1 + 0x40], #one_read - add %o1, 32, %o1 - stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P - stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P - ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 - stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P - stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P - ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 - add %o1, 32, %o1 - add %o0, 32, %o0 - stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P - stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P - stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P - stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P - subcc %g7, 64, %g7 +1: prefetch [%i1 + 0x80], #one_read + prefetch [%i1 + 0xc0], #one_read + ldda [%i1 + 0x00] %asi, %o2 + ldda [%i1 + 0x10] %asi, %o4 + ldda [%i1 + 0x20] %asi, %l2 + ldda [%i1 + 0x30] %asi, %l4 + stxa %o2, [%i0 + 0x00] %asi + stxa %o3, [%i0 + 0x08] %asi + stxa %o4, [%i0 + 0x10] %asi + stxa %o5, [%i0 + 0x18] %asi + stxa %l2, [%i0 + 0x20] %asi + stxa %l3, [%i0 + 0x28] %asi + stxa %l4, [%i0 + 0x30] %asi + stxa %l5, [%i0 + 0x38] %asi + ldda [%i1 + 0x40] %asi, %o2 + ldda [%i1 + 0x50] %asi, %o4 + ldda [%i1 + 0x60] %asi, %l2 + ldda [%i1 + 0x70] %asi, %l4 + stxa %o2, [%i0 + 0x40] %asi + stxa %o3, [%i0 + 0x48] %asi + stxa %o4, [%i0 + 0x50] %asi + stxa %o5, [%i0 + 0x58] %asi + stxa %l2, [%i0 + 0x60] %asi + stxa %l3, [%i0 + 0x68] %asi + stxa %l4, [%i0 + 0x70] %asi + stxa %l5, [%i0 + 0x78] %asi + add %i1, 128, %i1 + subcc %g7, 128, %g7 bne,pt %xcc, 1b - add %o0, 32, %o0 + add %i0, 128, %i0 + wr %g3, 0x0, %asi membar #Sync - retl - nop + ret + restore - .globl NGclear_page, NGclear_user_page + .align 32 NGclear_page: /* %o0=dest */ NGclear_user_page: /* %o0=dest, %o1=vaddr */ - mov 8, %g1 - mov 16, %g2 - mov 24, %g3 + rd %asi, %g3 + wr %g0, 
ASI_BLK_INIT_QUAD_LDD_P, %asi set PAGE_SIZE, %g7 -1: stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P - stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P - stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P - stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P - add %o0, 32, %o0 - stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P - stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P - stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P - stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P - subcc %g7, 64, %g7 +1: stxa %g0, [%o0 + 0x00] %asi + stxa %g0, [%o0 + 0x08] %asi + stxa %g0, [%o0 + 0x10] %asi + stxa %g0, [%o0 + 0x18] %asi + stxa %g0, [%o0 + 0x20] %asi + stxa %g0, [%o0 + 0x28] %asi + stxa %g0, [%o0 + 0x30] %asi + stxa %g0, [%o0 + 0x38] %asi + stxa %g0, [%o0 + 0x40] %asi + stxa %g0, [%o0 + 0x48] %asi + stxa %g0, [%o0 + 0x50] %asi + stxa %g0, [%o0 + 0x58] %asi + stxa %g0, [%o0 + 0x60] %asi + stxa %g0, [%o0 + 0x68] %asi + stxa %g0, [%o0 + 0x70] %asi + stxa %g0, [%o0 + 0x78] %asi + stxa %g0, [%o0 + 0x80] %asi + stxa %g0, [%o0 + 0x88] %asi + stxa %g0, [%o0 + 0x90] %asi + stxa %g0, [%o0 + 0x98] %asi + stxa %g0, [%o0 + 0xa0] %asi + stxa %g0, [%o0 + 0xa8] %asi + stxa %g0, [%o0 + 0xb0] %asi + stxa %g0, [%o0 + 0xb8] %asi + stxa %g0, [%o0 + 0xc0] %asi + stxa %g0, [%o0 + 0xc8] %asi + stxa %g0, [%o0 + 0xd0] %asi + stxa %g0, [%o0 + 0xd8] %asi + stxa %g0, [%o0 + 0xe0] %asi + stxa %g0, [%o0 + 0xe8] %asi + stxa %g0, [%o0 + 0xf0] %asi + stxa %g0, [%o0 + 0xf8] %asi + subcc %g7, 256, %g7 bne,pt %xcc, 1b - add %o0, 32, %o0 + add %o0, 256, %o0 + wr %g3, 0x0, %asi membar #Sync retl nop diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index 8600eb2461b5..1d32b54089aa 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -65,7 +65,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u) if (ret != u) v->counter += a; spin_unlock_irqrestore(ATOMIC_HASH(v), flags); - return ret != u; + return ret; } EXPORT_SYMBOL(__atomic_add_unless); diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S new file mode 100644 index 000000000000..b39389f69899 --- /dev/null +++ b/arch/sparc/lib/ffs.S @@ -0,0 +1,84 @@ +#include <linux/linkage.h> + + .register %g2,#scratch + + .text + .align 32 + +ENTRY(ffs) + brnz,pt %o0, 1f + mov 1, %o1 + retl + clr %o0 + nop + nop +ENTRY(__ffs) + sllx %o0, 32, %g1 /* 1 */ + srlx %o0, 32, %g2 + + clr %o1 /* 2 */ + movrz %g1, %g2, %o0 + + movrz %g1, 32, %o1 /* 3 */ +1: clr %o2 + + sllx %o0, (64 - 16), %g1 /* 4 */ + srlx %o0, 16, %g2 + + movrz %g1, %g2, %o0 /* 5 */ + clr %o3 + + movrz %g1, 16, %o2 /* 6 */ + clr %o4 + + and %o0, 0xff, %g1 /* 7 */ + srlx %o0, 8, %g2 + + movrz %g1, %g2, %o0 /* 8 */ + clr %o5 + + movrz %g1, 8, %o3 /* 9 */ + add %o2, %o1, %o2 + + and %o0, 0xf, %g1 /* 10 */ + srlx %o0, 4, %g2 + + movrz %g1, %g2, %o0 /* 11 */ + add %o2, %o3, %o2 + + movrz %g1, 4, %o4 /* 12 */ + + and %o0, 0x3, %g1 /* 13 */ + srlx %o0, 2, %g2 + + movrz %g1, %g2, %o0 /* 14 */ + add %o2, %o4, %o2 + + movrz %g1, 2, %o5 /* 15 */ + + and %o0, 0x1, %g1 /* 16 */ + + add %o2, %o5, %o2 /* 17 */ + xor %g1, 0x1, %g1 + + retl /* 18 */ + add %o2, %g1, %o0 +ENDPROC(ffs) +ENDPROC(__ffs) + + .section .popc_6insn_patch, "ax" + .word ffs + brz,pn %o0, 98f + neg %o0, %g1 + xnor %o0, %g1, %o1 + popc %o1, %o0 +98: retl + nop + .word __ffs + neg %o0, %g1 + xnor %o0, %g1, %o1 + popc %o1, %o0 + retl + sub %o0, 1, %o0 + nop + .previous diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S new file mode 100644 index 000000000000..95414e0a6808 --- /dev/null +++ b/arch/sparc/lib/hweight.S @@ -0,0 +1,51 @@ +#include 
<linux/linkage.h> + + .text + .align 32 +ENTRY(__arch_hweight8) + ba,pt %xcc, __sw_hweight8 + nop + nop +ENDPROC(__arch_hweight8) + .section .popc_3insn_patch, "ax" + .word __arch_hweight8 + sllx %o0, 64-8, %g1 + retl + popc %g1, %o0 + .previous + +ENTRY(__arch_hweight16) + ba,pt %xcc, __sw_hweight16 + nop + nop +ENDPROC(__arch_hweight16) + .section .popc_3insn_patch, "ax" + .word __arch_hweight16 + sllx %o0, 64-16, %g1 + retl + popc %g1, %o0 + .previous + +ENTRY(__arch_hweight32) + ba,pt %xcc, __sw_hweight32 + nop + nop +ENDPROC(__arch_hweight32) + .section .popc_3insn_patch, "ax" + .word __arch_hweight32 + sllx %o0, 64-32, %g1 + retl + popc %g1, %o0 + .previous + +ENTRY(__arch_hweight64) + ba,pt %xcc, __sw_hweight64 + nop + nop +ENDPROC(__arch_hweight64) + .section .popc_3insn_patch, "ax" + .word __arch_hweight64 + retl + popc %o0, %o0 + nop + .previous diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3fd8e18bed80..581531dbc8b5 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1597,6 +1597,44 @@ static void __init tsb_phys_patch(void) static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; +static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) +{ + pa >>= KTSB_PHYS_SHIFT; + + while (start < end) { + unsigned int *ia = (unsigned int *)(unsigned long)*start; + + ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10); + __asm__ __volatile__("flush %0" : : "r" (ia)); + + ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff); + __asm__ __volatile__("flush %0" : : "r" (ia + 1)); + + start++; + } +} + +static void ktsb_phys_patch(void) +{ + extern unsigned int __swapper_tsb_phys_patch; + extern unsigned int __swapper_tsb_phys_patch_end; + unsigned long ktsb_pa; + + ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); + patch_one_ktsb_phys(&__swapper_tsb_phys_patch, + &__swapper_tsb_phys_patch_end, ktsb_pa); +#ifndef CONFIG_DEBUG_PAGEALLOC + { + extern unsigned int __swapper_4m_tsb_phys_patch; + extern unsigned int __swapper_4m_tsb_phys_patch_end; + ktsb_pa = (kern_base + + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); + patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch, + &__swapper_4m_tsb_phys_patch_end, ktsb_pa); + } +#endif +} + static void __init sun4v_ktsb_init(void) { unsigned long ktsb_pa; @@ -1716,8 +1754,10 @@ void __init paging_init(void) sun4u_pgprot_init(); if (tlb_type == cheetah_plus || - tlb_type == hypervisor) + tlb_type == hypervisor) { tsb_phys_patch(); + ktsb_phys_patch(); + } if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 45e94aca5bce..3326204e251f 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -15,7 +15,7 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ grant-table.o suspend.o platform-pci-unplug.o \ p2m.o -obj-$(CONFIG_FUNCTION_TRACER) += trace.o +obj-$(CONFIG_FTRACE) += trace.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index a9627e2e3295..df118a825f39 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -93,8 +93,6 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr, if (end <= start) return 0; - printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ", - start, end); for(pfn = start; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn); @@ -107,14 +105,14 @@ static unsigned long __init xen_release_chunk(phys_addr_t 
start_addr, ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); - WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", - start, end, ret); + WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); if (ret == 1) { __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); len++; } } - printk(KERN_CONT "%ld pages freed\n", len); + printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n", + start, end, len); return len; } @@ -140,7 +138,7 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, if (last_end < max_addr) released += xen_release_chunk(last_end, max_addr); - printk(KERN_INFO "released %ld pages of unused memory\n", released); + printk(KERN_INFO "released %lu pages of unused memory\n", released); return released; } diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c index 734beba2a08c..520022d1a181 100644 --- a/arch/x86/xen/trace.c +++ b/arch/x86/xen/trace.c @@ -1,4 +1,5 @@ #include <linux/ftrace.h> +#include <xen/interface/xen.h> #define N(x) [__HYPERVISOR_##x] = "("#x")" static const char *xen_hypercall_names[] = {
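Editor's note on kimage_addr_to_ra(): the helper moved into kernel.h converts a kernel-image virtual address into the real (physical) address a sun4v hypervisor call expects by rebasing it from KERNBASE onto kern_base, the physical load address. The sketch below is a user-space model only; the KERNBASE and kern_base values are made-up stand-ins for the sparc64 symbols, chosen purely for illustration.

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-ins: on sparc64, KERNBASE is the fixed virtual base of
     * the kernel image and kern_base is where the image was physically loaded.
     * The values here are invented for the example.
     */
    #define KERNBASE 0x400000UL
    static unsigned long kern_base = 0x2000000UL;

    static unsigned long kimage_addr_to_ra(const char *p)
    {
            unsigned long val = (unsigned long) p;

            return kern_base + (val - KERNBASE);
    }

    int main(void)
    {
            /* A buffer at offset 0x1234 into the image maps to the same offset
             * from the physical load address.
             */
            const char *buf = (const char *)(KERNBASE + 0x1234);

            assert(kimage_addr_to_ra(buf) == kern_base + 0x1234);
            printf("ra = %#lx\n", kimage_addr_to_ra(buf));
            return 0;
    }

This is the translation ds.c relies on before handing full_boot_str to sun4v_reboot_data_set(), since the hypervisor takes a real address, not a kernel virtual one.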
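Editor's note on the handle_popc() change: the per-nibble lookup table in unaligned_64.c is replaced by hweight64(). A minimal user-space sketch (not kernel code; GCC's __builtin_popcountll stands in for hweight64()) showing that the two computations agree:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Old approach from unaligned_64.c: count bits four at a time via a table. */
    static const char popc_helper[16] = {
            0, 1, 1, 2, 1, 2, 2, 3,
            1, 2, 2, 3, 2, 3, 3, 4,
    };

    static int popc_table(uint64_t value)
    {
            int i, ret = 0;

            for (i = 0; i < 16; i++) {
                    ret += popc_helper[value & 0xf];
                    value >>= 4;
            }
            return ret;
    }

    int main(void)
    {
            uint64_t v[] = { 0, 1, 0xff, 0x8000000000000000ULL, 0xdeadbeefcafef00dULL };
            size_t i;

            for (i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
                    /* __builtin_popcountll models the kernel's hweight64(). */
                    assert(popc_table(v[i]) == __builtin_popcountll(v[i]));
                    printf("%#llx -> %d\n", (unsigned long long)v[i], popc_table(v[i]));
            }
            return 0;
    }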
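Editor's note on the patched ffs()/__ffs() in ffs.S: the popc-based replacement sequences (neg, xnor, popc) rely on the identity that, for non-zero x, ~(x ^ -x) has set bits exactly at and below the lowest set bit of x, so its population count equals the 1-based bit index. A small C sketch checking that identity, again with __builtin_popcountll standing in for the popc instruction; the function names are invented for the example:

    #include <assert.h>
    #include <stdint.h>

    /* ffs() semantics: 1-based index of the lowest set bit, 0 if x == 0. */
    static int ffs_via_popc(uint64_t x)
    {
            if (x == 0)
                    return 0;
            /* ~(x ^ -x) keeps exactly the bits at and below the lowest set bit. */
            return __builtin_popcountll(~(x ^ -x));
    }

    /* Models __ffs(): 0-based index, caller must not pass 0. */
    static int ffs0_via_popc(uint64_t x)
    {
            return __builtin_popcountll(~(x ^ -x)) - 1;
    }

    int main(void)
    {
            int bit;

            for (bit = 0; bit < 64; bit++) {
                    uint64_t x = 1ULL << bit;

                    assert(ffs_via_popc(x) == bit + 1);
                    /* Higher set bits do not change the result. */
                    assert(ffs0_via_popc(x | (x << 1)) == bit);
            }
            assert(ffs_via_popc(0) == 0);
            return 0;
    }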
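Editor's note on the atomic32.c change: __atomic_add_unless() now returns the old counter value instead of a boolean, which is what the generic atomic_add_unless() wrapper (roughly "__atomic_add_unless(v, a, u) != u" in this era's include/linux/atomic.h) assumes. The sketch below is a simplified, non-atomic model of that contract, with a plain int in place of atomic_t and the hashed spinlocks omitted:

    #include <assert.h>

    /* Model of __atomic_add_unless(): add 'a' to *v unless *v == u, and return
     * the value *v held before any addition. Only the return-value contract
     * restored by the sparc32 fix is illustrated here.
     */
    static int model___atomic_add_unless(int *v, int a, int u)
    {
            int old = *v;

            if (old != u)
                    *v += a;
            return old;
    }

    /* Generic wrapper semantics: non-zero iff the addition happened. */
    static int model_atomic_add_unless(int *v, int a, int u)
    {
            return model___atomic_add_unless(v, a, u) != u;
    }

    int main(void)
    {
            int counter = 5;

            /* Counter equals the excluded value: no addition, wrapper reports 0. */
            assert(model_atomic_add_unless(&counter, 1, 5) == 0 && counter == 5);
            /* Otherwise the addition happens and the wrapper reports non-zero. */
            assert(model_atomic_add_unless(&counter, 1, 0) == 1 && counter == 6);
            return 0;
    }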