| field | value | |
|---|---|---|
| author | Thomas Gleixner <tglx@linutronix.de> | 2010-10-12 16:41:22 +0200 |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2010-10-12 16:41:26 +0200 |
| commit | 7c5f13519a67aa7ba3a99155f128d4bdef87d087 (patch) | |
| tree | e4d0537092930a53a85932de83a7861990c58607 /arch | |
| parent | Merge branch 'x86/cleanups' into irq/sparseirq (diff) | |
| parent | x86, hpet: Fix bogus error check in hpet_assign_irq() (diff) | |
Merge branch 'x86/urgent' into irq/sparseirq
Reason: Pull in the latest io_apic bugfixes
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
73 files changed, 637 insertions(+), 403 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig index 4877a8c8ee16..fe48fc7a3eba 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -32,8 +32,9 @@ config HAVE_OPROFILE config KPROBES bool "Kprobes" - depends on KALLSYMS && MODULES + depends on MODULES depends on HAVE_KPROBES + select KALLSYMS help Kprobes allows you to trap at almost any kernel address and execute a callback function. register_kprobe() establishes @@ -45,7 +46,6 @@ config OPTPROBES def_bool y depends on KPROBES && HAVE_OPTPROBES depends on !PREEMPT - select KALLSYMS_ALL config HAVE_EFFICIENT_UNALIGNED_ACCESS bool diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index ab1ee0ab082b..6d159cee5f2f 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -73,8 +73,6 @@ ldq $20, HAE_REG($19); \ stq $21, HAE_CACHE($19); \ stq $21, 0($20); \ - ldq $0, 0($sp); \ - ldq $1, 8($sp); \ 99:; \ ldq $19, 72($sp); \ ldq $20, 80($sp); \ @@ -316,7 +314,7 @@ ret_from_sys_call: cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ ldq $0, SP_OFF($sp) and $0, 8, $0 - beq $0, restore_all + beq $0, ret_to_kernel ret_to_user: /* Make sure need_resched and sigpending don't change between sampling and the rti. */ @@ -329,6 +327,11 @@ restore_all: RESTORE_ALL call_pal PAL_rti +ret_to_kernel: + lda $16, 7 + call_pal PAL_swpipl + br restore_all + .align 3 $syscall_error: /* @@ -657,7 +660,7 @@ kernel_thread: /* We don't actually care for a3 success widgetry in the kernel. Not for positive errno values. */ stq $0, 0($sp) /* $0 */ - br restore_all + br ret_to_kernel .end kernel_thread /* @@ -912,15 +915,6 @@ sys_execve: .end sys_execve .align 4 - .globl osf_sigprocmask - .ent osf_sigprocmask -osf_sigprocmask: - .prologue 0 - mov $sp, $18 - jmp $31, sys_osf_sigprocmask -.end osf_sigprocmask - - .align 4 .globl alpha_ni_syscall .ent alpha_ni_syscall alpha_ni_syscall: diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 842dba308eab..3ec35066f1dc 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) dest[27] = pt->r27; dest[28] = pt->r28; dest[29] = pt->gp; - dest[30] = rdusp(); + dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp; dest[31] = pt->pc; /* Once upon a time this was the PS value. Which is stupid diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 0f6b51ae865a..d290845aef59 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *, /* * The OSF/1 sigprocmask calling sequence is different from the * C sigprocmask() sequence.. - * - * how: - * 1 - SIG_BLOCK - * 2 - SIG_UNBLOCK - * 3 - SIG_SETMASK - * - * We change the range to -1 .. 1 in order to let gcc easily - * use the conditional move instructions. - * - * Note that we don't need to acquire the kernel lock for SMP - * operation, as all of this is local to this thread. */ -SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, - struct pt_regs *, regs) +SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) { - unsigned long oldmask = -EINVAL; - - if ((unsigned long)how-1 <= 2) { - long sign = how-2; /* -1 .. 
1 */ - unsigned long block, unblock; - - newmask &= _BLOCKABLE; - spin_lock_irq(¤t->sighand->siglock); - oldmask = current->blocked.sig[0]; - - unblock = oldmask & ~newmask; - block = oldmask | newmask; - if (!sign) - block = unblock; - if (sign <= 0) - newmask = block; - if (_NSIG_WORDS > 1 && sign > 0) - sigemptyset(¤t->blocked); - current->blocked.sig[0] = newmask; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - regs->r0 = 0; /* special no error return */ + sigset_t oldmask; + sigset_t mask; + unsigned long res; + + siginitset(&mask, newmask & ~_BLOCKABLE); + res = sigprocmask(how, &mask, &oldmask); + if (!res) { + force_successful_syscall_return(); + res = oldmask.sig[0]; } - return oldmask; + return res; } SYSCALL_DEFINE3(osf_sigaction, int, sig, @@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig, old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags)) + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || + __get_user(mask, &act->sa_mask)) return -EFAULT; - __get_user(mask, &act->sa_mask); siginitset(&new_ka.sa.sa_mask, mask); new_ka.ka_restorer = NULL; } @@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig, if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } return ret; diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index ce594ef533cc..a6a1de9db16f 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -58,7 +58,7 @@ sys_call_table: .quad sys_open /* 45 */ .quad alpha_ni_syscall .quad sys_getxgid - .quad osf_sigprocmask + .quad sys_osf_sigprocmask .quad alpha_ni_syscall .quad alpha_ni_syscall /* 50 */ .quad sys_acct diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 553b7cf17bfb..88c97bc7a6f5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -271,7 +271,6 @@ config ARCH_AT91 bool "Atmel AT91" select ARCH_REQUIRE_GPIOLIB select HAVE_CLK - select ARCH_USES_GETTIMEOFFSET help This enables support for systems based on the Atmel AT91RM9200, AT91SAM9 and AT91CAP9 processors. @@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075 ACTLR register. Note that setting specific bits in the ACTLR register may not be available in non-secure mode. +config ARM_ERRATA_742230 + bool "ARM errata: DMB operation may be faulty" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 742230 Cortex-A9 + (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction + between two write operations may not ensure the correct visibility + ordering of the two writes. This workaround sets a specific bit in + the diagnostic register of the Cortex-A9 which causes the DMB + instruction to behave as a DSB, ensuring the correct behaviour of + the two writes. + +config ARM_ERRATA_742231 + bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 742231 Cortex-A9 + (r2p0..r2p2) erratum. 
Under certain conditions, specific to the + Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode, + accessing some data located in the same cache line, may get corrupted + data due to bad handling of the address hazard when the line gets + replaced from one of the CPUs at the same time as another CPU is + accessing it. This workaround sets specific bits in the diagnostic + register of the Cortex-A9 which reduces the linefill issuing + capabilities of the processor. + config PL310_ERRATA_588369 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" depends on CACHE_L2X0 && ARCH_OMAP4 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index b23f6bc46cfa..65a7c1c588a9 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic= $(obj)/font.c: $(FONTC) $(call cmd,shipped) -$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config +$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG) @sed "$(SEDFLAGS)" < $< > $@ diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 7974baacafce..1bec96e85196 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -271,6 +271,14 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); } +int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + if (mask >= PHYS_OFFSET + SZ_64M - 1) + return 0; + + return -EIO; +} + int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) { it8152_io.start = IT8152_IO_BASE + 0x12000; diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index ab68cf1ef80f..e90b167ea848 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); #else #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 1b560825e1cf..7885722bdf4e 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -48,6 +48,8 @@ work_pending: beq no_work_pending mov r0, sp @ 'regs' mov r2, why @ 'syscall' + tst r1, #_TIF_SIGPENDING @ delivering a signal? 
+ movne why, #0 @ prevent further restarts bl do_notify_resume b ret_slow_syscall @ Check work again diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 5e71ccd5e7d3..1276babf84d5 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = { .sda_is_open_drain = 1, .scl_pin = AT91_PIN_PA21, .scl_is_open_drain = 1, - .udelay = 2, /* ~100 kHz */ + .udelay = 5, /* ~100 kHz */ }; static struct platform_device at91sam9g45_twi0_device = { @@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = { .sda_is_open_drain = 1, .scl_pin = AT91_PIN_PB11, .scl_is_open_drain = 1, - .udelay = 2, /* ~100 kHz */ + .udelay = 5, /* ~100 kHz */ }; static struct platform_device at91sam9g45_twi1_device = { diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 3d996b659ff4..9be261beae7d 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = { .virtual = SRAM_VIRT, .pfn = __phys_to_pfn(0x00010000), .length = SZ_32K, - /* MT_MEMORY_NONCACHED requires supersection alignment */ - .type = MT_DEVICE, + .type = MT_MEMORY_NONCACHED, }, }; diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 6b6f4c643709..7781e35daec3 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = { .virtual = SRAM_VIRT, .pfn = __phys_to_pfn(0x00010000), .length = SZ_32K, - /* MT_MEMORY_NONCACHED requires supersection alignment */ - .type = MT_DEVICE, + .type = MT_MEMORY_NONCACHED, }, }; diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 40fec315c99a..5e5b0a7831fb 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = { .virtual = SRAM_VIRT, .pfn = __phys_to_pfn(0x00008000), .length = SZ_16K, - /* MT_MEMORY_NONCACHED requires supersection alignment */ - .type = MT_DEVICE, + .type = MT_MEMORY_NONCACHED, }, }; diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index e4a3df1872ac..26e8a9c7f50b 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = { .virtual = SRAM_VIRT, .pfn = __phys_to_pfn(0x00010000), .length = SZ_32K, - /* MT_MEMORY_NONCACHED requires supersection alignment */ - .type = MT_DEVICE, + .type = MT_MEMORY_NONCACHED, }, }; diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h index 3b3e4721ce2e..eb4936ff90ad 100644 --- a/arch/arm/mach-dove/include/mach/io.h +++ b/arch/arm/mach-dove/include/mach/io.h @@ -13,8 +13,8 @@ #define IO_SPACE_LIMIT 0xffffffff -#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ - DOVE_PCIE0_IO_VIRT_BASE)) -#define __mem_pci(a) (a) +#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \ + DOVE_PCIE0_IO_VIRT_BASE)) +#define __mem_pci(a) (a) #endif diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 61cd4d64b985..24498a932ba6 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys) return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys); } +int dma_set_coherent_mask(struct 
device *dev, u64 mask) +{ + if (mask >= SZ_64M - 1) + return 0; + + return -EIO; +} + EXPORT_SYMBOL(ixp4xx_pci_read); EXPORT_SYMBOL(ixp4xx_pci_write); diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h index f91ca6d4fbe8..8138371c406e 100644 --- a/arch/arm/mach-ixp4xx/include/mach/hardware.h +++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h @@ -26,6 +26,8 @@ #define PCIBIOS_MAX_MEM 0x4BFFFFFF #endif +#define ARCH_HAS_DMA_SET_COHERENT_MASK + #define pcibios_assign_all_busses() 1 /* Register locations and bits */ diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h index 93fc2ec95e76..6e924b398919 100644 --- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h +++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h @@ -38,7 +38,7 @@ #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 -#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 +#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000 #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000 diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c index 55e7f00836b7..513ad3102d7c 100644 --- a/arch/arm/mach-kirkwood/pcie.c +++ b/arch/arm/mach-kirkwood/pcie.c @@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp) * IORESOURCE_IO */ pp->res[0].name = "PCIe 0 I/O Space"; - pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; + pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE; pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; pp->res[0].flags = IORESOURCE_IO; @@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp) * IORESOURCE_IO */ pp->res[0].name = "PCIe 1 I/O Space"; - pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; + pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE; pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; pp->res[0].flags = IORESOURCE_IO; diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h index 4f5b0e0ce6cf..1a8a25edb1b4 100644 --- a/arch/arm/mach-mmp/include/mach/system.h +++ b/arch/arm/mach-mmp/include/mach/system.h @@ -9,6 +9,8 @@ #ifndef __ASM_MACH_SYSTEM_H #define __ASM_MACH_SYSTEM_H +#include <mach/cputype.h> + static inline void arch_idle(void) { cpu_do_idle(); @@ -16,6 +18,9 @@ static inline void arch_idle(void) static inline void arch_reset(char mode, const char *cmd) { - cpu_reset(0); + if (cpu_is_pxa168()) + cpu_reset(0xffff0000); + else + cpu_reset(0); } #endif /* __ASM_MACH_SYSTEM_H */ diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c index 50d5939a78f1..58093d9e07be 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c @@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, freqs.cpu = policy->cpu; if (freq_debug) - pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " - "(SDRAM %d Mhz)\n", + pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 
(new_freq_mem / 2000) : (new_freq_mem / 1000)); diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h index 7f64d24cd564..814f1458a06a 100644 --- a/arch/arm/mach-pxa/include/mach/hardware.h +++ b/arch/arm/mach-pxa/include/mach/hardware.h @@ -264,23 +264,35 @@ * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x * == 0x3 for pxa300/pxa310/pxa320 */ +#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) #define __cpu_is_pxa2xx(id) \ ({ \ unsigned int _id = (id) >> 13 & 0x7; \ _id <= 0x2; \ }) +#else +#define __cpu_is_pxa2xx(id) (0) +#endif +#ifdef CONFIG_PXA3xx #define __cpu_is_pxa3xx(id) \ ({ \ unsigned int _id = (id) >> 13 & 0x7; \ _id == 0x3; \ }) +#else +#define __cpu_is_pxa3xx(id) (0) +#endif +#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935) #define __cpu_is_pxa93x(id) \ ({ \ unsigned int _id = (id) >> 4 & 0xfff; \ _id == 0x683 || _id == 0x693; \ }) +#else +#define __cpu_is_pxa93x(id) (0) +#endif #define cpu_is_pxa2xx() \ ({ \ @@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void); #define PCIBIOS_MIN_IO 0 #define PCIBIOS_MIN_MEM 0 #define pcibios_assign_all_busses() 1 +#define ARCH_HAS_DMA_SET_COHERENT_MASK #endif - #endif /* _ASM_ARCH_HARDWARE_H */ diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h index 262691fb97d8..fdca3be47d9b 100644 --- a/arch/arm/mach-pxa/include/mach/io.h +++ b/arch/arm/mach-pxa/include/mach/io.h @@ -6,6 +6,8 @@ #ifndef __ASM_ARM_ARCH_IO_H #define __ASM_ARM_ARCH_IO_H +#include <mach/hardware.h> + #define IO_SPACE_LIMIT 0xffffffff /* diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 77ad6d34ab5b..405b92a29793 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c @@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = { }, }; +static struct i2c_pxa_platform_data palm27x_i2c_power_info = { + .use_pio = 1, +}; + void __init palm27x_pmic_init(void) { i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); - pxa27x_set_i2c_power_info(NULL); + pxa27x_set_i2c_power_info(&palm27x_i2c_power_info); } #endif diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index c9b747cedea8..37d6173bbb66 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c @@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {} #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) static struct pxamci_platform_data vpac270_mci_platform_data = { .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, + .gpio_power = -1, .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, .detect_delay_ms = 200, diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h index 7b1fc984abb6..d5a71abcbaea 100644 --- a/arch/arm/mach-u300/include/mach/gpio.h +++ b/arch/arm/mach-u300/include/mach/gpio.h @@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value); extern int gpio_get_value(unsigned gpio); extern void gpio_set_value(unsigned gpio, int value); +#define gpio_get_value_cansleep gpio_get_value +#define gpio_set_value_cansleep gpio_set_value + /* wrappers to sleep-enable the previous two functions */ static inline unsigned gpio_to_irq(unsigned gpio) { diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 577df6cccb08..efb127022d42 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -227,7 +227,13 @@ static void ct_ca9x4_init(void) 
int i; #ifdef CONFIG_CACHE_L2X0 - l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); + void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); + + /* set RAM latencies to 1 cycle for this core tile. */ + writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); + writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); + + l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); #endif clkdev_add_table(lookups, ARRAY_SIZE(lookups)); diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index d073b64ae87e..724ba3bce72c 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (ai_usermode & UM_SIGNAL) force_sig(SIGBUS, current); - else - set_cr(cr_no_alignment); + else { + /* + * We're about to disable the alignment trap and return to + * user space. But if an interrupt occurs before actually + * reaching user space, then the IRQ vector entry code will + * notice that we were still in kernel space and therefore + * the alignment trap won't be re-enabled in that case as it + * is presumed to be always on from kernel space. + * Let's prevent that race by disabling interrupts here (they + * are disabled on the way back to user space anyway in + * entry-common.S) and disable the alignment trap only if + * there is no work pending for this thread. + */ + raw_local_irq_disable(); + if (!(current_thread_info()->flags & _TIF_WORK_MASK)) + set_cr(cr_no_alignment); + } return 0; } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6e1c4f6a2b3f..6a3a2d0cd6db 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -15,6 +15,7 @@ #include <linux/nodemask.h> #include <linux/memblock.h> #include <linux/sort.h> +#include <linux/fs.h> #include <asm/cputype.h> #include <asm/sections.h> @@ -246,6 +247,9 @@ static struct mem_type mem_types[] = { .domain = DOMAIN_USER, }, [MT_MEMORY] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_USER | L_PTE_EXEC, + .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, @@ -254,6 +258,9 @@ static struct mem_type mem_types[] = { .domain = DOMAIN_KERNEL, }, [MT_MEMORY_NONCACHED] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, + .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, @@ -411,9 +418,12 @@ static void __init build_mem_type_table(void) * Enable CPU-specific coherency if supported. * (Only available on XSC3 at the moment.) */ - if (arch_is_coherent() && cpu_is_xsc3()) + if (arch_is_coherent() && cpu_is_xsc3()) { mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; - + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; + } /* * ARMv6 and above have extended page tables. 
*/ @@ -438,7 +448,9 @@ static void __init build_mem_type_table(void) mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; #endif } @@ -475,6 +487,8 @@ static void __init build_mem_type_table(void) mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; mem_types[MT_ROM].prot_sect |= cp->pmd; switch (cp->pmd) { @@ -498,6 +512,19 @@ static void __init build_mem_type_table(void) } } +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) + return pgprot_noncached(vma_prot); + else if (file->f_flags & O_SYNC) + return pgprot_writecombine(vma_prot); + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); +#endif + #define vectors_base() (vectors_high() ? 0xffff0000 : 0) static void __init *early_alloc(unsigned long sz) diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6a8506d99ee9..7563ff0141bd 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -186,13 +186,14 @@ cpu_v7_name: * It is assumed that: * - cache type register is implemented */ -__v7_setup: +__v7_ca9mp_setup: #ifdef CONFIG_SMP mrc p15, 0, r0, c1, c0, 1 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting #endif +__v7_setup: adr r12, __v7_setup_stack @ the local stack stmia r12, {r0-r5, r7, r9, r11, lr} bl v7_flush_dcache_all @@ -201,11 +202,16 @@ __v7_setup: mrc p15, 0, r0, c0, c0, 0 @ read main ID register and r10, r0, #0xff000000 @ ARM? 
teq r10, #0x41000000 - bne 2f + bne 3f and r5, r0, #0x00f00000 @ variant and r6, r0, #0x0000000f @ revision - orr r0, r6, r5, lsr #20-4 @ combine variant and revision + orr r6, r6, r5, lsr #20-4 @ combine variant and revision + ubfx r0, r0, #4, #12 @ primary part number + /* Cortex-A8 Errata */ + ldr r10, =0x00000c08 @ Cortex-A8 primary part number + teq r0, r10 + bne 2f #ifdef CONFIG_ARM_ERRATA_430973 teq r5, #0x00100000 @ only present in r1p* mrceq p15, 0, r10, c1, c0, 1 @ read aux control register @@ -213,21 +219,42 @@ __v7_setup: mcreq p15, 0, r10, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_458693 - teq r0, #0x20 @ only present in r2p0 + teq r6, #0x20 @ only present in r2p0 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register orreq r10, r10, #(1 << 5) @ set L1NEON to 1 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_460075 - teq r0, #0x20 @ only present in r2p0 + teq r6, #0x20 @ only present in r2p0 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register tsteq r10, #1 << 22 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register #endif + b 3f + + /* Cortex-A9 Errata */ +2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number + teq r0, r10 + bne 3f +#ifdef CONFIG_ARM_ERRATA_742230 + cmp r6, #0x22 @ only present up to r2p2 + mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register + orrle r10, r10, #1 << 4 @ set bit #4 + mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register +#endif +#ifdef CONFIG_ARM_ERRATA_742231 + teq r6, #0x20 @ present in r2p0 + teqne r6, #0x21 @ present in r2p1 + teqne r6, #0x22 @ present in r2p2 + mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register + orreq r10, r10, #1 << 12 @ set bit #12 + orreq r10, r10, #1 << 22 @ set bit #22 + mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register +#endif -2: mov r10, #0 +3: mov r10, #0 #ifdef HARVARD_CACHE mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate #endif @@ -323,6 +350,29 @@ cpu_elf_name: .section ".proc.info.init", #alloc, #execinstr + .type __v7_ca9mp_proc_info, #object +__v7_ca9mp_proc_info: + .long 0x410fc090 @ Required ID value + .long 0xff0ffff0 @ Mask for ID + .long PMD_TYPE_SECT | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | \ + PMD_FLAGS + .long PMD_TYPE_SECT | \ + PMD_SECT_XN | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ + b __v7_ca9mp_setup + .long cpu_arch_name + .long cpu_elf_name + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP + .long cpu_v7_name + .long v7_processor_functions + .long v7wbi_tlb_fns + .long v6_user_fns + .long v7_cache_fns + .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info + /* * Match any ARMv7 processor core. 
*/ diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c index ea3ca86c5283..aedf9c1d645e 100644 --- a/arch/arm/plat-nomadik/timer.c +++ b/arch/arm/plat-nomadik/timer.c @@ -1,5 +1,5 @@ /* - * linux/arch/arm/mach-nomadik/timer.c + * linux/arch/arm/plat-nomadik/timer.c * * Copyright (C) 2008 STMicroelectronics * Copyright (C) 2010 Alessandro Rubini @@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode, cr = readl(mtu_base + MTU_CR(1)); writel(0, mtu_base + MTU_LR(1)); writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); - writel(0x2, mtu_base + MTU_IMSC); + writel(1 << 1, mtu_base + MTU_IMSC); break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: @@ -131,25 +131,23 @@ void __init nmdk_timer_init(void) { unsigned long rate; struct clk *clk0; - struct clk *clk1; - u32 cr; + u32 cr = MTU_CRn_32BITS; clk0 = clk_get_sys("mtu0", NULL); BUG_ON(IS_ERR(clk0)); - clk1 = clk_get_sys("mtu1", NULL); - BUG_ON(IS_ERR(clk1)); - clk_enable(clk0); - clk_enable(clk1); /* - * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: - * use a divide-by-16 counter if it's more than 16MHz + * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz + * for ux500. + * Use a divide-by-16 counter if the tick rate is more than 32MHz. + * At 32 MHz, the timer (with 32 bit counter) can be programmed + * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer + * with 16 gives too low timer resolution. */ - cr = MTU_CRn_32BITS;; rate = clk_get_rate(clk0); - if (rate > 16 << 20) { + if (rate > 32000000) { rate /= 16; cr |= MTU_CRn_PRESCALE_16; } else { @@ -170,15 +168,8 @@ void __init nmdk_timer_init(void) pr_err("timer: failed to initialize clock source %s\n", nmdk_clksrc.name); - /* Timer 1 is used for events, fix according to rate */ - cr = MTU_CRn_32BITS; - rate = clk_get_rate(clk1); - if (rate > 16 << 20) { - rate /= 16; - cr |= MTU_CRn_PRESCALE_16; - } else { - cr |= MTU_CRn_PRESCALE_1; - } + /* Timer 1 is used for events */ + clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 226b2e858d6c..10b3b4c63372 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -220,20 +220,7 @@ void __init omap_map_sram(void) if (omap_sram_size == 0) return; - if (cpu_is_omap24xx()) { - omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA; - - base = OMAP2_SRAM_PA; - base = ROUND_DOWN(base, PAGE_SIZE); - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); - } - if (cpu_is_omap34xx()) { - omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA; - base = OMAP3_SRAM_PA; - base = ROUND_DOWN(base, PAGE_SIZE); - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); - /* * SRAM must be marked as non-cached on OMAP3 since the * CORE DPLL M2 divider change code (in SRAM) runs with the @@ -244,13 +231,11 @@ void __init omap_map_sram(void) omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; } - if (cpu_is_omap44xx()) { - omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; - base = OMAP4_SRAM_PA; - base = ROUND_DOWN(base, PAGE_SIZE); - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); - } - omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */ + omap_sram_io_desc[0].virtual = omap_sram_base; + base = omap_sram_start; + base = ROUND_DOWN(base, PAGE_SIZE); + omap_sram_io_desc[0].pfn = __phys_to_pfn(base); + omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h index 9c1acb2b1a92..b2eeb0de1c8d 100644 --- a/arch/m32r/include/asm/signal.h +++ b/arch/m32r/include/asm/signal.h @@ -157,7 +157,6 @@ typedef struct sigaltstack { #undef __HAVE_ARCH_SIG_BITOPS struct pt_regs; -extern int do_signal(struct pt_regs *regs, sigset_t *oldset); #define ptrace_signal_deliver(regs, cookie) do { } while (0) diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h index 76125777483c..c70545689da8 100644 --- a/arch/m32r/include/asm/unistd.h +++ b/arch/m32r/include/asm/unistd.h @@ -351,6 +351,7 @@ #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_RT_SIGSUSPEND #define __IGNORE_lchown #define __IGNORE_setuid diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index 403869833b98..225412bc227e 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S @@ -235,10 +235,9 @@ work_resched: work_notifysig: ; deal with pending signals and ; notify-resume requests mv r0, sp ; arg1 : struct pt_regs *regs - ldi r1, #0 ; arg2 : sigset_t *oldset - mv r2, r9 ; arg3 : __u32 thread_info_flags + mv r1, r9 ; arg2 : __u32 thread_info_flags bl do_notify_resume - bra restore_all + bra resume_userspace ; perform syscall exit tracing ALIGN diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index e555091eb97c..0021ade4cba8 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c @@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child) if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) != sizeof(insn)) - break; + return -EIO; compute_next_pc(insn, pc, &next_pc, child); if (next_pc & 0x80000000) - break; + return -EIO; if (embed_debug_trap(child, next_pc)) - break; + return -EIO; invalidate_cache(); + return 0; } void user_disable_single_step(struct task_struct *child) diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 144b0f124fc7..7bbe38645ed5 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c @@ -28,37 +28,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - -int do_signal(struct pt_regs *, sigset_t *); - -asmlinkage int -sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, - unsigned long r2, unsigned long r3, unsigned long r4, - unsigned long r5, unsigned long r6, struct pt_regs *regs) -{ - sigset_t newset; - - /* XXX: Don't preclude handling different sized sigset_t's. 
*/ - if (sigsetsize != sizeof(sigset_t)) - return -EINVAL; - - if (copy_from_user(&newset, unewset, sizeof(newset))) - return -EFAULT; - sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); - - spin_lock_irq(¤t->sighand->siglock); - current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - current->state = TASK_INTERRUPTIBLE; - schedule(); - set_thread_flag(TIF_RESTORE_SIGMASK); - return -ERESTARTNOHAND; -} - asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r2, unsigned long r3, unsigned long r4, @@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) return (void __user *)((sp - frame_size) & -8ul); } -static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, +static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; @@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, current->comm, current->pid, frame, regs->pc); #endif - return; + return 0; give_sigsegv: force_sigsegv(sig, current); + return -EFAULT; +} + +static int prev_insn(struct pt_regs *regs) +{ + u16 inst; + if (get_user(&inst, (u16 __user *)(regs->bpc - 2))) + return -EFAULT; + if ((inst & 0xfff0) == 0x10f0) /* trap ? */ + regs->bpc -= 2; + else + regs->bpc -= 4; + regs->syscall_nr = -1; + return 0; } /* * OK, we're invoking a handler */ -static void +static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { - unsigned short inst; - /* Are we from a system call? */ if (regs->syscall_nr >= 0) { /* If so, check system call restarting.. */ @@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, /* fallthrough */ case -ERESTARTNOINTR: regs->r0 = regs->orig_r0; - inst = *(unsigned short *)(regs->bpc - 2); - if ((inst & 0xfff0) == 0x10f0) /* trap ? */ - regs->bpc -= 2; - else - regs->bpc -= 4; + if (prev_insn(regs) < 0) + return -EFAULT; } } /* Set up the stack frame */ - setup_rt_frame(sig, ka, info, oldset, regs); + if (setup_rt_frame(sig, ka, info, oldset, regs)) + return -EFAULT; spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); @@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigaddset(¤t->blocked,sig); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); + return 0; } /* @@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -int do_signal(struct pt_regs *regs, sigset_t *oldset) +static void do_signal(struct pt_regs *regs) { siginfo_t info; int signr; struct k_sigaction ka; - unsigned short inst; + sigset_t *oldset; /* * We want the common case to go fast, which @@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) * if so. */ if (!user_mode(regs)) - return 1; + return; if (try_to_freeze()) goto no_signal; - if (!oldset) + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else oldset = ¤t->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); @@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) */ /* Whee! Actually deliver the signal. 
*/ - handle_signal(signr, &ka, &info, oldset, regs); - return 1; + if (handle_signal(signr, &ka, &info, oldset, regs) == 0) + clear_thread_flag(TIF_RESTORE_SIGMASK); + + return; } no_signal: @@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) { regs->r0 = regs->orig_r0; - inst = *(unsigned short *)(regs->bpc - 2); - if ((inst & 0xfff0) == 0x10f0) /* trap ? */ - regs->bpc -= 2; - else - regs->bpc -= 4; - } - if (regs->r0 == -ERESTART_RESTARTBLOCK){ + prev_insn(regs); + } else if (regs->r0 == -ERESTART_RESTARTBLOCK){ regs->r0 = regs->orig_r0; regs->r7 = __NR_restart_syscall; - inst = *(unsigned short *)(regs->bpc - 2); - if ((inst & 0xfff0) == 0x10f0) /* trap ? */ - regs->bpc -= 2; - else - regs->bpc -= 4; + prev_insn(regs); } } - return 0; + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } } /* * notification of userspace execution resumption * - triggered by current->work.notify_resume */ -void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, - __u32 thread_info_flags) +void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags) { /* Pending single-step? */ if (thread_info_flags & _TIF_SINGLESTEP) @@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) - do_signal(regs,oldset); + do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 444b9f918fdf..7c2a2f7f8dc1 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration" config MN10300 def_bool y select HAVE_OPROFILE - select HAVE_ARCH_TRACEHOOK config AM33 def_bool y diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug index ff80e86b9bd2..ce83c74b3fd7 100644 --- a/arch/mn10300/Kconfig.debug +++ b/arch/mn10300/Kconfig.debug @@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT choice prompt "GDB stub port" - default GDBSTUB_TTYSM0 + default GDBSTUB_ON_TTYSM0 depends on GDBSTUB help Select the serial port used for GDB-stub. diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h index f49ac49e09ad..3f50e9661076 100644 --- a/arch/mn10300/include/asm/bitops.h +++ b/arch/mn10300/include/asm/bitops.h @@ -229,9 +229,9 @@ int ffs(int x) #include <asm-generic/bitops/hweight.h> #define ext2_set_bit_atomic(lock, nr, addr) \ - test_and_set_bit((nr) ^ 0x18, (addr)) + test_and_set_bit((nr), (addr)) #define ext2_clear_bit_atomic(lock, nr, addr) \ - test_and_clear_bit((nr) ^ 0x18, (addr)) + test_and_clear_bit((nr), (addr)) #include <asm-generic/bitops/ext2-non-atomic.h> #include <asm-generic/bitops/minix-le.h> diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h index 7e891fce2370..1865d72a86ff 100644 --- a/arch/mn10300/include/asm/signal.h +++ b/arch/mn10300/include/asm/signal.h @@ -78,7 +78,7 @@ typedef unsigned long sigset_t; /* These should not be considered constants from userland. 
*/ #define SIGRTMIN 32 -#define SIGRTMAX (_NSIG-1) +#define SIGRTMAX _NSIG /* * SA_FLAGS values: diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index 717db14c2cc3..d4de05ab7864 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c @@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig, old_sigset_t mask; if (verify_area(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || + __get_user(mask, &act->sa_mask)) return -EFAULT; - __get_user(new_ka.sa.sa_flags, &act->sa_flags); - __get_user(mask, &act->sa_mask); siginitset(&new_ka.sa.sa_mask, mask); } @@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig, if (!ret && oact) { if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; - __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } return ret; @@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs, { unsigned int err = 0; + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + if (is_using_fpu(current)) fpu_kill_state(current); @@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, regs->d0 = sig; regs->d1 = (unsigned long) &frame->sc; - set_fs(USER_DS); - /* the tracer may want to single-step inside the handler */ if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); @@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, return 0; give_sigsegv: - force_sig(SIGSEGV, current); + force_sigsegv(sig, current); return -EFAULT; } @@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->d0 = sig; regs->d1 = (long) &frame->info; - set_fs(USER_DS); - /* the tracer may want to single-step inside the handler */ if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); @@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, return 0; give_sigsegv: - force_sig(SIGSEGV, current); + force_sigsegv(sig, current); return -EFAULT; } +static inline void stepback(struct pt_regs *regs) +{ + regs->pc -= 2; + regs->orig_d0 = -1; +} + /* * handle the actual delivery of a signal to userspace */ @@ -459,7 +464,7 @@ static int handle_signal(int sig, /* fallthrough */ case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; - regs->pc -= 2; + stepback(regs); } } @@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs) case -ERESTARTSYS: case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; - regs->pc -= 2; + stepback(regs); break; case -ERESTART_RESTARTBLOCK: regs->d0 = __NR_restart_syscall; - regs->pc -= 2; + stepback(regs); break; } } diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 7109f5b1baa8..2300426e531a 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -138,6 +138,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) ti->local_flags &= ~_TLF_RESTORE_SIGMASK; sigprocmask(SIG_SETMASK, 
¤t->saved_sigmask, NULL); } + regs->trap = 0; return 0; /* no signals delivered */ } @@ -164,6 +165,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); } + regs->trap = 0; if (ret) { spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked, ¤t->blocked, diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 266610119f66..b96a3a010c26 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -511,6 +511,7 @@ static long restore_user_regs(struct pt_regs *regs, if (!sig) save_r2 = (unsigned int)regs->gpr[2]; err = restore_general_regs(regs, sr); + regs->trap = 0; err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); if (!sig) regs->gpr[2] = (unsigned long) save_r2; @@ -884,7 +885,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, regs->nip = (unsigned long) ka->sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; - regs->trap = 0; return 1; badframe: @@ -1228,7 +1228,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, regs->nip = (unsigned long) ka->sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; - regs->trap = 0; return 1; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 2fe6fc64b614..27c4a4584f80 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); /* skip SOFTE */ - err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); + regs->trap = 0; err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 357ced3c33ff..6318e622cfb0 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1038,6 +1038,7 @@ static int __hw_perf_event_init(struct perf_event *event) if (atomic_read(&nmi_active) < 0) return -ENODEV; + pmap = NULL; if (attr->type == PERF_TYPE_HARDWARE) { if (attr->config >= sparc_pmu->max_events) return -EINVAL; @@ -1046,9 +1047,18 @@ static int __hw_perf_event_init(struct perf_event *event) pmap = sparc_map_cache_event(attr->config); if (IS_ERR(pmap)) return PTR_ERR(pmap); - } else + } else if (attr->type != PERF_TYPE_RAW) return -EOPNOTSUPP; + if (pmap) { + hwc->event_base = perf_event_encode(pmap); + } else { + /* User gives us "(encoding << 16) | pic_mask" for + * PERF_TYPE_RAW events. + */ + hwc->event_base = attr->config; + } + /* We save the enable bits in the config_base. 
*/ hwc->config_base = sparc_pmu->irq_bit; if (!attr->exclude_user) @@ -1058,8 +1068,6 @@ static int __hw_perf_event_init(struct perf_event *event) if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; - hwc->event_base = perf_event_encode(pmap); - n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index ea22cd373c64..75fad425e249 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) return err; } -static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, - int signo, sigset_t *oldset) +/* The I-cache flush instruction only works in the primary ASI, which + * right now is the nucleus, aka. kernel space. + * + * Therefore we have to kick the instructions out using the kernel + * side linear mapping of the physical address backing the user + * instructions. + */ +static void flush_signal_insns(unsigned long address) +{ + unsigned long pstate, paddr; + pte_t *ptep, pte; + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + + /* Commit all stores of the instructions we are about to flush. */ + wmb(); + + /* Disable cross-call reception. In this way even a very wide + * munmap() on another cpu can't tear down the page table + * hierarchy from underneath us, since that can't complete + * until the IPI tlb flush returns. + */ + + __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); + __asm__ __volatile__("wrpr %0, %1, %%pstate" + : : "r" (pstate), "i" (PSTATE_IE)); + + pgdp = pgd_offset(current->mm, address); + if (pgd_none(*pgdp)) + goto out_irqs_on; + pudp = pud_offset(pgdp, address); + if (pud_none(*pudp)) + goto out_irqs_on; + pmdp = pmd_offset(pudp, address); + if (pmd_none(*pmdp)) + goto out_irqs_on; + + ptep = pte_offset_map(pmdp, address); + pte = *ptep; + if (!pte_present(pte)) + goto out_unmap; + + paddr = (unsigned long) page_address(pte_page(pte)); + + __asm__ __volatile__("flush %0 + %1" + : /* no outputs */ + : "r" (paddr), + "r" (address & (PAGE_SIZE - 1)) + : "memory"); + +out_unmap: + pte_unmap(ptep); +out_irqs_on: + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); + +} + +static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, + int signo, sigset_t *oldset) { struct signal_frame32 __user *sf; int sigframe_size; @@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, if (ka->ka_restorer) { regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; } else { - /* Flush instruction space. 
*/ unsigned long address = ((unsigned long)&(sf->insns[0])); - pgd_t *pgdp = pgd_offset(current->mm, address); - pud_t *pudp = pud_offset(pgdp, address); - pmd_t *pmdp = pmd_offset(pudp, address); - pte_t *ptep; - pte_t pte; regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); @@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, if (err) goto sigsegv; - preempt_disable(); - ptep = pte_offset_map(pmdp, address); - pte = *ptep; - if (pte_present(pte)) { - unsigned long page = (unsigned long) - page_address(pte_page(pte)); - - wmb(); - __asm__ __volatile__("flush %0 + %1" - : /* no outputs */ - : "r" (page), - "r" (address & (PAGE_SIZE - 1)) - : "memory"); - } - pte_unmap(ptep); - preempt_enable(); + flush_signal_insns(address); } - return; + return 0; sigill: do_exit(SIGILL); + return -EINVAL; + sigsegv: force_sigsegv(signo, current); + return -EFAULT; } -static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, - unsigned long signr, sigset_t *oldset, - siginfo_t *info) +static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, + unsigned long signr, sigset_t *oldset, + siginfo_t *info) { struct rt_signal_frame32 __user *sf; int sigframe_size; @@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, if (ka->ka_restorer) regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; else { - /* Flush instruction space. */ unsigned long address = ((unsigned long)&(sf->insns[0])); - pgd_t *pgdp = pgd_offset(current->mm, address); - pud_t *pudp = pud_offset(pgdp, address); - pmd_t *pmdp = pmd_offset(pudp, address); - pte_t *ptep; regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); @@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, if (err) goto sigsegv; - preempt_disable(); - ptep = pte_offset_map(pmdp, address); - if (pte_present(*ptep)) { - unsigned long page = (unsigned long) - page_address(pte_page(*ptep)); - - wmb(); - __asm__ __volatile__("flush %0 + %1" - : /* no outputs */ - : "r" (page), - "r" (address & (PAGE_SIZE - 1)) - : "memory"); - } - pte_unmap(ptep); - preempt_enable(); + flush_signal_insns(address); } - return; + return 0; sigill: do_exit(SIGILL); + return -EINVAL; + sigsegv: force_sigsegv(signr, current); + return -EFAULT; } -static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, - siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs) +static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, + siginfo_t *info, + sigset_t *oldset, struct pt_regs *regs) { + int err; + if (ka->sa.sa_flags & SA_SIGINFO) - setup_rt_frame32(ka, regs, signr, oldset, info); + err = setup_rt_frame32(ka, regs, signr, oldset, info); else - setup_frame32(ka, regs, signr, oldset); + err = setup_frame32(ka, regs, signr, oldset); + + if (err) + return err; spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); @@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, sigaddset(¤t->blocked,signr); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); + + tracehook_signal_handler(signr, info, ka, regs, 0); + + return 0; } static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, @@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs, if (signr > 0) { if (restart_syscall) syscall_restart32(orig_i0, regs, &ka.sa); - handle_signal32(signr, &ka, &info, oldset, 
regs); - - /* A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. - */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - - tracehook_signal_handler(signr, &info, &ka, regs, 0); + if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) { + /* A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TS_RESTORE_SIGMASK flag. + */ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; + } return; } if (restart_syscall && @@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs, regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; regs->tnpc -= 4; + pt_regs_clear_syscall(regs); } if (restart_syscall && regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; regs->tnpc -= 4; + pt_regs_clear_syscall(regs); } /* If there's no signal to deliver, we just put the saved sigmask diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 9882df92ba0a..5e5c5fd03783 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) return err; } -static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs, - int signo, sigset_t *oldset) +static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, + int signo, sigset_t *oldset) { struct signal_frame __user *sf; int sigframe_size, err; @@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs, /* Flush instruction space. */ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); } - return; + return 0; sigill_and_return: do_exit(SIGILL); + return -EINVAL; + sigsegv: force_sigsegv(signo, current); + return -EFAULT; } -static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, - int signo, sigset_t *oldset, siginfo_t *info) +static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, + int signo, sigset_t *oldset, siginfo_t *info) { struct rt_signal_frame __user *sf; int sigframe_size; @@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, /* Flush instruction space. 
*/ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); } - return; + return 0; sigill: do_exit(SIGILL); + return -EINVAL; + sigsegv: force_sigsegv(signo, current); + return -EFAULT; } -static inline void +static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + int err; + if (ka->sa.sa_flags & SA_SIGINFO) - setup_rt_frame(ka, regs, signr, oldset, info); + err = setup_rt_frame(ka, regs, signr, oldset, info); else - setup_frame(ka, regs, signr, oldset); + err = setup_frame(ka, regs, signr, oldset); + + if (err) + return err; spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); @@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka, sigaddset(&current->blocked, signr); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); + + tracehook_signal_handler(signr, info, ka, regs, 0); + + return 0; } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, @@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) if (signr > 0) { if (restart_syscall) syscall_restart(orig_i0, regs, &ka.sa); - handle_signal(signr, &ka, &info, oldset, regs); - - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - tracehook_signal_handler(signr, &info, &ka, regs, 0); + if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { + /* a signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } return; } if (restart_syscall && @@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->u_regs[UREG_I0] = orig_i0; regs->pc -= 4; regs->npc -= 4; + pt_regs_clear_syscall(regs); } if (restart_syscall && regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->pc -= 4; regs->npc -= 4; + pt_regs_clear_syscall(regs); } /* if there's no signal to deliver, we just put the saved sigmask diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 9fa48c30037e..006fe4515886 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs * return (void __user *) sp; } -static inline void +static inline int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signo, sigset_t *oldset, siginfo_t *info) { @@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, } /* 4.
return to kernel instructions */ regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; - return; + return 0; sigill: do_exit(SIGILL); + return -EINVAL; + sigsegv: force_sigsegv(signo, current); + return -EFAULT; } -static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, - siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs) +static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, + siginfo_t *info, + sigset_t *oldset, struct pt_regs *regs) { - setup_rt_frame(ka, regs, signr, oldset, - (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); + int err; + + err = setup_rt_frame(ka, regs, signr, oldset, + (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); + if (err) + return err; spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) sigaddset(&current->blocked,signr); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); + + tracehook_signal_handler(signr, info, ka, regs, 0); + + return 0; } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, @@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) if (signr > 0) { if (restart_syscall) syscall_restart(orig_i0, regs, &ka.sa); - handle_signal(signr, &ka, &info, oldset, regs); - - /* A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. - */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - - tracehook_signal_handler(signr, &info, &ka, regs, 0); + if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { + /* A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TS_RESTORE_SIGMASK flag.
+ */ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; + } return; } if (restart_syscall && @@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; regs->tnpc -= 4; + pt_regs_clear_syscall(regs); } if (restart_syscall && regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; regs->tnpc -= 4; + pt_regs_clear_syscall(regs); } /* If there's no signal to deliver, we just put the saved sigmask diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 84f296ca9e63..8f58bdff20d7 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -1506,13 +1506,6 @@ handle_ill: } STD_ENDPROC(handle_ill) - .pushsection .rodata, "a" - .align 8 -bpt_code: - bpt - ENDPROC(bpt_code) - .popsection - /* Various stub interrupt handlers and syscall handlers */ STD_ENTRY_LOCAL(_kernel_double_fault) diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index cd145eda3579..49b5e1eb3262 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c @@ -62,7 +62,7 @@ static long execve1(const char *file, return error; } -long um_execve(const char *file, char __user *__user *argv, char __user *__user *env) +long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env) { long err; @@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user return err; } -long sys_execve(const char __user *file, char __user *__user *argv, - char __user *__user *env) +long sys_execve(const char __user *file, const char __user *const __user *argv, + const char __user *const __user *env) { long error; char *filename; diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h index 1303a105fe91..5bf97db24a04 100644 --- a/arch/um/kernel/internal.h +++ b/arch/um/kernel/internal.h @@ -1 +1 @@ -extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env); +extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env); diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index 5ddb246626db..f958cb876ee3 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c @@ -60,8 +60,8 @@ int kernel_execve(const char *filename, fs = get_fs(); set_fs(KERNEL_DS); - ret = um_execve(filename, (char __user *__user *)argv, - (char __user *__user *) envp); + ret = um_execve(filename, (const char __user *const __user *)argv, + (const char __user *const __user *) envp); set_fs(fs); return ret; diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c index 030f4b93e255..5df2869c874b 100644 --- a/arch/x86/boot/early_serial_console.c +++ b/arch/x86/boot/early_serial_console.c @@ -58,7 +58,19 @@ static void parse_earlyprintk(void) if (arg[pos] == ',') pos++; - if (!strncmp(arg, "ttyS", 4)) { + /* + * make sure we have + * "serial,0x3f8,115200" + * "serial,ttyS0,115200" + * "ttyS0,115200" + */ + if (pos == 7 && !strncmp(arg + pos, "0x", 2)) { + port = simple_strtoull(arg + pos, &e, 16); + if (port == 0 || arg + pos == e) + port = DEFAULT_SERIAL_PORT; + else + pos = e - arg; + } else if (!strncmp(arg + pos, "ttyS", 4)) { static const int bases[] = { 0x3f8, 0x2f8 }; int idx = 0; diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index d2544f1d705d..cb030374b90a 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h 
+++ b/arch/x86/include/asm/amd_iommu_proto.h @@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { } #endif /* !CONFIG_AMD_IOMMU_STATS */ +static inline bool is_rd890_iommu(struct pci_dev *pdev) +{ + return (pdev->vendor == PCI_VENDOR_ID_ATI) && + (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); +} + #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 7014e88bc779..08616180deaf 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -368,6 +368,9 @@ struct amd_iommu { /* capabilities of that IOMMU read from ACPI */ u32 cap; + /* flags read from acpi table */ + u8 acpi_flags; + /* * Capability pointer. There could be more than one IOMMU per PCI * device function if there are more than one AMD IOMMU capability @@ -411,6 +414,15 @@ struct amd_iommu { /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; + + /* + * This array is required to work around a potential BIOS bug. + * The BIOS may miss to restore parts of the PCI configuration + * space when the system resumes from S3. The result is that the + * IOMMU does not execute commands anymore which leads to system + * failure. + */ + u32 cache_cfg[4]; }; /* diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 545776efeb16..bafd80defa43 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) { return ((1UL << (nr % BITS_PER_LONG)) & - (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; + (addr[nr / BITS_PER_LONG])) != 0; } static inline int variable_test_bit(int nr, volatile const unsigned long *addr) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index c6fbb7b430d1..3f76523589af 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -168,6 +168,7 @@ #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index 528a11e8d3e3..824ca07860d0 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -20,7 +20,7 @@ struct arch_hw_breakpoint { #include <linux/list.h> /* Available HW breakpoint length encodings */ -#define X86_BREAKPOINT_LEN_X 0x00 +#define X86_BREAKPOINT_LEN_X 0x40 #define X86_BREAKPOINT_LEN_1 0x40 #define X86_BREAKPOINT_LEN_2 0x44 #define X86_BREAKPOINT_LEN_4 0x4c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0925676266bd..fedf32a8c3ec 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_tsc.o = -pg CFLAGS_REMOVE_rtc.o = -pg CFLAGS_REMOVE_paravirt-spinlocks.o = -pg +CFLAGS_REMOVE_pvclock.o = -pg +CFLAGS_REMOVE_kvmclock.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg endif diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index fa044e1e30a2..679b6450382b 100644 --- 
a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, size_t size, int dir) { + dma_addr_t flush_addr; dma_addr_t i, start; unsigned int pages; @@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, (dma_addr + size > dma_dom->aperture_size)) return; + flush_addr = dma_addr; pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); dma_addr &= PAGE_MASK; start = dma_addr; @@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, dma_ops_free_addresses(dma_dom, dma_addr, pages); if (amd_iommu_unmap_flush || dma_dom->need_flush) { - iommu_flush_pages(&dma_dom->domain, dma_addr, size); + iommu_flush_pages(&dma_dom->domain, flush_addr, size); dma_dom->need_flush = false; } } diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 3cc63e2b8dd4..5a170cbbbed8 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) iommu->last_device = calc_devid(MMIO_GET_BUS(range), MMIO_GET_LD(range)); iommu->evt_msi_num = MMIO_MSI_NUM(misc); + + if (is_rd890_iommu(iommu->dev)) { + pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); + pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); + pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); + pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); + } } /* @@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu, struct ivhd_entry *e; /* - * First set the recommended feature enable bits from ACPI - * into the IOMMU control registers + * First save the recommended feature enable bits from ACPI */ - h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); - - h->flags & IVHD_FLAG_PASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_PASSPW_EN); - - h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); - - h->flags & IVHD_FLAG_ISOC_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_ISOC_EN) : - iommu_feature_disable(iommu, CONTROL_ISOC_EN); - - /* - * make IOMMU memory accesses cache coherent - */ - iommu_feature_enable(iommu, CONTROL_COHERENT_EN); + iommu->acpi_flags = h->flags; /* * Done. Now parse the device entries @@ -1116,6 +1103,40 @@ static void init_device_table(void) } } +static void iommu_init_flags(struct amd_iommu *iommu) +{ + iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : + iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); + + iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : + iommu_feature_disable(iommu, CONTROL_PASSPW_EN); + + iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : + iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); + + iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 
+ iommu_feature_enable(iommu, CONTROL_ISOC_EN) : + iommu_feature_disable(iommu, CONTROL_ISOC_EN); + + /* + * make IOMMU memory accesses cache coherent + */ + iommu_feature_enable(iommu, CONTROL_COHERENT_EN); +} + +static void iommu_apply_quirks(struct amd_iommu *iommu) +{ + if (is_rd890_iommu(iommu->dev)) { + pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); + pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); + pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); + pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); + } +} + /* * This function finally enables all IOMMUs found in the system after * they have been initialized @@ -1126,6 +1147,8 @@ static void enable_iommus(void) for_each_iommu(iommu) { iommu_disable(iommu); + iommu_apply_quirks(iommu); + iommu_init_flags(iommu); iommu_set_device_table(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index bc0fa1ae5233..7556eb7a1a47 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc, old_cfg = old_desc->chip_data; - memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); + cfg->vector = old_cfg->vector; + cfg->move_in_progress = old_cfg->move_in_progress; + cpumask_copy(cfg->domain, old_cfg->domain); + cpumask_copy(cfg->old_domain, old_cfg->old_domain); init_copy_irq_2_pin(old_cfg, cfg, node); } -static void free_irq_cfg(struct irq_cfg *old_cfg) +static void free_irq_cfg(struct irq_cfg *cfg) { - kfree(old_cfg); + free_cpumask_var(cfg->domain); + free_cpumask_var(cfg->old_domain); + kfree(cfg); } void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 787b3c7c6625..15c671385f59 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c) } } -static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) +void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) { u32 tfms, xlvl; u32 ebx; diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 3624e8a0f71b..f668bb1f7d43 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], *const __x86_cpu_dev_end[]; extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); +extern void get_cpu_cap(struct cpuinfo_x86 *c); #endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3a683ea5267e..695f17731e23 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); c->cpuid_level = cpuid_eax(0); + get_cpu_cap(c); } } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3efdf2870a35..03a5b0385ad6 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -102,6 +102,7 @@ struct cpu_hw_events { */ struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; int enabled; int n_events; @@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event) x86_perf_event_set_period(event); 
cpuc->events[idx] = event; __set_bit(idx, cpuc->active_mask); + __set_bit(idx, cpuc->running); x86_pmu.enable(event); perf_event_update_userpage(event); @@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < x86_pmu.num_counters; idx++) { - if (!test_bit(idx, cpuc->active_mask)) + if (!test_bit(idx, cpuc->active_mask)) { + /* + * Though we deactivated the counter some cpus + * might still deliver spurious interrupts still + * in flight. Catch them: + */ + if (__test_and_clear_bit(idx, cpuc->running)) + handled++; continue; + } event = cpuc->events[idx]; hwc = &event->hw; diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 34b4dad6f0b8..d49079515122 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) const struct cpuid_bit *cb; static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { + { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 }, { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 410fdb3f1939..7494999141b3 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -506,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) { unsigned int irq; - irq = create_irq(); + irq = create_irq_nr(0, -1); if (!irq) return -EINVAL; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index a474ec37c32f..ff15c9dcc25d 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) int arch_bp_generic_fields(int x86_len, int x86_type, int *gen_len, int *gen_type) { - /* Len */ - switch (x86_len) { - case X86_BREAKPOINT_LEN_X: + /* Type */ + switch (x86_type) { + case X86_BREAKPOINT_EXECUTE: + if (x86_len != X86_BREAKPOINT_LEN_X) + return -EINVAL; + + *gen_type = HW_BREAKPOINT_X; *gen_len = sizeof(long); + return 0; + case X86_BREAKPOINT_WRITE: + *gen_type = HW_BREAKPOINT_W; break; + case X86_BREAKPOINT_RW: + *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (x86_len) { case X86_BREAKPOINT_LEN_1: *gen_len = HW_BREAKPOINT_LEN_1; break; @@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type, return -EINVAL; } - /* Type */ - switch (x86_type) { - case X86_BREAKPOINT_EXECUTE: - *gen_type = HW_BREAKPOINT_X; - break; - case X86_BREAKPOINT_WRITE: - *gen_type = HW_BREAKPOINT_W; - break; - case X86_BREAKPOINT_RW: - *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; - break; - default: - return -EINVAL; - } - return 0; } @@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) ret = -EINVAL; switch (info->len) { - case X86_BREAKPOINT_LEN_X: - align = sizeof(long) -1; - break; case X86_BREAKPOINT_LEN_1: align = 0; break; diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 9257510b4836..9d5f55848455 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc) } /* - * For a single GDT entry which changes, we do the lazy thing: alter our GDT, - * then tell the Host to reload the entire thing. This operation is so rare - * that this naive implementation is reasonable. 
+ * For a single GDT entry which changes, we simply change our copy and + * then tell the host about it. */ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, const void *desc, int type) @@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, } /* - * OK, I lied. There are three "thread local storage" GDT entries which change + * There are three "thread local storage" GDT entries which change * on every context switch (these three entries are how glibc implements - * __thread variables). So we have a hypercall specifically for this case. + * __thread variables). As an optimization, we have a hypercall + * specifically for this case. + * + * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall + * which took a range of entries? */ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) { diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index cfe4faabb0f6..009b819f48d0 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -671,7 +671,9 @@ static int __init ppro_init(char **cpu_type) case 14: *cpu_type = "i386/core"; break; - case 15: case 23: + case 0x0f: + case 0x16: + case 0x17: *cpu_type = "i386/core_2"; break; case 0x1a: |
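
The sparc hunks in signal32.c, signal_32.c and signal_64.c above all apply one pattern: setup_frame()/setup_rt_frame() now return 0 or a negative error instead of void, handle_signal() propagates that result and only updates the blocked mask and calls tracehook_signal_handler() on success, and do_signal() clears the restore-sigmask state only when delivery actually worked, so a faulting sigframe no longer loses the saved signal mask. Below is a minimal user-space sketch of that control flow, not kernel code; the *_sketch helpers, the fail_delivery flag and the boolean standing in for TIF_RESTORE_SIGMASK are illustrative assumptions.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel state; names are assumptions, not kernel APIs. */
static bool restore_sigmask_flag = true;   /* plays the role of TIF_RESTORE_SIGMASK */
static bool fail_delivery;                 /* simulate a fault while writing the sigframe */

/* Like the reworked setup_frame()/setup_rt_frame(): 0 on success, -errno on failure. */
static int setup_frame_sketch(int signo)
{
        if (fail_delivery)
                return -EFAULT;            /* sigframe could not be written to the user stack */
        printf("frame written for signal %d\n", signo);
        return 0;
}

/* Like the reworked handle_signal(): propagate the error and only adjust the
 * blocked mask / notify the tracer once the frame is known to be in place. */
static int handle_signal_sketch(int signo)
{
        int err = setup_frame_sketch(signo);

        if (err)
                return err;
        printf("signal %d blocked, tracehook notified\n", signo);
        return 0;
}

/* Like the reworked do_signal(): keep the restore-sigmask flag when delivery
 * fails so the saved mask is still restored on the way back to user space. */
static void do_signal_sketch(int signo)
{
        if (handle_signal_sketch(signo) == 0)
                restore_sigmask_flag = false;   /* sigreturn will restore the mask */
}

int main(void)
{
        do_signal_sketch(11);
        printf("restore flag after successful delivery: %d\n", (int)restore_sigmask_flag);

        restore_sigmask_flag = true;
        fail_delivery = true;
        do_signal_sketch(11);
        printf("restore flag after failed delivery: %d\n", (int)restore_sigmask_flag);
        return 0;
}

On sparc64 and in the compat path the same success check gates TS_RESTORE_SIGMASK in the thread_info status word rather than a thread flag, but the split between "frame written, commit the state changes" and "frame failed, leave the saved mask alone" is identical.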