Diffstat (limited to 'arch')
161 files changed, 2458 insertions, 822 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 56a4df952fb0..22e58a99f38b 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -477,7 +477,7 @@ config ALPHA_BROKEN_IRQ_MASK config VGA_HOSE bool - depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI + depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI) default y help Support VGA on an arbitrary hose; needed for several platforms diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h index 1f7fba671ae6..d70408d36677 100644 --- a/arch/alpha/include/asm/rtc.h +++ b/arch/alpha/include/asm/rtc.h @@ -1,14 +1,10 @@ #ifndef _ALPHA_RTC_H #define _ALPHA_RTC_H -#if defined(CONFIG_ALPHA_GENERIC) +#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \ + || defined(CONFIG_ALPHA_GENERIC) # define get_rtc_time alpha_mv.rtc_get_time # define set_rtc_time alpha_mv.rtc_set_time -#else -# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) -# define get_rtc_time marvel_get_rtc_time -# define set_rtc_time marvel_set_rtc_time -# endif #endif #include <asm-generic/rtc.h> diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c index 5e7c28f92f19..61893d7bdda5 100644 --- a/arch/alpha/kernel/core_tsunami.c +++ b/arch/alpha/kernel/core_tsunami.c @@ -11,6 +11,7 @@ #include <asm/core_tsunami.h> #undef __EXTERN_INLINE +#include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 14a4b6a7cf59..407accc80877 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -317,7 +317,7 @@ marvel_init_irq(void) } static int -marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *hose = dev->sysdata; struct io7_port *io7_port = hose->sysdata; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index cf006d40342c..36586dba6fa6 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1186,6 +1186,15 @@ if !MMU source "arch/arm/Kconfig-nommu" endif +config ARM_ERRATA_326103 + bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory" + depends on CPU_V6 + help + Executing a SWP instruction to read-only memory does not set bit 11 + of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to + treat the access as a read, preventing a COW from occurring and + causing the faulting task to livelock. 
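For context, the failing case the ARM_ERRATA_326103 help text describes is a userspace atomic swap (SWP) hitting an anonymous page that is still mapped read-only for copy-on-write. The following is a hypothetical reproducer sketch, not part of this patch: it assumes an affected ARM1136 r0pX core, GCC inline assembly, and that a first read of fresh anonymous memory maps the shared zero page read-only.

/*
 * Hypothetical userspace sketch of the erratum scenario (not from this
 * patch): a SWP write to a read-only COW mapping should trigger a COW
 * break, but an affected ARM1136 reports the fault as a read in the
 * DFSR, so the page stays read-only and the SWP faults forever.
 */
#include <sys/mman.h>

static inline unsigned long arm_swp(unsigned long val, unsigned long *addr)
{
	unsigned long old;

	/* swp atomically loads *addr into 'old' and stores 'val' to *addr */
	asm volatile("swp %0, %1, [%2]"
		     : "=&r" (old)
		     : "r" (val), "r" (addr)
		     : "memory");
	return old;
}

int main(void)
{
	unsigned long *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	volatile unsigned long dummy;

	dummy = *p;	/* read first: page now maps the shared zero page, read-only */
	(void)dummy;
	arm_swp(1, p);	/* write via SWP must COW; livelocks on affected cores */
	return 0;
}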
+ config ARM_ERRATA_411920 bool "ARM errata: Invalidation of the Instruction Cache operation can fail" depends on CPU_V6 || CPU_V6K diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts index 15ded0deaa79..45bc4bb04e57 100644 --- a/arch/arm/boot/dts/msm8660-surf.dts +++ b/arch/arm/boot/dts/msm8660-surf.dts @@ -10,7 +10,7 @@ intc: interrupt-controller@02080000 { compatible = "qcom,msm-8660-qgic"; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <3>; reg = < 0x02080000 0x1000 >, < 0x02081000 0x1000 >; }; @@ -19,6 +19,6 @@ compatible = "qcom,msm-hsuart", "qcom,msm-uart"; reg = <0x19c40000 0x1000>, <0x19c00000 0x1000>; - interrupts = <195>; + interrupts = <0 195 0x0>; }; }; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 0b32925f2147..e2fe3195c0d1 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -173,7 +173,7 @@ mmc@5000 { compatible = "arm,primecell"; reg = < 0x5000 0x1000>; - interrupts = <22>; + interrupts = <22 34>; }; kmi@6000 { compatible = "arm,pl050", "arm,primecell"; diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index 166461073b78..7e8175269064 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -41,7 +41,7 @@ mmc@b000 { compatible = "arm,primecell"; reg = <0xb000 0x1000>; - interrupts = <23>; + interrupts = <23 34>; }; }; }; diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig index 42da9183acc8..082175c54e7c 100644 --- a/arch/arm/configs/mini2440_defconfig +++ b/arch/arm/configs/mini2440_defconfig @@ -14,6 +14,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_BLK_DEV_INTEGRITY=y CONFIG_ARCH_S3C24XX=y +# CONFIG_CPU_S3C2410 is not set +CONFIG_CPU_S3C2440=y CONFIG_S3C_ADC=y CONFIG_S3C24XX_PWM=y CONFIG_MACH_MINI2440=y diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index d4c24d412a8d..0f04d84582e1 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *); extern void vfp_sync_hwstate(struct thread_info *); extern void vfp_flush_hwstate(struct thread_info *); +struct user_vfp; +struct user_vfp_exc; + +extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, + struct user_vfp_exc __user *); +extern int vfp_restore_user_hwstate(struct user_vfp __user *, + struct user_vfp_exc __user *); #endif /* diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 60843eb0f61c..73409e6c0251 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -7,6 +7,8 @@ .macro set_tls_v6k, tp, tmp1, tmp2 mcr p15, 0, \tp, c13, c0, 3 @ set TLS register + mov \tmp1, #0 + mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register .endm .macro set_tls_v6, tp, tmp1, tmp2 @@ -15,6 +17,8 @@ mov \tmp2, #0xffff0fff tst \tmp1, #HWCAP_TLS @ hardware TLS available? 
mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + movne \tmp1, #0 + mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 .endm diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 71ccdbfed662..8349d4e97e2b 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc) } c = irq_data_get_irq_chip(d); - if (c->irq_set_affinity) - c->irq_set_affinity(d, affinity, true); - else + if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); + else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + cpumask_copy(d->affinity, affinity); return ret; } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 80abafb9bf33..9650c143afc1 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request, return ret; } -#ifdef __ARMEB__ -#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB -#else -#define AUDIT_ARCH_NR AUDIT_ARCH_ARM -#endif - asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) { unsigned long ip; - /* - * Save IP. IP is used to denote syscall entry/exit: - * IP = 0 -> entry, = 1 -> exit - */ - ip = regs->ARM_ip; - regs->ARM_ip = why; - - if (!ip) + if (why) audit_syscall_exit(regs); else - audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0, + audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); if (!test_thread_flag(TIF_SYSCALL_TRACE)) @@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) current_thread_info()->syscall = scno; + /* + * IP is used to denote syscall entry/exit: + * IP = 0 -> entry, =1 -> exit + */ + ip = regs->ARM_ip; + regs->ARM_ip = why; + /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 7cb532fc8aa4..d68d1b694680 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) static int preserve_vfp_context(struct vfp_sigframe __user *frame) { - struct thread_info *thread = current_thread_info(); - struct vfp_hard_struct *h = &thread->vfpstate.hard; const unsigned long magic = VFP_MAGIC; const unsigned long size = VFP_STORAGE_SIZE; int err = 0; - vfp_sync_hwstate(thread); __put_user_error(magic, &frame->magic, err); __put_user_error(size, &frame->size, err); - /* - * Copy the floating point registers. There can be unused - * registers see asm/hwcap.h for details. - */ - err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, - sizeof(h->fpregs)); - /* - * Copy the status and control register. - */ - __put_user_error(h->fpscr, &frame->ufp.fpscr, err); - - /* - * Copy the exception registers. - */ - __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); - __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); - __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); + if (err) + return -EFAULT; - return err ? 
-EFAULT : 0; + return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); } static int restore_vfp_context(struct vfp_sigframe __user *frame) { - struct thread_info *thread = current_thread_info(); - struct vfp_hard_struct *h = &thread->vfpstate.hard; unsigned long magic; unsigned long size; - unsigned long fpexc; int err = 0; __get_user_error(magic, &frame->magic, err); @@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; - vfp_flush_hwstate(thread); - - /* - * Copy the floating point registers. There can be unused - * registers see asm/hwcap.h for details. - */ - err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, - sizeof(h->fpregs)); - /* - * Copy the status and control register. - */ - __get_user_error(h->fpscr, &frame->ufp.fpscr, err); - - /* - * Sanitise and restore the exception registers. - */ - __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); - /* Ensure the VFP is enabled. */ - fpexc |= FPEXC_EN; - /* Ensure FPINST2 is invalid and the exception flag is cleared. */ - fpexc &= ~(FPEXC_EX | FPEXC_FP2V); - h->fpexc = fpexc; - - __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); - __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); - - return err ? -EFAULT : 0; + return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc); } #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index addbbe8028c2..8f4644659777 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void) struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); - printk("CPU%u: Booted secondary processor\n", cpu); - /* * All kernel threads share the same mm context; grab a * reference and switch to it. 
@@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void) enter_lazy_tlb(mm, current); local_flush_tlb_all(); + printk("CPU%u: Booted secondary processor\n", cpu); + cpu_init(); preempt_disable(); trace_hardirqs_off(); @@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu) local_fiq_disable(); local_irq_disable(); -#ifdef CONFIG_HOTPLUG_CPU - platform_cpu_kill(cpu); -#endif - while (1) cpu_relax(); } @@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu) smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } +#ifdef CONFIG_HOTPLUG_CPU +static void smp_kill_cpus(cpumask_t *mask) +{ + unsigned int cpu; + for_each_cpu(cpu, mask) + platform_cpu_kill(cpu); +} +#else +static void smp_kill_cpus(cpumask_t *mask) { } +#endif + void smp_send_stop(void) { unsigned long timeout; + struct cpumask mask; - if (num_online_cpus() > 1) { - struct cpumask mask; - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); - - smp_cross_call(&mask, IPI_CPU_STOP); - } + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + smp_cross_call(&mask, IPI_CPU_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; @@ -595,6 +599,8 @@ void smp_send_stop(void) if (num_online_cpus() > 1) pr_warning("SMP: failed to stop secondary CPUs\n"); + + smp_kill_cpus(&mask); } /* diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index d2b177905cdb..76cbb055dd05 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -115,7 +115,7 @@ int kernel_execve(const char *filename, "Ir" (THREAD_START_SP - sizeof(regs)), "r" (®s), "Ir" (sizeof(regs)) - : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); + : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory"); out: return ret; diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index 364c19357e60..89106792d067 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -26,15 +26,6 @@ #include "clock.h" #include "sam9_smc.h" -static struct map_desc at91rm9200_io_desc[] __initdata = { - { - .virtual = AT91_VA_BASE_EMAC, - .pfn = __phys_to_pfn(AT91RM9200_BASE_EMAC), - .length = SZ_16K, - .type = MT_DEVICE, - }, -}; - /* -------------------------------------------------------------------- * Clocks * -------------------------------------------------------------------- */ @@ -315,7 +306,6 @@ static void __init at91rm9200_map_io(void) { /* Map peripherals */ at91_init_sram(0, AT91RM9200_SRAM_BASE, AT91RM9200_SRAM_SIZE); - iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc)); } static void __init at91rm9200_ioremap_registers(void) diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 05774e5b1cba..60c472861e5e 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -140,8 +140,8 @@ static struct macb_platform_data eth_data; static struct resource eth_resources[] = { [0] = { - .start = AT91_VA_BASE_EMAC, - .end = AT91_VA_BASE_EMAC + SZ_16K - 1, + .start = AT91RM9200_BASE_EMAC, + .end = AT91RM9200_BASE_EMAC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h index e9e29a6c3868..01db372be8e5 100644 --- a/arch/arm/mach-at91/include/mach/hardware.h +++ b/arch/arm/mach-at91/include/mach/hardware.h @@ -94,7 +94,6 @@ * Virtual to Physical Address mapping for IO devices. 
*/ #define AT91_VA_BASE_SYS AT91_IO_P2V(AT91_BASE_SYS) -#define AT91_VA_BASE_EMAC AT91_IO_P2V(AT91RM9200_BASE_EMAC) /* Internal SRAM is mapped below the IO devices */ #define AT91_SRAM_MAX SZ_1M diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e81c35f936b5..b8df521fb68e 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -232,6 +232,9 @@ config MACH_ARMLEX4210 config MACH_UNIVERSAL_C210 bool "Mobile UNIVERSAL_C210 Board" select CPU_EXYNOS4210 + select S5P_HRT + select CLKSRC_MMIO + select HAVE_SCHED_CLOCK select S5P_GPIO_INT select S5P_DEV_FIMC0 select S5P_DEV_FIMC1 diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c index df54c2a92225..6efd1e5919fd 100644 --- a/arch/arm/mach-exynos/clock-exynos4.c +++ b/arch/arm/mach-exynos/clock-exynos4.c @@ -497,25 +497,25 @@ static struct clk exynos4_init_clocks_off[] = { .ctrlbit = (1 << 3), }, { .name = "hsmmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 5), }, { .name = "hsmmc", - .devname = "s3c-sdhci.1", + .devname = "exynos4-sdhci.1", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 6), }, { .name = "hsmmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 7), }, { .name = "hsmmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 8), @@ -1202,7 +1202,7 @@ static struct clksrc_clk exynos4_clk_sclk_uart3 = { static struct clksrc_clk exynos4_clk_sclk_mmc0 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos4_clk_dout_mmc0.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 0), @@ -1213,7 +1213,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc0 = { static struct clksrc_clk exynos4_clk_sclk_mmc1 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.1", + .devname = "exynos4-sdhci.1", .parent = &exynos4_clk_dout_mmc1.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 4), @@ -1224,7 +1224,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc1 = { static struct clksrc_clk exynos4_clk_sclk_mmc2 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos4_clk_dout_mmc2.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 8), @@ -1235,7 +1235,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc2 = { static struct clksrc_clk exynos4_clk_sclk_mmc3 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos4_clk_dout_mmc3.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 12), @@ -1340,10 +1340,10 @@ static struct clk_lookup exynos4_clk_lookup[] = { CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos4_clk_sclk_uart1.clk), CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos4_clk_sclk_uart2.clk), CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos4_clk_sclk_uart3.clk), - CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk), - CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk), - CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk), - CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk), + CLKDEV_INIT("exynos4-sdhci.0", 
"mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk), + CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk), + CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk), + CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk), CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0), CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0), CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1), diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c index d013982d0f8e..7ac6ff4c46bd 100644 --- a/arch/arm/mach-exynos/clock-exynos5.c +++ b/arch/arm/mach-exynos/clock-exynos5.c @@ -455,25 +455,25 @@ static struct clk exynos5_init_clocks_off[] = { .ctrlbit = (1 << 20), }, { .name = "hsmmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 12), }, { .name = "hsmmc", - .devname = "s3c-sdhci.1", + .devname = "exynos4-sdhci.1", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 13), }, { .name = "hsmmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 14), }, { .name = "hsmmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 15), @@ -678,7 +678,7 @@ static struct clk exynos5_clk_pdma1 = { .name = "dma", .devname = "dma-pl330.1", .enable = exynos5_clk_ip_fsys_ctrl, - .ctrlbit = (1 << 1), + .ctrlbit = (1 << 2), }; static struct clk exynos5_clk_mdma1 = { @@ -813,7 +813,7 @@ static struct clksrc_clk exynos5_clk_sclk_uart3 = { static struct clksrc_clk exynos5_clk_sclk_mmc0 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos5_clk_dout_mmc0.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 0), @@ -824,7 +824,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc0 = { static struct clksrc_clk exynos5_clk_sclk_mmc1 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.1", + .devname = "exynos4-sdhci.1", .parent = &exynos5_clk_dout_mmc1.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 4), @@ -835,7 +835,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc1 = { static struct clksrc_clk exynos5_clk_sclk_mmc2 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos5_clk_dout_mmc2.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 8), @@ -846,7 +846,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc2 = { static struct clksrc_clk exynos5_clk_sclk_mmc3 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos5_clk_dout_mmc3.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 12), @@ -990,10 +990,10 @@ static struct clk_lookup exynos5_clk_lookup[] = { CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos5_clk_sclk_uart1.clk), CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos5_clk_sclk_uart2.clk), CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos5_clk_sclk_uart3.clk), - CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk), - CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk), - CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk), - CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk), + 
CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk), + CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk), + CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk), + CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk), CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0), CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1), CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1), diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index 8614aab47cc0..5ccd6e80a607 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -326,6 +326,11 @@ static void __init exynos4_map_io(void) s3c_fimc_setname(2, "exynos4-fimc"); s3c_fimc_setname(3, "exynos4-fimc"); + s3c_sdhci_setname(0, "exynos4-sdhci"); + s3c_sdhci_setname(1, "exynos4-sdhci"); + s3c_sdhci_setname(2, "exynos4-sdhci"); + s3c_sdhci_setname(3, "exynos4-sdhci"); + /* The I2C bus controllers are directly compatible with s3c2440 */ s3c_i2c0_setname("s3c2440-i2c"); s3c_i2c1_setname("s3c2440-i2c"); @@ -344,6 +349,11 @@ static void __init exynos5_map_io(void) s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC; s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC; + s3c_sdhci_setname(0, "exynos4-sdhci"); + s3c_sdhci_setname(1, "exynos4-sdhci"); + s3c_sdhci_setname(2, "exynos4-sdhci"); + s3c_sdhci_setname(3, "exynos4-sdhci"); + /* The I2C bus controllers are directly compatible with s3c2440 */ s3c_i2c0_setname("s3c2440-i2c"); s3c_i2c1_setname("s3c2440-i2c"); @@ -537,7 +547,9 @@ void __init exynos5_init_irq(void) { int irq; - gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU); +#ifdef CONFIG_OF + of_irq_init(exynos4_dt_irq_match); +#endif for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) { combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq), diff --git a/arch/arm/mach-exynos/dev-dwmci.c b/arch/arm/mach-exynos/dev-dwmci.c index b025db4bf602..79035018fb74 100644 --- a/arch/arm/mach-exynos/dev-dwmci.c +++ b/arch/arm/mach-exynos/dev-dwmci.c @@ -16,6 +16,7 @@ #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/ioport.h> #include <linux/mmc/dw_mmc.h> #include <plat/devs.h> @@ -33,16 +34,8 @@ static int exynos4_dwmci_init(u32 slot_id, irq_handler_t handler, void *data) } static struct resource exynos4_dwmci_resource[] = { - [0] = { - .start = EXYNOS4_PA_DWMCI, - .end = EXYNOS4_PA_DWMCI + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = IRQ_DWMCI, - .end = IRQ_DWMCI, - .flags = IORESOURCE_IRQ, - } + [0] = DEFINE_RES_MEM(EXYNOS4_PA_DWMCI, SZ_4K), + [1] = DEFINE_RES_IRQ(EXYNOS4_IRQ_DWMCI), }; static struct dw_mci_board exynos4_dwci_pdata = { diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c index b4f1f902ce6d..ed90aef404c3 100644 --- a/arch/arm/mach-exynos/mach-nuri.c +++ b/arch/arm/mach-exynos/mach-nuri.c @@ -112,6 +112,7 @@ static struct s3c_sdhci_platdata nuri_hsmmc0_data __initdata = { .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE), + .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE, .cd_type = S3C_SDHCI_CD_PERMANENT, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, }; diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index 7ebf79c2ab34..a34036eb8ba2 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c @@ -40,6 +40,7 @@ #include 
<plat/pd.h> #include <plat/regs-fb-v4.h> #include <plat/fimc-core.h> +#include <plat/s5p-time.h> #include <plat/camport.h> #include <plat/mipi_csis.h> @@ -747,6 +748,7 @@ static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = { .max_width = 8, .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED), + .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE, .cd_type = S3C_SDHCI_CD_PERMANENT, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, }; @@ -1062,6 +1064,7 @@ static void __init universal_map_io(void) exynos_init_io(NULL, 0); s3c24xx_init_clocks(24000000); s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); + s5p_set_timer_source(S5P_PWM2, S5P_PWM4); } static void s5p_tv_setup(void) @@ -1112,7 +1115,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210") .map_io = universal_map_io, .handle_irq = gic_handle_irq, .init_machine = universal_machine_init, - .timer = &exynos4_timer, + .timer = &s5p_timer, .reserve = &universal_reserve, .restart = exynos4_restart, MACHINE_END diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h index 292d55ed2113..cf03614d250d 100644 --- a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h +++ b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h @@ -75,4 +75,7 @@ struct ixp46x_ts_regs { #define TX_SNAPSHOT_LOCKED (1<<0) #define RX_SNAPSHOT_LOCKED (1<<1) +/* The ptp_ixp46x module will set this variable */ +extern int ixp46x_phc_index; + #endif diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index 1c672d9e6656..f7fe1b9f3170 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/kexec.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/bridge-regs.h> diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c index 962e71169750..fb3496a52ef4 100644 --- a/arch/arm/mach-msm/board-msm8x60.c +++ b/arch/arm/mach-msm/board-msm8x60.c @@ -17,6 +17,7 @@ #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/memblock.h> @@ -49,10 +50,22 @@ static void __init msm8x60_map_io(void) msm_map_msm8x60_io(); } +#ifdef CONFIG_OF +static struct of_device_id msm_dt_gic_match[] __initdata = { + { .compatible = "qcom,msm-8660-qgic", .data = gic_of_init }, + {} +}; +#endif + static void __init msm8x60_init_irq(void) { - gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE, - (void *)MSM_QGIC_CPU_BASE); + if (!of_have_populated_dt()) + gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE, + (void *)MSM_QGIC_CPU_BASE); +#ifdef CONFIG_OF + else + of_irq_init(msm_dt_gic_match); +#endif /* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */ writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4); @@ -73,16 +86,8 @@ static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = { {} }; -static struct of_device_id msm_dt_gic_match[] __initdata = { - { .compatible = "qcom,msm-8660-qgic", }, - {} -}; - static void __init msm8x60_dt_init(void) { - irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS, - GIC_SPI_START); - if (of_machine_is_compatible("qcom,msm8660-surf")) { printk(KERN_INFO "Init surf UART registers\n"); msm8x60_init_uart12dm(); diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index fcce7ff37630..cfd98b186fcc 100644 --- 
a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) struct irq_chip *irq_chip = NULL; int gpio, irq_num, fiq_count; - irq_desc = irq_to_desc(IH_GPIO_BASE); + irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK)); if (irq_desc) irq_chip = irq_desc->irq_data.chip; diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 930c0d380435..740cee9369ba 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = { static void __init igep_init(void) { - regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); /* Get IGEP2 hardware revision */ diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h index 1e2d3322f33e..c88420de1151 100644 --- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h +++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h @@ -941,10 +941,10 @@ #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29) #define OMAP4_DSI1_LANEENABLE_SHIFT 24 #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24) -#define OMAP4_DSI2_PIPD_SHIFT 19 -#define OMAP4_DSI2_PIPD_MASK (0x1f << 19) -#define OMAP4_DSI1_PIPD_SHIFT 14 -#define OMAP4_DSI1_PIPD_MASK (0x1f << 14) +#define OMAP4_DSI1_PIPD_SHIFT 19 +#define OMAP4_DSI1_PIPD_MASK (0x1f << 19) +#define OMAP4_DSI2_PIPD_SHIFT 14 +#define OMAP4_DSI2_PIPD_MASK (0x1f << 14) /* CONTROL_MCBSPLP */ #define OMAP4_ALBCTRLRX_FSX_SHIFT 31 diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h index eac68978a2c2..db70e79a1198 100644 --- a/arch/arm/mach-orion5x/mpp.h +++ b/arch/arm/mach-orion5x/mpp.h @@ -65,8 +65,8 @@ #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1) #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1) +#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1) +#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1) #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1) #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1) diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c index 37c2de9b6f26..a7b9415d30f8 100644 --- a/arch/arm/mach-prima2/irq.c +++ b/arch/arm/mach-prima2/irq.c @@ -42,7 +42,8 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) static __init void sirfsoc_irq_init(void) { sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32); - sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32); + sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, + SIRFSOC_INTENAL_IRQ_END + 1 - 32); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1); @@ -68,7 +69,8 @@ void __init sirfsoc_of_irq_init(void) if (!sirfsoc_intc_base) panic("unable to map intc cpu registers\n"); - irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL); + irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0, + &irq_domain_simple_ops, NULL); of_node_put(np); diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h index c54cef25895c..cbf51ae81855 100644 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h @@ -17,6 +17,7 
@@ * * bit 23 - Input/Output (PXA2xx specific) * bit 24 - Wakeup Enable(PXA2xx specific) + * bit 25 - Keep Output (PXA2xx specific) */ #define MFP_DIR_IN (0x0 << 23) @@ -25,6 +26,12 @@ #define MFP_DIR(x) (((x) >> 23) & 0x1) #define MFP_LPM_CAN_WAKEUP (0x1 << 24) + +/* + * MFP_LPM_KEEP_OUTPUT must be specified for pins that need to + * retain their last output level (low or high). + * Note: MFP_LPM_KEEP_OUTPUT has no effect on pins configured for input. + */ #define MFP_LPM_KEEP_OUTPUT (0x1 << 25) #define WAKEUP_ON_EDGE_RISE (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_RISE) diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index b0a842887780..ef0426a159d4 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c @@ -33,6 +33,8 @@ #define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) #define GPLR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5)) #define GPDR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c) +#define GPSR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x18) +#define GPCR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x24) #define PWER_WE35 (1 << 24) @@ -348,6 +350,7 @@ static inline void pxa27x_mfp_init(void) {} #ifdef CONFIG_PM static unsigned long saved_gafr[2][4]; static unsigned long saved_gpdr[4]; +static unsigned long saved_gplr[4]; static unsigned long saved_pgsr[4]; static int pxa2xx_mfp_suspend(void) @@ -366,14 +369,26 @@ static int pxa2xx_mfp_suspend(void) } for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { - saved_gafr[0][i] = GAFR_L(i); saved_gafr[1][i] = GAFR_U(i); saved_gpdr[i] = GPDR(i * 32); + saved_gplr[i] = GPLR(i * 32); saved_pgsr[i] = PGSR(i); - GPDR(i * 32) = gpdr_lpm[i]; + GPSR(i * 32) = PGSR(i); + GPCR(i * 32) = ~PGSR(i); + } + + /* set GPDR bits taking into account MFP_LPM_KEEP_OUTPUT */ + for (i = 0; i < pxa_last_gpio; i++) { + if ((gpdr_lpm[gpio_to_bank(i)] & GPIO_bit(i)) || + ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && + (saved_gpdr[gpio_to_bank(i)] & GPIO_bit(i)))) + GPDR(i) |= GPIO_bit(i); + else + GPDR(i) &= ~GPIO_bit(i); } + return 0; } @@ -384,6 +399,8 @@ static void pxa2xx_mfp_resume(void) for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { GAFR_L(i) = saved_gafr[0][i]; GAFR_U(i) = saved_gafr[1][i]; + GPSR(i * 32) = saved_gplr[i]; + GPCR(i * 32) = ~saved_gplr[i]; GPDR(i * 32) = saved_gpdr[i]; PGSR(i) = saved_pgsr[i]; } diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 6bce78edce7a..4726c246dcdc 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -421,8 +421,11 @@ void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info) pxa_register_device(&pxa27x_device_i2c_power, info); } +static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = { + .gpio_set_wake = gpio_set_wake, +}; + static struct platform_device *devices[] __initdata = { - &pxa_device_gpio, &pxa27x_device_udc, &pxa_device_pmu, &pxa_device_i2s, @@ -458,6 +461,7 @@ static int __init pxa27x_init(void) register_syscore_ops(&pxa2xx_mfp_syscore_ops); register_syscore_ops(&pxa2xx_clock_syscore_ops); + pxa_register_device(&pxa_device_gpio, &pxa27x_gpio_info); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index 0f3a327ebcaa..b34287ab5afd 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -111,10 +111,6 @@ config S3C24XX_SETUP_TS help Compile in platform device definition for Samsung TouchScreen. 
-# cpu-specific sections - -if CPU_S3C2410 - config S3C2410_DMA bool depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442) @@ -127,6 +123,10 @@ config S3C2410_PM help Power Management code common to S3C2410 and better +# cpu-specific sections + +if CPU_S3C2410 + config S3C24XX_SIMTEC_NOR bool help diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index a8933de3d627..32395664e879 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -25,6 +25,7 @@ #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/gpio.h> +#include <linux/mmc/host.h> #include <linux/interrupt.h> #include <asm/hardware/vic.h> @@ -765,6 +766,7 @@ static void __init goni_pmic_init(void) /* MoviNAND */ static struct s3c_sdhci_platdata goni_hsmmc0_data __initdata = { .max_width = 4, + .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE, .cd_type = S3C_SDHCI_CD_PERMANENT, }; diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 7c524b4e415d..16be4c56abe3 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -306,7 +306,7 @@ void sa11x0_register_irda(struct irda_platform_data *irda) } static struct resource sa1100_rtc_resources[] = { - DEFINE_RES_MEM(0x90010000, 0x9001003f), + DEFINE_RES_MEM(0x90010000, 0x40), DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"), DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"), }; diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index cb224a344af0..0891ec6e27f5 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -365,23 +365,13 @@ static struct platform_device mipidsi0_device = { }; /* SDHI0 */ -static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, - .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED, .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, + .cd_gpio = GPIO_PORT251, }; static struct resource sdhi0_resources[] = { @@ -557,7 +547,6 @@ static void __init ag5evm_init(void) lcd_backlight_reset(); /* enable SDHI0 on CN15 [SD I/F] */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ -566,13 +555,6 @@ static void __init ag5evm_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "sdhi0 cd", &sdhi0_device.dev)) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_warn("Unable to setup SDHI0 GPIO IRQ\n"); - /* enable SDHI1 on CN4 [WLAN I/F] */ gpio_request(GPIO_FN_SDHICLK1, NULL); gpio_request(GPIO_FN_SDHICMD1_PU, NULL); diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index f49e28abe0ab..8c6202bb6aeb 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1011,21 +1011,12 @@ static int slot_cn7_get_cd(struct platform_device *pdev) } /* SDHI0 */ -static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct 
sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, + .tmio_flags = TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, + .cd_gpio = GPIO_PORT172, }; static struct resource sdhi0_resources[] = { @@ -1384,7 +1375,6 @@ static void __init mackerel_init(void) { u32 srcr4; struct clk *clk; - int ret; /* External clock source */ clk_set_rate(&sh7372_dv_clki_clk, 27000000); @@ -1481,7 +1471,6 @@ static void __init mackerel_init(void) irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); /* enable SDHI0 */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ -1490,13 +1479,6 @@ static void __init mackerel_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev); - if (!ret) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret); - #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) /* enable SDHI1 */ gpio_request(GPIO_FN_SDHICMD1, NULL); diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index 6ac015c89206..b202c1272526 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -16,6 +16,59 @@ __CPUINIT +/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks! + * + * The secondary kernel init calls v7_flush_dcache_all before it enables + * the L1; however, the L1 comes out of reset in an undefined state, so + * the clean + invalidate performed by v7_flush_dcache_all causes a bunch + * of cache lines with uninitialized data and uninitialized tags to get + * written out to memory, which does really unpleasant things to the main + * processor. We fix this by performing an invalidate, rather than a + * clean + invalidate, before jumping into the kernel. + * + * This funciton is cloned from arch/arm/mach-tegra/headsmp.S, and needs + * to be called for both secondary cores startup and primary core resume + * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S. + */ +ENTRY(v7_invalidate_l1) + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache + mcr p15, 2, r0, c0, c0, 0 + mrc p15, 1, r0, c0, c0, 0 + + ldr r1, =0x7fff + and r2, r1, r0, lsr #13 + + ldr r1, =0x3ff + + and r3, r1, r0, lsr #3 @ NumWays - 1 + add r2, r2, #1 @ NumSets + + and r0, r0, #0x7 + add r0, r0, #4 @ SetShift + + clz r1, r3 @ WayShift + add r4, r3, #1 @ NumWays +1: sub r2, r2, #1 @ NumSets-- + mov r3, r4 @ Temp = NumWays +2: subs r3, r3, #1 @ Temp-- + mov r5, r3, lsl r1 + mov r6, r2, lsl r0 + orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) + mcr p15, 0, r5, c7, c6, 2 + bgt 2b + cmp r2, #0 + bgt 1b + dsb + isb + mov pc, lr +ENDPROC(v7_invalidate_l1) + +ENTRY(shmobile_invalidate_start) + bl v7_invalidate_l1 + b secondary_startup +ENDPROC(shmobile_invalidate_start) + /* * Reset vector for secondary CPUs. * This will be mapped at address 0 by SBAR register. 
@@ -24,4 +77,5 @@ .align 12 ENTRY(shmobile_secondary_vector) ldr pc, 1f -1: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET +1: .long shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET +ENDPROC(shmobile_secondary_vector) diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 83ad3fe0a75f..c85e6ecda606 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -4,7 +4,6 @@ extern void shmobile_earlytimer_init(void); extern struct sys_timer shmobile_timer; struct twd_local_timer; -void shmobile_twd_init(struct twd_local_timer *twd_local_timer); extern void shmobile_setup_console(void); extern void shmobile_secondary_vector(void); extern int shmobile_platform_cpu_kill(unsigned int cpu); @@ -82,5 +81,6 @@ extern int r8a7779_platform_cpu_kill(unsigned int cpu); extern void r8a7779_secondary_init(unsigned int cpu); extern int r8a7779_boot_secondary(unsigned int cpu); extern void r8a7779_smp_prepare_cpus(void); +extern void r8a7779_register_twd(void); #endif /* __ARCH_MACH_COMMON_H */ diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c index 12c6f529ab89..e98e46f6cf55 100644 --- a/arch/arm/mach-shmobile/setup-r8a7779.c +++ b/arch/arm/mach-shmobile/setup-r8a7779.c @@ -262,10 +262,14 @@ void __init r8a7779_add_standard_devices(void) ARRAY_SIZE(r8a7779_late_devices)); } +/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ +void __init __weak r8a7779_register_twd(void) { } + static void __init r8a7779_earlytimer_init(void) { r8a7779_clock_init(); shmobile_earlytimer_init(); + r8a7779_register_twd(); } void __init r8a7779_add_early_devices(void) diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index 5bebffc10455..04a0dfe75493 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -688,10 +688,14 @@ void __init sh73a0_add_standard_devices(void) ARRAY_SIZE(sh73a0_late_devices)); } +/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ +void __init __weak sh73a0_register_twd(void) { } + static void __init sh73a0_earlytimer_init(void) { sh73a0_clock_init(); shmobile_earlytimer_init(); + sh73a0_register_twd(); } void __init sh73a0_add_early_devices(void) diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c index b62e19d4c9af..6d1d0238cbf7 100644 --- a/arch/arm/mach-shmobile/smp-r8a7779.c +++ b/arch/arm/mach-shmobile/smp-r8a7779.c @@ -64,8 +64,15 @@ static void __iomem *scu_base_addr(void) static DEFINE_SPINLOCK(scu_lock); static unsigned long tmp; +#ifdef CONFIG_HAVE_ARM_TWD static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); +void __init r8a7779_register_twd(void) +{ + twd_local_timer_register(&twd_local_timer); +} +#endif + static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) { void __iomem *scu_base = scu_base_addr(); @@ -84,7 +91,6 @@ unsigned int __init r8a7779_get_core_count(void) { void __iomem *scu_base = scu_base_addr(); - shmobile_twd_init(&twd_local_timer); return scu_get_core_count(scu_base); } diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index 14ad8b052f1a..e36c41c4ab40 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c @@ -42,7 +42,13 @@ static void __iomem *scu_base_addr(void) static DEFINE_SPINLOCK(scu_lock); static unsigned long tmp; +#ifdef CONFIG_HAVE_ARM_TWD static 
DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); +void __init sh73a0_register_twd(void) +{ + twd_local_timer_register(&twd_local_timer); +} +#endif static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) { @@ -62,7 +68,6 @@ unsigned int __init sh73a0_get_core_count(void) { void __iomem *scu_base = scu_base_addr(); - shmobile_twd_init(&twd_local_timer); return scu_get_core_count(scu_base); } diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index 2fba5f3d1c8a..8b79e7917a23 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c @@ -46,15 +46,6 @@ static void __init shmobile_timer_init(void) { } -void __init shmobile_twd_init(struct twd_local_timer *twd_local_timer) -{ -#ifdef CONFIG_HAVE_ARM_TWD - int err = twd_local_timer_register(twd_local_timer); - if (err) - pr_err("twd_local_timer_register failed %d\n", err); -#endif -} - struct sys_timer shmobile_timer = { .init = shmobile_timer_init, }; diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c index fef66a7486ed..f07488e0bd32 100644 --- a/arch/arm/mach-tegra/flowctrl.c +++ b/arch/arm/mach-tegra/flowctrl.c @@ -53,10 +53,10 @@ static void flowctrl_update(u8 offset, u32 value) void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value) { - return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); + return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); } void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) { - return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); + return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); } diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index 1621ad07d284..33339745d432 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -1667,8 +1667,10 @@ void __init u300_init_irq(void) for (i = 0; i < U300_VIC_IRQS_END; i++) set_bit(i, (unsigned long *) &mask[0]); - vic_init((void __iomem *) U300_INTCON0_VBASE, 0, mask[0], mask[0]); - vic_init((void __iomem *) U300_INTCON1_VBASE, 32, mask[1], mask[1]); + vic_init((void __iomem *) U300_INTCON0_VBASE, IRQ_U300_INTCON0_START, + mask[0], mask[0]); + vic_init((void __iomem *) U300_INTCON1_VBASE, IRQ_U300_INTCON1_START, + mask[1], mask[1]); } diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c index a38f80238ea9..cb04bd6ab3e7 100644 --- a/arch/arm/mach-u300/i2c.c +++ b/arch/arm/mach-u300/i2c.c @@ -146,9 +146,6 @@ static struct ab3100_platform_data ab3100_plf_data = { .min_uV = 1800000, .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL, - .valid_ops_mask = - REGULATOR_CHANGE_VOLTAGE | - REGULATOR_CHANGE_STATUS, .always_on = 1, .boot_on = 1, }, @@ -160,9 +157,6 @@ static struct ab3100_platform_data ab3100_plf_data = { .min_uV = 2500000, .max_uV = 2500000, .valid_modes_mask = REGULATOR_MODE_NORMAL, - .valid_ops_mask = - REGULATOR_CHANGE_VOLTAGE | - REGULATOR_CHANGE_STATUS, .always_on = 1, .boot_on = 1, }, @@ -230,8 +224,7 @@ static struct ab3100_platform_data ab3100_plf_data = { .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL, .valid_ops_mask = - REGULATOR_CHANGE_VOLTAGE | - REGULATOR_CHANGE_STATUS, + REGULATOR_CHANGE_VOLTAGE, .always_on = 1, .boot_on = 1, }, diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h index ee78a26707eb..ec09c1e07b1a 100644 --- a/arch/arm/mach-u300/include/mach/irqs.h +++ b/arch/arm/mach-u300/include/mach/irqs.h @@ -12,101 +12,101 @@ #ifndef __MACH_IRQS_H #define __MACH_IRQS_H -#define IRQ_U300_INTCON0_START 
0 -#define IRQ_U300_INTCON1_START 32 +#define IRQ_U300_INTCON0_START 1 +#define IRQ_U300_INTCON1_START 33 /* These are on INTCON0 - 30 lines */ -#define IRQ_U300_IRQ0_EXT 0 -#define IRQ_U300_IRQ1_EXT 1 -#define IRQ_U300_DMA 2 -#define IRQ_U300_VIDEO_ENC_0 3 -#define IRQ_U300_VIDEO_ENC_1 4 -#define IRQ_U300_AAIF_RX 5 -#define IRQ_U300_AAIF_TX 6 -#define IRQ_U300_AAIF_VGPIO 7 -#define IRQ_U300_AAIF_WAKEUP 8 -#define IRQ_U300_PCM_I2S0_FRAME 9 -#define IRQ_U300_PCM_I2S0_FIFO 10 -#define IRQ_U300_PCM_I2S1_FRAME 11 -#define IRQ_U300_PCM_I2S1_FIFO 12 -#define IRQ_U300_XGAM_GAMCON 13 -#define IRQ_U300_XGAM_CDI 14 -#define IRQ_U300_XGAM_CDICON 15 +#define IRQ_U300_IRQ0_EXT 1 +#define IRQ_U300_IRQ1_EXT 2 +#define IRQ_U300_DMA 3 +#define IRQ_U300_VIDEO_ENC_0 4 +#define IRQ_U300_VIDEO_ENC_1 5 +#define IRQ_U300_AAIF_RX 6 +#define IRQ_U300_AAIF_TX 7 +#define IRQ_U300_AAIF_VGPIO 8 +#define IRQ_U300_AAIF_WAKEUP 9 +#define IRQ_U300_PCM_I2S0_FRAME 10 +#define IRQ_U300_PCM_I2S0_FIFO 11 +#define IRQ_U300_PCM_I2S1_FRAME 12 +#define IRQ_U300_PCM_I2S1_FIFO 13 +#define IRQ_U300_XGAM_GAMCON 14 +#define IRQ_U300_XGAM_CDI 15 +#define IRQ_U300_XGAM_CDICON 16 #if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) /* MMIACC not used on the DB3210 or DB3350 chips */ -#define IRQ_U300_XGAM_MMIACC 16 +#define IRQ_U300_XGAM_MMIACC 17 #endif -#define IRQ_U300_XGAM_PDI 17 -#define IRQ_U300_XGAM_PDICON 18 -#define IRQ_U300_XGAM_GAMEACC 19 -#define IRQ_U300_XGAM_MCIDCT 20 -#define IRQ_U300_APEX 21 -#define IRQ_U300_UART0 22 -#define IRQ_U300_SPI 23 -#define IRQ_U300_TIMER_APP_OS 24 -#define IRQ_U300_TIMER_APP_DD 25 -#define IRQ_U300_TIMER_APP_GP1 26 -#define IRQ_U300_TIMER_APP_GP2 27 -#define IRQ_U300_TIMER_OS 28 -#define IRQ_U300_TIMER_MS 29 -#define IRQ_U300_KEYPAD_KEYBF 30 -#define IRQ_U300_KEYPAD_KEYBR 31 +#define IRQ_U300_XGAM_PDI 18 +#define IRQ_U300_XGAM_PDICON 19 +#define IRQ_U300_XGAM_GAMEACC 20 +#define IRQ_U300_XGAM_MCIDCT 21 +#define IRQ_U300_APEX 22 +#define IRQ_U300_UART0 23 +#define IRQ_U300_SPI 24 +#define IRQ_U300_TIMER_APP_OS 25 +#define IRQ_U300_TIMER_APP_DD 26 +#define IRQ_U300_TIMER_APP_GP1 27 +#define IRQ_U300_TIMER_APP_GP2 28 +#define IRQ_U300_TIMER_OS 29 +#define IRQ_U300_TIMER_MS 30 +#define IRQ_U300_KEYPAD_KEYBF 31 +#define IRQ_U300_KEYPAD_KEYBR 32 /* These are on INTCON1 - 32 lines */ -#define IRQ_U300_GPIO_PORT0 32 -#define IRQ_U300_GPIO_PORT1 33 -#define IRQ_U300_GPIO_PORT2 34 +#define IRQ_U300_GPIO_PORT0 33 +#define IRQ_U300_GPIO_PORT1 34 +#define IRQ_U300_GPIO_PORT2 35 #if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) || \ defined(CONFIG_MACH_U300_BS335) /* These are for DB3150, DB3200 and DB3350 */ -#define IRQ_U300_WDOG 35 -#define IRQ_U300_EVHIST 36 -#define IRQ_U300_MSPRO 37 -#define IRQ_U300_MMCSD_MCIINTR0 38 -#define IRQ_U300_MMCSD_MCIINTR1 39 -#define IRQ_U300_I2C0 40 -#define IRQ_U300_I2C1 41 -#define IRQ_U300_RTC 42 -#define IRQ_U300_NFIF 43 -#define IRQ_U300_NFIF2 44 +#define IRQ_U300_WDOG 36 +#define IRQ_U300_EVHIST 37 +#define IRQ_U300_MSPRO 38 +#define IRQ_U300_MMCSD_MCIINTR0 39 +#define IRQ_U300_MMCSD_MCIINTR1 40 +#define IRQ_U300_I2C0 41 +#define IRQ_U300_I2C1 42 +#define IRQ_U300_RTC 43 +#define IRQ_U300_NFIF 44 +#define IRQ_U300_NFIF2 45 #endif /* DB3150 and DB3200 have only 45 IRQs */ #if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) -#define U300_VIC_IRQS_END 45 +#define U300_VIC_IRQS_END 46 #endif /* The DB3350-specific interrupt lines */ #ifdef CONFIG_MACH_U300_BS335 -#define IRQ_U300_ISP_F0 45 -#define IRQ_U300_ISP_F1 46 
-#define IRQ_U300_ISP_F2 47 -#define IRQ_U300_ISP_F3 48 -#define IRQ_U300_ISP_F4 49 -#define IRQ_U300_GPIO_PORT3 50 -#define IRQ_U300_SYSCON_PLL_LOCK 51 -#define IRQ_U300_UART1 52 -#define IRQ_U300_GPIO_PORT4 53 -#define IRQ_U300_GPIO_PORT5 54 -#define IRQ_U300_GPIO_PORT6 55 -#define U300_VIC_IRQS_END 56 +#define IRQ_U300_ISP_F0 46 +#define IRQ_U300_ISP_F1 47 +#define IRQ_U300_ISP_F2 48 +#define IRQ_U300_ISP_F3 49 +#define IRQ_U300_ISP_F4 50 +#define IRQ_U300_GPIO_PORT3 51 +#define IRQ_U300_SYSCON_PLL_LOCK 52 +#define IRQ_U300_UART1 53 +#define IRQ_U300_GPIO_PORT4 54 +#define IRQ_U300_GPIO_PORT5 55 +#define IRQ_U300_GPIO_PORT6 56 +#define U300_VIC_IRQS_END 57 #endif /* The DB3210-specific interrupt lines */ #ifdef CONFIG_MACH_U300_BS365 -#define IRQ_U300_GPIO_PORT3 35 -#define IRQ_U300_GPIO_PORT4 36 -#define IRQ_U300_WDOG 37 -#define IRQ_U300_EVHIST 38 -#define IRQ_U300_MSPRO 39 -#define IRQ_U300_MMCSD_MCIINTR0 40 -#define IRQ_U300_MMCSD_MCIINTR1 41 -#define IRQ_U300_I2C0 42 -#define IRQ_U300_I2C1 43 -#define IRQ_U300_RTC 44 -#define IRQ_U300_NFIF 45 -#define IRQ_U300_NFIF2 46 -#define IRQ_U300_SYSCON_PLL_LOCK 47 -#define U300_VIC_IRQS_END 48 +#define IRQ_U300_GPIO_PORT3 36 +#define IRQ_U300_GPIO_PORT4 37 +#define IRQ_U300_WDOG 38 +#define IRQ_U300_EVHIST 39 +#define IRQ_U300_MSPRO 40 +#define IRQ_U300_MMCSD_MCIINTR0 41 +#define IRQ_U300_MMCSD_MCIINTR1 42 +#define IRQ_U300_I2C0 43 +#define IRQ_U300_I2C1 44 +#define IRQ_U300_RTC 45 +#define IRQ_U300_NFIF 46 +#define IRQ_U300_NFIF2 47 +#define IRQ_U300_SYSCON_PLL_LOCK 48 +#define U300_VIC_IRQS_END 49 #endif /* Maximum 8*7 GPIO lines */ @@ -117,6 +117,6 @@ #define IRQ_U300_GPIO_END (U300_VIC_IRQS_END) #endif -#define NR_IRQS (IRQ_U300_GPIO_END) +#define NR_IRQS (IRQ_U300_GPIO_END - IRQ_U300_INTCON0_START) #endif diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index ff1f7cc11f87..80741992a9fc 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -26,18 +26,23 @@ ENTRY(v6_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR /* - * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103). - * The test below covers all the write situations, including Java bytecodes + * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR. */ - bic r1, r1, #1 << 11 @ clear bit 11 of FSR +#ifdef CONFIG_ARM_ERRATA_326103 + ldr ip, =0x4107b36 + mrc p15, 0, r3, c0, c0, 0 @ get processor id + teq ip, r3, lsr #4 @ r0 ARM1136? + bne do_DataAbort tst r5, #PSR_J_BIT @ Java? + tsteq r5, #PSR_T_BIT @ Thumb? bne do_DataAbort - do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 - ldreq r3, [r4] @ read aborted ARM instruction + bic r1, r1, #1 << 11 @ clear bit 11 of FSR + ldr r3, [r4] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 - reveq r3, r3 + rev r3, r3 #endif do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. 
+#endif b do_DataAbort diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index a53fd2aaa2f4..2a8e380501e8 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -32,6 +32,7 @@ static void __iomem *l2x0_base; static DEFINE_RAW_SPINLOCK(l2x0_lock); static u32 l2x0_way_mask; /* Bitmask of active ways */ static u32 l2x0_size; +static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; struct l2x0_regs l2x0_saved_regs; @@ -61,12 +62,7 @@ static inline void cache_sync(void) { void __iomem *base = l2x0_base; -#ifdef CONFIG_PL310_ERRATA_753970 - /* write to an unmmapped register */ - writel_relaxed(0, base + L2X0_DUMMY_REG); -#else - writel_relaxed(0, base + L2X0_CACHE_SYNC); -#endif + writel_relaxed(0, base + sync_reg_offset); cache_wait(base + L2X0_CACHE_SYNC, 1); } @@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr) } #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) +static inline void debug_writel(unsigned long val) +{ + if (outer_cache.set_debug) + outer_cache.set_debug(val); +} -#define debug_writel(val) outer_cache.set_debug(val) - -static void l2x0_set_debug(unsigned long val) +static void pl310_set_debug(unsigned long val) { writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); } @@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val) { } -#define l2x0_set_debug NULL +#define pl310_set_debug NULL #endif #ifdef CONFIG_PL310_ERRATA_588369 @@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) else ways = 8; type = "L310"; +#ifdef CONFIG_PL310_ERRATA_753970 + /* Unmapped register. */ + sync_reg_offset = L2X0_DUMMY_REG; +#endif + outer_cache.set_debug = pl310_set_debug; break; case L2X0_CACHE_ID_PART_L210: ways = (aux >> 13) & 0xf; @@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) outer_cache.flush_all = l2x0_flush_all; outer_cache.inv_all = l2x0_inv_all; outer_cache.disable = l2x0_disable; - outer_cache.set_debug = l2x0_set_debug; printk(KERN_INFO "%s cache controller enabled\n", type); printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index f07467533365..5bb48356d217 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -247,7 +247,9 @@ good_area: return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); check_stack: - if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) + /* Don't allow expansion below FIRST_USER_ADDRESS */ + if (vma->vm_flags & VM_GROWSDOWN && + addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr)) goto good_area; out: return fault; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 595079fa9d1d..8f5813bbffb5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid); #endif #ifndef CONFIG_SPARSEMEM -static void arm_memory_present(void) +static void __init arm_memory_present(void) { } #else -static void arm_memory_present(void) +static void __init arm_memory_present(void) { struct memblock_region *reg; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b86f8933ff91..aa78de8bfdd3 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -489,7 +489,8 @@ static void __init build_mem_type_table(void) */ for (i = 0; i < ARRAY_SIZE(mem_types); i++) { mem_types[i].prot_pte |= PTE_EXT_AF; - mem_types[i].prot_sect |= PMD_SECT_AF; + if (mem_types[i].prot_sect) + mem_types[i].prot_sect |= PMD_SECT_AF; } kern_pgprot |= PTE_EXT_AF; vecs_pgprot |= 
PTE_EXT_AF; @@ -618,8 +619,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, } } -static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, - unsigned long phys, const struct mem_type *type) +static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, + unsigned long end, unsigned long phys, const struct mem_type *type) { pud_t *pud = pud_offset(pgd, addr); unsigned long next; diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index ecdb3da0dea9..c58d896cd5c3 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -916,6 +916,13 @@ void omap_start_dma(int lch) l |= OMAP_DMA_CCR_BUFFERING_DISABLE; l |= OMAP_DMA_CCR_EN; + /* + * As dma_write() uses IO accessors which are weakly ordered, there + * is no guarantee that data in coherent DMA memory will be visible + * to the DMA device. Add a memory barrier here to ensure that any + * such data is visible prior to enabling DMA. + */ + mb(); p->dma_write(l, CCR, lch); dma_chan[lch].flags |= OMAP_DMA_ACTIVE; @@ -965,6 +972,13 @@ void omap_stop_dma(int lch) p->dma_write(l, CCR, lch); } + /* + * Ensure that data transferred by DMA is visible to any access + * after DMA has been disabled. This is important for coherent + * DMA regions. + */ + mb(); + if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch = lch; char dma_chan_link_map[dma_lch_count]; diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h index 317e246ffc56..e834c5ef437c 100644 --- a/arch/arm/plat-samsung/include/plat/sdhci.h +++ b/arch/arm/plat-samsung/include/plat/sdhci.h @@ -18,6 +18,8 @@ #ifndef __PLAT_S3C_SDHCI_H #define __PLAT_S3C_SDHCI_H __FILE__ +#include <plat/devs.h> + struct platform_device; struct mmc_host; struct mmc_card; @@ -356,4 +358,30 @@ static inline void exynos4_default_sdhci3(void) { } #endif /* CONFIG_EXYNOS4_SETUP_SDHCI */ +static inline void s3c_sdhci_setname(int id, char *name) +{ + switch (id) { +#ifdef CONFIG_S3C_DEV_HSMMC + case 0: + s3c_device_hsmmc0.name = name; + break; +#endif +#ifdef CONFIG_S3C_DEV_HSMMC1 + case 1: + s3c_device_hsmmc1.name = name; + break; +#endif +#ifdef CONFIG_S3C_DEV_HSMMC2 + case 2: + s3c_device_hsmmc2.name = name; + break; +#endif +#ifdef CONFIG_S3C_DEV_HSMMC3 + case 3: + s3c_device_hsmmc3.name = name; + break; +#endif + } +} + #endif /* __PLAT_S3C_SDHCI_H */ diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 858748eaa144..b0197b2c857d 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -11,12 +11,15 @@ #include <linux/types.h> #include <linux/cpu.h> #include <linux/cpu_pm.h> +#include <linux/hardirq.h> #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/init.h> +#include <linux/uaccess.h> +#include <linux/user.h> #include <asm/cp15.h> #include <asm/cputype.h> @@ -430,7 +433,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) static void vfp_enable(void *unused) { - u32 access = get_copro_access(); + u32 access; + + BUG_ON(preemptible()); + access = get_copro_access(); /* * Enable full access to VFP (cp10 and cp11) @@ -529,6 +535,93 @@ void vfp_flush_hwstate(struct thread_info *thread) } /* + * Save the current VFP state into the provided structures and prepare + * for entry into a new function (signal handler). 
+ */ +int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, + struct user_vfp_exc __user *ufp_exc) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; + int err = 0; + + /* Ensure that the saved hwstate is up-to-date. */ + vfp_sync_hwstate(thread); + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. + */ + err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, + sizeof(hwstate->fpregs)); + /* + * Copy the status and control register. + */ + __put_user_error(hwstate->fpscr, &ufp->fpscr, err); + + /* + * Copy the exception registers. + */ + __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); + __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); + __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); + + if (err) + return -EFAULT; + + /* Ensure that VFP is disabled. */ + vfp_flush_hwstate(thread); + + /* + * As per the PCS, clear the length and stride bits for function + * entry. + */ + hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK); + return 0; +} + +/* Sanitise and restore the current VFP state from the provided structures. */ +int vfp_restore_user_hwstate(struct user_vfp __user *ufp, + struct user_vfp_exc __user *ufp_exc) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; + unsigned long fpexc; + int err = 0; + + /* Disable VFP to avoid corrupting the new thread state. */ + vfp_flush_hwstate(thread); + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. + */ + err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs, + sizeof(hwstate->fpregs)); + /* + * Copy the status and control register. + */ + __get_user_error(hwstate->fpscr, &ufp->fpscr, err); + + /* + * Sanitise and restore the exception registers. + */ + __get_user_error(fpexc, &ufp_exc->fpexc, err); + + /* Ensure the VFP is enabled. */ + fpexc |= FPEXC_EN; + + /* Ensure FPINST2 is invalid and the exception flag is cleared. */ + fpexc &= ~(FPEXC_EX | FPEXC_FP2V); + hwstate->fpexc = fpexc; + + __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); + __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); + + return err ? -EFAULT : 0; +} + +/* * VFP hardware can lose all context when a CPU goes offline. * As we will be running in SMP mode with CPU hotplug, we will save the * hardware state at every thread switch. We clear our held state when @@ -558,7 +651,7 @@ static int __init vfp_init(void) unsigned int cpu_arch = cpu_architecture(); if (cpu_arch >= CPU_ARCH_ARMv6) - vfp_enable(NULL); + on_each_cpu(vfp_enable, NULL, 1); /* * First check that there is a VFP that we can use. 
@@ -579,8 +672,6 @@ static int __init vfp_init(void) } else { hotcpu_notifier(vfp_hotplug, 0); - smp_call_function(vfp_enable, NULL, 1); - VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ printk("implementor %02x architecture %d part %02x variant %x rev %x\n", (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index 1633a6f306c0..85038f54354d 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -38,7 +38,7 @@ static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; -#endif +#endif /* CONFIG_RTC_DRV_BFIN */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 @@ -100,7 +100,7 @@ static struct platform_device bfin_uart0_device = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_UART0 */ #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { @@ -148,7 +148,7 @@ static struct platform_device bfin_uart1_device = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_UART1 */ #ifdef CONFIG_SERIAL_BFIN_UART2 static struct resource bfin_uart2_resources[] = { { @@ -196,8 +196,8 @@ static struct platform_device bfin_uart2_device = { .platform_data = &bfin_uart2_peripherals, /* Passed to driver */ }, }; -#endif -#endif +#endif /* CONFIG_SERIAL_BFIN_UART2 */ +#endif /* CONFIG_SERIAL_BFIN */ #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -224,7 +224,7 @@ static struct platform_device bfin_sir0_device = { .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; -#endif +#endif /* CONFIG_BFIN_SIR0 */ #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { @@ -249,7 +249,7 @@ static struct platform_device bfin_sir1_device = { .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; -#endif +#endif /* CONFIG_BFIN_SIR1 */ #ifdef CONFIG_BFIN_SIR2 static struct resource bfin_sir2_resources[] = { { @@ -274,8 +274,8 @@ static struct platform_device bfin_sir2_device = { .num_resources = ARRAY_SIZE(bfin_sir2_resources), .resource = bfin_sir2_resources, }; -#endif -#endif +#endif /* CONFIG_BFIN_SIR2 */ +#endif /* CONFIG_BFIN_SIR */ #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART @@ -311,7 +311,7 @@ static struct platform_device bfin_sport0_uart_device = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT0_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { @@ -345,7 +345,7 @@ static struct platform_device bfin_sport1_uart_device = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT1_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT2_UART static struct resource bfin_sport2_uart_resources[] = { { @@ -379,7 +379,7 @@ static struct platform_device bfin_sport2_uart_device = { .platform_data = &bfin_sport2_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT2_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT3_UART static struct resource bfin_sport3_uart_resources[] = { { @@ -413,8 +413,8 @@ static struct platform_device 
bfin_sport3_uart_device = { .platform_data = &bfin_sport3_peripherals, /* Passed to driver */ }, }; -#endif -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */ +#endif /* CONFIG_SERIAL_BFIN_SPORT */ #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) static unsigned short bfin_can_peripherals[] = { @@ -452,7 +452,7 @@ static struct platform_device bfin_can_device = { .platform_data = &bfin_can_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_CAN_BFIN */ /* * USB-LAN EzExtender board @@ -488,7 +488,7 @@ static struct platform_device smc91x_device = { .platform_data = &smc91x_info, }, }; -#endif +#endif /* CONFIG_SMC91X */ #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* all SPI peripherals info goes here */ @@ -518,7 +518,8 @@ static struct flash_platform_data bfin_spi_flash_data = { static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; -#endif +#endif /* CONFIG_MTD_M25P80 */ +#endif /* CONFIG_SPI_BFIN5XX */ #if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) #include <linux/spi/ad7879.h> @@ -535,7 +536,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { .gpio_export = 1, /* Export GPIO to gpiolib */ .gpio_base = -1, /* Dynamic allocation */ }; -#endif +#endif /* CONFIG_TOUCHSCREEN_AD7879 */ #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include <asm/bfin-lq035q1.h> @@ -564,7 +565,7 @@ static struct platform_device bfin_lq035q1_device = { .platform_data = &bfin_lq035q1_data, }, }; -#endif +#endif /* CONFIG_FB_BFIN_LQ035Q1 */ static struct spi_board_info bf538_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ @@ -579,7 +580,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, -#endif +#endif /* CONFIG_MTD_M25P80 */ #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) { .modalias = "ad7879", @@ -590,7 +591,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .chip_select = 1, .mode = SPI_CPHA | SPI_CPOL, }, -#endif +#endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */ #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", @@ -599,7 +600,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .chip_select = 2, .mode = SPI_CPHA | SPI_CPOL, }, -#endif +#endif /* CONFIG_FB_BFIN_LQ035Q1 */ #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", @@ -607,7 +608,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .bus_num = 0, .chip_select = 1, }, -#endif +#endif /* CONFIG_SPI_SPIDEV */ }; /* SPI (0) */ @@ -716,8 +717,6 @@ static struct platform_device bf538_spi_master2 = { }, }; -#endif /* spi master and devices */ - #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { @@ -759,8 +758,8 @@ static struct platform_device i2c_bfin_twi1_device = { .num_resources = ARRAY_SIZE(bfin_twi1_resource), .resource = bfin_twi1_resource, }; -#endif -#endif +#endif /* CONFIG_BF542 */ +#endif /* CONFIG_I2C_BLACKFIN_TWI */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/gpio_keys.h> diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h index 81c2e271d620..9b1a92b73f60 100644 --- 
a/arch/frv/include/asm/processor.h +++ b/arch/frv/include/asm/processor.h @@ -135,10 +135,6 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc) #define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp) -/* Allocation and freeing of basic task resources. */ -extern struct task_struct *alloc_task_struct_node(int node); -extern void free_task_struct(struct task_struct *p); - #define cpu_relax() barrier() /* data cache prefetch */ diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index f5104b7c52cd..463fb3bbe11e 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1174,7 +1174,7 @@ out: bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { - return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); + return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) diff --git a/arch/m68k/platform/520x/config.c b/arch/m68k/platform/520x/config.c index 235947844f27..09df4b89e8be 100644 --- a/arch/m68k/platform/520x/config.c +++ b/arch/m68k/platform/520x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m520x_qspi_init(void) { @@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void) writew(par, MCF_GPIO_PAR_UART); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m520x_uarts_init(); m520x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m520x_qspi_init(); #endif } diff --git a/arch/m68k/platform/523x/config.c b/arch/m68k/platform/523x/config.c index c8b405d5a961..d47dfd8f50a2 100644 --- a/arch/m68k/platform/523x/config.c +++ b/arch/m68k/platform/523x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m523x_qspi_init(void) { @@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void) writew(par, MCFGPIO_PAR_TIMER); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size) { mach_sched_init = hw_timer_init; m523x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m523x_qspi_init(); #endif } diff --git a/arch/m68k/platform/5249/config.c b/arch/m68k/platform/5249/config.c index bbf05135bb98..300e729a58d0 100644 --- a/arch/m68k/platform/5249/config.c +++ b/arch/m68k/platform/5249/config.c @@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = { /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m5249_qspi_init(void) { @@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void) mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size) #ifdef CONFIG_M5249C3 
m5249_smc91x_init(); #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m5249_qspi_init(); #endif } diff --git a/arch/m68k/platform/527x/config.c b/arch/m68k/platform/527x/config.c index f91a53294c35..b3cb378c5e94 100644 --- a/arch/m68k/platform/527x/config.c +++ b/arch/m68k/platform/527x/config.c @@ -23,7 +23,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m527x_qspi_init(void) { @@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void) #endif } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m527x_uarts_init(); m527x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m527x_qspi_init(); #endif } diff --git a/arch/m68k/platform/528x/config.c b/arch/m68k/platform/528x/config.c index d4492926614c..c5f11ba49be5 100644 --- a/arch/m68k/platform/528x/config.c +++ b/arch/m68k/platform/528x/config.c @@ -24,7 +24,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m528x_qspi_init(void) { @@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void) __raw_writeb(0x07, MCFGPIO_PQSPAR); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m528x_uarts_init(); m528x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m528x_qspi_init(); #endif } diff --git a/arch/m68k/platform/532x/config.c b/arch/m68k/platform/532x/config.c index 2bec3477b739..37082d02f2bd 100644 --- a/arch/m68k/platform/532x/config.c +++ b/arch/m68k/platform/532x/config.c @@ -30,7 +30,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m532x_qspi_init(void) { @@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void) writew(0x01f0, MCF_GPIO_PAR_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m532x_uarts_init(); m532x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m532x_qspi_init(); #endif diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c index 7af97362b95c..3aa77ddea89d 100644 --- a/arch/m68k/platform/coldfire/device.c +++ b/arch/m68k/platform/coldfire/device.c @@ -121,7 +121,7 @@ static struct platform_device mcf_fec1 = { #endif /* MCFFEC_BASE1 */ #endif /* CONFIG_FEC */ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) /* * The ColdFire QSPI module is an SPI protocol hardware block used * on a number of different ColdFire CPUs. 
@@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = { .resource = mcf_qspi_resources, .dev.platform_data = &mcf_qspi_data, }; -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, @@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = { &mcf_fec1, #endif #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) &mcf_qspi, #endif }; diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c index e21507052066..9c717bf98ffe 100644 --- a/arch/mips/ath79/dev-wmac.c +++ b/arch/mips/ath79/dev-wmac.c @@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void) static int ar933x_wmac_reset(void) { - ath79_device_reset_clear(AR933X_RESET_WMAC); ath79_device_reset_set(AR933X_RESET_WMAC); + ath79_device_reset_clear(AR933X_RESET_WMAC); return 0; } diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 807c97eed8a8..46c61edcdf7b 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -346,11 +346,8 @@ CONFIG_CHELSIO_T1=m CONFIG_IXGB=m CONFIG_S2IO=m CONFIG_MYRI10GE=m -CONFIG_TR=y CONFIG_IBMOL=m CONFIG_IBMLS=m -CONFIG_3C359=m -CONFIG_TMS380TR=m CONFIG_TMSPCI=m CONFIG_ABYSS=m CONFIG_USB_CATC=m @@ -376,7 +373,6 @@ CONFIG_PCMCIA_SMC91C92=m CONFIG_PCMCIA_XIRC2PS=m CONFIG_PCMCIA_AXNET=m CONFIG_ARCNET_COM20020_CS=m -CONFIG_PCMCIA_IBMTR=m CONFIG_WAN=y CONFIG_LANMEDIA=m CONFIG_HDLC=m diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h index a865c983c70a..5ad1a9c113c6 100644 --- a/arch/mips/include/asm/mach-jz4740/irq.h +++ b/arch/mips/include/asm/mach-jz4740/irq.h @@ -45,7 +45,7 @@ #define JZ4740_IRQ_LCD JZ4740_IRQ(30) /* 2nd-level interrupts */ -#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (X)) +#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (x)) #define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x)) #define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x)) diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 73c0d45798de..9b02cfba7449 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd); write_c0_xcontext((unsigned long) smp_processor_id() << 51); \ } while (0) - -static inline unsigned long get_current_pgd(void) -{ - return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL); -} - #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ /* diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 185ca00c4c84..d5a338a1739c 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE;
schedule(); @@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext(&regs, &frame->sf_sc); if (sig < 0) @@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) @@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, if (ret) return ret; - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + block_sigmask(ka, sig); return ret; } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 06b5da392e24..ac3b8d89aae5 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext32(&regs, &frame->sf_sc); if (sig < 0) @@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index ae29e894ab8d..86eb4b04631c 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) sigset_from_compat(&newset, &uset); sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 910dddf65e44..9cd69ad6aa02 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -24,6 +24,7 @@ #include <linux/sched.h> #include <linux/profile.h> #include <linux/smp.h> +#include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/bitops.h> #include <asm/processor.h> @@ -38,7 +39,6 @@ #include "internal.h" #ifdef CONFIG_HOTPLUG_CPU -#include <linux/cpu.h> #include <asm/cacheflush.h> static unsigned long sleep_mode[NR_CPUS]; @@ -874,10 +874,13 @@ static void __init smp_online(void) cpu = smp_processor_id(); - local_irq_enable(); + notify_cpu_starting(cpu); + ipi_call_lock(); set_cpu_online(cpu, true); - smp_wmb(); + ipi_call_unlock(); + + local_irq_enable(); } /** diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h index 4e9626836bab..d1d864b81bae 100644 --- a/arch/parisc/include/asm/hardware.h +++ b/arch/parisc/include/asm/hardware.h @@ -2,7 +2,6 @@ #define _PARISC_HARDWARE_H #include <linux/mod_devicetable.h> -#include <asm/pdc.h> #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID #define HVERSION_ANY_ID PA_HVERSION_ANY_ID @@ -95,12 +94,14 @@ struct bc_module { #define HPHW_MC 15 #define HPHW_FAULTY 31 +struct parisc_device_id; /* hardware.c: */ extern const char *parisc_hardware_description(struct parisc_device_id *id); extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); struct pci_dev; +struct hardware_path; /* drivers.c: */ extern struct parisc_device *alloc_pa_dev(unsigned long hpa, diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index a84cc1f925f6..4e0e7dbf0f3f 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -160,5 +160,11 @@ extern int npmem_ranges; #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> +#include <asm/pdc.h> + +#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) + +/* DEFINITION OF THE ZERO-PAGE (PAG0) */ +/* based on work by Jason Eckhardt (jason@equator.com) */ #endif /* _PARISC_PAGE_H */ diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 4ca510b3c6f8..7f0f2d23059d 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -343,8 +343,6 @@ #ifdef __KERNEL__ -#include <asm/page.h> /* for __PAGE_OFFSET */ - extern int pdc_type; /* Values for pdc_type */ @@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) { #endif /* __KERNEL__ */ -#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) - -/* DEFINITION OF THE ZERO-PAGE (PAG0) */ -/* based on work by Jason Eckhardt (jason@equator.com) */ - /* flags of the device_path */ #define PF_AUTOBOOT 0x80 #define PF_AUTOSEARCH 0x40 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 22dadeb58695..ee99f2339356 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -44,6 +44,8 @@ struct vm_area_struct; #endif /* !__ASSEMBLY__ */ +#include <asm/page.h> + #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h index c5edc60c059f..1ee7c82672c1 100644 --- a/arch/parisc/include/asm/prefetch.h +++
b/arch/parisc/include/asm/prefetch.h @@ -21,7 +21,12 @@ #define ARCH_HAS_PREFETCH static inline void prefetch(const void *addr) { - __asm__("ldw 0(%0), %%r0" : : "r" (addr)); + __asm__( +#ifndef CONFIG_PA20 + /* Need to avoid prefetch of NULL on PA7300LC */ + " extrw,u,= %0,31,32,%%r0\n" +#endif + " ldw 0(%0), %%r0" : : "r" (addr)); } /* LDD is a PA2.0 addition. */ diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 804aa28ab1d6..3516e0b27044 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -1,6 +1,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H +#include <asm/barrier.h> +#include <asm/ldcw.h> #include <asm/processor.h> #include <asm/spinlock_types.h> diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 6f0594439143..535034217021 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -581,7 +581,11 @@ */ cmpiclr,= 0x01,\tmp,%r0 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot +#ifdef CONFIG_64BIT depd,z \prot,8,7,\prot +#else + depw,z \prot,8,7,\prot +#endif /* * OK, it is in the temp alias region, check whether "from" or "to". * Check "subtle" note in pacache.S re: r23/r26. diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 93ff3d90edd1..5d7218ad885c 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -692,7 +692,7 @@ ENTRY(flush_icache_page_asm) /* Purge any old translation */ - pitlb (%sr0,%r28) + pitlb (%sr4,%r28) ldil L%icache_stride, %r1 ldw R%icache_stride(%r1), %r1 @@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm) sub %r25, %r1, %r25 -1: fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) + /* fic only has the type 26 form on PA1.1, requiring an + * explicit space specification, so use %sr4 */ +1: fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) cmpb,COND(<<) %r28, %r25,1b - fic,m %r1(%r28) + fic,m %r1(%sr4,%r28) sync bv %r0(%r2) - pitlb (%sr0,%r25) + pitlb (%sr4,%r25) .exit .procend diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index 4f004596a6e7..47341aa208f2 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -50,6 +50,7 @@ #include <linux/init.h> #include <linux/major.h> #include <linux/tty.h> +#include <asm/page.h> /* for PAGE0 */ #include <asm/pdc.h> /* for iodc_call() proto and friends */ static DEFINE_SPINLOCK(pdc_console_lock); @@ -104,7 +105,7 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) { - if (!tty->count) { + if (tty->count == 1) { del_timer_sync(&pdc_console_timer); tty_port_tty_set(&tty_port, NULL); } diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 0bb1d63907f8..4dc7b7942b4c 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -31,6 +31,7 @@ #include <linux/delay.h> #include <linux/bitops.h> #include <linux/ftrace.h> +#include <linux/cpu.h> #include <linux/atomic.h> #include 
<asm/current.h> @@ -295,8 +296,13 @@ smp_cpu_init(int cpunum) printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); machine_halt(); - } + } + + notify_cpu_starting(cpunum); + + ipi_call_lock(); set_cpu_online(cpunum, true); + ipi_call_unlock(); /* Initialise the idle task for this CPU */ atomic_inc(&init_mm.mm_count); diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 7c0774397b89..70e105d62423 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -29,6 +29,7 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/irq.h> +#include <asm/page.h> #include <asm/param.h> #include <asm/pdc.h> #include <asm/led.h> diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 548da3aa0a30..d58fc4e4149c 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -288,13 +288,6 @@ label##_hv: \ /* Exception addition: Hard disable interrupts */ #define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11) -/* Exception addition: Keep interrupt state */ -#define ENABLE_INTS \ - ld r11,PACAKMSR(r13); \ - ld r12,_MSR(r1); \ - rlwimi r11,r12,0,MSR_EE; \ - mtmsrd r11,1 - #define ADD_NVGPRS \ bl .save_nvgprs diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index e648af92ced1..0e40843a1c6e 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -18,10 +18,6 @@ #include <linux/atomic.h> -/* Define a way to iterate across irqs. */ -#define for_each_irq(i) \ - for ((i) = 0; (i) < NR_IRQS; ++(i)) - extern atomic_t ppc_n_lost_interrupts; /* This number is used when no interrupt has been assigned */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index aa795ccef294..fd07f43d6622 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s { u64 sdr1; u64 hior; u64 msr_mask; - u64 vsid_next; #ifdef CONFIG_PPC_BOOK3S_32 u32 vsid_pool[VSID_POOL_SIZE]; + u32 vsid_next; #else - u64 vsid_first; - u64 vsid_max; + u64 proto_vsid_first; + u64 proto_vsid_max; + u64 proto_vsid_next; #endif int context_id[SID_CONTEXTS]; diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index f8a7a1a1a9f4..ef2074c3e906 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite) fast_exc_return_irq: restore: /* - * This is the main kernel exit path, we first check if we - * have to change our interrupt state. + * This is the main kernel exit path. First we check if we + * are about to re-enable interrupts */ ld r5,SOFTE(r1) lbz r6,PACASOFTIRQEN(r13) - cmpwi cr1,r5,0 - cmpw cr0,r5,r6 - beq cr0,4f + cmpwi cr0,r5,0 + beq restore_irq_off - /* We do, handle disable first, which is easy */ - bne cr1,3f; - li r0,0 - stb r0,PACASOFTIRQEN(r13); - TRACE_DISABLE_INTS - b 4f + /* We are enabling, were we already enabled ? Yes, just return */ + cmpwi cr0,r6,1 + beq cr0,do_restore -3: /* + /* * We are about to soft-enable interrupts (we are hard disabled * at this point). We check if there's anything that needs to * be replayed first. @@ -626,7 +622,7 @@ restore_no_replay: /* * Final return path. BookE is handled in a different file */ -4: +do_restore: #ifdef CONFIG_PPC_BOOK3E b .exception_return_book3e #else @@ -700,6 +696,25 @@ fast_exception_return: #endif /* CONFIG_PPC_BOOK3E */ /* + * We are returning to a context with interrupts soft disabled. 
+ * + * However, we may also about to hard enable, so we need to + * make sure that in this case, we also clear PACA_IRQ_HARD_DIS + * or that bit can get out of sync and bad things will happen + */ +restore_irq_off: + ld r3,_MSR(r1) + lbz r7,PACAIRQHAPPENED(r13) + andi. r0,r3,MSR_EE + beq 1f + rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS + stb r7,PACAIRQHAPPENED(r13) +1: li r0,0 + stb r0,PACASOFTIRQEN(r13); + TRACE_DISABLE_INTS + b do_restore + + /* * Something did happen, check if a re-emit is needed * (this also clears paca->irq_happened) */ @@ -748,6 +763,9 @@ restore_check_irq_replay: #endif /* CONFIG_PPC_BOOK3E */ 1: b .ret_from_except /* What else to do here ? */ + + +3: do_work: #ifdef CONFIG_PREEMPT andi. r0,r3,MSR_PR /* Returning to user mode? */ @@ -767,16 +785,6 @@ do_work: SOFT_DISABLE_INTS(r3,r4) 1: bl .preempt_schedule_irq - /* Hard-disable interrupts again (and update PACA) */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 0 -#else - ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - li r0,PACA_IRQ_HARD_DIS - stb r0,PACAIRQHAPPENED(r13) - /* Re-test flags and eventually loop */ clrrdi r9,r1,THREAD_SHIFT ld r4,TI_FLAGS(r9) @@ -787,14 +795,6 @@ do_work: user_work: #endif /* CONFIG_PREEMPT */ - /* Enable interrupts */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 1 -#else - ori r10,r10,MSR_EE - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - andi. r0,r4,_TIF_NEED_RESCHED beq 1f bl .restore_interrupts diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index cb705fdbb458..8f880bc77c56 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -768,8 +768,8 @@ alignment_common: std r3,_DAR(r1) std r4,_DSISR(r1) bl .save_nvgprs + DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - ENABLE_INTS bl .alignment_exception b .ret_from_except diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5ec1b2354ca6..641da9e868ce 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en) */ if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) __hard_irq_disable(); +#ifdef CONFIG_TRACE_IRQFLAG + else { + /* + * We should already be hard disabled here. We had bugs + * where that wasn't the case so let's dbl check it and + * warn if we are wrong. Only do that when IRQ tracing + * is enabled as mfmsr() can be costly. + */ + if (WARN_ON(mfmsr() & MSR_EE)) + __hard_irq_disable(); + } +#endif /* CONFIG_TRACE_IRQFLAG */ + set_soft_enabled(0); /* @@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore); * if they are currently disabled. This is typically called before * schedule() or do_signal() when returning to userspace. We do it * in C to avoid the burden of dealing with lockdep etc... + * + * NOTE: This is called with interrupts hard disabled but not marked + * as such in paca->irq_happened, so we need to resync this. 
*/ void restore_interrupts(void) { - if (irqs_disabled()) + if (irqs_disabled()) { + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; local_irq_enable(); + } else + __hard_irq_enable(); } #endif /* CONFIG_PPC64 */ @@ -330,14 +349,10 @@ void migrate_irqs(void) alloc_cpumask_var(&mask, GFP_KERNEL); - for_each_irq(irq) { + for_each_irq_desc(irq, desc) { struct irq_data *data; struct irq_chip *chip; - desc = irq_to_desc(irq); - if (!desc) - continue; - data = irq_desc_get_irq_data(desc); if (irqd_is_per_cpu(data)) continue; diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index c957b1202bdc..5df777794403 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -23,14 +23,11 @@ void machine_kexec_mask_interrupts(void) { unsigned int i; + struct irq_desc *desc; - for_each_irq(i) { - struct irq_desc *desc = irq_to_desc(i); + for_each_irq_desc(i, desc) { struct irq_chip *chip; - if (!desc) - continue; - chip = irq_desc_get_chip(desc); if (!chip) continue; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6aa0c663e247..158972341a2d 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) addr, regs->nip, regs->link, code); } - if (!arch_irq_disabled_regs(regs)) + if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) local_irq_enable(); memset(&info, 0, sizeof(info)); @@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs) return; } - local_irq_enable(); + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); #ifdef CONFIG_MATH_EMULATION /* (reason & REASON_ILLEGAL) would be the obvious thing here, @@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs) { int sig, code, fixed = 0; + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) fixed = fix_alignment(regs); diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 6f87f39a1ac2..10fc8ec9d2a8 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) backwards_map = !backwards_map; /* Uh-oh ... out of mappings. Let's flush! 
*/ - if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) { - vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; + if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) { + vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first; memset(vcpu_book3s->sid_map, 0, sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); } - map->host_vsid = vcpu_book3s->vsid_next++; + map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M); map->guest_vsid = gvsid; map->valid = true; @@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) return -1; vcpu3s->context_id[0] = err; - vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; - vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; - vcpu3s->vsid_next = vcpu3s->vsid_first; + vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) + << USER_ESID_BITS) - 1; + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; + vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index ddc485a529f2..c3beaeef3f60 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, !(memslot->userspace_addr & (s - 1))) { start &= ~(s - 1); pgsize = s; + get_page(hpage); + put_page(page); page = hpage; } } @@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, err = 0; out: - if (got) { - if (PageHuge(page)) - page = compound_head(page); + if (got) put_page(page); - } return err; up_err: @@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, SetPageDirty(page); out_put: - if (page) - put_page(page); + if (page) { + /* + * We drop pages[0] here, not page because page might + * have been set to the head page of a compound, but + * we have to drop the reference on the correct tail + * page to match the get inside gup() + */ + put_page(pages[0]); + } return ret; out_unlock: @@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, pa = *physp; } page = pfn_to_page(pa >> PAGE_SHIFT); + get_page(page); } else { hva = gfn_to_hva_memslot(memslot, gfn); npages = get_user_pages_fast(hva, 1, 1, pages); @@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, page = compound_head(page); psize <<= compound_order(page); } - if (!kvm->arch.using_mmu_notifiers) - get_page(page); offset = gpa & (psize - 1); if (nb_ret) *nb_ret = psize - offset; @@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) { struct page *page = virt_to_page(va); - page = compound_head(page); put_page(page); } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 01294a5099dd..108d1f580177 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id) continue; pfn = physp[j] >> PAGE_SHIFT; page = pfn_to_page(pfn); - if (PageHuge(page)) - page = compound_head(page); SetPageDirty(page); put_page(page); } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index def880aea63a..cec4daddbf31 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) /* insert R and C bits 
from PTE */ rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); args[j] |= rcbits << (56 - 5); + hp[0] = 0; continue; } diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 0676ae249b9f..6e6e9cef34a8 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -197,7 +197,8 @@ kvmppc_interrupt: /* Save guest PC and MSR */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION - andi. r0,r12,0x2 + andi. r0, r12, 0x2 + cmpwi cr1, r0, 0 beq 1f mfspr r3,SPRN_HSRR0 mfspr r4,SPRN_HSRR1 @@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) beq ld_last_prev_inst cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT beq- ld_last_inst +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST + beq- ld_last_inst +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +#endif b no_ld_last_inst @@ -316,23 +323,17 @@ no_dcbz32_off: * Having set up SRR0/1 with the address where we want * to continue with relocation on (potentially in module * space), we either just go straight there with rfi[d], - * or we jump to an interrupt handler with bctr if there - * is an interrupt to be handled first. In the latter - * case, the rfi[d] at the end of the interrupt handler - * will get us back to where we want to continue. + * or we jump to an interrupt handler if there is an + * interrupt to be handled first. In the latter case, + * the rfi[d] at the end of the interrupt handler will + * get us back to where we want to continue. */ - cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL - beq 1f - cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER - beq 1f - cmpwi r12, BOOK3S_INTERRUPT_PERFMON -1: mtctr r12 - /* Register usage at this point: * * R1 = host R1 * R2 = host R2 + * R10 = raw exit handler id * R12 = exit handler id * R13 = shadow vcpu (32-bit) or PACA (64-bit) * SVCPU.* = guest * @@ -342,12 +343,25 @@ no_dcbz32_off: PPC_LL r6, HSTATE_HOST_MSR(r13) PPC_LL r8, HSTATE_VMHANDLER(r13) - /* Restore host msr -> SRR1 */ +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + beq cr1, 1f + mtspr SPRN_HSRR1, r6 + mtspr SPRN_HSRR0, r8 +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +#endif +1: /* Restore host msr -> SRR1 */ mtsrr1 r6 /* Load highmem handler address */ mtsrr0 r8 /* RFI into the highmem handler, or jump to interrupt handler */ - beqctr + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL + beqa BOOK3S_INTERRUPT_EXTERNAL + cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER + beqa BOOK3S_INTERRUPT_DECREMENTER + cmpwi r12, BOOK3S_INTERRUPT_PERFMON + beqa BOOK3S_INTERRUPT_PERFMON + RFI kvmppc_handler_trampoline_exit_end: diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index af1ab5e9a691..5c3cf2d04e41 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -48,7 +48,13 @@ /* * Assembly helpers from arch/powerpc/net/bpf_jit.S: */ -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; +#define DECLARE_LOAD_FUNC(func) \ + extern u8 func[], func##_negative_offset[], func##_positive_offset[] + +DECLARE_LOAD_FUNC(sk_load_word); +DECLARE_LOAD_FUNC(sk_load_half); +DECLARE_LOAD_FUNC(sk_load_byte); +DECLARE_LOAD_FUNC(sk_load_byte_msh); #define FUNCTION_DESCR_SIZE 24 diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S index ff4506e85cce..55ba3855a97f 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_64.S @@ -31,14 +31,13 @@ * then branch directly to slow_path_XXX if required. (In fact, could * load a spare GPR with the address of slow_path_generic and pass size * as an argument, making the call site a mtlr, li and bllr.) 
- * - * Technically, the "is addr < 0" check is unnecessary & slowing down - * the ABS path, as it's statically checked on generation. */ .globl sk_load_word sk_load_word: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_word_neg + .globl sk_load_word_positive_offset +sk_load_word_positive_offset: /* Are we accessing past headlen? */ subi r_scratch1, r_HL, 4 cmpd r_scratch1, r_addr @@ -51,7 +50,9 @@ sk_load_word: .globl sk_load_half sk_load_half: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_half_neg + .globl sk_load_half_positive_offset +sk_load_half_positive_offset: subi r_scratch1, r_HL, 2 cmpd r_scratch1, r_addr blt bpf_slow_path_half @@ -61,7 +62,9 @@ sk_load_half: .globl sk_load_byte sk_load_byte: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_byte_neg + .globl sk_load_byte_positive_offset +sk_load_byte_positive_offset: cmpd r_HL, r_addr ble bpf_slow_path_byte lbzx r_A, r_D, r_addr @@ -69,22 +72,20 @@ sk_load_byte: /* * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) - * r_addr is the offset value, already known positive + * r_addr is the offset value */ .globl sk_load_byte_msh sk_load_byte_msh: + cmpdi r_addr, 0 + blt bpf_slow_path_byte_msh_neg + .globl sk_load_byte_msh_positive_offset +sk_load_byte_msh_positive_offset: cmpd r_HL, r_addr ble bpf_slow_path_byte_msh lbzx r_X, r_D, r_addr rlwinm r_X, r_X, 2, 32-4-2, 31-2 blr -bpf_error: - /* Entered with cr0 = lt */ - li r3, 0 - /* Generated code will 'blt epilogue', returning 0. */ - blr - /* Call out to skb_copy_bits: * We'll need to back up our volatile regs first; we have * local variable space at r1+(BPF_PPC_STACK_BASIC). @@ -136,3 +137,84 @@ bpf_slow_path_byte_msh: lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) rlwinm r_X, r_X, 2, 32-4-2, 31-2 blr + +/* Call out to bpf_internal_load_pointer_neg_helper: + * We'll need to back up our volatile regs first; we have + * local variable space at r1+(BPF_PPC_STACK_BASIC). + * Allocate a new stack frame here to remain ABI-compliant in + * stashing LR. + */ +#define sk_negative_common(SIZE) \ + mflr r0; \ + std r0, 16(r1); \ + /* R3 goes in parameter space of caller's frame */ \ + std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ + /* R3 = r_skb, as passed */ \ + mr r4, r_addr; \ + li r5, SIZE; \ + bl bpf_internal_load_pointer_neg_helper; \ + /* R3 != 0 on success */ \ + addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ + ld r0, 16(r1); \ + ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + mtlr r0; \ + cmpldi r3, 0; \ + beq bpf_error_slow; /* cr0 = EQ */ \ + mr r_addr, r3; \ + ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + /* Great success! 
*/ + +bpf_slow_path_word_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_word_negative_offset +sk_load_word_negative_offset: + sk_negative_common(4) + lwz r_A, 0(r_addr) + blr + +bpf_slow_path_half_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_half_negative_offset +sk_load_half_negative_offset: + sk_negative_common(2) + lhz r_A, 0(r_addr) + blr + +bpf_slow_path_byte_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_byte_negative_offset +sk_load_byte_negative_offset: + sk_negative_common(1) + lbz r_A, 0(r_addr) + blr + +bpf_slow_path_byte_msh_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_byte_msh_negative_offset +sk_load_byte_msh_negative_offset: + sk_negative_common(1) + lbz r_X, 0(r_addr) + rlwinm r_X, r_X, 2, 32-4-2, 31-2 + blr + +bpf_error_slow: + /* fabricate a cr0 = lt */ + li r_scratch1, -1 + cmpdi r_scratch1, 0 +bpf_error: + /* Entered with cr0 = lt */ + li r3, 0 + /* Generated code will 'blt epilogue', returning 0. */ + blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 73619d3aeb6c..2dc8b1484845 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) PPC_BLR(); } +#define CHOOSE_LOAD_FUNC(K, func) \ + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) + /* Assemble the body code between the prologue & epilogue. */ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, struct codegen_context *ctx, @@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, /*** Absolute loads from packet header/data ***/ case BPF_S_LD_W_ABS: - func = sk_load_word; + func = CHOOSE_LOAD_FUNC(K, sk_load_word); goto common_load; case BPF_S_LD_H_ABS: - func = sk_load_half; + func = CHOOSE_LOAD_FUNC(K, sk_load_half); goto common_load; case BPF_S_LD_B_ABS: - func = sk_load_byte; + func = CHOOSE_LOAD_FUNC(K, sk_load_byte); common_load: - /* - * Load from [K]. Reference with the (negative) - * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported. - */ + /* Load from [K]. */ ctx->seen |= SEEN_DATAREF; - if ((int)K < 0) - return -ENOTSUPP; PPC_LI64(r_scratch1, func); PPC_MTLR(r_scratch1); PPC_LI32(r_addr, K); @@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, common_load_ind: /* * Load from [X + K]. Negative offsets are tested for - * in the helper functions, and result in a 'ret 0'. + * in the helper functions. */ ctx->seen |= SEEN_DATAREF | SEEN_XREG; PPC_LI64(r_scratch1, func); @@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; case BPF_S_LDX_B_MSH: - /* - * x86 version drops packet (RET 0) when K<0, whereas - * interpreter does allow K<0 (__load_pointer, special - * ancillary data). common_load returns ENOTSUPP if K<0, - * so we fall back to interpreter & filter works. 
- */ - func = sk_load_byte_msh; + func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); goto common_load; break; diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index d09f3e8e6867..85825b5401e5 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) pr_devel("axon_msi: woff %x roff %x msi %x\n", write_offset, msic->read_offset, msi); - if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { + if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { generic_handle_irq(msi); msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); } else { @@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) if (rc) return rc; - /* We rely on being able to stash a virq in a u16 */ - BUILD_BUG_ON(NR_IRQS > 65536); - list_for_each_entry(entry, &dev->msi_list, list) { virq = irq_create_direct_mapping(msic->irq_domain); if (virq == NO_IRQ) { @@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device) } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); - msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic); + /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ + msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); if (!msic->irq_domain) { printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", dn->full_name); diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index f9a48af335cb..8c6dc42ecf65 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c @@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void) { int i; - for (i = 1; i < NR_IRQS; i++) + for (i = 1; i < nr_irqs; i++) beat_destruct_irq_plug(i); } diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 66ad93de1d55..c4e630576ff2 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -57,9 +57,9 @@ static int max_real_irqs; static DEFINE_RAW_SPINLOCK(pmac_pic_lock); -#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) -static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; -static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; +/* The max irq number this driver deals with is 128; see max_irqs */ +static DECLARE_BITMAP(ppc_lost_interrupts, 128); +static DECLARE_BITMAP(ppc_cached_irq_mask, 128); static int pmac_irq_cascade = -1; static struct irq_domain *pmac_pic_host; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index aadbe4f6d537..178a5f300bc9 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -30,9 +30,9 @@ config PPC_SPLPAR two or more partitions. 
config EEH - bool "PCI Extended Error Handling (EEH)" if EXPERT + bool depends on PPC_PSERIES && PCI - default y if !EXPERT + default y config PSERIES_MSI bool diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index d3be961e2ae7..10386b676d87 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -51,8 +51,7 @@ static intctl_cpm2_t __iomem *cpm2_intctl; static struct irq_domain *cpm2_pic_host; -#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) -static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; +static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */ static const u_char irq_to_siureg[] = { 1, 1, 1, 1, 1, 1, 1, 1, diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index d5f5416be310..b724622c3a0b 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -18,69 +18,45 @@ extern int cpm_get_irq(struct pt_regs *regs); static struct irq_domain *mpc8xx_pic_host; -#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) -static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; +static unsigned long mpc8xx_cached_irq_mask; static sysconf8xx_t __iomem *siu_reg; -int cpm_get_irq(struct pt_regs *regs); +static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d) +{ + return 0x80000000 >> irqd_to_hwirq(d); +} static void mpc8xx_unmask_irq(struct irq_data *d) { - int bit, word; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - word = irq_nr >> 5; - - ppc_cached_irq_mask[word] |= (1 << (31-bit)); - out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); + mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask); } static void mpc8xx_mask_irq(struct irq_data *d) { - int bit, word; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - word = irq_nr >> 5; - - ppc_cached_irq_mask[word] &= ~(1 << (31-bit)); - out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); + mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask); } static void mpc8xx_ack(struct irq_data *d) { - int bit; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); + out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d)); } static void mpc8xx_end_irq(struct irq_data *d) { - int bit, word; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - word = irq_nr >> 5; - - ppc_cached_irq_mask[word] |= (1 << (31-bit)); - out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); + mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask); } static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) { - if (flow_type & IRQ_TYPE_EDGE_FALLING) { - irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d); + /* only external IRQ senses are programmable */ + if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) { unsigned int siel = in_be32(&siu_reg->sc_siel); - - /* only external IRQ senses are programmable */ - if ((hw & 1) == 0) { - siel |= (0x80000000 >> hw); - out_be32(&siu_reg->sc_siel, siel); - __irq_set_handler_locked(d->irq, handle_edge_irq); - } + siel |= mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_siel, siel); + __irq_set_handler_locked(d->irq, handle_edge_irq); } return 0; } @@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct, 
IRQ_TYPE_EDGE_FALLING, }; + if (intspec[0] > 0x1f) + return 0; + *out_hwirq = intspec[0]; if (intsize > 1 && intspec[1] < 4) *out_flags = map_pic_senses[intspec[1]]; diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c index 49a3ece1c6b3..702256a1ca11 100644 --- a/arch/powerpc/sysdev/scom.c +++ b/arch/powerpc/sysdev/scom.c @@ -22,6 +22,7 @@ #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/export.h> +#include <asm/debug.h> #include <asm/prom.h> #include <asm/scom.h> diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index ea5e204e3450..cd1d18db92c6 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void) { int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); unsigned int irq, virq; + struct irq_desc *desc; /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == xics_default_server) @@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void) /* Allow IPIs again... */ icp_ops->set_priority(DEFAULT_PRIORITY); - for_each_irq(virq) { - struct irq_desc *desc; + for_each_irq_desc(virq, desc) { struct irq_chip *chip; long server; unsigned long flags; @@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void) /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; - desc = irq_to_desc(virq); /* We only need to migrate enabled IRQS */ - if (!desc || !desc->action) + if (!desc->action) continue; if (desc->irq_data.domain != xics_host) continue; diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 37f2f4a55231..f4c1c20bcdf6 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -11,7 +11,7 @@ #include <linux/types.h> #include <asm/cmpxchg.h> -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 324eef93c900..e99b104d967a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -86,7 +86,7 @@ static noinline int vmalloc_fault(unsigned long address) pte_t *pte_k; /* Make sure we are in vmalloc/module/P3 area: */ - if (!(address >= VMALLOC_START && address < P3_ADDR_MAX)) + if (!(address >= P3SEG && address < P3_ADDR_MAX)) return -1; /* diff --git a/arch/sparc/Kbuild b/arch/sparc/Kbuild index 27b540d2d55e..5cd01161fd00 100644 --- a/arch/sparc/Kbuild +++ b/arch/sparc/Kbuild @@ -5,3 +5,4 @@ obj-y += kernel/ obj-y += mm/ obj-y += math-emu/ +obj-y += net/ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 871de4ec3488..6c49ed2ee786 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -30,6 +30,7 @@ config SPARC select USE_GENERIC_SMP_HELPERS if SMP select GENERIC_PCI_IOMAP select HAVE_NMI_WATCHDOG if SPARC64 + select HAVE_BPF_JIT if NET config SPARC32 def_bool !64BIT diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 38d48a59879c..9708851a8b9f 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -269,4 +269,4 @@ static int __init sunfire_init(void) return 0; } -subsys_initcall(sunfire_init); +fs_initcall(sunfire_init); diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b57a5942ba64..874162a11ceb 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -495,11 +495,11 @@ xcall_fetch_glob_regs: stx %o7, [%g1 + GR_SNAP_O7] stx %i7, [%g1 + GR_SNAP_I7] 
/* Don't try this at home kids... */ - rdpr %cwp, %g2 - sub %g2, 1, %g7 + rdpr %cwp, %g3 + sub %g3, 1, %g7 wrpr %g7, %cwp mov %i7, %g7 - wrpr %g2, %cwp + wrpr %g3, %cwp stx %g7, [%g1 + GR_SNAP_RPC] sethi %hi(trap_block), %g7 or %g7, %lo(trap_block), %g7 diff --git a/arch/sparc/net/Makefile b/arch/sparc/net/Makefile new file mode 100644 index 000000000000..1306a58ac541 --- /dev/null +++ b/arch/sparc/net/Makefile @@ -0,0 +1,4 @@ +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o diff --git a/arch/sparc/net/bpf_jit.h b/arch/sparc/net/bpf_jit.h new file mode 100644 index 000000000000..33d6b375ff12 --- /dev/null +++ b/arch/sparc/net/bpf_jit.h @@ -0,0 +1,68 @@ +#ifndef _BPF_JIT_H +#define _BPF_JIT_H + +/* Conventions: + * %g1 : temporary + * %g2 : Secondary temporary used by SKB data helper stubs. + * %g3 : packet offset passed into SKB data helper stubs. + * %o0 : pointer to skb (first argument given to JIT function) + * %o1 : BPF A accumulator + * %o2 : BPF X accumulator + * %o3 : Holds saved %o7 so we can call helper functions without needing + * to allocate a register window. + * %o4 : skb->len - skb->data_len + * %o5 : skb->data + */ + +#ifndef __ASSEMBLER__ +#define G0 0x00 +#define G1 0x01 +#define G3 0x03 +#define G6 0x06 +#define O0 0x08 +#define O1 0x09 +#define O2 0x0a +#define O3 0x0b +#define O4 0x0c +#define O5 0x0d +#define SP 0x0e +#define O7 0x0f +#define FP 0x1e + +#define r_SKB O0 +#define r_A O1 +#define r_X O2 +#define r_saved_O7 O3 +#define r_HEADLEN O4 +#define r_SKB_DATA O5 +#define r_TMP G1 +#define r_TMP2 G2 +#define r_OFF G3 + +/* assembly code in arch/sparc/net/bpf_jit_asm.S */ +extern u32 bpf_jit_load_word[]; +extern u32 bpf_jit_load_half[]; +extern u32 bpf_jit_load_byte[]; +extern u32 bpf_jit_load_byte_msh[]; +extern u32 bpf_jit_load_word_positive_offset[]; +extern u32 bpf_jit_load_half_positive_offset[]; +extern u32 bpf_jit_load_byte_positive_offset[]; +extern u32 bpf_jit_load_byte_msh_positive_offset[]; +extern u32 bpf_jit_load_word_negative_offset[]; +extern u32 bpf_jit_load_half_negative_offset[]; +extern u32 bpf_jit_load_byte_negative_offset[]; +extern u32 bpf_jit_load_byte_msh_negative_offset[]; + +#else +#define r_SKB %o0 +#define r_A %o1 +#define r_X %o2 +#define r_saved_O7 %o3 +#define r_HEADLEN %o4 +#define r_SKB_DATA %o5 +#define r_TMP %g1 +#define r_TMP2 %g2 +#define r_OFF %g3 +#endif + +#endif /* _BPF_JIT_H */ diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm.S new file mode 100644 index 000000000000..9d016c7017f7 --- /dev/null +++ b/arch/sparc/net/bpf_jit_asm.S @@ -0,0 +1,205 @@ +#include <asm/ptrace.h> + +#include "bpf_jit.h" + +#ifdef CONFIG_SPARC64 +#define SAVE_SZ 176 +#define SCRATCH_OFF STACK_BIAS + 128 +#define BE_PTR(label) be,pn %xcc, label +#else +#define SAVE_SZ 96 +#define SCRATCH_OFF 72 +#define BE_PTR(label) be label +#endif + +#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ + + .text + .globl bpf_jit_load_word +bpf_jit_load_word: + cmp r_OFF, 0 + bl bpf_slow_path_word_neg + nop + .globl bpf_jit_load_word_positive_offset +bpf_jit_load_word_positive_offset: + sub r_HEADLEN, r_OFF, r_TMP + cmp r_TMP, 3 + ble bpf_slow_path_word + add r_SKB_DATA, r_OFF, r_TMP + andcc r_TMP, 3, %g0 + bne load_word_unaligned + nop + retl + ld [r_TMP], r_A +load_word_unaligned: + ldub [r_TMP + 0x0], r_OFF + ldub [r_TMP + 0x1], r_TMP2 + sll r_OFF, 8, r_OFF + or r_OFF, r_TMP2, r_OFF + ldub [r_TMP + 0x2], r_TMP2 + sll r_OFF, 8, r_OFF + or r_OFF, r_TMP2, r_OFF + ldub [r_TMP + 0x3], 
r_TMP2 + sll r_OFF, 8, r_OFF + retl + or r_OFF, r_TMP2, r_A + + .globl bpf_jit_load_half +bpf_jit_load_half: + cmp r_OFF, 0 + bl bpf_slow_path_half_neg + nop + .globl bpf_jit_load_half_positive_offset +bpf_jit_load_half_positive_offset: + sub r_HEADLEN, r_OFF, r_TMP + cmp r_TMP, 1 + ble bpf_slow_path_half + add r_SKB_DATA, r_OFF, r_TMP + andcc r_TMP, 1, %g0 + bne load_half_unaligned + nop + retl + lduh [r_TMP], r_A +load_half_unaligned: + ldub [r_TMP + 0x0], r_OFF + ldub [r_TMP + 0x1], r_TMP2 + sll r_OFF, 8, r_OFF + retl + or r_OFF, r_TMP2, r_A + + .globl bpf_jit_load_byte +bpf_jit_load_byte: + cmp r_OFF, 0 + bl bpf_slow_path_byte_neg + nop + .globl bpf_jit_load_byte_positive_offset +bpf_jit_load_byte_positive_offset: + cmp r_OFF, r_HEADLEN + bge bpf_slow_path_byte + nop + retl + ldub [r_SKB_DATA + r_OFF], r_A + + .globl bpf_jit_load_byte_msh +bpf_jit_load_byte_msh: + cmp r_OFF, 0 + bl bpf_slow_path_byte_msh_neg + nop + .globl bpf_jit_load_byte_msh_positive_offset +bpf_jit_load_byte_msh_positive_offset: + cmp r_OFF, r_HEADLEN + bge bpf_slow_path_byte_msh + nop + ldub [r_SKB_DATA + r_OFF], r_OFF + and r_OFF, 0xf, r_OFF + retl + sll r_OFF, 2, r_X + +#define bpf_slow_path_common(LEN) \ + save %sp, -SAVE_SZ, %sp; \ + mov %i0, %o0; \ + mov r_OFF, %o1; \ + add %fp, SCRATCH_OFF, %o2; \ + call skb_copy_bits; \ + mov (LEN), %o3; \ + cmp %o0, 0; \ + restore; + +bpf_slow_path_word: + bpf_slow_path_common(4) + bl bpf_error + ld [%sp + SCRATCH_OFF], r_A + retl + nop +bpf_slow_path_half: + bpf_slow_path_common(2) + bl bpf_error + lduh [%sp + SCRATCH_OFF], r_A + retl + nop +bpf_slow_path_byte: + bpf_slow_path_common(1) + bl bpf_error + ldub [%sp + SCRATCH_OFF], r_A + retl + nop +bpf_slow_path_byte_msh: + bpf_slow_path_common(1) + bl bpf_error + ldub [%sp + SCRATCH_OFF], r_A + and r_OFF, 0xf, r_OFF + retl + sll r_OFF, 2, r_X + +#define bpf_negative_common(LEN) \ + save %sp, -SAVE_SZ, %sp; \ + mov %i0, %o0; \ + mov r_OFF, %o1; \ + call bpf_internal_load_pointer_neg_helper; \ + mov (LEN), %o2; \ + mov %o0, r_TMP; \ + cmp %o0, 0; \ + BE_PTR(bpf_error); \ + restore; + +bpf_slow_path_word_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_word_negative_offset +bpf_jit_load_word_negative_offset: + bpf_negative_common(4) + andcc r_TMP, 3, %g0 + bne load_word_unaligned + nop + retl + ld [r_TMP], r_A + +bpf_slow_path_half_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_half_negative_offset +bpf_jit_load_half_negative_offset: + bpf_negative_common(2) + andcc r_TMP, 1, %g0 + bne load_half_unaligned + nop + retl + lduh [r_TMP], r_A + +bpf_slow_path_byte_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_byte_negative_offset +bpf_jit_load_byte_negative_offset: + bpf_negative_common(1) + retl + ldub [r_TMP], r_A + +bpf_slow_path_byte_msh_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_byte_msh_negative_offset +bpf_jit_load_byte_msh_negative_offset: + bpf_negative_common(1) + ldub [r_TMP], r_OFF + and r_OFF, 0xf, r_OFF + retl + sll r_OFF, 2, r_X + +bpf_error: + /* Make the JIT program return zero. The JIT epilogue + * stores away the original %o7 into r_saved_O7. The + * normal leaf function return is to use "retl" which + * would evalute to "jmpl %o7 + 8, %g0" but we want to + * use the saved value thus the sequence you see here. 
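+ * The JIT prologue in bpf_jit_comp.c copies the filter's original
+ * return address into r_saved_O7 via emit_reg_move(O7, r_saved_O7),
+ * so the jmpl below returns straight to the JITed filter's caller,
+ * with %o0 cleared to zero in the delay slot.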
+ */ + jmpl r_saved_O7 + 8, %g0 + clr %o0 diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c new file mode 100644 index 000000000000..1a69244e785b --- /dev/null +++ b/arch/sparc/net/bpf_jit_comp.c @@ -0,0 +1,802 @@ +#include <linux/moduleloader.h> +#include <linux/workqueue.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/cache.h> + +#include <asm/cacheflush.h> +#include <asm/ptrace.h> + +#include "bpf_jit.h" + +int bpf_jit_enable __read_mostly; + +static inline bool is_simm13(unsigned int value) +{ + return value + 0x1000 < 0x2000; +} + +static void bpf_flush_icache(void *start_, void *end_) +{ +#ifdef CONFIG_SPARC64 + /* Cheetah's I-cache is fully coherent. */ + if (tlb_type == spitfire) { + unsigned long start = (unsigned long) start_; + unsigned long end = (unsigned long) end_; + + start &= ~7UL; + end = (end + 7UL) & ~7UL; + while (start < end) { + flushi(start); + start += 32; + } + } +#endif +} + +#define SEEN_DATAREF 1 /* might call external helpers */ +#define SEEN_XREG 2 /* ebx is used */ +#define SEEN_MEM 4 /* use mem[] for temporary storage */ + +#define S13(X) ((X) & 0x1fff) +#define IMMED 0x00002000 +#define RD(X) ((X) << 25) +#define RS1(X) ((X) << 14) +#define RS2(X) ((X)) +#define OP(X) ((X) << 30) +#define OP2(X) ((X) << 22) +#define OP3(X) ((X) << 19) +#define COND(X) ((X) << 25) +#define F1(X) OP(X) +#define F2(X, Y) (OP(X) | OP2(Y)) +#define F3(X, Y) (OP(X) | OP3(Y)) + +#define CONDN COND(0x0) +#define CONDE COND(0x1) +#define CONDLE COND(0x2) +#define CONDL COND(0x3) +#define CONDLEU COND(0x4) +#define CONDCS COND(0x5) +#define CONDNEG COND(0x6) +#define CONDVC COND(0x7) +#define CONDA COND(0x8) +#define CONDNE COND(0x9) +#define CONDG COND(0xa) +#define CONDGE COND(0xb) +#define CONDGU COND(0xc) +#define CONDCC COND(0xd) +#define CONDPOS COND(0xe) +#define CONDVS COND(0xf) + +#define CONDGEU CONDCC +#define CONDLU CONDCS + +#define WDISP22(X) (((X) >> 2) & 0x3fffff) + +#define BA (F2(0, 2) | CONDA) +#define BGU (F2(0, 2) | CONDGU) +#define BLEU (F2(0, 2) | CONDLEU) +#define BGEU (F2(0, 2) | CONDGEU) +#define BLU (F2(0, 2) | CONDLU) +#define BE (F2(0, 2) | CONDE) +#define BNE (F2(0, 2) | CONDNE) + +#ifdef CONFIG_SPARC64 +#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20)) +#else +#define BNE_PTR BNE +#endif + +#define SETHI(K, REG) \ + (F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff)) +#define OR_LO(K, REG) \ + (F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG)) + +#define ADD F3(2, 0x00) +#define AND F3(2, 0x01) +#define ANDCC F3(2, 0x11) +#define OR F3(2, 0x02) +#define SUB F3(2, 0x04) +#define SUBCC F3(2, 0x14) +#define MUL F3(2, 0x0a) /* umul */ +#define DIV F3(2, 0x0e) /* udiv */ +#define SLL F3(2, 0x25) +#define SRL F3(2, 0x26) +#define JMPL F3(2, 0x38) +#define CALL F1(1) +#define BR F2(0, 0x01) +#define RD_Y F3(2, 0x28) +#define WR_Y F3(2, 0x30) + +#define LD32 F3(3, 0x00) +#define LD8 F3(3, 0x01) +#define LD16 F3(3, 0x02) +#define LD64 F3(3, 0x0b) +#define ST32 F3(3, 0x04) + +#ifdef CONFIG_SPARC64 +#define LDPTR LD64 +#define BASE_STACKFRAME 176 +#else +#define LDPTR LD32 +#define BASE_STACKFRAME 96 +#endif + +#define LD32I (LD32 | IMMED) +#define LD8I (LD8 | IMMED) +#define LD16I (LD16 | IMMED) +#define LD64I (LD64 | IMMED) +#define LDPTRI (LDPTR | IMMED) +#define ST32I (ST32 | IMMED) + +#define emit_nop() \ +do { \ + *prog++ = SETHI(0, G0); \ +} while (0) + +#define emit_neg() \ +do { /* sub %g0, r_A, r_A */ \ + *prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A); \ +} while (0) + +#define emit_reg_move(FROM, 
TO) \ +do { /* or %g0, FROM, TO */ \ + *prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO); \ +} while (0) + +#define emit_clear(REG) \ +do { /* or %g0, %g0, REG */ \ + *prog++ = OR | RS1(G0) | RS2(G0) | RD(REG); \ +} while (0) + +#define emit_set_const(K, REG) \ +do { /* sethi %hi(K), REG */ \ + *prog++ = SETHI(K, REG); \ + /* or REG, %lo(K), REG */ \ + *prog++ = OR_LO(K, REG); \ +} while (0) + + /* Emit + * + * OP r_A, r_X, r_A + */ +#define emit_alu_X(OPCODE) \ +do { \ + seen |= SEEN_XREG; \ + *prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A); \ +} while (0) + + /* Emit either: + * + * OP r_A, K, r_A + * + * or + * + * sethi %hi(K), r_TMP + * or r_TMP, %lo(K), r_TMP + * OP r_A, r_TMP, r_A + * + * depending upon whether K fits in a signed 13-bit + * immediate instruction field. Emit nothing if K + * is zero. + */ +#define emit_alu_K(OPCODE, K) \ +do { \ + if (K) { \ + unsigned int _insn = OPCODE; \ + _insn |= RS1(r_A) | RD(r_A); \ + if (is_simm13(K)) { \ + *prog++ = _insn | IMMED | S13(K); \ + } else { \ + emit_set_const(K, r_TMP); \ + *prog++ = _insn | RS2(r_TMP); \ + } \ + } \ +} while (0) + +#define emit_loadimm(K, DEST) \ +do { \ + if (is_simm13(K)) { \ + /* or %g0, K, DEST */ \ + *prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST); \ + } else { \ + emit_set_const(K, DEST); \ + } \ +} while (0) + +#define emit_loadptr(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *)); \ + *prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define emit_load32(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32)); \ + *prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define emit_load16(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16)); \ + *prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define __emit_load8(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + *prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define emit_load8(BASE, STRUCT, FIELD, DEST) \ +do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \ + __emit_load8(BASE, STRUCT, FIELD, DEST); \ +} while (0) + +#define emit_ldmem(OFF, DEST) \ +do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \ +} while (0) + +#define emit_stmem(OFF, SRC) \ +do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \ +} while (0) + +#ifdef CONFIG_SMP +#ifdef CONFIG_SPARC64 +#define emit_load_cpu(REG) \ + emit_load16(G6, struct thread_info, cpu, REG) +#else +#define emit_load_cpu(REG) \ + emit_load32(G6, struct thread_info, cpu, REG) +#endif +#else +#define emit_load_cpu(REG) emit_clear(REG) +#endif + +#define emit_skb_loadptr(FIELD, DEST) \ + emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST) +#define emit_skb_load32(FIELD, DEST) \ + emit_load32(r_SKB, struct sk_buff, FIELD, DEST) +#define emit_skb_load16(FIELD, DEST) \ + emit_load16(r_SKB, struct sk_buff, FIELD, DEST) +#define __emit_skb_load8(FIELD, DEST) \ + __emit_load8(r_SKB, struct sk_buff, FIELD, DEST) +#define emit_skb_load8(FIELD, DEST) \ + emit_load8(r_SKB, struct sk_buff, FIELD, DEST) + +#define emit_jmpl(BASE, IMM_OFF, LREG) \ + *prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG)) + +#define emit_call(FUNC) \ +do { void *_here = image + addrs[i] - 8; \ + unsigned int _off = (void *)(FUNC) - _here; \ + *prog++ 
= CALL | (((_off) >> 2) & 0x3fffffff); \ + emit_nop(); \ +} while (0) + +#define emit_branch(BR_OPC, DEST) \ +do { unsigned int _here = addrs[i] - 8; \ + *prog++ = BR_OPC | WDISP22((DEST) - _here); \ +} while (0) + +#define emit_branch_off(BR_OPC, OFF) \ +do { *prog++ = BR_OPC | WDISP22(OFF); \ +} while (0) + +#define emit_jump(DEST) emit_branch(BA, DEST) + +#define emit_read_y(REG) *prog++ = RD_Y | RD(REG) +#define emit_write_y(REG) *prog++ = WR_Y | IMMED | RS1(REG) | S13(0) + +#define emit_cmp(R1, R2) \ + *prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0)) + +#define emit_cmpi(R1, IMM) \ + *prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0)); + +#define emit_btst(R1, R2) \ + *prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0)) + +#define emit_btsti(R1, IMM) \ + *prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0)); + +#define emit_sub(R1, R2, R3) \ + *prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3)) + +#define emit_subi(R1, IMM, R3) \ + *prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3)) + +#define emit_add(R1, R2, R3) \ + *prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3)) + +#define emit_addi(R1, IMM, R3) \ + *prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3)) + +#define emit_alloc_stack(SZ) \ + *prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP)) + +#define emit_release_stack(SZ) \ + *prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP)) + +/* A note about branch offset calculations. The addrs[] array, + * indexed by BPF instruction, records the address after all the + * sparc instructions emitted for that BPF instruction. + * + * The most common case is to emit a branch at the end of such + * a code sequence. So this would be two instructions, the + * branch and it's delay slot. + * + * Therefore by default the branch emitters calculate the branch + * offset field as: + * + * destination - (addrs[i] - 8) + * + * This "addrs[i] - 8" is the address of the branch itself or + * what "." would be in assembler notation. The "8" part is + * how we take into consideration the branch and it's delay + * slot mentioned above. + * + * Sometimes we need to emit a branch earlier in the code + * sequence. And in these situations we adjust "destination" + * to accomodate this difference. For example, if we needed + * to emit a branch (and it's delay slot) right before the + * final instruction emitted for a BPF opcode, we'd use + * "destination + 4" instead of just plain "destination" above. + * + * This is why you see all of these funny emit_branch() and + * emit_jump() calls with adjusted offsets. + */ + +void bpf_jit_compile(struct sk_filter *fp) +{ + unsigned int cleanup_addr, proglen, oldproglen = 0; + u32 temp[8], *prog, *func, seen = 0, pass; + const struct sock_filter *filter = fp->insns; + int i, flen = fp->len, pc_ret0 = -1; + unsigned int *addrs; + void *image; + + if (!bpf_jit_enable) + return; + + addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); + if (addrs == NULL) + return; + + /* Before first pass, make a rough estimation of addrs[] + * each bpf instruction is translated to less than 64 bytes + */ + for (proglen = 0, i = 0; i < flen; i++) { + proglen += 64; + addrs[i] = proglen; + } + cleanup_addr = proglen; /* epilogue address */ + image = NULL; + for (pass = 0; pass < 10; pass++) { + u8 seen_or_pass0 = (pass == 0) ? 
(SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; + + /* no prologue/epilogue for trivial filters (RET something) */ + proglen = 0; + prog = temp; + + /* Prologue */ + if (seen_or_pass0) { + if (seen_or_pass0 & SEEN_MEM) { + unsigned int sz = BASE_STACKFRAME; + sz += BPF_MEMWORDS * sizeof(u32); + emit_alloc_stack(sz); + } + + /* Make sure we dont leek kernel memory. */ + if (seen_or_pass0 & SEEN_XREG) + emit_clear(r_X); + + /* If this filter needs to access skb data, + * load %o4 and %o5 with: + * %o4 = skb->len - skb->data_len + * %o5 = skb->data + * And also back up %o7 into r_saved_O7 so we can + * invoke the stubs using 'call'. + */ + if (seen_or_pass0 & SEEN_DATAREF) { + emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN); + emit_load32(r_SKB, struct sk_buff, data_len, r_TMP); + emit_sub(r_HEADLEN, r_TMP, r_HEADLEN); + emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA); + } + } + emit_reg_move(O7, r_saved_O7); + + switch (filter[0].code) { + case BPF_S_RET_K: + case BPF_S_LD_W_LEN: + case BPF_S_ANC_PROTOCOL: + case BPF_S_ANC_PKTTYPE: + case BPF_S_ANC_IFINDEX: + case BPF_S_ANC_MARK: + case BPF_S_ANC_RXHASH: + case BPF_S_ANC_CPU: + case BPF_S_ANC_QUEUE: + case BPF_S_LD_W_ABS: + case BPF_S_LD_H_ABS: + case BPF_S_LD_B_ABS: + /* The first instruction sets the A register (or is + * a "RET 'constant'") + */ + break; + default: + /* Make sure we dont leak kernel information to the + * user. + */ + emit_clear(r_A); /* A = 0 */ + } + + for (i = 0; i < flen; i++) { + unsigned int K = filter[i].k; + unsigned int t_offset; + unsigned int f_offset; + u32 t_op, f_op; + int ilen; + + switch (filter[i].code) { + case BPF_S_ALU_ADD_X: /* A += X; */ + emit_alu_X(ADD); + break; + case BPF_S_ALU_ADD_K: /* A += K; */ + emit_alu_K(ADD, K); + break; + case BPF_S_ALU_SUB_X: /* A -= X; */ + emit_alu_X(SUB); + break; + case BPF_S_ALU_SUB_K: /* A -= K */ + emit_alu_K(SUB, K); + break; + case BPF_S_ALU_AND_X: /* A &= X */ + emit_alu_X(AND); + break; + case BPF_S_ALU_AND_K: /* A &= K */ + emit_alu_K(AND, K); + break; + case BPF_S_ALU_OR_X: /* A |= X */ + emit_alu_X(OR); + break; + case BPF_S_ALU_OR_K: /* A |= K */ + emit_alu_K(OR, K); + break; + case BPF_S_ALU_LSH_X: /* A <<= X */ + emit_alu_X(SLL); + break; + case BPF_S_ALU_LSH_K: /* A <<= K */ + emit_alu_K(SLL, K); + break; + case BPF_S_ALU_RSH_X: /* A >>= X */ + emit_alu_X(SRL); + break; + case BPF_S_ALU_RSH_K: /* A >>= K */ + emit_alu_K(SRL, K); + break; + case BPF_S_ALU_MUL_X: /* A *= X; */ + emit_alu_X(MUL); + break; + case BPF_S_ALU_MUL_K: /* A *= K */ + emit_alu_K(MUL, K); + break; + case BPF_S_ALU_DIV_K: /* A /= K */ + emit_alu_K(MUL, K); + emit_read_y(r_A); + break; + case BPF_S_ALU_DIV_X: /* A /= X; */ + emit_cmpi(r_X, 0); + if (pc_ret0 > 0) { + t_offset = addrs[pc_ret0 - 1]; +#ifdef CONFIG_SPARC32 + emit_branch(BE, t_offset + 20); +#else + emit_branch(BE, t_offset + 8); +#endif + emit_nop(); /* delay slot */ + } else { + emit_branch_off(BNE, 16); + emit_nop(); +#ifdef CONFIG_SPARC32 + emit_jump(cleanup_addr + 20); +#else + emit_jump(cleanup_addr + 8); +#endif + emit_clear(r_A); + } + emit_write_y(G0); +#ifdef CONFIG_SPARC32 + /* The Sparc v8 architecture requires + * three instructions between a %y + * register write and the first use. 
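+ * For this BPF_S_ALU_DIV_X case the emitted order is therefore:
+ * emit_write_y(G0) above, the three emit_nop() padding
+ * instructions below, and only then emit_alu_X(DIV), the first
+ * instruction that reads %y.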
+ */ + emit_nop(); + emit_nop(); + emit_nop(); +#endif + emit_alu_X(DIV); + break; + case BPF_S_ALU_NEG: + emit_neg(); + break; + case BPF_S_RET_K: + if (!K) { + if (pc_ret0 == -1) + pc_ret0 = i; + emit_clear(r_A); + } else { + emit_loadimm(K, r_A); + } + /* Fallthrough */ + case BPF_S_RET_A: + if (seen_or_pass0) { + if (i != flen - 1) { + emit_jump(cleanup_addr); + emit_nop(); + break; + } + if (seen_or_pass0 & SEEN_MEM) { + unsigned int sz = BASE_STACKFRAME; + sz += BPF_MEMWORDS * sizeof(u32); + emit_release_stack(sz); + } + } + /* jmpl %r_saved_O7 + 8, %g0 */ + emit_jmpl(r_saved_O7, 8, G0); + emit_reg_move(r_A, O0); /* delay slot */ + break; + case BPF_S_MISC_TAX: + seen |= SEEN_XREG; + emit_reg_move(r_A, r_X); + break; + case BPF_S_MISC_TXA: + seen |= SEEN_XREG; + emit_reg_move(r_X, r_A); + break; + case BPF_S_ANC_CPU: + emit_load_cpu(r_A); + break; + case BPF_S_ANC_PROTOCOL: + emit_skb_load16(protocol, r_A); + break; +#if 0 + /* GCC won't let us take the address of + * a bit field even though we very much + * know what we are doing here. + */ + case BPF_S_ANC_PKTTYPE: + __emit_skb_load8(pkt_type, r_A); + emit_alu_K(SRL, 5); + break; +#endif + case BPF_S_ANC_IFINDEX: + emit_skb_loadptr(dev, r_A); + emit_cmpi(r_A, 0); + emit_branch(BNE_PTR, cleanup_addr + 4); + emit_nop(); + emit_load32(r_A, struct net_device, ifindex, r_A); + break; + case BPF_S_ANC_MARK: + emit_skb_load32(mark, r_A); + break; + case BPF_S_ANC_QUEUE: + emit_skb_load16(queue_mapping, r_A); + break; + case BPF_S_ANC_HATYPE: + emit_skb_loadptr(dev, r_A); + emit_cmpi(r_A, 0); + emit_branch(BNE_PTR, cleanup_addr + 4); + emit_nop(); + emit_load16(r_A, struct net_device, type, r_A); + break; + case BPF_S_ANC_RXHASH: + emit_skb_load32(rxhash, r_A); + break; + + case BPF_S_LD_IMM: + emit_loadimm(K, r_A); + break; + case BPF_S_LDX_IMM: + emit_loadimm(K, r_X); + break; + case BPF_S_LD_MEM: + emit_ldmem(K * 4, r_A); + break; + case BPF_S_LDX_MEM: + emit_ldmem(K * 4, r_X); + break; + case BPF_S_ST: + emit_stmem(K * 4, r_A); + break; + case BPF_S_STX: + emit_stmem(K * 4, r_X); + break; + +#define CHOOSE_LOAD_FUNC(K, func) \ + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative_offset : func) : func##_positive_offset) + + case BPF_S_LD_W_ABS: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); +common_load: seen |= SEEN_DATAREF; + emit_loadimm(K, r_OFF); + emit_call(func); + break; + case BPF_S_LD_H_ABS: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); + goto common_load; + case BPF_S_LD_B_ABS: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); + goto common_load; + case BPF_S_LDX_B_MSH: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); + goto common_load; + case BPF_S_LD_W_IND: + func = bpf_jit_load_word; +common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; + if (K) { + if (is_simm13(K)) { + emit_addi(r_X, K, r_OFF); + } else { + emit_loadimm(K, r_TMP); + emit_add(r_X, r_TMP, r_OFF); + } + } else { + emit_reg_move(r_X, r_OFF); + } + emit_call(func); + break; + case BPF_S_LD_H_IND: + func = bpf_jit_load_half; + goto common_load_ind; + case BPF_S_LD_B_IND: + func = bpf_jit_load_byte; + goto common_load_ind; + case BPF_S_JMP_JA: + emit_jump(addrs[i + K]); + emit_nop(); + break; + +#define COND_SEL(CODE, TOP, FOP) \ + case CODE: \ + t_op = TOP; \ + f_op = FOP; \ + goto cond_branch + + COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); + COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); + COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); + COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); + COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); + COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); + COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); + COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); + +cond_branch: f_offset = addrs[i + filter[i].jf]; + t_offset = addrs[i + filter[i].jt]; + + /* same targets, can avoid doing the test :) */ + if (filter[i].jt == filter[i].jf) { + emit_jump(t_offset); + emit_nop(); + break; + } + + switch (filter[i].code) { + case BPF_S_JMP_JGT_X: + case BPF_S_JMP_JGE_X: + case BPF_S_JMP_JEQ_X: + seen |= SEEN_XREG; + emit_cmp(r_A, r_X); + break; + case BPF_S_JMP_JSET_X: + seen |= SEEN_XREG; + emit_btst(r_A, r_X); + break; + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGE_K: + if (is_simm13(K)) { + emit_cmpi(r_A, K); + } else { + emit_loadimm(K, r_TMP); + emit_cmp(r_A, r_TMP); + } + break; + case BPF_S_JMP_JSET_K: + if (is_simm13(K)) { + emit_btsti(r_A, K); + } else { + emit_loadimm(K, r_TMP); + emit_btst(r_A, r_TMP); + } + break; + } + if (filter[i].jt != 0) { + if (filter[i].jf) + t_offset += 8; + emit_branch(t_op, t_offset); + emit_nop(); /* delay slot */ + if (filter[i].jf) { + emit_jump(f_offset); + emit_nop(); + } + break; + } + emit_branch(f_op, f_offset); + emit_nop(); /* delay slot */ + break; + + default: + /* hmm, too complex filter, give up with jit compiler */ + goto out; + } + ilen = (void *) prog - (void *) temp; + if (image) { + if (unlikely(proglen + ilen > oldproglen)) { + pr_err("bpb_jit_compile fatal error\n"); + kfree(addrs); + module_free(NULL, image); + return; + } + memcpy(image + proglen, temp, ilen); + } + proglen += ilen; + addrs[i] = proglen; + prog = temp; + } + /* last bpf instruction is always a RET : + * use it to give the cleanup instruction(s) addr + */ + cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */ + if (seen_or_pass0 & SEEN_MEM) + cleanup_addr -= 4; /* add %sp, X, %sp; */ + + if (image) { + if (proglen != oldproglen) + pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", + proglen, oldproglen); + break; + } + if (proglen == oldproglen) { + image = module_alloc(max_t(unsigned int, + proglen, + sizeof(struct work_struct))); + if (!image) + goto out; + } + oldproglen = proglen; + } + + if (bpf_jit_enable > 1) + pr_err("flen=%d proglen=%u pass=%d image=%p\n", + flen, 
proglen, pass, image); + + if (image) { + if (bpf_jit_enable > 1) + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, + 16, 1, image, proglen, false); + bpf_flush_icache(image, image + proglen); + fp->bpf_func = (void *)image; + } +out: + kfree(addrs); + return; +} + +static void jit_free_defer(struct work_struct *arg) +{ + module_free(NULL, arg); +} + +/* run from softirq, we must use a work_struct to call + * module_free() from process context + */ +void bpf_jit_free(struct sk_filter *fp) +{ + if (fp->bpf_func != sk_run_filter) { + struct work_struct *work = (struct work_struct *)fp->bpf_func; + + INIT_WORK(work, jit_free_defer); + schedule_work(work); + } +} diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 96033e2d6845..74239dd77e06 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -11,6 +11,7 @@ config TILE select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP select GENERIC_IRQ_SHOW + select HAVE_SYSCALL_WRAPPERS if TILEGX select SYS_HYPERVISOR select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index bc4f562bd459..7594764d8a69 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -100,9 +100,14 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti, #else /* __ASSEMBLY__ */ -/* how to get the thread information struct from ASM */ +/* + * How to get the thread information struct from assembly. + * Note that we use different macros since different architectures + * have different semantics in their "mm" instruction and we would + * like to guarantee that the macro expands to exactly one instruction. + */ #ifdef __tilegx__ -#define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63 +#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63 #else #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31 #endif diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 77763ccd5a7d..cdef6e5ec022 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -403,19 +403,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, + * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->pc = ptr_to_compat_reg(ka->sa.sa_handler); regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ regs->sp = ptr_to_compat_reg(frame); regs->lr = restorer; regs->regs[0] = (unsigned long) usig; - - if (ka->sa.sa_flags & SA_SIGINFO) { - /* Need extra arguments, so mark to restore caller-saves. */ - regs->regs[1] = ptr_to_compat_reg(&frame->info); - regs->regs[2] = ptr_to_compat_reg(&frame->uc); - regs->flags |= PT_FLAGS_CALLER_SAVES; - } + regs->regs[1] = ptr_to_compat_reg(&frame->info); + regs->regs[2] = ptr_to_compat_reg(&frame->uc); + regs->flags |= PT_FLAGS_CALLER_SAVES; /* * Notify any tracer that was single-stepping it. diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 5d56a1ef5ba5..6943515100f8 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -839,6 +839,18 @@ STD_ENTRY(interrupt_return) FEEDBACK_REENTER(interrupt_return) /* + * Use r33 to hold whether we have already loaded the callee-saves + * into ptregs. 
We don't want to do it twice in this loop, since + * then we'd clobber whatever changes are made by ptrace, etc. + * Get base of stack in r32. + */ + { + GET_THREAD_INFO(r32) + movei r33, 0 + } + +.Lretry_work_pending: + /* * Disable interrupts so as to make sure we don't * miss an interrupt that sets any of the thread flags (like * need_resched or sigpending) between sampling and the iret. @@ -848,9 +860,6 @@ STD_ENTRY(interrupt_return) IRQ_DISABLE(r20, r21) TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ - /* Get base of stack in r32; note r30/31 are used as arguments here. */ - GET_THREAD_INFO(r32) - /* Check to see if there is any work to do before returning to user. */ { @@ -866,16 +875,18 @@ STD_ENTRY(interrupt_return) /* * Make sure we have all the registers saved for signal - * handling or single-step. Call out to C code to figure out - * exactly what we need to do for each flag bit, then if - * necessary, reload the flags and recheck. + * handling, notify-resume, or single-step. Call out to C + * code to figure out exactly what we need to do for each flag bit, + * then if necessary, reload the flags and recheck. */ - push_extra_callee_saves r0 { PTREGS_PTR(r0, PTREGS_OFFSET_BASE) - jal do_work_pending + bnz r33, 1f } - bnz r0, .Lresume_userspace + push_extra_callee_saves r0 + movei r33, 1 +1: jal do_work_pending + bnz r0, .Lretry_work_pending /* * In the NMI case we @@ -1180,10 +1191,12 @@ handle_syscall: add r20, r20, tp lw r21, r20 addi r21, r21, 1 - sw r20, r21 + { + sw r20, r21 + GET_THREAD_INFO(r31) + } /* Trace syscalls, if requested. */ - GET_THREAD_INFO(r31) addi r31, r31, THREAD_INFO_FLAGS_OFFSET lw r30, r31 andi r30, r30, _TIF_SYSCALL_TRACE @@ -1362,7 +1375,10 @@ handle_ill: 3: /* set PC and continue */ lw r26, r24 - sw r28, r26 + { + sw r28, r26 + GET_THREAD_INFO(r0) + } /* * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill. @@ -1370,7 +1386,6 @@ handle_ill: * need to clear it here and can't really impose on all other arches. * So what's another write between friends? */ - GET_THREAD_INFO(r0) addi r1, r0, THREAD_INFO_FLAGS_OFFSET { diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 49d9d6621682..30ae76e50c44 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -647,6 +647,20 @@ STD_ENTRY(interrupt_return) FEEDBACK_REENTER(interrupt_return) /* + * Use r33 to hold whether we have already loaded the callee-saves + * into ptregs. We don't want to do it twice in this loop, since + * then we'd clobber whatever changes are made by ptrace, etc. + */ + { + movei r33, 0 + move r32, sp + } + + /* Get base of stack in r32. */ + EXTRACT_THREAD_INFO(r32) + +.Lretry_work_pending: + /* * Disable interrupts so as to make sure we don't * miss an interrupt that sets any of the thread flags (like * need_resched or sigpending) between sampling and the iret. @@ -656,9 +670,6 @@ STD_ENTRY(interrupt_return) IRQ_DISABLE(r20, r21) TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ - /* Get base of stack in r32; note r30/31 are used as arguments here. */ - GET_THREAD_INFO(r32) - /* Check to see if there is any work to do before returning to user. */ { @@ -674,16 +685,18 @@ STD_ENTRY(interrupt_return) /* * Make sure we have all the registers saved for signal - * handling or single-step. Call out to C code to figure out + * handling or notify-resume. Call out to C code to figure out * exactly what we need to do for each flag bit, then if * necessary, reload the flags and recheck. 
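 * The r33 flag added above makes this loop restartable: the
 * push_extra_callee_saves below runs at most once (r33 is set to 1
 * immediately after it), and a nonzero return from do_work_pending
 * now loops back to .Lretry_work_pending, which re-disables
 * interrupts before the flags are re-tested.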
*/ - push_extra_callee_saves r0 { PTREGS_PTR(r0, PTREGS_OFFSET_BASE) - jal do_work_pending + bnez r33, 1f } - bnez r0, .Lresume_userspace + push_extra_callee_saves r0 + movei r33, 1 +1: jal do_work_pending + bnez r0, .Lretry_work_pending /* * In the NMI case we @@ -968,11 +981,16 @@ handle_syscall: shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET) add r20, r20, tp ld4s r21, r20 - addi r21, r21, 1 - st4 r20, r21 + { + addi r21, r21, 1 + move r31, sp + } + { + st4 r20, r21 + EXTRACT_THREAD_INFO(r31) + } /* Trace syscalls, if requested. */ - GET_THREAD_INFO(r31) addi r31, r31, THREAD_INFO_FLAGS_OFFSET ld r30, r31 andi r30, r30, _TIF_SYSCALL_TRACE diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 2d5ef617bb39..54e6c64b85cc 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -567,6 +567,10 @@ struct task_struct *__sched _switch_to(struct task_struct *prev, */ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) { + /* If we enter in kernel mode, do nothing and exit the caller loop. */ + if (!user_mode(regs)) + return 0; + if (thread_info_flags & _TIF_NEED_RESCHED) { schedule(); return 1; @@ -589,8 +593,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) return 1; } if (thread_info_flags & _TIF_SINGLESTEP) { - if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0) - single_step_once(regs); + single_step_once(regs); return 0; } panic("work_pending: bad flags %#x\n", thread_info_flags); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1d14cc6b79ad..c9866b0b77d8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -81,7 +81,7 @@ config X86 select CLKEVT_I8253 select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_IOMAP - select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC + select DCACHE_WORD_ACCESS config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 41a7237606a3..94e91e401da9 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -134,6 +134,9 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,) KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) +archscripts: + $(Q)$(MAKE) $(build)=arch/x86/tools relocs + ### # Syscall table generation diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index fd55a2ff3ad8..e398bb5d63bb 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -40,13 +40,12 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) +targets += vmlinux.bin.all vmlinux.relocs -targets += vmlinux.bin.all vmlinux.relocs relocs -hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs - +CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ - cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $< -$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE + cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $< +$(obj)/vmlinux.relocs: vmlinux FORCE $(call if_changed,relocs) vmlinux.bin.all-y := $(obj)/vmlinux.bin diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 4824fb45560f..07b3a68d2d29 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -294,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* OK, This is the point of no return */ set_personality(PER_LINUX); - set_thread_flag(TIF_IA32); - current->mm->context.ia32_compat = 1; + set_personality_ia32(false); setup_new_exec(bprm); diff --git 
a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 734c3767cfac..183922e13de1 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -170,6 +170,9 @@ static inline int kvm_para_available(void) unsigned int eax, ebx, ecx, edx; char signature[13]; + if (boot_cpu_data.cpuid_level < 0) + return 0; /* So we don't blow up on old processors */ + cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); memcpy(signature + 0, &ebx, 4); memcpy(signature + 4, &ecx, 4); diff --git a/arch/x86/include/asm/stat.h b/arch/x86/include/asm/stat.h index e0b1d9bbcbc6..7b3ddc348585 100644 --- a/arch/x86/include/asm/stat.h +++ b/arch/x86/include/asm/stat.h @@ -25,6 +25,12 @@ struct stat { unsigned long __unused5; }; +/* We don't need to memset the whole thing just to initialize the padding */ +#define INIT_STRUCT_STAT_PADDING(st) do { \ + st.__unused4 = 0; \ + st.__unused5 = 0; \ +} while (0) + #define STAT64_HAS_BROKEN_ST_INO 1 /* This matches struct stat64 in glibc2.1, hence the absolutely @@ -63,6 +69,12 @@ struct stat64 { unsigned long long st_ino; }; +/* We don't need to memset the whole thing just to initialize the padding */ +#define INIT_STRUCT_STAT64_PADDING(st) do { \ + memset(&st.__pad0, 0, sizeof(st.__pad0)); \ + memset(&st.__pad3, 0, sizeof(st.__pad3)); \ +} while (0) + #else /* __i386__ */ struct stat { @@ -87,6 +99,15 @@ struct stat { unsigned long st_ctime_nsec; long __unused[3]; }; + +/* We don't need to memset the whole thing just to initialize the padding */ +#define INIT_STRUCT_STAT_PADDING(st) do { \ + st.__pad0 = 0; \ + st.__unused[0] = 0; \ + st.__unused[1] = 0; \ + st.__unused[2] = 0; \ +} while (0) + #endif /* for 32bit emulation and 32 bit kernels */ diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h index 6fe6767b7124..e58f03b206c3 100644 --- a/arch/x86/include/asm/word-at-a-time.h +++ b/arch/x86/include/asm/word-at-a-time.h @@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a) return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); } +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. 
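+ * Roughly, the exception fixup below amounts to this C sketch
+ * (names are illustrative only):
+ *
+ *	offset  = (unsigned long)addr & (sizeof(long) - 1);
+ *	aligned = (unsigned long *)((unsigned long)addr - offset);
+ *	ret     = *aligned >> (8 * offset);
+ *
+ * i.e. load the aligned word containing the first byte and shift
+ * the wanted bytes down, so whatever would have come from the
+ * unmapped page reads back as zeroes.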
+ */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret, dummy; + + asm( + "1:\tmov %2,%0\n" + "2:\n" + ".section .fixup,\"ax\"\n" + "3:\t" + "lea %2,%1\n\t" + "and %3,%1\n\t" + "mov (%1),%0\n\t" + "leal %2,%%ecx\n\t" + "andl %4,%%ecx\n\t" + "shll $3,%%ecx\n\t" + "shr %%cl,%0\n\t" + "jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) + :"=&r" (ret),"=&c" (dummy) + :"m" (*(unsigned long *)addr), + "i" (-sizeof(unsigned long)), + "i" (sizeof(unsigned long)-1)); + return ret; +} + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index a415b1f44365..7c439fe4941b 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void) #ifdef CONFIG_ACPI_HOTPLUG_CPU #include <acpi/processor.h> -static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) +static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 8a778db45e3a..991e315f4227 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if (x2apic_phys) return x2apic_enabled(); + else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && + (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && + x2apic_enabled()) { + printk(KERN_DEBUG "System requires x2apic physical mode\n"); + return 1; + } else return 0; } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1c67ca100e4c..146bb6218eec 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -580,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* re-enable TopologyExtensions if switched off by BIOS */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && + !cpu_has(c, X86_FEATURE_TOPOEXT)) { + u64 val; + + if (!rdmsrl_amd_safe(0xc0011005, &val)) { + val |= 1ULL << 54; + wrmsrl_amd_safe(0xc0011005, val); + rdmsrl(0xc0011005, val); + if (val & (1ULL << 54)) { + set_cpu_cap(c, X86_FEATURE_TOPOEXT); + printk(KERN_INFO FW_INFO "CPU: Re-enabling " + "disabled Topology Extensions Support\n"); + } + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? 
*/ diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 73d08ed98a64..b8f3653dddbc 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot, /* check if @slot is already used or the index is already disabled */ ret = amd_get_l3_disable_slot(nb, slot); if (ret >= 0) - return -EINVAL; + return -EEXIST; if (index > nb->l3_cache.indices) return -EINVAL; /* check whether the other slot has disabled the same index already */ if (index == amd_get_l3_disable_slot(nb, !slot)) - return -EINVAL; + return -EEXIST; amd_l3_disable_index(nb, cpu, slot, index); @@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); if (err) { if (err == -EEXIST) - printk(KERN_WARNING "L3 disable slot %d in use!\n", - slot); + pr_warning("L3 slot %d in use/index already disabled!\n", + slot); return err; } return count; diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index d086a09c087d..11c9166c3337 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -945,9 +945,10 @@ struct mce_info { atomic_t inuse; struct task_struct *t; __u64 paddr; + int restartable; } mce_info[MCE_INFO_MAX]; -static void mce_save_info(__u64 addr) +static void mce_save_info(__u64 addr, int c) { struct mce_info *mi; @@ -955,6 +956,7 @@ static void mce_save_info(__u64 addr) if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) { mi->t = current; mi->paddr = addr; + mi->restartable = c; return; } } @@ -1130,7 +1132,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) mce_panic("Fatal machine check on current CPU", &m, msg); if (worst == MCE_AR_SEVERITY) { /* schedule action before return to userland */ - mce_save_info(m.addr); + mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV); set_thread_flag(TIF_MCE_NOTIFY); } else if (kill_it) { force_sig(SIGBUS, current); @@ -1179,7 +1181,13 @@ void mce_notify_process(void) pr_err("Uncorrected hardware memory error in user-access at %llx", mi->paddr); - if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0) { + /* + * We must call memory_failure() here even if the current process is + * doomed. We still need to mark the page as poisoned and alert any + * other users of the page. + */ + if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 || + mi->restartable == 0) { pr_err("Memory error not recovered"); force_sig(SIGBUS, current); } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index b8ba6e4a27e4..e554e5ad2fe8 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -79,7 +79,6 @@ struct kvm_task_sleep_node { u32 token; int cpu; bool halted; - struct mm_struct *mm; }; static struct kvm_task_sleep_head { @@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token) n.token = token; n.cpu = smp_processor_id(); - n.mm = current->active_mm; n.halted = idle || preempt_count() > 1; - atomic_inc(&n.mm->mm_count); init_waitqueue_head(&n.wq); hlist_add_head(&n.link, &b->list); spin_unlock(&b->lock); @@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); static void apf_task_wake_one(struct kvm_task_sleep_node *n) { hlist_del_init(&n->link); - if (!n->mm) - return; - mmdrop(n->mm); if (n->halted) smp_send_reschedule(n->cpu); else if (waitqueue_active(&n->wq)) @@ -207,7 +201,7 @@ again: * async PF was not yet handled. 
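 * (The kzalloc below replaces kmalloc so that, with the ->mm field
 * removed, fields such as ->halted start out zero-initialised.)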
* Add dummy entry for the token. */ - n = kmalloc(sizeof(*n), GFP_ATOMIC); + n = kzalloc(sizeof(*n), GFP_ATOMIC); if (!n) { /* * Allocation failed! Busy wait while other cpu @@ -219,7 +213,6 @@ again: } n->token = token; n->cpu = smp_processor_id(); - n->mm = NULL; init_waitqueue_head(&n->wq); hlist_add_head(&n->link, &b->list); } else diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 3ca42d0e43a2..0327e2b3c408 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c @@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) memset(csig, 0, sizeof(*csig)); - if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || - cpu_has(c, X86_FEATURE_IA64)) { - pr_err("CPU%d not a capable Intel processor\n", cpu_num); - return -1; - } - csig->sig = cpuid_eax(0x00000001); if ((c->x86_model >= 5) || (c->x86 > 6)) { @@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = { struct microcode_ops * __init init_intel_microcode(void) { + struct cpuinfo_x86 *c = &cpu_data(0); + + if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || + cpu_has(c, X86_FEATURE_IA64)) { + pr_err("Intel CPU family 0x%x not supported\n", c->x86); + return NULL; + } + return µcode_intel_ops; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 733ca39f367e..43d8b48b23e6 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -423,6 +423,7 @@ void set_personality_ia32(bool x32) current_thread_info()->status |= TS_COMPAT; } } +EXPORT_SYMBOL_GPL(set_personality_ia32); unsigned long get_wchan(struct task_struct *p) { diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 71f4727da373..5a98aa272184 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void) #endif rc = -EINVAL; if (pcpu_chosen_fc != PCPU_FC_PAGE) { - const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE; const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE; + size_t atom_size; + /* + * On 64bit, use PMD_SIZE for atom_size so that embedded + * percpu areas are aligned to PMD. This, in the future, + * can also allow using PMD mappings in vmalloc area. Use + * PAGE_SIZE on 32bit as vmalloc space is highly contended + * and large vmalloc area allocs can easily fail. + */ +#ifdef CONFIG_X86_64 + atom_size = PMD_SIZE; +#else + atom_size = PAGE_SIZE; +#endif rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, dyn_size, atom_size, pcpu_cpu_distance, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 91a5e989abcf..185a2b823a2d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index d6ae30bbd7bb..2e4e4b02c37a 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -44,13 +44,6 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) } EXPORT_SYMBOL_GPL(copy_from_user_nmi); -static inline unsigned long count_bytes(unsigned long mask) -{ - mask = (mask - 1) & ~mask; - mask >>= 7; - return count_masked_bytes(mask); -} - /* * Do a strncpy, return length of string without final '\0'. 
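 * (When has_zero() flags a NUL inside the current word, the
 * (mask - 1) & ~mask, mask >>= 7 step below turns the per-byte 0x80
 * marker into a byte mask covering only the bytes before the NUL,
 * so c & mask can be stored safely and count_masked_bytes() gives
 * the length within that word; e.g. for bytes 'a','b',0,... the
 * marker sits in byte 2, the mask becomes 0xffff and 2 is added
 * to res.)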
* 'count' is the user-supplied count (return 'count' if we @@ -69,16 +62,19 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long max = count; while (max >= sizeof(unsigned long)) { - unsigned long c; + unsigned long c, mask; /* Fall back to byte-at-a-time if we get a page fault */ if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) break; - /* This can write a few bytes past the NUL character, but that's ok */ + mask = has_zero(c); + if (mask) { + mask = (mask - 1) & ~mask; + mask >>= 7; + *(unsigned long *)(dst+res) = c & mask; + return res + count_masked_bytes(mask); + } *(unsigned long *)(dst+res) = c; - c = has_zero(c); - if (c) - return res + count_bytes(c); res += sizeof(unsigned long); max -= sizeof(unsigned long); } diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c index 66d377e334f7..646e3b5b4bb6 100644 --- a/arch/x86/platform/geode/net5501.c +++ b/arch/x86/platform/geode/net5501.c @@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = { .name = "net5501:1", .gpio = 6, .default_trigger = "default-on", - .active_low = 1, + .active_low = 0, }, }; diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e0a37233c0af..e31bcd8f2eee 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -805,7 +805,7 @@ void intel_scu_devices_create(void) } else i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1); } - intel_scu_notifier_post(SCU_AVAILABLE, 0L); + intel_scu_notifier_post(SCU_AVAILABLE, NULL); } EXPORT_SYMBOL_GPL(intel_scu_devices_create); @@ -814,7 +814,7 @@ void intel_scu_devices_destroy(void) { int i; - intel_scu_notifier_post(SCU_DOWN, 0L); + intel_scu_notifier_post(SCU_DOWN, NULL); for (i = 0; i < ipc_next_dev; i++) platform_device_del(ipc_devs[i]); diff --git a/arch/x86/tools/.gitignore b/arch/x86/tools/.gitignore new file mode 100644 index 000000000000..be0ed065249b --- /dev/null +++ b/arch/x86/tools/.gitignore @@ -0,0 +1 @@ +relocs diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index d511aa97533a..733057b435b0 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -36,3 +36,7 @@ HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c + +HOST_EXTRACFLAGS += -I$(srctree)/tools/include +hostprogs-y += relocs +relocs: $(obj)/relocs diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/tools/relocs.c index d3c0b0277666..b43cfcd9bf40 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/tools/relocs.c @@ -18,6 +18,8 @@ static void die(char *fmt, ...); static Elf32_Ehdr ehdr; static unsigned long reloc_count, reloc_idx; static unsigned long *relocs; +static unsigned long reloc16_count, reloc16_idx; +static unsigned long *relocs16; struct section { Elf32_Shdr shdr; @@ -28,52 +30,86 @@ struct section { }; static struct section *secs; +enum symtype { + S_ABS, + S_REL, + S_SEG, + S_LIN, + S_NSYMTYPES +}; + +static const char * const sym_regex_kernel[S_NSYMTYPES] = { /* * Following symbols have been audited. 
There values are constant and do * not change if bzImage is loaded at a different physical address than * the address for which it has been compiled. Don't warn user about * absolute relocations present w.r.t these symbols. */ -static const char abs_sym_regex[] = + [S_ABS] = "^(xen_irq_disable_direct_reloc$|" "xen_save_fl_direct_reloc$|" "VDSO|" - "__crc_)"; -static regex_t abs_sym_regex_c; -static int is_abs_reloc(const char *sym_name) -{ - return !regexec(&abs_sym_regex_c, sym_name, 0, NULL, 0); -} + "__crc_)", /* * These symbols are known to be relative, even if the linker marks them * as absolute (typically defined outside any section in the linker script.) */ -static const char rel_sym_regex[] = - "^_end$"; -static regex_t rel_sym_regex_c; -static int is_rel_reloc(const char *sym_name) + [S_REL] = + "^(__init_(begin|end)|" + "__x86_cpu_dev_(start|end)|" + "(__parainstructions|__alt_instructions)(|_end)|" + "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|" + "_end)$" +}; + + +static const char * const sym_regex_realmode[S_NSYMTYPES] = { +/* + * These are 16-bit segment symbols when compiling 16-bit code. + */ + [S_SEG] = + "^real_mode_seg$", + +/* + * These are offsets belonging to segments, as opposed to linear addresses, + * when compiling 16-bit code. + */ + [S_LIN] = + "^pa_", +}; + +static const char * const *sym_regex; + +static regex_t sym_regex_c[S_NSYMTYPES]; +static int is_reloc(enum symtype type, const char *sym_name) { - return !regexec(&rel_sym_regex_c, sym_name, 0, NULL, 0); + return sym_regex[type] && + !regexec(&sym_regex_c[type], sym_name, 0, NULL, 0); } -static void regex_init(void) +static void regex_init(int use_real_mode) { char errbuf[128]; int err; - - err = regcomp(&abs_sym_regex_c, abs_sym_regex, - REG_EXTENDED|REG_NOSUB); - if (err) { - regerror(err, &abs_sym_regex_c, errbuf, sizeof errbuf); - die("%s", errbuf); - } + int i; + + if (use_real_mode) + sym_regex = sym_regex_realmode; + else + sym_regex = sym_regex_kernel; - err = regcomp(&rel_sym_regex_c, rel_sym_regex, - REG_EXTENDED|REG_NOSUB); - if (err) { - regerror(err, &rel_sym_regex_c, errbuf, sizeof errbuf); - die("%s", errbuf); + for (i = 0; i < S_NSYMTYPES; i++) { + if (!sym_regex[i]) + continue; + + err = regcomp(&sym_regex_c[i], sym_regex[i], + REG_EXTENDED|REG_NOSUB); + + if (err) { + regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf); + die("%s", errbuf); + } } } @@ -154,6 +190,10 @@ static const char *rel_type(unsigned type) REL_TYPE(R_386_RELATIVE), REL_TYPE(R_386_GOTOFF), REL_TYPE(R_386_GOTPC), + REL_TYPE(R_386_8), + REL_TYPE(R_386_PC8), + REL_TYPE(R_386_16), + REL_TYPE(R_386_PC16), #undef REL_TYPE }; const char *name = "unknown type rel type name"; @@ -189,7 +229,7 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym) name = sym_strtab + sym->st_name; } else { - name = sec_name(secs[sym->st_shndx].shdr.sh_name); + name = sec_name(sym->st_shndx); } return name; } @@ -403,13 +443,11 @@ static void print_absolute_symbols(void) for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; char *sym_strtab; - Elf32_Sym *sh_symtab; int j; if (sec->shdr.sh_type != SHT_SYMTAB) { continue; } - sh_symtab = sec->symtab; sym_strtab = sec->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { Elf32_Sym *sym; @@ -474,7 +512,7 @@ static void print_absolute_relocs(void) * Before warning check if this absolute symbol * relocation is harmless. 
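The relocs tool now classifies symbols through the regex tables above, compiled once by regex_init() and consulted through is_reloc(). A minimal host-side sketch of that classification, using the POSIX regex API and the S_LIN pattern from the realmode table (the symbol names tested here are invented for illustration):

/* Minimal sketch of the regex_init()/is_reloc() classification above,
 * using one pattern from the realmode table.  Symbol names are made up. */
#include <stdio.h>
#include <regex.h>

int main(void)
{
	static const char *names[] = { "pa_text_start", "real_mode_seg", "foo" };
	regex_t lin;
	int i;

	/* S_LIN pattern: segment-relative offsets, not linear addresses */
	if (regcomp(&lin, "^pa_", REG_EXTENDED | REG_NOSUB))
		return 1;

	for (i = 0; i < 3; i++)
		printf("%-14s %s\n", names[i],
		       regexec(&lin, names[i], 0, NULL, 0) ? "other" : "S_LIN");

	regfree(&lin);
	return 0;
}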
*/ - if (is_abs_reloc(name) || is_rel_reloc(name)) + if (is_reloc(S_ABS, name) || is_reloc(S_REL, name)) continue; if (!printed) { @@ -498,7 +536,8 @@ static void print_absolute_relocs(void) printf("\n"); } -static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) +static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym), + int use_real_mode) { int i; /* Walk through the relocations */ @@ -523,30 +562,67 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) Elf32_Rel *rel; Elf32_Sym *sym; unsigned r_type; + const char *symname; + int shn_abs; + rel = &sec->reltab[j]; sym = &sh_symtab[ELF32_R_SYM(rel->r_info)]; r_type = ELF32_R_TYPE(rel->r_info); - /* Don't visit relocations to absolute symbols */ - if (sym->st_shndx == SHN_ABS && - !is_rel_reloc(sym_name(sym_strtab, sym))) { - continue; - } + + shn_abs = sym->st_shndx == SHN_ABS; + switch (r_type) { case R_386_NONE: case R_386_PC32: + case R_386_PC16: + case R_386_PC8: /* * NONE can be ignored and and PC relative * relocations don't need to be adjusted. */ break; + + case R_386_16: + symname = sym_name(sym_strtab, sym); + if (!use_real_mode) + goto bad; + if (shn_abs) { + if (is_reloc(S_ABS, symname)) + break; + else if (!is_reloc(S_SEG, symname)) + goto bad; + } else { + if (is_reloc(S_LIN, symname)) + goto bad; + else + break; + } + visit(rel, sym); + break; + case R_386_32: - /* Visit relocations that need to be adjusted */ + symname = sym_name(sym_strtab, sym); + if (shn_abs) { + if (is_reloc(S_ABS, symname)) + break; + else if (!is_reloc(S_REL, symname)) + goto bad; + } else { + if (use_real_mode && + !is_reloc(S_LIN, symname)) + break; + } visit(rel, sym); break; default: die("Unsupported relocation type: %s (%d)\n", rel_type(r_type), r_type); break; + bad: + symname = sym_name(sym_strtab, sym); + die("Invalid %s %s relocation: %s\n", + shn_abs ? "absolute" : "relative", + rel_type(r_type), symname); } } } @@ -554,13 +630,19 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym) { - reloc_count += 1; + if (ELF32_R_TYPE(rel->r_info) == R_386_16) + reloc16_count++; + else + reloc_count++; } static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym) { /* Remember the address that needs to be adjusted. */ - relocs[reloc_idx++] = rel->r_offset; + if (ELF32_R_TYPE(rel->r_info) == R_386_16) + relocs16[reloc16_idx++] = rel->r_offset; + else + relocs[reloc_idx++] = rel->r_offset; } static int cmp_relocs(const void *va, const void *vb) @@ -570,23 +652,41 @@ static int cmp_relocs(const void *va, const void *vb) return (*a == *b)? 0 : (*a > *b)? 1 : -1; } -static void emit_relocs(int as_text) +static int write32(unsigned int v, FILE *f) +{ + unsigned char buf[4]; + + put_unaligned_le32(v, buf); + return fwrite(buf, 1, 4, f) == 4 ? 0 : -1; +} + +static void emit_relocs(int as_text, int use_real_mode) { int i; /* Count how many relocations I have and allocate space for them. 
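The new write32() helper relies on put_unaligned_le32() from the tools/include path added to HOST_EXTRACFLAGS earlier, so the emitted relocation stream is little-endian regardless of the build host. As a rough, illustrative equivalent (write32_le and the sample value below are not part of the patch):

/* Rough open-coded equivalent of the write32()/put_unaligned_le32()
 * pairing introduced above: emit a 32-bit value as little-endian bytes
 * regardless of host endianness.  Illustrative only. */
#include <stdio.h>

static int write32_le(unsigned int v, FILE *f)
{
	unsigned char buf[4];

	buf[0] = v & 0xff;		/* least significant byte first */
	buf[1] = (v >> 8) & 0xff;
	buf[2] = (v >> 16) & 0xff;
	buf[3] = (v >> 24) & 0xff;
	return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
}

int main(void)
{
	/* writes the bytes 78 56 34 12 on any host */
	return write32_le(0x12345678, stdout) ? 1 : 0;
}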
*/ reloc_count = 0; - walk_relocs(count_reloc); + walk_relocs(count_reloc, use_real_mode); relocs = malloc(reloc_count * sizeof(relocs[0])); if (!relocs) { die("malloc of %d entries for relocs failed\n", reloc_count); } + + relocs16 = malloc(reloc16_count * sizeof(relocs[0])); + if (!relocs16) { + die("malloc of %d entries for relocs16 failed\n", + reloc16_count); + } /* Collect up the relocations */ reloc_idx = 0; - walk_relocs(collect_reloc); + walk_relocs(collect_reloc, use_real_mode); + + if (reloc16_count && !use_real_mode) + die("Segment relocations found but --realmode not specified\n"); /* Order the relocations for more efficient processing */ qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs); + qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs); /* Print the relocations */ if (as_text) { @@ -595,58 +695,83 @@ static void emit_relocs(int as_text) */ printf(".section \".data.reloc\",\"a\"\n"); printf(".balign 4\n"); - for (i = 0; i < reloc_count; i++) { - printf("\t .long 0x%08lx\n", relocs[i]); + if (use_real_mode) { + printf("\t.long %lu\n", reloc16_count); + for (i = 0; i < reloc16_count; i++) + printf("\t.long 0x%08lx\n", relocs16[i]); + printf("\t.long %lu\n", reloc_count); + for (i = 0; i < reloc_count; i++) { + printf("\t.long 0x%08lx\n", relocs[i]); + } + } else { + /* Print a stop */ + printf("\t.long 0x%08lx\n", (unsigned long)0); + for (i = 0; i < reloc_count; i++) { + printf("\t.long 0x%08lx\n", relocs[i]); + } } + printf("\n"); } else { - unsigned char buf[4]; - /* Print a stop */ - fwrite("\0\0\0\0", 4, 1, stdout); - /* Now print each relocation */ - for (i = 0; i < reloc_count; i++) { - put_unaligned_le32(relocs[i], buf); - fwrite(buf, 4, 1, stdout); + if (use_real_mode) { + write32(reloc16_count, stdout); + for (i = 0; i < reloc16_count; i++) + write32(relocs16[i], stdout); + write32(reloc_count, stdout); + + /* Now print each relocation */ + for (i = 0; i < reloc_count; i++) + write32(relocs[i], stdout); + } else { + /* Print a stop */ + write32(0, stdout); + + /* Now print each relocation */ + for (i = 0; i < reloc_count; i++) { + write32(relocs[i], stdout); + } } } } static void usage(void) { - die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n"); + die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n"); } int main(int argc, char **argv) { int show_absolute_syms, show_absolute_relocs; - int as_text; + int as_text, use_real_mode; const char *fname; FILE *fp; int i; - regex_init(); - show_absolute_syms = 0; show_absolute_relocs = 0; as_text = 0; + use_real_mode = 0; fname = NULL; for (i = 1; i < argc; i++) { char *arg = argv[i]; if (*arg == '-') { - if (strcmp(argv[1], "--abs-syms") == 0) { + if (strcmp(arg, "--abs-syms") == 0) { show_absolute_syms = 1; continue; } - - if (strcmp(argv[1], "--abs-relocs") == 0) { + if (strcmp(arg, "--abs-relocs") == 0) { show_absolute_relocs = 1; continue; } - else if (strcmp(argv[1], "--text") == 0) { + if (strcmp(arg, "--text") == 0) { as_text = 1; continue; } + if (strcmp(arg, "--realmode") == 0) { + use_real_mode = 1; + continue; + } } else if (!fname) { fname = arg; @@ -657,6 +782,7 @@ int main(int argc, char **argv) if (!fname) { usage(); } + regex_init(use_real_mode); fp = fopen(fname, "r"); if (!fp) { die("Cannot open %s: %s\n", @@ -675,6 +801,6 @@ int main(int argc, char **argv) print_absolute_relocs(); return 0; } - emit_relocs(as_text); + emit_relocs(as_text, use_real_mode); return 0; } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 4f51bebac02c..95dccce8e979 
100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -63,6 +63,7 @@ #include <asm/stackprotector.h> #include <asm/hypervisor.h> #include <asm/mwait.h> +#include <asm/pci_x86.h> #ifdef CONFIG_ACPI #include <linux/acpi.h> @@ -261,7 +262,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, static bool __init xen_check_mwait(void) { -#ifdef CONFIG_ACPI +#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \ + !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) struct xen_platform_op op = { .cmd = XENPF_set_processor_pminfo, .u.set_pminfo.id = -1, @@ -349,7 +351,6 @@ static void __init xen_init_cpuid_mask(void) /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ if ((cx & xsave_mask) != xsave_mask) cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ - if (xen_check_mwait()) cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); } @@ -809,9 +810,40 @@ static void xen_io_delay(void) } #ifdef CONFIG_X86_LOCAL_APIC +static unsigned long xen_set_apic_id(unsigned int x) +{ + WARN_ON(1); + return x; +} +static unsigned int xen_get_apic_id(unsigned long x) +{ + return ((x)>>24) & 0xFFu; +} static u32 xen_apic_read(u32 reg) { - return 0; + struct xen_platform_op op = { + .cmd = XENPF_get_cpuinfo, + .interface_version = XENPF_INTERFACE_VERSION, + .u.pcpu_info.xen_cpuid = 0, + }; + int ret = 0; + + /* Shouldn't need this as APIC is turned off for PV, and we only + * get called on the bootup processor. But just in case. */ + if (!xen_initial_domain() || smp_processor_id()) + return 0; + + if (reg == APIC_LVR) + return 0x10; + + if (reg != APIC_ID) + return 0; + + ret = HYPERVISOR_dom0_op(&op); + if (ret) + return 0; + + return op.u.pcpu_info.apic_id << 24; } static void xen_apic_write(u32 reg, u32 val) @@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void) apic->icr_write = xen_apic_icr_write; apic->wait_icr_idle = xen_apic_wait_icr_idle; apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; + apic->set_apic_id = xen_set_apic_id; + apic->get_apic_id = xen_get_apic_id; } #endif @@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void) /* Make sure ACS will be enabled */ pci_request_acs(); } - - +#ifdef CONFIG_PCI + /* PCI BIOS service won't work from a PV guest. */ + pci_probe &= ~PCI_PROBE_BIOS; +#endif xen_raw_console_write("about to get started...\n"); xen_setup_runstate_info(0); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index b8e279479a6b..69f5857660ac 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; + unsigned long pfn = mfn_to_pfn(mfn); + pteval_t flags = val & PTE_FLAGS_MASK; - val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; + if (unlikely(pfn == ~0)) + val = flags & ~_PAGE_PRESENT; + else + val = ((pteval_t)pfn << PAGE_SHIFT) | flags; } return val; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 5fac6919b957..0503c0c493a9 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -178,6 +178,7 @@ static void __init xen_fill_possible_map(void) static void __init xen_filter_cpu_maps(void) { int i, rc; + unsigned int subtract = 0; if (!xen_initial_domain()) return; @@ -192,8 +193,22 @@ static void __init xen_filter_cpu_maps(void) } else { set_cpu_possible(i, false); set_cpu_present(i, false); + subtract++; } } +#ifdef CONFIG_HOTPLUG_CPU + /* This is akin to using 'nr_cpus' on the Linux command line. 
+ * Which is OK as when we use 'dom0_max_vcpus=X' we can only + * have up to X, while nr_cpu_ids is greater than X. This + * normally is not a problem, except when CPU hotplugging + * is involved and then there might be more than X CPUs + * in the guest - which will not work as there is no + * hypercall to expand the max number of VCPUs an already + * running guest has. So cap it up to X. */ + if (subtract) + nr_cpu_ids = nr_cpu_ids - subtract; +#endif + } static void __init xen_smp_prepare_boot_cpu(void) diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 79d7362ad6d1..3e45aa000718 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct) /* check for unmasked and pending */ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending - jz 1f + jnz 1f 2: call check_events 1: ENDPATCH(xen_restore_fl_direct) diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig index b90038e40dd3..a182a4e6d688 100644 --- a/arch/xtensa/configs/common_defconfig +++ b/arch/xtensa/configs/common_defconfig @@ -333,11 +333,6 @@ CONFIG_XT2000_SONIC=y # CONFIG_S2IO is not set # -# Token Ring devices -# -# CONFIG_TR is not set - -# # Wireless LAN (non-hamradio) # CONFIG_NET_RADIO=y diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h index 26664cef8f11..91695a135498 100644 --- a/arch/xtensa/include/asm/hardirq.h +++ b/arch/xtensa/include/asm/hardirq.h @@ -11,9 +11,6 @@ #ifndef _XTENSA_HARDIRQ_H #define _XTENSA_HARDIRQ_H -void ack_bad_irq(unsigned int irq); -#define ack_bad_irq ack_bad_irq - #include <asm-generic/hardirq.h> #endif /* _XTENSA_HARDIRQ_H */ diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index d04cd3a625fa..4beb43c087d3 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -14,6 +14,7 @@ #ifdef __KERNEL__ #include <asm/byteorder.h> #include <asm/page.h> +#include <linux/bug.h> #include <linux/kernel.h> #include <linux/types.h> diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index b69b000349fc..d78869a00b11 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -496,6 +496,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { + int ret; /* Are we from a system call? */ |