author | Arnd Bergmann <arnd@arndb.de> | 2013-04-09 14:56:08 +0200 |
---|---|---|
committer | Arnd Bergmann <arnd@arndb.de> | 2013-04-09 14:56:08 +0200 |
commit | 0a01216c452d092b16c5a559cca339ad60193457 (patch) | |
tree | 10a2dac7501ee889f5b0b1634abd16c81d3952d0 /arch | |
parent | Merge tag 'mxs-fixes-3.9-4' of git://git.linaro.org/people/shawnguo/linux-2.6... (diff) | |
parent | ARM i.MX6: Fix ldb_di clock selection (diff) | |
download | linux-0a01216c452d092b16c5a559cca339ad60193457.tar.xz linux-0a01216c452d092b16c5a559cca339ad60193457.zip |
Merge tag 'imx-fixes-3.9-5' of git://git.linaro.org/people/shawnguo/linux-2.6 into fixes
From Shawn Guo <shawn.guo@linaro.org>:
The imx fixes for 3.9, take 5:
* A couple of imx35 clock fixes for regressions caused by the common
  clock framework conversion: the admux and iomux clocks get disabled
  by the framework's late initcall, which breaks both blocks (a short
  sketch of the fix follows the sign-off below).
* Add the missing twd clock lookup in the device tree. This became
  required since commit bd60345 ("ARM: use device tree to get smp_twd
  clock") forces all DT boots to find the lookup in the device tree
  (see the node sketch right after this list).
* Fix the imx6q ldb_di clock parent mismatch against the reference
  manual.
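For the twd fix, the change is a one-liner in imx6qdl.dtsi (visible in the
diff below): the Cortex-A9 TWD timer node gains a clocks property pointing
at clock index 15. A minimal sketch of the resulting node; the node
label/name here is illustrative, only the properties come from this patch:

    twd_timer: timer@00a00600 {
        compatible = "arm,cortex-a9-twd-timer";
        reg = <0x00a00600 0x20>;
        interrupts = <1 13 0xf01>;
        clocks = <&clks 15>;  /* twd clock, resolved from DT since bd60345 */
    };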
* tag 'imx-fixes-3.9-5' of git://git.linaro.org/people/shawnguo/linux-2.6: (4 commits)
ARM i.MX6: Fix ldb_di clock selection
ARM: imx: provide twd clock lookup from device tree
ARM: imx35 Bugfix admux clock
ARM: clk-imx35: Bugfix iomux clock
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
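On the imx35 fixes mentioned above: the common clock framework's
clk_disable_unused() late initcall gates every clock that has no enable
count and no CLK_IGNORE_UNUSED flag, which is what killed admux and iomux
after the conversion. A commented restatement of the two keep-alive lines
from the clk-imx35.c hunk in this diff (the clk[] indices are the driver's
own):

    /* (a) register an "audmux" lookup so the audmux driver can
     *     clk_get() its gate, and
     * (b) force-enable the iomuxc gate so clk_disable_unused()
     *     leaves it running at late-init time. */
    clk_register_clkdev(clk[admux_gate], "audmux", NULL);
    clk_prepare_enable(clk[iomuxc_gate]);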
Diffstat (limited to 'arch')
54 files changed, 585 insertions, 243 deletions
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 4759fe751aa1..2cc3cc519c54 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -12,7 +12,7 @@ NM := $(NM) -B LDFLAGS_vmlinux := -static -N #-relax CHECKFLAGS += -D__alpha__ -m64 -cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data +cflags-y := -pipe -mno-fp-regs -ffixed-8 cflags-y += $(call cc-option, -fno-jump-tables) cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index 46cefbd50e73..bae97eb19d26 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h @@ -26,7 +26,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - IRQF_DISABLED, "floppy", NULL) + 0, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #ifdef CONFIG_PCI diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2872accd2215..7b2be251c30f 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,13 +117,6 @@ handle_irq(int irq) return; } - /* - * From here we must proceed with IPL_MAX. Note that we do not - * explicitly enable interrupts afterwards - some MILO PALcode - * (namely LX164 one) seems to have severe problems with RTI - * at IPL 0. - */ - local_irq_disable(); irq_enter(); generic_handle_irq_desc(irq, desc); irq_exit(); diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 772ddfdb71a8..f433fc11877a 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { struct pt_regs *old_regs; + + /* + * Disable interrupts during IRQ handling. + * Note that there is no matching local_irq_enable() due to + * severe problems with RTI at IPL0 and some MILO PALcode + * (namely LX164). 
+ */ + local_irq_disable(); switch (type) { case 0: #ifdef CONFIG_SMP @@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector, { long cpu; - local_irq_disable(); smp_percpu_timer_interrupt(regs); cpu = smp_processor_id(); if (cpu != boot_cpuid) { @@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, .name = "timer", }; diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4d4c046f708d..1383f8601a93 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { + .name = "Irongate PCI IO", + .flags = IORESOURCE_IO, +}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -209,6 +213,7 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; + bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 5cf4a481b8c5..a53cf03f49d5 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -280,15 +280,15 @@ titan_late_init(void) * all reported to the kernel as machine checks, so the handler * is a nop so it can be called to count the individual events. */ - titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(63+16, titan_intr_nop, 0, "CChip Error", NULL); - titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(62+16, titan_intr_nop, 0, "PChip 0 H_Error", NULL); - titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(61+16, titan_intr_nop, 0, "PChip 1 H_Error", NULL); - titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(60+16, titan_intr_nop, 0, "PChip 0 C_Error", NULL); - titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(59+16, titan_intr_nop, 0, "PChip 1 C_Error", NULL); /* @@ -348,9 +348,9 @@ privateer_init_pci(void) * Hook a couple of extra err interrupts that the * common titan code won't. */ - titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(53+16, titan_intr_nop, 0, "NMI", NULL); - titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(50+16, titan_intr_nop, 0, "Temperature Warning", NULL); /* diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 13b739469c51..1cacda426a0e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1183,9 +1183,9 @@ config ARM_NR_BANKS default 8 config IWMMXT - bool "Enable iWMMXt support" + bool "Enable iWMMXt support" if !CPU_PJ4 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 - default y if PXA27x || PXA3xx || ARCH_MMP + default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 help Enable support for iWMMXt context switching at run time if running on a CPU that supports it. @@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420 to deadlock. This workaround puts DSB before executing ISB if an abort may occur on cache maintenance. +config ARM_ERRATA_798181 + bool "ARM errata: TLBI/DSB failure on Cortex-A15" + depends on CPU_V7 && SMP + help + On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not + adequately shooting down all use of the old entries. 
This + option enables the Linux kernel workaround for this erratum + which sends an IPI to the CPUs that are running the same ASID + as the one being invalidated. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 06ec460b4581..281a223591ff 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -91,6 +91,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x00a00600 0x20>; interrupts = <1 13 0xf01>; + clocks = <&clks 15>; }; L2: l2-cache@00a02000 { diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799fd3a81..dff714d886d5 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; + unsigned long ticks_per_jiffy; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828f484d..91b99abe7a95 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); #endif #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. + */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + #ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); #else diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a6611323c..a7b85e0d0cc1 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +DECLARE_PER_CPU(atomic64_t, active_asids); + #else /* !CONFIG_CPU_HAS_ASID */ #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c8820f0d..9e9c041358ca 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void) isb(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ + /* + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. + */ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif + /* * flush_pmd_entry * diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 3248cde504ed..fefd7f971437 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old) */ .macro mcount_enter +/* + * This pad compensates for the push {lr} at the call site. Note that we are + * unable to unwind through a function which does not otherwise save its lr. 
+ */ + UNWIND(.pad #4) stmdb sp!, {r0-r3, lr} + UNWIND(.save {r0-r3, lr}) .endm .macro mcount_get_lr reg @@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old) .endm ENTRY(__gnu_mcount_nc) +UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE mov ip, lr ldmia sp!, {lr} @@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc) #else __mcount #endif +UNWIND(.fnend) ENDPROC(__gnu_mcount_nc) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller) +UNWIND(.fnstart) __ftrace_caller +UNWIND(.fnend) ENDPROC(ftrace_caller) #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) +UNWIND(.fnstart) __ftrace_graph_caller +UNWIND(.fnend) ENDPROC(ftrace_graph_caller) #endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index e0eb9a1cae77..8bac553fe213 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -267,7 +267,7 @@ __create_page_tables: addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] -#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) +#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) sub r4, r4, #4 @ Fixup page table pointer @ for 64-bit descriptors #endif diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 96093b75ab90..5dc1aa6f0f7d 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused) } if (err) { - pr_warning("CPU %d debug is powered down!\n", cpu); + pr_warn_once("CPU %d debug is powered down!\n", cpu); cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); return; } @@ -987,7 +987,7 @@ clear_vcr: isb(); if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to disable vector catch\n", cpu); + pr_warn_once("CPU %d failed to disable vector catch\n", cpu); return; } @@ -1007,7 +1007,7 @@ clear_vcr: } if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to clear debug register pairs\n", cpu); + pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); return; } diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f6cbb2e3eda..d343a6c3a6d1 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -353,6 +353,23 @@ void __init early_print(const char *str, ...) 
printk("%s", buf); } +static void __init cpuid_init_hwcaps(void) +{ + unsigned int divide_instrs; + + if (cpu_architecture() < CPU_ARCH_ARMv7) + return; + + divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; + + switch (divide_instrs) { + case 2: + elf_hwcap |= HWCAP_IDIVA; + case 1: + elf_hwcap |= HWCAP_IDIVT; + } +} + static void __init feat_v6_fixup(void) { int id = read_cpuid_id(); @@ -483,8 +500,11 @@ static void __init setup_processor(void) snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; + + cpuid_init_hwcaps(); + #ifndef CONFIG_ARM_THUMB - elf_hwcap &= ~HWCAP_THUMB; + elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif feat_v6_fixup(); @@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) size -= start & ~PAGE_MASK; bank->start = PAGE_ALIGN(start); -#ifndef CONFIG_LPAE +#ifndef CONFIG_ARM_LPAE if (bank->start + size < bank->start) { printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " "32-bit physical address space\n", (long long)start); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 79078edbb9bc..1f2ccccaf009 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb, if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; - if (arm_delay_ops.const_clock) - return NOTIFY_OK; - if (!per_cpu(l_p_j_ref, cpu)) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index bd0300531399..e82e1d248772 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -12,6 +12,7 @@ #include <asm/smp_plat.h> #include <asm/tlbflush.h> +#include <asm/mmu_context.h> /**********************************************************************/ @@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored) local_flush_bp_all(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static int erratum_a15_798181(void) +{ + unsigned int midr = read_cpuid_id(); + + /* Cortex-A15 r0p0..r3p2 affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) + return 0; + return 1; +} +#else +static int erratum_a15_798181(void) +{ + return 0; +} +#endif + +static void ipi_flush_tlb_a15_erratum(void *arg) +{ + dmb(); +} + +static void broadcast_tlb_a15_erratum(void) +{ + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, + NULL, 1); +} + +static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) +{ + int cpu; + cpumask_t mask = { CPU_BITS_NONE }; + + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + /* + * We only need to send an IPI if the other CPUs are running + * the same ASID as the one being invalidated. There is no + * need for locking around the active_asids check since the + * switch_mm() function has at least one dmb() (as required by + * this workaround) in case a context switch happens on + * another CPU after the condition below. 
+ */ + if (atomic64_read(&mm->context.id) == + atomic64_read(&per_cpu(active_asids, cpu))) + cpumask_set_cpu(cpu, &mask); + } + smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); +} + void flush_tlb_all(void) { if (tlb_ops_need_broadcast()) on_each_cpu(ipi_flush_tlb_all, NULL, 1); else local_flush_tlb_all(); + broadcast_tlb_a15_erratum(); } void flush_tlb_mm(struct mm_struct *mm) @@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm) on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); else local_flush_tlb_mm(mm); + broadcast_tlb_mm_a15_erratum(mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) @@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) &ta, 1); } else local_flush_tlb_page(vma, uaddr); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_page(unsigned long kaddr) @@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); } else local_flush_tlb_kernel_page(kaddr); + broadcast_tlb_a15_erratum(); } void flush_tlb_range(struct vm_area_struct *vma, @@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma, &ta, 1); } else local_flush_tlb_range(vma, start, end); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) @@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); } else local_flush_tlb_kernel_range(start, end); + broadcast_tlb_a15_erratum(); } void flush_bp_all(void) diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9fe..0e4cfe123b38 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c @@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) lr, irq, vgic_cpu->vgic_lr[lr]); BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; - - goto out; + return true; } /* Try to use another LR for this interrupt */ @@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vgic_cpu->vgic_irq_lr_map[irq] = lr; set_bit(lr, vgic_cpu->lr_used); -out: if (!vgic_irq_is_edge(vcpu, irq)) vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; @@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); - /* - * We do not need to take the distributor lock here, since the only - * action we perform is clearing the irq_active_bit for an EOIed - * level interrupt. There is a potential race with - * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we - * check if the interrupt is already active. Two possibilities: - * - * - The queuing is occurring on the same vcpu: cannot happen, - * as we're already in the context of this vcpu, and - * executing the handler - * - The interrupt has been migrated to another vcpu, and we - * ignore this interrupt for this run. Big deal. It is still - * pending though, and will get considered when this vcpu - * exits. - */ if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { /* * Some level interrupts have been EOIed. Clear their @@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } else { vgic_cpu_irq_clear(vcpu, irq); } + + /* + * Despite being EOIed, the LR may not have + * been marked as empty. 
+ */ + set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); + vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; } } @@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } /* - * Sync back the VGIC state after a guest run. We do not really touch - * the distributor here (the irq_pending_on_cpu bit is safe to set), - * so there is no need for taking its lock. + * Sync back the VGIC state after a guest run. The distributor lock is + * needed so we don't get preempted in the middle of the state processing. */ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { @@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + if (!irqchip_in_kernel(vcpu->kvm)) return; + spin_lock(&dist->lock); __kvm_vgic_sync_hwstate(vcpu); + spin_unlock(&dist->lock); } int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a1a3c7..64dbfa57204a 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles) static void __timer_const_udelay(unsigned long xloops) { unsigned long long loops = xloops; - loops *= loops_per_jiffy; + loops *= arm_delay_ops.ticks_per_jiffy; __timer_delay(loops >> UDELAY_SHIFT); } @@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer) pr_info("Switching to timer-based delay loop\n"); delay_timer = timer; lpj_fine = timer->freq / HZ; - loops_per_jiffy = lpj_fine; + + /* cpufreq may scale loops_per_jiffy, so keep a private copy */ + arm_delay_ops.ticks_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; - arm_delay_ops.const_clock = true; + delay_calibrated = true; } else { pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index e13a8fa5e62c..2193c834f55c 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -257,6 +257,7 @@ int __init mx35_clocks_init(void) clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); + clk_register_clkdev(clk[admux_gate], "audmux", NULL); clk_prepare_enable(clk[spba_gate]); clk_prepare_enable(clk[gpio1_gate]); @@ -265,6 +266,7 @@ int __init mx35_clocks_init(void) clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); clk_prepare_enable(clk[max_gate]); + clk_prepare_enable(clk[iomuxc_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. 
The clock code diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 2f9ff93a4e61..d38e54f5b6d7 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m" static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; -static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; +static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; @@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void) clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); - clk_register_clkdev(clk[twd], NULL, "smp_twd"); clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); clk_register_clkdev(clk[ahb], "ahb", NULL); clk_register_clkdev(clk[cko1], "cko1", NULL); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308a..c465faca51b0 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id) int lockregs; int i; - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: lockregs = 8; break; @@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) if (cache_id_part_number_from_dt) cache_id = cache_id_part_number_from_dt; else - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) - & L2X0_CACHE_ID_PART_MASK; + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; /* Determine the number of ways */ - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: if (aux & (1 << 16)) ways = 16; @@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = { .flush_all = l2x0_flush_all, .inv_all = l2x0_inv_all, .disable = l2x0_disable, - .set_debug = pl310_set_debug, }, }; @@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) data->save(); of_init = true; - l2x0_init(l2x0_base, aux_val, aux_mask); - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); + l2x0_init(l2x0_base, aux_val, aux_mask); return 0; } diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a5a4b2bc42ba..2ac37372ef52 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); -static DEFINE_PER_CPU(atomic64_t, active_asids); +DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; @@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct 
*tsk) if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { local_flush_bp_all(); local_flush_tlb_all(); + dummy_flush_tlb_a15_erratum(); } atomic64_set(&per_cpu(active_asids, cpu), asid); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996ab78f..78978945492a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init alloc_init_section(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys, - const struct mem_type *type) +static void __init map_init_section(pmd_t *pmd, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) { - pmd_t *pmd = pmd_offset(pud, addr); - +#ifndef CONFIG_ARM_LPAE /* - * Try a section mapping - end, addr and phys must all be aligned - * to a section boundary. Note that PMDs refer to the individual - * L1 entries, whereas PGDs refer to a group of L1 entries making - * up one logical pointer to an L2 table. + * In classic MMU format, puds and pmds are folded in to + * the pgds. pmd_offset gives the PGD entry. PGDs refer to a + * group of L1 entries making up one logical pointer to + * an L2 table (2MB), where as PMDs refer to the individual + * L1 entries (1MB). Hence increment to get the correct + * offset for odd 1MB sections. + * (See arch/arm/include/asm/pgtable-2level.h) */ - if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { - pmd_t *p = pmd; - -#ifndef CONFIG_ARM_LPAE - if (addr & SECTION_SIZE) - pmd++; + if (addr & SECTION_SIZE) + pmd++; #endif + do { + *pmd = __pmd(phys | type->prot_sect); + phys += SECTION_SIZE; + } while (pmd++, addr += SECTION_SIZE, addr != end); - do { - *pmd = __pmd(phys | type->prot_sect); - phys += SECTION_SIZE; - } while (pmd++, addr += SECTION_SIZE, addr != end); + flush_pmd_entry(pmd); +} - flush_pmd_entry(p); - } else { +static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) +{ + pmd_t *pmd = pmd_offset(pud, addr); + unsigned long next; + + do { /* - * No need to loop; pte's aren't interested in the - * individual L1 entries. + * With LPAE, we must loop over to map + * all the pmds for the given range. */ - alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); - } + next = pmd_addr_end(addr, end); + + /* + * Try a section mapping - addr, next and phys must all be + * aligned to a section boundary. + */ + if (type->prot_sect && + ((addr | next | phys) & ~SECTION_MASK) == 0) { + map_init_section(pmd, addr, next, phys, type); + } else { + alloc_init_pte(pmd, addr, next, + __phys_to_pfn(phys), type); + } + + phys += next - addr; + + } while (pmd++, addr = next, addr != end); } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, @@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, do { next = pud_addr_end(addr, end); - alloc_init_section(pud, addr, next, phys, type); + alloc_init_pmd(pud, addr, next, phys, type); phys += next - addr; } while (pud++, addr = next, addr != end); } diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5c..f584d3f5b37c 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -420,7 +420,7 @@ __v7_pj4b_proc_info: __v7_ca7mp_proc_info: .long 0x410fc070 .long 0xff0ffff0 - __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca7mp_setup .size __v7_ca7mp_proc_info, . 
- __v7_ca7mp_proc_info /* @@ -430,10 +430,25 @@ __v7_ca7mp_proc_info: __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 - __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca15mp_setup .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* + * Qualcomm Inc. Krait processors. + */ + .type __krait_proc_info, #object +__krait_proc_info: + .long 0x510f0400 @ Required ID value + .long 0xff0ffc00 @ Mask for ID + /* + * Some Krait processors don't indicate support for SDIV and UDIV + * instructions in the ARM instruction set, even though they actually + * do support them. + */ + __v7_proc __v7_setup, hwcaps = HWCAP_IDIV + .size __krait_proc_info, . - __krait_proc_info + + /* * Match any ARMv7 processor core. */ .type __v7_proc_info, #object diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cd2e21ff562a..51244bf97271 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -18,7 +18,7 @@ config MIPS select HAVE_KRETPROBES select HAVE_DEBUG_KMEMLEAK select ARCH_BINFMT_ELF_RANDOMIZE_PIE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT select RTC_LIB if !MACH_LOONGSON select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE @@ -657,7 +657,7 @@ config SNI_RM bool "SNI RM200/300/400" select FW_ARC if CPU_LITTLE_ENDIAN select FW_ARC32 if CPU_LITTLE_ENDIAN - select SNIPROM if CPU_BIG_ENDIAN + select FW_SNIPROM if CPU_BIG_ENDIAN select ARCH_MAY_HAVE_PC_FDC select BOOT_ELF32 select CEVT_R4K @@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION config FW_ARC32 bool -config SNIPROM +config FW_SNIPROM bool config BOOT_ELF32 @@ -1493,7 +1493,6 @@ config CPU_XLP select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM - select CPU_HAS_LLSC select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_HAS_PREFETCH diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c29508..9aa7d44898ed 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -745,10 +745,7 @@ void __init board_prom_init(void) strcpy(cfe_version, "unknown"); printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); - if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { - printk(KERN_ERR PFX "invalid nvram checksum\n"); - return; - } + bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); board_name = bcm63xx_nvram_get_name(); /* find board by name */ diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 620611680839..a4b8864f9307 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -38,7 +38,7 @@ struct bcm963xx_nvram { static struct bcm963xx_nvram nvram; static int mac_addr_used; -int __init bcm63xx_nvram_init(void *addr) +void __init bcm63xx_nvram_init(void *addr) { unsigned int check_len; u32 crc, expected_crc; @@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr) crc = crc32_le(~0, (u8 *)&nvram, check_len); if (crc != expected_crc) - return -EINVAL; - - return 0; + pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", + expected_crc, crc); } u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231be788c..35e18e98beb9 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void) return board_register_devices(); } -device_initcall(bcm63xx_register_devices); 
+arch_initcall(bcm63xx_register_devices); diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c594a3d4f743..b0baa299f899 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image) static void octeon_generic_shutdown(void) { - int cpu, i; + int i; +#ifdef CONFIG_SMP + int cpu; +#endif struct cvmx_bootmem_desc *bootmem_desc; void *named_block_array_ptr; diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b4d3b7..4e0b6bc1165e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h @@ -9,10 +9,8 @@ * * Initialized the local nvram copy from the target address and checks * its checksum. - * - * Returns 0 on success. */ -int __init bcm63xx_nvram_init(void *nvram); +void bcm63xx_nvram_init(void *nvram); /** * bcm63xx_nvram_get_name() - returns the board name according to nvram diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c828419037..193c0912d38e 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,11 +28,7 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ -#ifdef CONFIG_CPU_HAS_LLSC #define cpu_has_llsc 1 -#else -#define cpu_has_llsc 0 -#endif /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c25906a..0da44d422f5b 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1166,7 +1166,10 @@ do { \ unsigned int __dspctl; \ \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " rddsp %0, %x1 \n" \ + " .set pop \n" \ : "=r" (__dspctl) \ : "i" (mask)); \ __dspctl; \ @@ -1175,30 +1178,198 @@ do { \ #define wrdsp(val, mask) \ do { \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " wrdsp %0, %x1 \n" \ + " .set pop \n" \ : \ : "r" (val), "i" (mask)); \ } while (0) -#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) -#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) -#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) -#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) - -#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) -#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) -#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) -#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) - -#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) -#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) -#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) -#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) - -#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) -#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) -#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) -#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) +#define mflo0() \ +({ \ + long mflo0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mflo0)); \ + mflo0; \ +}) + +#define mflo1() 
\ +({ \ + long mflo1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mflo1)); \ + mflo1; \ +}) + +#define mflo2() \ +({ \ + long mflo2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mflo2)); \ + mflo2; \ +}) + +#define mflo3() \ +({ \ + long mflo3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mflo3)); \ + mflo3; \ +}) + +#define mfhi0() \ +({ \ + long mfhi0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mfhi0)); \ + mfhi0; \ +}) + +#define mfhi1() \ +({ \ + long mfhi1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mfhi1)); \ + mfhi1; \ +}) + +#define mfhi2() \ +({ \ + long mfhi2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mfhi2)); \ + mfhi2; \ +}) + +#define mfhi3() \ +({ \ + long mfhi3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mfhi3)); \ + mfhi3; \ +}) + + +#define mtlo0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) #else diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f6367c201..8efe5a9e2c3e 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h @@ -21,6 +21,6 @@ #include <asm/sigcontext.h> #include <asm/siginfo.h> -#define __ARCH_HAS_ODD_SIGACTION +#define __ARCH_HAS_IRIX_SIGACTION #endif /* _ASM_SIGNAL_H */ diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4d0f3a..addb9f556b71 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ * * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. + * + * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever + * supported its use and no libc was using it, so the entire sa-restorer + * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 + * retaining only the SA_RESTORER definition as a reminder to avoid + * accidental reuse of the mask bit. 
*/ #define SA_ONSTACK 0x08000000 #define SA_RESETHAND 0x80000000 @@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_RESTORER 0x04000000 /* Only for o32 */ - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f6184c..de75fb50562b 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o # -# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe -# to enable DSP assembler support here even if the MIPS Release 2 CPU we -# are targetting does not support DSP because all code-paths making use of -# it properly check that the running CPU *actually does* support these -# instructions. +# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not +# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches +# here because the compiler may use DSP ASE instructions (such as lwx) in +# code paths where we cannot check that the CPU we are running on supports it. +# Proper abstraction using HAVE_AS_DSP and macros is done in +# arch/mips/include/asm/mipsregs.h. # ifeq ($(CONFIG_CPU_MIPSR2), y) CFLAGS_DSP = -DHAVE_AS_DSP -# -# Check if assembler supports DSP ASE -# -ifeq ($(call cc-option-yn,-mdsp), y) -CFLAGS_DSP += -mdsp -endif - -# -# Check if assembler supports DSP ASE Rev2 -# -ifeq ($(call cc-option-yn,-mdspr2), y) -CFLAGS_DSP += -mdspr2 -endif - CFLAGS_signal.o = $(CFLAGS_DSP) CFLAGS_signal32.o = $(CFLAGS_DSP) CFLAGS_process.o = $(CFLAGS_DSP) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a95..5fe66a0c3224 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->tlbsize = 48; break; case PRID_IMP_VR41XX: + set_isa(c, MIPS_CPU_ISA_III); + c->options = R4K_OPTS; + c->tlbsize = 32; switch (c->processor_id & 0xf0) { case PRID_REV_VR4111: c->cputype = CPU_VR4111; @@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC VR4131"; } else { c->cputype = CPU_VR4133; + c->options |= MIPS_CPU_LLSC; __cpu_name[cpu] = "NEC VR4133"; } break; @@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC Vr41xx"; break; } - set_isa(c, MIPS_CPU_ISA_III); - c->options = R4K_OPTS; - c->tlbsize = 32; break; case PRID_IMP_R4300: c->cputype = CPU_R4300; @@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; } diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 8eeee1c860c0..db9655f08892 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third, err = compat_sys_shmctl(first, second, compat_ptr(ptr)); break; default: - err = -EINVAL; + err = -ENOSYS; break; } diff --git 
a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 165867673357..33d067148e61 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,10 +46,9 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#else - PTR_ADDIU sp, PT_SIZE #endif -.endm + PTR_ADDIU sp, PT_SIZE + .endm .macro RETURN_BACK jr ra @@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub - addiu sp,sp,8 +#ifdef CONFIG_32BIT + addiu sp,sp,8 +#else + nop +#endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ lw t1, function_trace_stop diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aadccbe..7a54f74b7818 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mips_r) { seq_printf(m, "isa\t\t\t:"); if (cpu_has_mips_1) - seq_printf(m, "%s", "mips1"); + seq_printf(m, "%s", " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5bdbb87..c3abb88170fc 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif - if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) + if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) status_set |= ST0_XX; if (cpu_has_dsp) status_set |= ST0_MX; diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index 81f1dcfdcab8..a64daee740ee 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a &= ~mask; raw_local_irq_restore(flags); return res; @@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a ^= mask; raw_local_irq_restore(flags); return res; diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147aebd41..a6adffbb4e5f 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -270,7 +270,7 @@ LEAF(csum_partial) #endif /* odd buffer alignment? 
*/ -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, t7 #else @@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) addu sum, v1 #endif -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, odd #else diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559b8d7b..2078915eacb9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void) return; default: - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b4b1ba..df96da7e939b 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void) c->scache.flags |= MIPS_CACHE_NOT_PRESENT; /* Ignore anything but MIPSxx processors */ - if (c->isa_level != MIPS_CPU_ISA_M32R1 && - c->isa_level != MIPS_CPU_ISA_M32R2 && - c->isa_level != MIPS_CPU_ISA_M64R1 && - c->isa_level != MIPS_CPU_ISA_M64R2) + if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? */ diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c83fd67..d1faece21b6a 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -19,7 +19,7 @@ #include <asm/mach-au1x00/au1000.h> #include <asm/tlbmisc.h> -#ifdef CONFIG_DEBUG_PCI +#ifdef CONFIG_PCI_DEBUG #define DBG(x...) printk(KERN_DEBUG x) #else #define DBG(x...) 
do {} while (0) @@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus, if (status & (1 << 29)) { *data = 0xffffffff; error = -1; - DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", access_type, bus->number, device); } else if ((status >> 28) & 0xf) { DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a2930844d43..4a5443118cfb 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -344,6 +344,7 @@ extern unsigned long MODULES_END; #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ @@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void); /* * No page table caches to initialise */ -#define pgtable_cache_init() do { } while (0) +static inline void pgtable_cache_init(void) { } +static inline void check_pgt_cache(void) { } #include <asm-generic/pgtable.h> diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d34b45..466fb3383960 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to, * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address * contains the (negative) exception code. */ -static __always_inline unsigned long follow_table(struct mm_struct *mm, - unsigned long addr, int write) +#ifdef CONFIG_64BIT +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; + unsigned long *table = (unsigned long *)__pa(mm->pgd); + + switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: + table = table + ((address >> 53) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x39UL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION2: + table = table + ((address >> 42) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3aUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION3: + table = table + ((address >> 31) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3bUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_SEGMENT: + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) + return -0x10UL; + if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { + if (write && (*table & _SEGMENT_ENTRY_RO)) + return -0x04UL; + return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + + (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); + } + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + } + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) + return -0x11UL; + if (write && (*table & _PAGE_RO)) + return -0x04UL; + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); +} - pgd = pgd_offset(mm, addr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return -0x3aUL; +#else /* CONFIG_64BIT */ - pud = pud_offset(pgd, addr); - if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return -0x3bUL; +static unsigned long 
follow_table(struct mm_struct *mm, + unsigned long address, int write) +{ + unsigned long *table = (unsigned long *)__pa(mm->pgd); - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) return -0x10UL; - if (pmd_large(*pmd)) { - if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) - return -0x04UL; - return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); - } - if (unlikely(pmd_bad(*pmd))) - return -0x10UL; - - ptep = pte_offset_map(pmd, addr); - if (!pte_present(*ptep)) + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) return -0x11UL; - if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) + if (write && (*table & _PAGE_RO)) return -0x04UL; - - return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); } +#endif /* CONFIG_64BIT */ + static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, size_t n, int write_user) { @@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) static size_t clear_user_pt(size_t n, void __user *to) { - void *zpage = &empty_zero_page; + void *zpage = (void *) empty_zero_page; long done, size, ret; done = 0; diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7b59c6..7a5aa1a7864e 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot) #ifdef CONFIG_BLK_DEV_INITRD -/* - * Note that the kernel can potentially support other compression - * techniques than gz, though we don't do so by default. If we ever - * decide to do so we can either look for other filename extensions, - * or just allow a file with this name to be compressed with an - * arbitrary compressor (somewhat counterintuitively). - */ static int __initdata set_initramfs_file; -static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; +static char __initdata initramfs_file[128] = "initramfs"; static int __init setup_initramfs_file(char *str) { @@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str) early_param("initramfs_file", setup_initramfs_file); /* - * We look for an "initramfs.cpio.gz" file in the hvfs. - * If there is one, we allocate some memory for it and it will be - * unpacked to the initramfs. + * We look for a file called "initramfs" in the hvfs. If there is one, we + * allocate some memory for it and it will be unpacked to the initramfs. + * If it's compressed, the initd code will uncompress it first. */ static void __init load_hv_initrd(void) { @@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { - if (set_initramfs_file) + if (set_initramfs_file) { pr_warning("No such hvfs initramfs file '%s'\n", initramfs_file); - return; + return; + } else { + /* Try old backwards-compatible name. 
*/ + fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); + if (fd == HV_ENOENT) + return; + } } BUG_ON(fd < 0); stat = hv_fs_fstat(fd); diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501acb1b..5ef205c5f37b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/piggy.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone -$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -targets += vmlinux.bin.all vmlinux.relocs +targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b62592..2e188d68397c 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; */ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return regs->orig_ax & __SYSCALL_MASK; + return regs->orig_ax; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - regs->ax = regs->orig_ax & __SYSCALL_MASK; + regs->ax = regs->orig_ax; } static inline long syscall_get_error(struct task_struct *task, diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd4e4ad..f77df1c5de6e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr); + addr, sizeof(u8)); } void kvm_lapic_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f19ac0aca60d..e1721324c271 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, + sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) gpa_offset = data & ~(PAGE_MASK | 1); - /* Check that the address is 32-byte aligned. 
*/ - if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) - break; - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.pv_time, data & ~1ULL)) + &vcpu->arch.pv_time, data & ~1ULL, + sizeof(struct pvclock_vcpu_time_info))) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; @@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS)) + data & KVM_STEAL_VALID_BITS, + sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; |
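A note on the ARM_ERRATA_798181 detection added in smp_tlb.c above: the
workaround keys off the MIDR, masking out the variant and revision fields
to identify a Cortex-A15 and then comparing the raw value to bound the
affected revisions at r3p2. A standalone, commented restatement of that
check (sketch only, not kernel code; erratum_a15_798181_affected() and
its uint32_t midr argument are hypothetical stand-ins for the kernel's
erratum_a15_798181()/read_cpuid_id()):

    #include <stdint.h>

    /* MIDR layout: [31:24] implementer, [23:20] variant (rN),
     * [19:16] architecture, [15:4] part number, [3:0] revision (pM).
     * Cortex-A15 is implementer 0x41 (ARM Ltd), part 0xC0F. */
    static int erratum_a15_798181_affected(uint32_t midr)
    {
        /* Mask off variant and revision; keep implementer/arch/part. */
        if ((midr & 0xff0ffff0) != 0x410fc0f0)
            return 0;              /* not a Cortex-A15 at all */
        /* r0p0..r3p2 affected. Since variant and revision are the only
         * bits left unmasked, comparing the full MIDR against
         * 0x413fc0f2 bounds both fields at once (variant <= 3, and at
         * variant 3, revision <= 2). */
        return midr <= 0x413fc0f2;
    }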