author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 18:13:19 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 18:13:19 +0200
commit		8546dc1d4b671480961c3eaf4c0c102ae6848340 (patch)
tree		c646079fb48811b22b742deb6bd2e907f9e6c3d4 /arch/arm/kernel
parent		Merge tag 'sound-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai... (diff)
parent		Merge branch 'cleanup' into for-linus (diff)
download	linux-8546dc1d4b671480961c3eaf4c0c102ae6848340.tar.xz
		linux-8546dc1d4b671480961c3eaf4c0c102ae6848340.zip
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "The major items included in here are:

   - MCPM, multi-cluster power management, part of the infrastructure
     required for ARMs big.LITTLE support.

   - A rework of the ARM KVM code to allow re-use by ARM64.

   - Error handling cleanups of the IS_ERR_OR_NULL() madness and fixes
     of that stuff for arch/arm

   - Preparatory patches for Cortex-M3 support from Uwe Kleine-König.

  There is also a set of three patches in here from Hugh/Catalin to
  address freeing of inappropriate page tables on LPAE.  You already
  have these from akpm, but they were already part of my tree at the
  time he sent them, so unfortunately they'll end up with duplicate
  commits"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
  ARM: EXYNOS: remove unnecessary use of IS_ERR_VALUE()
  ARM: IMX: remove unnecessary use of IS_ERR_VALUE()
  ARM: OMAP: use consistent error checking
  ARM: cleanup: OMAP hwmod error checking
  ARM: 7709/1: mcpm: Add explicit AFLAGS to support v6/v7 multiplatform kernels
  ARM: 7700/2: Make cpu_init() notrace
  ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE
  ARM: 7701/1: mm: Allow arch code to control the user page table ceiling
  ARM: 7703/1: Disable preemption in broadcast_tlb*_a15_erratum()
  ARM: mcpm: provide an interface to set the SMP ops at run time
  ARM: mcpm: generic SMP secondary bringup and hotplug support
  ARM: mcpm_head.S: vlock-based first man election
  ARM: mcpm: Add baremetal voting mutexes
  ARM: mcpm: introduce helpers for platform coherency exit/setup
  ARM: mcpm: introduce the CPU/cluster power API
  ARM: multi-cluster PM: secondary kernel entry code
  ARM: cacheflush: add synchronization helpers for mixed cache state accesses
  ARM: cpu hotplug: remove majority of cache flushing from platforms
  ARM: smp: flush L1 cache in cpu_die()
  ARM: tegra: remove tegra specific cpu_disable()
  ...
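For readers skimming the IS_ERR_VALUE() cleanups in the shortlog, the recurring pattern in those commits is that the macro was being applied to plain int error codes, where a direct sign test is clearer and the consistent idiom. A minimal before/after sketch (platform_setup() is a hypothetical helper returning 0 or -errno):

	#include <linux/err.h>

	extern int platform_setup(void);	/* hypothetical: returns 0 or -errno */

	int probe_old(void)
	{
		int ret = platform_setup();

		if (IS_ERR_VALUE(ret))		/* needless: ret is an int errno, not a pointer-sized value */
			return ret;
		return 0;
	}

	int probe_new(void)
	{
		int ret = platform_setup();

		if (ret < 0)			/* the consistent check the cleanups converge on */
			return ret;
		return 0;
	}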
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/asm-offsets.c		12
-rw-r--r--	arch/arm/kernel/bios32.c		6
-rw-r--r--	arch/arm/kernel/entry-armv.S		59
-rw-r--r--	arch/arm/kernel/entry-common.S		8
-rw-r--r--	arch/arm/kernel/entry-header.S		66
-rw-r--r--	arch/arm/kernel/head-common.S		9
-rw-r--r--	arch/arm/kernel/head-nommu.S		8
-rw-r--r--	arch/arm/kernel/process.c		13
-rw-r--r--	arch/arm/kernel/return_address.c	5
-rw-r--r--	arch/arm/kernel/setup.c			4
-rw-r--r--	arch/arm/kernel/smp.c			42
-rw-r--r--	arch/arm/kernel/smp_scu.c		2
-rw-r--r--	arch/arm/kernel/smp_tlb.c		9
13 files changed, 160 insertions, 83 deletions
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 923eec7105cf..a53efa993690 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -149,6 +149,10 @@ int main(void)
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+ BLANK();
+ DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
+ DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
+ BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
@@ -165,10 +169,10 @@ int main(void)
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
- DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
- DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar));
- DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar));
- DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc));
+ DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
+ DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
+ DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
+ DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
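For context on these hunks: DEFINE() and BLANK() come from include/linux/kbuild.h. asm-offsets.c is compiled to assembly, the "->" markers are grepped out of the compiler output, and the result becomes the generated asm-offsets.h that entry code includes, so assembly can use C structure offsets such as VCPU_HSR. The helpers are simply:

	/* include/linux/kbuild.h */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() asm volatile("\n->" : : )

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))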
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index a1f73b502ef0..b2ed73c45489 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
sys->map_irq = hw->map_irq;
+ sys->align_resource = hw->align_resource;
INIT_LIST_HEAD(&sys->resources);
if (hw->private_data)
@@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
+ struct pci_dev *dev = data;
+ struct pci_sys_data *sys = dev->sysdata;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO && start & 0x300)
@@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
start = (start + align - 1) & ~(align - 1);
+ if (sys->align_resource)
+ return sys->align_resource(dev, res, start, size, align);
+
return start;
}
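The new align_resource hook is copied from struct hw_pci into each pci_sys_data at init time and, when non-NULL, gets the final word on where a resource lands. A sketch of how a host driver might wire it up (everything except the hw_pci/pci_sys_data fields shown in the diff is hypothetical):

	static resource_size_t foo_align_resource(struct pci_dev *dev,
						  const struct resource *res,
						  resource_size_t start,
						  resource_size_t size,
						  resource_size_t align)
	{
		/* example policy: force I/O BARs onto 1K boundaries */
		if (res->flags & IORESOURCE_IO)
			return ALIGN(start, SZ_1K);
		return start;
	}

	static struct hw_pci foo_pci __initdata = {
		.nr_controllers	= 1,
		.align_resource	= foo_align_resource,
		/* .setup, .map_irq, ... as for any hw_pci user */
	};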
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0f82098c9bfe..582b405befc5 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,18 +192,6 @@ __dabt_svc:
svc_entry
mov r2, sp
dabt_helper
-
- @
- @ IRQs off again before pulling preserved data off the stack
- @
- disable_irq_notrace
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- tst r5, #PSR_I_BIT
- bleq trace_hardirqs_on
- tst r5, #PSR_I_BIT
- blne trace_hardirqs_off
-#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -223,12 +211,7 @@ __irq_svc:
blne svc_preempt
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- @ The parent context IRQs must have been enabled to get here in
- @ the first place, so there's no point checking the PSR I bit.
- bl trace_hardirqs_on
-#endif
- svc_exit r5 @ return from exception
+ svc_exit r5, irq = 1 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
@@ -295,22 +278,8 @@ __und_svc_fault:
mov r0, sp @ struct pt_regs *regs
bl __und_fault
- @
- @ IRQs off again before pulling preserved data off the stack
- @
__und_svc_finish:
- disable_irq_notrace
-
- @
- @ restore SPSR and restart the instruction
- @
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
-#ifdef CONFIG_TRACE_IRQFLAGS
- tst r5, #PSR_I_BIT
- bleq trace_hardirqs_on
- tst r5, #PSR_I_BIT
- blne trace_hardirqs_off
-#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__und_svc)
@@ -320,18 +289,6 @@ __pabt_svc:
svc_entry
mov r2, sp @ regs
pabt_helper
-
- @
- @ IRQs off again before pulling preserved data off the stack
- @
- disable_irq_notrace
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- tst r5, #PSR_I_BIT
- bleq trace_hardirqs_on
- tst r5, #PSR_I_BIT
- blne trace_hardirqs_off
-#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__pabt_svc)
@@ -396,6 +353,7 @@ ENDPROC(__pabt_svc)
#ifdef CONFIG_IRQSOFF_TRACER
bl trace_hardirqs_off
#endif
+ ct_user_exit save = 0
.endm
.macro kuser_cmpxchg_check
@@ -562,21 +520,21 @@ ENDPROC(__und_usr)
@ Fall-through from Thumb-2 __und_usr
@
#ifdef CONFIG_NEON
+ get_thread_info r10 @ get current thread
adr r6, .LCneon_thumb_opcodes
b 2f
#endif
call_fpe:
+ get_thread_info r10 @ get current thread
#ifdef CONFIG_NEON
adr r6, .LCneon_arm_opcodes
-2:
- ldr r7, [r6], #4 @ mask value
- cmp r7, #0 @ end mask?
- beq 1f
- and r8, r0, r7
+2: ldr r5, [r6], #4 @ mask value
ldr r7, [r6], #4 @ opcode bits matching in mask
+ cmp r5, #0 @ end mask?
+ beq 1f
+ and r8, r0, r5
cmp r8, r7 @ NEON instruction?
bne 2b
- get_thread_info r10
mov r7, #1
strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
@@ -586,7 +544,6 @@ call_fpe:
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
moveq pc, lr
- get_thread_info r10 @ get current thread
and r8, r0, #0x00000f00 @ mask out CP number
THUMB( lsr r8, r8, #8 )
mov r7, #1
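The loop rewritten above walks .LCneon_arm_opcodes/.LCneon_thumb_opcodes, a table of (mask, opcode-bits) pairs terminated by a zero mask; after the change, r5 holds the mask and r7 the expected opcode bits, so both words are loaded before the end-of-table test. In C the match logic is equivalent to this sketch:

	#include <linux/types.h>

	struct neon_opc {
		u32 mask;	/* zero mask terminates the table */
		u32 opc;	/* opcode bits expected within the mask */
	};

	static bool is_neon_insn(u32 instr, const struct neon_opc *tbl)
	{
		for (; tbl->mask; tbl++)
			if ((instr & tbl->mask) == tbl->opc)
				return true;
		return false;
	}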
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index fefd7f971437..bc5bc0a97131 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -35,12 +35,11 @@ ret_fast_syscall:
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
-#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
-#endif
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
+ ct_user_enter
restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
@@ -71,11 +70,11 @@ ENTRY(ret_to_user_from_irq)
tst r1, #_TIF_WORK_MASK
bne work_pending
no_work_pending:
-#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
-#endif
+
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
+ ct_user_enter save = 0
restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
@@ -406,6 +405,7 @@ ENTRY(vector_swi)
mcr p15, 0, ip, c1, c0 @ update control register
#endif
enable_irq
+ ct_user_exit
get_thread_info tsk
adr tbl, sys_call_table @ load syscall table pointer
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a8531eadd3d..160f3376ba6d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -74,7 +74,24 @@
.endm
#ifndef CONFIG_THUMB2_KERNEL
- .macro svc_exit, rpsr
+ .macro svc_exit, rpsr, irq = 0
+ .if \irq != 0
+ @ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+ @ The parent context IRQs must have been enabled to get here in
+ @ the first place, so there's no point checking the PSR I bit.
+ bl trace_hardirqs_on
+#endif
+ .else
+ @ IRQs off again before pulling preserved data off the stack
+ disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+ tst \rpsr, #PSR_I_BIT
+ bleq trace_hardirqs_on
+ tst \rpsr, #PSR_I_BIT
+ blne trace_hardirqs_off
+#endif
+ .endif
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6)
ldr r0, [sp]
@@ -120,7 +137,24 @@
mov pc, \reg
.endm
#else /* CONFIG_THUMB2_KERNEL */
- .macro svc_exit, rpsr
+ .macro svc_exit, rpsr, irq = 0
+ .if \irq != 0
+ @ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+ @ The parent context IRQs must have been enabled to get here in
+ @ the first place, so there's no point checking the PSR I bit.
+ bl trace_hardirqs_on
+#endif
+ .else
+ @ IRQs off again before pulling preserved data off the stack
+ disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+ tst \rpsr, #PSR_I_BIT
+ bleq trace_hardirqs_on
+ tst \rpsr, #PSR_I_BIT
+ blne trace_hardirqs_off
+#endif
+ .endif
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
clrex @ clear the exclusive monitor
@@ -164,6 +198,34 @@
#endif /* !CONFIG_THUMB2_KERNEL */
/*
+ * Context tracking subsystem. Used to instrument transitions
+ * between user and kernel mode.
+ */
+ .macro ct_user_exit, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+ .if \save
+ stmdb sp!, {r0-r3, ip, lr}
+ bl user_exit
+ ldmia sp!, {r0-r3, ip, lr}
+ .else
+ bl user_exit
+ .endif
+#endif
+ .endm
+
+ .macro ct_user_enter, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+ .if \save
+ stmdb sp!, {r0-r3, ip, lr}
+ bl user_enter
+ ldmia sp!, {r0-r3, ip, lr}
+ .else
+ bl user_enter
+ .endif
+#endif
+ .endm
+
+/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
*
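The ct_user_exit/ct_user_enter macros defined here are thin wrappers around the generic context tracking hooks; the save variant spills r0-r3, ip and lr because user_exit()/user_enter() are ordinary C calls that may clobber the caller-saved registers. Conceptually, every user<->kernel transition now does the equivalent of (a conceptual sketch, not the actual arch glue):

	#include <linux/context_tracking.h>

	void example_entry_from_user(void)
	{
		user_exit();	/* entering the kernel: RCU/adaptive-tick see the CPU as active */

		/* ... handle the syscall, fault or interrupt ... */

		user_enter();	/* back to user space: the CPU may go tick-less again */
	}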
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 854bd22380d3..5b391a689b47 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -98,8 +98,9 @@ __mmap_switched:
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
- bic r4, r0, #CR_A @ Clear 'A' bit
- stmia r7, {r0, r4} @ Save control register values
+ cmp r7, #0
+ bicne r4, r0, #CR_A @ Clear 'A' bit
+ stmneia r7, {r0, r4} @ Save control register values
b start_kernel
ENDPROC(__mmap_switched)
@@ -113,7 +114,11 @@ __mmap_switched_data:
.long processor_id @ r4
.long __machine_arch_type @ r5
.long __atags_pointer @ r6
+#ifdef CONFIG_CPU_CP15
.long cr_alignment @ r7
+#else
+ .long 0 @ r7
+#endif
.long init_thread_union + THREAD_START_SP @ sp
.size __mmap_switched_data, . - __mmap_switched_data
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2c228a07e58c..6a2e09c952c7 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,15 +32,21 @@
* numbers for r1.
*
*/
- .arm
__HEAD
+
+#ifdef CONFIG_CPU_THUMBONLY
+ .thumb
+ENTRY(stext)
+#else
+ .arm
ENTRY(stext)
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
+#endif
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ae58d3b37d9d..f21970316836 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -407,15 +407,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
* atomic helpers and the signal restart code. Insert it into the
* gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
*/
-static struct vm_area_struct gate_vma;
+static struct vm_area_struct gate_vma = {
+ .vm_start = 0xffff0000,
+ .vm_end = 0xffff0000 + PAGE_SIZE,
+ .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
+ .vm_mm = &init_mm,
+};
static int __init gate_vma_init(void)
{
- gate_vma.vm_start = 0xffff0000;
- gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
- gate_vma.vm_flags = VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYEXEC;
+ gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
return 0;
}
arch_initcall(gate_vma_init);
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 8085417555dd..fafedd86885d 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
struct return_address_data *data = d;
if (!data->level) {
- data->addr = (void *)frame->lr;
+ data->addr = (void *)frame->pc;
return 1;
} else {
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
struct stackframe frame;
register unsigned long current_sp asm ("sp");
- data.level = level + 1;
+ data.level = level + 2;
+ data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
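Both hunks feed walk_stackframe() (arch/arm/kernel/stacktrace.c), whose contract is: invoke the callback once per frame, and stop as soon as it returns non-zero. A minimal private walker in the same style as this file (function names are hypothetical):

	#include <linux/printk.h>
	#include <asm/stacktrace.h>

	static int dump_one_frame(struct stackframe *frame, void *d)
	{
		pr_info("  pc = %pS\n", (void *)frame->pc);
		return 0;			/* 0 = keep unwinding */
	}

	static void dump_my_stack(void)
	{
		struct stackframe frame;
		register unsigned long current_sp asm ("sp");

		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)dump_my_stack;
		walk_stackframe(&frame, dump_one_frame, NULL);
	}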
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 234e339196c0..728007c4a2b7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -290,10 +290,10 @@ static int cpu_has_aliasing_icache(unsigned int arch)
static void __init cacheid_init(void)
{
- unsigned int cachetype = read_cpuid_cachetype();
unsigned int arch = cpu_architecture();
if (arch >= CPU_ARCH_ARMv6) {
+ unsigned int cachetype = read_cpuid_cachetype();
if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
@@ -389,7 +389,7 @@ static void __init feat_v6_fixup(void)
*
* cpu_init sets up the per-CPU stacks.
*/
-void cpu_init(void)
+void notrace cpu_init(void)
{
unsigned int cpu = smp_processor_id();
struct stack *stk = &stacks[cpu];
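notrace is defined in include/linux/compiler.h as __attribute__((no_instrument_function)); it keeps the function tracer's entry hook out of cpu_init(), which runs during CPU bringup before tracing into it is safe. The general pattern (the function name here is a hypothetical example):

	#include <linux/compiler.h>

	/* must not be entered via the ftrace hook: runs before the
	 * per-CPU state the tracer depends on is initialised */
	void notrace early_bringup_helper(void)
	{
		/* ... */
	}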
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4619177bcfe6..47ab90563bf4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -211,6 +211,13 @@ void __cpuinit __cpu_die(unsigned int cpu)
}
printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+ /*
+ * platform_cpu_kill() is generally expected to do the powering off
+ * and/or cutting of clocks to the dying CPU. Optionally, this may
+ * be done by the CPU which is dying in preference to supporting
+ * this call, but that means there is _no_ synchronisation between
+ * the requesting CPU and the dying CPU actually losing power.
+ */
if (!platform_cpu_kill(cpu))
printk("CPU%u: unable to kill\n", cpu);
}
@@ -230,14 +237,41 @@ void __ref cpu_die(void)
idle_task_exit();
local_irq_disable();
- mb();
- /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ /*
+ * Flush the data out of the L1 cache for this CPU. This must be
+ * before the completion to ensure that data is safely written out
+ * before platform_cpu_kill() gets called - which may disable
+ * *this* CPU and power down its cache.
+ */
+ flush_cache_louis();
+
+ /*
+ * Tell __cpu_die() that this CPU is now safe to dispose of. Once
+ * this returns, power and/or clocks can be removed at any point
+ * from this CPU and its cache by platform_cpu_kill().
+ */
RCU_NONIDLE(complete(&cpu_died));
/*
- * actual CPU shutdown procedure is at least platform (if not
- * CPU) specific.
+ * Ensure that the cache lines associated with that completion are
+ * written out. This covers the case where _this_ CPU is doing the
+ * powering down, to ensure that the completion is visible to the
+ * CPU waiting for this one.
+ */
+ flush_cache_louis();
+
+ /*
+ * The actual CPU shutdown procedure is at least platform (if not
+ * CPU) specific. This may remove power, or it may simply spin.
+ *
+ * Platforms are generally expected *NOT* to return from this call,
+ * although there are some which do because they have no way to
+ * power down the CPU. These platforms are the _only_ reason we
+ * have a return path which uses the fragment of assembly below.
+ *
+ * The return path should not be used for platforms which can
+ * power off the CPU.
*/
if (smp_ops.cpu_die)
smp_ops.cpu_die(cpu);
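The handshake those comments describe is built on a completion. Condensed from this same file into two stand-alone functions (a sketch of the shape, not the exact kernel code):

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	static DECLARE_COMPLETION(cpu_died);

	/* requesting CPU, condensed from __cpu_die() */
	static void waiter_side(unsigned int cpu)
	{
		if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
			pr_err("CPU%u: cpu didn't die\n", cpu);
			return;
		}
		if (!platform_cpu_kill(cpu))		/* power/clocks may be cut here */
			pr_err("CPU%u: unable to kill\n", cpu);
	}

	/* dying CPU, condensed from cpu_die() */
	static void dying_side(unsigned int cpu)
	{
		flush_cache_louis();			/* make dirty L1 data safe first */
		RCU_NONIDLE(complete(&cpu_died));	/* unblock the waiter */
		flush_cache_louis();			/* make the completion itself visible */
		smp_ops.cpu_die(cpu);			/* usually does not return */
	}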
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 45eac87ed66a..5bc1a63284e3 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base)
#ifdef CONFIG_ARM_ERRATA_764369
/* Cortex-A9 only */
- if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
scu_ctrl = __raw_readl(scu_base + 0x30);
if (!(scu_ctrl & 1))
__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index e82e1d248772..9a52a07aa40e 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -98,21 +98,21 @@ static void broadcast_tlb_a15_erratum(void)
return;
dummy_flush_tlb_a15_erratum();
- smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
- NULL, 1);
+ smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
- int cpu;
+ int cpu, this_cpu;
cpumask_t mask = { CPU_BITS_NONE };
if (!erratum_a15_798181())
return;
dummy_flush_tlb_a15_erratum();
+ this_cpu = get_cpu();
for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
+ if (cpu == this_cpu)
continue;
/*
* We only need to send an IPI if the other CPUs are running
@@ -127,6 +127,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
cpumask_set_cpu(cpu, &mask);
}
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+ put_cpu();
}
void flush_tlb_all(void)
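The fix above is the classic preemption-safety pattern: smp_processor_id() is only stable while preemption is disabled, and get_cpu()/put_cpu() bracket exactly that window. In general (a sketch; visit_other_cpus() is hypothetical):

	#include <linux/cpumask.h>
	#include <linux/smp.h>

	static void visit_other_cpus(void (*fn)(int cpu))
	{
		int cpu, this_cpu = get_cpu();	/* disables preemption, returns our CPU id */

		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)	/* stays valid: we cannot migrate */
				continue;
			fn(cpu);
		}
		put_cpu();			/* re-enables preemption */
	}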