author     Linus Torvalds <torvalds@linux-foundation.org>   2014-06-06 00:57:04 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-06-06 00:57:04 +0200
commit     eb3d3ec567e868c8a3bfbfdfc9465ffd52983d11
tree       75acf38b8d73cd281e5ce4dcc941faf48e244b98
parent     Merge branch 'arm64-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kern...
parent     Merge branch 'devel-stable' into for-next
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm into next
Pull ARM updates from Russell King:

 - Major clean-up of the L2 cache support code.  The existing mess was
   becoming rather unmaintainable through all the additions that others
   have done over time.  This turns it into a much nicer structure, and
   implements a few performance improvements as well.

 - Clean up some of the CP15 control register tweaks for alignment
   support, moving some code and data into alignment.c

 - DMA properties for ARM, from Santosh and reviewed by DT people.  This
   adds DT properties to specify bus translations we can't discover
   automatically, and to indicate whether devices are coherent.

 - Hibernation support for ARM

 - Make ftrace work with read-only text in modules

 - add suspend support for PJ4B CPUs

 - rework interrupt masking for undefined instruction handling, which
   allows us to enable interrupts earlier in the handling of these
   exceptions.

 - support for big endian page tables

 - fix stacktrace support to exclude stacktrace functions from the
   trace, and add save_stack_trace_regs() implementation so that kprobes
   can record stack traces.

 - Add support for the Cortex-A17 CPU.

 - Remove last vestiges of ARM710 support.

 - Removal of ARM "meminfo" structure, finally converting us solely to
   memblock to handle the early memory initialisation.

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (142 commits)
  ARM: ensure C page table setup code follows assembly code (part II)
  ARM: ensure C page table setup code follows assembly code
  ARM: consolidate last remaining open-coded alignment trap enable
  ARM: remove global cr_no_alignment
  ARM: remove CPU_CP15 conditional from alignment.c
  ARM: remove unused adjust_cr() function
  ARM: move "noalign" command line option to alignment.c
  ARM: provide common method to clear bits in CPU control register
  ARM: 8025/1: Get rid of meminfo
  ARM: 8060/1: mm: allow sub-architectures to override PCI I/O memory type
  ARM: 8066/1: correction for ARM patch 8031/2
  ARM: 8049/1: ftrace/add save_stack_trace_regs() implementation
  ARM: 8065/1: remove last use of CONFIG_CPU_ARM710
  ARM: 8062/1: Modify ldrt fixup handler to re-execute the userspace instruction
  ARM: 8047/1: rwsem: use asm-generic rwsem implementation
  ARM: l2c: trial at enabling some Cortex-A9 optimisations
  ARM: l2c: add warnings for stuff modifying aux_ctrl register values
  ARM: l2c: print a warning with L2C-310 caches if the cache size is modified
  ARM: l2c: remove old .set_debug method
  ARM: l2c: kill L2X0_AUX_CTRL_MASK before anyone else makes use of this
  ...
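Editor's note: the meminfo removal is the most invasive of these changes inside arch/arm/kernel. As an orientation aid only, the sketch below shows the handful of memblock calls that the converted code in the setup.c and atags_parse.c hunks relies on; the helper name is hypothetical and not part of this merge.

/*
 * Orientation sketch only -- the helper name is hypothetical.  It
 * mirrors the memblock calls that replace the old meminfo bank array
 * in the setup.c and atags_parse.c hunks below.
 */
#include <linux/memblock.h>

static int __init example_register_ram(u64 start, u64 size)
{
	/* previously: meminfo.bank[meminfo.nr_banks++] = { start, size } */
	memblock_add(start, size);

	/* previously: the "meminfo.nr_banks != 0" test */
	return memblock_phys_mem_size() ? 0 : -EINVAL;
}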
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          |   1
-rw-r--r--  arch/arm/kernel/atags_parse.c     |   5
-rw-r--r--  arch/arm/kernel/devtree.c         |   4
-rw-r--r--  arch/arm/kernel/entry-armv.S      |  19
-rw-r--r--  arch/arm/kernel/entry-common.S    |   8
-rw-r--r--  arch/arm/kernel/entry-header.S    |   4
-rw-r--r--  arch/arm/kernel/ftrace.c          |  13
-rw-r--r--  arch/arm/kernel/head-common.S     |   3
-rw-r--r--  arch/arm/kernel/head.S            |   2
-rw-r--r--  arch/arm/kernel/hibernate.c       | 107
-rw-r--r--  arch/arm/kernel/irq.c             |  12
-rw-r--r--  arch/arm/kernel/iwmmxt.S          |  16
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  |   1
-rw-r--r--  arch/arm/kernel/perf_event_v7.c   |  12
-rw-r--r--  arch/arm/kernel/setup.c           |  37
-rw-r--r--  arch/arm/kernel/sleep.S           |   5
-rw-r--r--  arch/arm/kernel/stacktrace.c      |  60
-rw-r--r--  arch/arm/kernel/topology.c        |   8
-rw-r--r--  arch/arm/kernel/uprobes.c         |  20
19 files changed, 272 insertions(+), 65 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 040619c32d68..38ddd9f83d0e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o
obj-$(CONFIG_SMP) += smp.o
ifdef CONFIG_MMU
obj-$(CONFIG_SMP) += smp_tlb.o
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 8c14de8180c0..7807ef58a2ab 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/root_dev.h>
#include <linux/screen_info.h>
+#include <linux/memblock.h>
#include <asm/setup.h>
#include <asm/system_info.h>
@@ -222,10 +223,10 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
}
if (mdesc->fixup)
- mdesc->fixup(tags, &from, &meminfo);
+ mdesc->fixup(tags, &from);
if (tags->hdr.tag == ATAG_CORE) {
- if (meminfo.nr_banks != 0)
+ if (memblock_phys_mem_size())
squash_mem_tags(tags);
save_atags(tags);
parse_tags(tags);
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index ea9ce92a4b52..e94a157ddff1 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -27,10 +27,6 @@
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- arm_add_memory(base, size);
-}
#ifdef CONFIG_SMP
extern struct of_cpu_method __cpu_method_of_table[];
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1879e8dd2acc..52a949a8077d 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -344,7 +344,7 @@ ENDPROC(__pabt_svc)
@
@ Enable the alignment trap while in kernel mode
@
- alignment_trap r0
+ alignment_trap r0, .LCcralign
@
@ Clear FP to mark the first stack frame
@@ -413,6 +413,11 @@ __und_usr:
@
adr r9, BSYM(ret_from_exception)
+ @ IRQs must be enabled before attempting to read the instruction from
+ @ user space since that could cause a page/translation fault if the
+ @ page table was modified by another CPU.
+ enable_irq
+
tst r3, #PSR_T_BIT @ Thumb mode?
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
@@ -484,7 +489,8 @@ ENDPROC(__und_usr)
*/
.pushsection .fixup, "ax"
.align 2
-4: mov pc, r9
+4: str r4, [sp, #S_PC] @ retry current instruction
+ mov pc, r9
.popsection
.pushsection __ex_table,"a"
.long 1b, 4b
@@ -517,7 +523,7 @@ ENDPROC(__und_usr)
* r9 = normal "successful" return address
* r10 = this threads thread_info structure
* lr = unrecognised instruction return address
- * IRQs disabled, FIQs enabled.
+ * IRQs enabled, FIQs enabled.
*/
@
@ Fall-through from Thumb-2 __und_usr
@@ -624,7 +630,6 @@ call_fpe:
#endif
do_fpe:
- enable_irq
ldr r4, .LCfp
add r10, r10, #TI_FPSTATE @ r10 = workspace
ldr pc, [r4] @ Call FP module USR entry point
@@ -652,8 +657,7 @@ __und_usr_fault_32:
b 1f
__und_usr_fault_16:
mov r1, #2
-1: enable_irq
- mov r0, sp
+1: mov r0, sp
adr lr, BSYM(ret_from_exception)
b __und_fault
ENDPROC(__und_usr_fault_32)
@@ -1143,11 +1147,8 @@ __vectors_start:
.data
.globl cr_alignment
- .globl cr_no_alignment
cr_alignment:
.space 4
-cr_no_alignment:
- .space 4
#ifdef CONFIG_MULTI_IRQ_HANDLER
.globl handle_arch_irq
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index a2dcafdf1bc8..7139d4a7dea7 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -365,13 +365,7 @@ ENTRY(vector_swi)
str r0, [sp, #S_OLD_R0] @ Save OLD_R0
#endif
zero_fp
-
-#ifdef CONFIG_ALIGNMENT_TRAP
- ldr ip, __cr_alignment
- ldr ip, [ip]
- mcr p15, 0, ip, c1, c0 @ update control register
-#endif
-
+ alignment_trap ip, __cr_alignment
enable_irq
ct_user_exit
get_thread_info tsk
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index efb208de75ec..5d702f8900b1 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -37,9 +37,9 @@
#endif
.endm
- .macro alignment_trap, rtemp
+ .macro alignment_trap, rtemp, label
#ifdef CONFIG_ALIGNMENT_TRAP
- ldr \rtemp, .LCcralign
+ ldr \rtemp, \label
ldr \rtemp, [\rtemp]
mcr p15, 0, \rtemp, c1, c0
#endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c108ddcb9ba4..af9a8a927a4e 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -14,6 +14,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/opcodes.h>
@@ -63,6 +64,18 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
}
#endif
+int ftrace_arch_code_modify_prepare(void)
+{
+ set_all_modules_text_rw();
+ return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+ set_all_modules_text_ro();
+ return 0;
+}
+
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
return arm_gen_branch_link(pc, addr);
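Editor's note: the two hooks added in this hunk are all the architecture needs for "ftrace with read-only module text"; the generic ftrace core brackets its patching with them, so module text is writable only for the duration of an update. A rough, hedged sketch of that calling pattern follows -- illustrative only, not the kernel/trace source.

/*
 * Rough sketch of how the generic ftrace code is expected to use the
 * hooks added above -- illustrative only, not the kernel/trace source.
 */
static void example_ftrace_update(void)
{
	if (ftrace_arch_code_modify_prepare())	/* set_all_modules_text_rw() */
		return;

	/* ... rewrite mcount call sites, e.g. via ftrace_call_replace() ... */

	ftrace_arch_code_modify_post_process();	/* set_all_modules_text_ro() */
}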
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index c96ecacb2021..572a38335c96 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -99,8 +99,7 @@ __mmap_switched:
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
cmp r7, #0
- bicne r4, r0, #CR_A @ Clear 'A' bit
- stmneia r7, {r0, r4} @ Save control register values
+ strne r0, [r7] @ Save control register values
b start_kernel
ENDPROC(__mmap_switched)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 591d6e4a6492..2c35f0ff2fdc 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -475,7 +475,7 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
- __INIT
+ __HEAD
__fixup_smp:
and r3, r9, #0x000f0000 @ architecture version
teq r3, #0x000f0000 @ CPU ID supported?
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
new file mode 100644
index 000000000000..bb8b79648643
--- /dev/null
+++ b/arch/arm/kernel/hibernate.c
@@ -0,0 +1,107 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <asm/system_misc.h>
+#include <asm/idmap.h>
+#include <asm/suspend.h>
+#include <asm/memory.h>
+
+extern const void __nosave_begin, __nosave_end;
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
+ unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+
+ return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+ local_fiq_disable();
+}
+
+void notrace restore_processor_state(void)
+{
+ local_fiq_enable();
+}
+
+/*
+ * Snapshot kernel memory and reset the system.
+ *
+ * swsusp_save() is executed in the suspend finisher so that the CPU
+ * context pointer and memory are part of the saved image, which is
+ * required by the resume kernel image to restart execution from
+ * swsusp_arch_suspend().
+ *
+ * soft_restart is not technically needed, but is used to get success
+ * returned from cpu_suspend.
+ *
+ * When soft reboot completes, the hibernation snapshot is written out.
+ */
+static int notrace arch_save_image(unsigned long unused)
+{
+ int ret;
+
+ ret = swsusp_save();
+ if (ret == 0)
+ soft_restart(virt_to_phys(cpu_resume));
+ return ret;
+}
+
+/*
+ * Save the current CPU state before suspend / poweroff.
+ */
+int notrace swsusp_arch_suspend(void)
+{
+ return cpu_suspend(0, arch_save_image);
+}
+
+/*
+ * Restore page contents for physical pages that were in use during loading
+ * hibernation image. Switch to idmap_pgd so the physical page tables
+ * are overwritten with the same contents.
+ */
+static void notrace arch_restore_image(void *unused)
+{
+ struct pbe *pbe;
+
+ cpu_switch_mm(idmap_pgd, &init_mm);
+ for (pbe = restore_pblist; pbe; pbe = pbe->next)
+ copy_page(pbe->orig_address, pbe->address);
+
+ soft_restart(virt_to_phys(cpu_resume));
+}
+
+static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
+
+/*
+ * Resume from the hibernation image.
+ * Due to the kernel heap / data restore, stack contents change underneath
+ * and that would make function calls impossible; switch to a temporary
+ * stack within the nosave region to avoid that problem.
+ */
+int swsusp_arch_resume(void)
+{
+ extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+ call_with_stack(arch_restore_image, 0,
+ resume_stack + ARRAY_SIZE(resume_stack));
+ return 0;
+}
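Editor's note: the hooks in this new file are driven by the generic hibernation core. The sketch below is a paraphrase of how image creation invokes them, hedged -- it is not the kernel/power source, and the function name is a placeholder.

/*
 * Paraphrased sketch of how the hibernation core drives the arch hooks
 * above during image creation -- not the actual kernel/power code.
 */
#include <linux/suspend.h>

extern int swsusp_arch_suspend(void);	/* normally declared by the PM core */

static int example_create_image(void)
{
	int error;

	save_processor_state();		/* single CPU online, FIQs masked */
	error = swsusp_arch_suspend();	/* snapshot; resume path re-enters via cpu_resume */
	restore_processor_state();

	return error;
}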
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9723d17b8f38..2c4257604513 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -37,6 +37,7 @@
#include <linux/proc_fs.h>
#include <linux/export.h>
+#include <asm/hardware/cache-l2x0.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
@@ -115,10 +116,21 @@ EXPORT_SYMBOL_GPL(set_irq_flags);
void __init init_IRQ(void)
{
+ int ret;
+
if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
irqchip_init();
else
machine_desc->init_irq();
+
+ if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
+ (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
+ outer_cache.write_sec = machine_desc->l2c_write_sec;
+ ret = l2x0_of_init(machine_desc->l2c_aux_val,
+ machine_desc->l2c_aux_mask);
+ if (ret)
+ pr_err("L2C: failed to init: %d\n", ret);
+ }
}
#ifdef CONFIG_MULTI_IRQ_HANDLER
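Editor's note: with this wiring in init_IRQ(), a DT platform no longer calls l2x0_of_init() itself; it only fills in the new machine_desc fields. A hypothetical board file is sketched below -- all names are placeholders and not taken from this merge.

/* Hypothetical board file -- placeholder names, not from this merge. */
#include <asm/mach/arch.h>

static const char *const example_dt_compat[] __initconst = {
	"vendor,example-board",
	NULL,
};

DT_MACHINE_START(EXAMPLE_DT, "Example board (Device Tree)")
	.l2c_aux_val	= 0,		/* no bits to set in L2 aux ctrl */
	.l2c_aux_mask	= ~0,		/* keep the value the bootloader programmed */
	.dt_compat	= example_dt_compat,
MACHINE_END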
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 2452dd1bef53..a5599cfc43cb 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -18,6 +18,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
#define PJ4(code...) code
@@ -65,17 +66,18 @@
* r9 = ret_from_exception
* lr = undefined instr exit
*
- * called from prefetch exception handler with interrupts disabled
+ * called from prefetch exception handler with interrupts enabled
*/
ENTRY(iwmmxt_task_enable)
+ inc_preempt_count r10, r3
XSC(mrc p15, 0, r2, c15, c1, 0)
PJ4(mrc p15, 0, r2, c1, c0, 2)
@ CP0 and CP1 accessible?
XSC(tst r2, #0x3)
PJ4(tst r2, #0xf)
- movne pc, lr @ if so no business here
+ bne 4f @ if so no business here
@ enable access to CP0 and CP1
XSC(orr r2, r2, #0x3)
XSC(mcr p15, 0, r2, c15, c1, 0)
@@ -136,7 +138,7 @@ concan_dump:
wstrd wR15, [r1, #MMX_WR15]
2: teq r0, #0 @ anything to load?
- moveq pc, lr
+ beq 3f
concan_load:
@@ -169,8 +171,14 @@ concan_load:
@ clear CUP/MUP (only if r1 != 0)
teq r1, #0
mov r2, #0
- moveq pc, lr
+ beq 3f
tmcr wCon, r2
+
+3:
+#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+#endif
+4: dec_preempt_count r10, r3
mov pc, lr
/*
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 51798d7854ac..a71ae1523620 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -221,6 +221,7 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
* PMU platform driver and devicetree bindings.
*/
static struct of_device_id cpu_pmu_of_device_ids[] = {
+ {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
{.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
{.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
{.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4ef3981ed02..2037f7205987 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1599,6 +1599,13 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
}
+static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7_a12_pmu_init(cpu_pmu);
+ cpu_pmu->name = "ARMv7 Cortex-A17";
+ return 0;
+}
+
/*
* Krait Performance Monitor Region Event Selection Register (PMRESRn)
*
@@ -2021,6 +2028,11 @@ static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
return -ENODEV;
}
+static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return -ENODEV;
+}
+
static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 50e198c1e9c8..8a16ee5d8a95 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -72,6 +72,7 @@ static int __init fpe_setup(char *line)
__setup("fpe=", fpe_setup);
#endif
+extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
struct proc_info_list *);
@@ -590,7 +591,7 @@ static void __init setup_processor(void)
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
- proc_arch[cpu_architecture()], cr_alignment);
+ proc_arch[cpu_architecture()], get_cr());
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
list->arch_name, ENDIANNESS);
@@ -603,7 +604,9 @@ static void __init setup_processor(void)
#ifndef CONFIG_ARM_THUMB
elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
-
+#ifdef CONFIG_MMU
+ init_default_cache_policy(list->__cpu_mm_mmu_flags);
+#endif
erratum_a15_798181_init();
feat_v6_fixup();
@@ -628,15 +631,8 @@ void __init dump_machine_table(void)
int __init arm_add_memory(u64 start, u64 size)
{
- struct membank *bank = &meminfo.bank[meminfo.nr_banks];
u64 aligned_start;
- if (meminfo.nr_banks >= NR_BANKS) {
- pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
- (long long)start);
- return -EINVAL;
- }
-
/*
* Ensure that start/size are aligned to a page boundary.
* Size is appropriately rounded down, start is rounded up.
@@ -677,17 +673,17 @@ int __init arm_add_memory(u64 start, u64 size)
aligned_start = PHYS_OFFSET;
}
- bank->start = aligned_start;
- bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
+ start = aligned_start;
+ size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
/*
* Check whether this memory region has non-zero size or
* invalid node number.
*/
- if (bank->size == 0)
+ if (size == 0)
return -EINVAL;
- meminfo.nr_banks++;
+ memblock_add(start, size);
return 0;
}
@@ -695,6 +691,7 @@ int __init arm_add_memory(u64 start, u64 size)
* Pick out the memory size. We look for mem=size@start,
* where start and size are "size[KkMm]"
*/
+
static int __init early_mem(char *p)
{
static int usermem __initdata = 0;
@@ -709,7 +706,8 @@ static int __init early_mem(char *p)
*/
if (usermem == 0) {
usermem = 1;
- meminfo.nr_banks = 0;
+ memblock_remove(memblock_start_of_DRAM(),
+ memblock_end_of_DRAM() - memblock_start_of_DRAM());
}
start = PHYS_OFFSET;
@@ -854,13 +852,6 @@ static void __init reserve_crashkernel(void)
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
@@ -903,12 +894,10 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
- sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
setup_dma_zone(mdesc);
sanity_check_meminfo();
- arm_memblock_init(&meminfo, mdesc);
+ arm_memblock_init(mdesc);
paging_init(mdesc);
request_standard_resources(mdesc);
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index b907d9b790ab..1b880db2a033 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -127,6 +127,10 @@ ENDPROC(cpu_resume_after_mmu)
.align
ENTRY(cpu_resume)
ARM_BE8(setend be) @ ensure we are in BE mode
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install_secondary
+#endif
+ safe_svcmode_maskall r1
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
@@ -144,7 +148,6 @@ ARM_BE8(setend be) @ ensure we are in BE mode
ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
ldr r0, [r0, r1, lsl #2]
- setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
@ load phys pgd, stack, resume fn
ARM( ldmia r0!, {r1, sp, pc} )
THUMB( ldmia r0!, {r1, r2, r3} )
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index af4e8c8a5422..f065eb05d254 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -3,6 +3,7 @@
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
+#include <asm/traps.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
@@ -61,6 +62,7 @@ EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
struct stack_trace *trace;
+ unsigned long last_pc;
unsigned int no_sched_functions;
unsigned int skip;
};
@@ -69,6 +71,7 @@ static int save_trace(struct stackframe *frame, void *d)
{
struct stack_trace_data *data = d;
struct stack_trace *trace = data->trace;
+ struct pt_regs *regs;
unsigned long addr = frame->pc;
if (data->no_sched_functions && in_sched_functions(addr))
@@ -80,16 +83,39 @@ static int save_trace(struct stackframe *frame, void *d)
trace->entries[trace->nr_entries++] = addr;
+ if (trace->nr_entries >= trace->max_entries)
+ return 1;
+
+ /*
+ * in_exception_text() is designed to test if the PC is one of
+ * the functions which has an exception stack above it, but
+ * unfortunately what is in frame->pc is the return LR value,
+ * not the saved PC value. So, we need to track the previous
+ * frame PC value when doing this.
+ */
+ addr = data->last_pc;
+ data->last_pc = frame->pc;
+ if (!in_exception_text(addr))
+ return 0;
+
+ regs = (struct pt_regs *)frame->sp;
+
+ trace->entries[trace->nr_entries++] = regs->ARM_pc;
+
return trace->nr_entries >= trace->max_entries;
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+/* This must be noinline so that our skip calculation works correctly */
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
+ data.last_pc = ULONG_MAX;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;
if (tsk != current) {
#ifdef CONFIG_SMP
@@ -102,7 +128,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */
@@ -111,11 +136,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
} else {
register unsigned long current_sp asm ("sp");
- data.no_sched_functions = 0;
+ /* We don't want this function nor the caller */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}
walk_stackframe(&frame, save_trace, &data);
@@ -123,9 +149,33 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+ struct stack_trace_data data;
+ struct stackframe frame;
+
+ data.trace = trace;
+ data.skip = trace->skip;
+ data.no_sched_functions = 0;
+
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+
+ walk_stackframe(&frame, save_trace, &data);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
+
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
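Editor's note: save_stack_trace_regs() is what lets kprobes (or anything else holding a struct pt_regs) record a trace starting from the trapped context rather than from the current frame. A minimal usage sketch follows; the function name and depth are hypothetical.

/* Hedged usage sketch -- function name and depth are hypothetical. */
#include <linux/stacktrace.h>
#include <asm/ptrace.h>

#define EXAMPLE_TRACE_DEPTH	16

static void example_dump_trace(struct pt_regs *regs)
{
	unsigned long entries[EXAMPLE_TRACE_DEPTH];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= EXAMPLE_TRACE_DEPTH,
		/* nr_entries and skip start at zero */
	};

	save_stack_trace_regs(regs, &trace);
	print_stack_trace(&trace, 0);
}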
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 71e1fec6d31a..3997c411c140 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -91,13 +91,13 @@ static void __init parse_dt_topology(void)
{
const struct cpu_efficiency *cpu_eff;
struct device_node *cn = NULL;
- unsigned long min_capacity = (unsigned long)(-1);
+ unsigned long min_capacity = ULONG_MAX;
unsigned long max_capacity = 0;
unsigned long capacity = 0;
- int alloc_size, cpu = 0;
+ int cpu = 0;
- alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
- __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
+ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+ GFP_NOWAIT);
for_each_possible_cpu(cpu) {
const u32 *rate;
diff --git a/arch/arm/kernel/uprobes.c b/arch/arm/kernel/uprobes.c
index f9bacee973bf..56adf9c1fde0 100644
--- a/arch/arm/kernel/uprobes.c
+++ b/arch/arm/kernel/uprobes.c
@@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
return 0;
}
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ void *xol_page_kaddr = kmap_atomic(page);
+ void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
+
+ preempt_disable();
+
+ /* Initialize the slot */
+ memcpy(dst, src, len);
+
+ /* flush caches (dcache/icache) */
+ flush_uprobe_xol_access(page, vaddr, dst, len);
+
+ preempt_enable();
+
+ kunmap_atomic(xol_page_kaddr);
+}
+
+
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;