author     Marcelo Tosatti <mtosatti@redhat.com>	2013-03-05 00:10:32 +0100
committer  Marcelo Tosatti <mtosatti@redhat.com>	2013-03-05 00:10:32 +0100
commit     ee2c25efdd46d7ed5605d6fe877bdf4b47a4ab2e (patch)
tree       35890281e93e667a8e262d76ef250025eb30a8c1 /arch/arc/kernel
parent     KVM: VMX: Pass vcpu to __vmx_complete_interrupts (diff)
parent     Linux 3.9-rc1 (diff)
Merge branch 'master' into queue
* master: (15791 commits)
  Linux 3.9-rc1
  btrfs/raid56: Add missing #include <linux/vmalloc.h>
  fix compat_sys_rt_sigprocmask()
  SUNRPC: One line comment fix
  ext4: enable quotas before orphan cleanup
  ext4: don't allow quota mount options when quota feature enabled
  ext4: fix a warning from sparse check for ext4_dir_llseek
  ext4: convert number of blocks to clusters properly
  ext4: fix possible memory leak in ext4_remount()
  jbd2: fix ERR_PTR dereference in jbd2__journal_start
  metag: Provide dma_get_sgtable()
  metag: prom.h: remove declaration of metag_dt_memblock_reserve()
  metag: copy devicetree to non-init memory
  metag: cleanup metag_ksyms.c includes
  metag: move mm/init.c exports out of metag_ksyms.c
  metag: move usercopy.c exports out of metag_ksyms.c
  metag: move setup.c exports out of metag_ksyms.c
  metag: move kick.c exports out of metag_ksyms.c
  metag: move traps.c exports out of metag_ksyms.c
  metag: move irq enable out of irqflags.h on SMP
  ...

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Conflicts:
	arch/x86/kernel/kvmclock.c
Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--  arch/arc/kernel/Makefile        |   33
-rw-r--r--  arch/arc/kernel/arc_hostlink.c  |   58
-rw-r--r--  arch/arc/kernel/arcksyms.c      |   56
-rw-r--r--  arch/arc/kernel/asm-offsets.c   |   64
-rw-r--r--  arch/arc/kernel/clk.c           |   21
-rw-r--r--  arch/arc/kernel/ctx_sw.c        |  109
-rw-r--r--  arch/arc/kernel/ctx_sw_asm.S    |   58
-rw-r--r--  arch/arc/kernel/devtree.c       |  123
-rw-r--r--  arch/arc/kernel/disasm.c        |  538
-rw-r--r--  arch/arc/kernel/entry.S         |  839
-rw-r--r--  arch/arc/kernel/fpu.c           |   55
-rw-r--r--  arch/arc/kernel/head.S          |  111
-rw-r--r--  arch/arc/kernel/irq.c           |  273
-rw-r--r--  arch/arc/kernel/kgdb.c          |  205
-rw-r--r--  arch/arc/kernel/kprobes.c       |  525
-rw-r--r--  arch/arc/kernel/module.c        |  145
-rw-r--r--  arch/arc/kernel/process.c       |  235
-rw-r--r--  arch/arc/kernel/ptrace.c        |  158
-rw-r--r--  arch/arc/kernel/reset.c         |   33
-rw-r--r--  arch/arc/kernel/setup.c         |  473
-rw-r--r--  arch/arc/kernel/signal.c        |  360
-rw-r--r--  arch/arc/kernel/smp.c           |  332
-rw-r--r--  arch/arc/kernel/stacktrace.c    |  254
-rw-r--r--  arch/arc/kernel/sys.c           |   18
-rw-r--r--  arch/arc/kernel/time.c          |  265
-rw-r--r--  arch/arc/kernel/traps.c         |  170
-rw-r--r--  arch/arc/kernel/troubleshoot.c  |  322
-rw-r--r--  arch/arc/kernel/unaligned.c     |  245
-rw-r--r--  arch/arc/kernel/unwind.c        | 1329
-rw-r--r--  arch/arc/kernel/vmlinux.lds.S   |  163
30 files changed, 7570 insertions, 0 deletions
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
new file mode 100644
index 000000000000..c242ef07ba70
--- /dev/null
+++ b/arch/arc/kernel/Makefile
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+# Pass UTS_MACHINE for user_regset definition
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
+obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
+obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
+obj-y += devtree.o
+
+obj-$(CONFIG_MODULES) += arcksyms.o module.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
+
+obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
+CFLAGS_fpu.o += -mdpfp
+
+ifdef CONFIG_ARC_DW2_UNWIND
+CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
+obj-y += ctx_sw.o
+else
+obj-y += ctx_sw_asm.o
+endif
+
+extra-y := vmlinux.lds head.o
diff --git a/arch/arc/kernel/arc_hostlink.c b/arch/arc/kernel/arc_hostlink.c
new file mode 100644
index 000000000000..47b2a17cc52a
--- /dev/null
+++ b/arch/arc/kernel/arc_hostlink.c
@@ -0,0 +1,58 @@
+/*
+ * arc_hostlink.c: Pseudo-driver for the MetaWare-provided "hostlink" facility
+ *
+ * Allows Linux userland access to host in absence of any peripherals.
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h> /* file_operations */
+#include <linux/miscdevice.h>
+#include <linux/mm.h> /* VM_IO */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ pr_warn("Hostlink buffer mmap ERROR\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static long arc_hl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+	/* we only support returning the physical addr to mmap in user space */
+ put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
+ return 0;
+}
+
+static const struct file_operations arc_hl_fops = {
+ .unlocked_ioctl = arc_hl_ioctl,
+ .mmap = arc_hl_mmap,
+};
+
+static struct miscdevice arc_hl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hostlink",
+ .fops = &arc_hl_fops
+};
+
+static int __init arc_hl_init(void)
+{
+ pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
+ return misc_register(&arc_hl_dev);
+}
+module_init(arc_hl_init);
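
For illustration only, a minimal userspace client of this pseudo-driver might look like the sketch below. The /dev/hostlink node name and the ioctl command value are assumptions (the handler ignores the command and simply returns the buffer address, which is then passed back as the mmap offset); none of this is part of the patch.

/* Hypothetical userspace client for the hostlink pseudo-driver above */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	unsigned int buf_addr = 0;
	size_t len = 4 * getpagesize();	/* mirrors the 4 * PAGE_SIZE buffer */
	int fd = open("/dev/hostlink", O_RDWR);

	if (fd < 0)
		return 1;

	/* driver ignores the command and put_user()s the buffer address */
	if (ioctl(fd, 0, &buf_addr) < 0)
		return 1;

	/* the address goes back in as the mmap offset (kernel derives vm_pgoff) */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, (off_t)buf_addr);
	if (p == MAP_FAILED)
		return 1;

	printf("hostlink buffer mapped at %p\n", p);
	munmap(p, len);
	close(fd);
	return 0;
}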
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
new file mode 100644
index 000000000000..4d9e77724bed
--- /dev/null
+++ b/arch/arc/kernel/arcksyms.c
@@ -0,0 +1,56 @@
+/*
+ * arcksyms.c - Exporting symbols not exportable from their own sources
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+
+/* libgcc functions, not part of kernel sources */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __divsf3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __cmpdi2(void);
+extern void __fixunsdfsi(void);
+extern void __muldf3(void);
+extern void __divdf3(void);
+extern void __floatunsidf(void);
+extern void __floatunsisf(void);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__divsf3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__cmpdi2);
+EXPORT_SYMBOL(__fixunsdfsi);
+EXPORT_SYMBOL(__muldf3);
+EXPORT_SYMBOL(__divdf3);
+EXPORT_SYMBOL(__floatunsidf);
+EXPORT_SYMBOL(__floatunsisf);
+
+/* ARC optimised assembler routines */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strlen);
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..0dc148ebce74
--- /dev/null
+++ b/arch/arc/kernel/asm-offsets.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <asm/hardirq.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+int main(void)
+{
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+
+ BLANK();
+
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
+#ifdef CONFIG_ARC_CURR_IN_REG
+ DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
+#endif
+ DEFINE(THREAD_FAULT_ADDR,
+ offsetof(struct thread_struct, fault_address));
+
+ BLANK();
+
+ DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(THREAD_INFO_PREEMPT_COUNT,
+ offsetof(struct thread_info, preempt_count));
+
+ BLANK();
+
+ DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+
+ DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
+ DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+
+ DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
+
+ BLANK();
+
+ DEFINE(PT_status32, offsetof(struct pt_regs, status32));
+ DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word));
+ DEFINE(PT_sp, offsetof(struct pt_regs, sp));
+ DEFINE(PT_r0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_r1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_r2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_r3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_r4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_r5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_r6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+
+ return 0;
+}
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
new file mode 100644
index 000000000000..66ce0dc917fb
--- /dev/null
+++ b/arch/arc/kernel/clk.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/clk.h>
+
+unsigned long core_freq = 800000000;
+
+/*
+ * As of now we default to device-tree provided clock
+ * In future we can determine this in early boot
+ */
+int arc_set_core_freq(unsigned long freq)
+{
+ core_freq = freq;
+ return 0;
+}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
new file mode 100644
index 000000000000..60844dac6132
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -"C" version of lowest level context switch asm macro called by schedular
+ * gcc doesn't generate the dward CFI info for hand written asm, hence can't
+ * backtrace out of it (e.g. tasks sleeping in kernel).
+ * So we cheat a bit by writing almost similar code in inline-asm.
+ * -This is a hacky way of doing things, but there is no other simple way.
+ * I don't want/intend to extend unwinding code to understand raw asm
+ */
+
+#include <asm/asm-offsets.h>
+#include <linux/sched.h>
+
+struct task_struct *__sched
+__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
+{
+ unsigned int tmp;
+ unsigned int prev = (unsigned int)prev_task;
+ unsigned int next = (unsigned int)next_task;
+ int num_words_to_skip = 1;
+#ifdef CONFIG_ARC_CURR_IN_REG
+ num_words_to_skip++;
+#endif
+
+ __asm__ __volatile__(
+	/* FP/BLINK save generated by gcc (standard function prologue) */
+ "st.a r13, [sp, -4] \n\t"
+ "st.a r14, [sp, -4] \n\t"
+ "st.a r15, [sp, -4] \n\t"
+ "st.a r16, [sp, -4] \n\t"
+ "st.a r17, [sp, -4] \n\t"
+ "st.a r18, [sp, -4] \n\t"
+ "st.a r19, [sp, -4] \n\t"
+ "st.a r20, [sp, -4] \n\t"
+ "st.a r21, [sp, -4] \n\t"
+ "st.a r22, [sp, -4] \n\t"
+ "st.a r23, [sp, -4] \n\t"
+ "st.a r24, [sp, -4] \n\t"
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "st.a r25, [sp, -4] \n\t"
+#endif
+ "sub sp, sp, %4 \n\t" /* create gutter at top */
+
+ /* set ksp of outgoing task in tsk->thread.ksp */
+ "st.as sp, [%3, %1] \n\t"
+
+ "sync \n\t"
+
+ /*
+ * setup _current_task with incoming tsk.
+ * optionally, set r25 to that as well
+ * For SMP extra work to get to &_current_task[cpu]
+ * (open coded SET_CURR_TASK_ON_CPU)
+ */
+#ifndef CONFIG_SMP
+ "st %2, [@_current_task] \n\t"
+#else
+ "lr r24, [identity] \n\t"
+ "lsr r24, r24, 8 \n\t"
+ "bmsk r24, r24, 7 \n\t"
+ "add2 r24, @_current_task, r24 \n\t"
+ "st %2, [r24] \n\t"
+#endif
+#ifdef CONFIG_ARC_CURR_IN_REG
+ "mov r25, %2 \n\t"
+#endif
+
+ /* get ksp of incoming task from tsk->thread.ksp */
+ "ld.as sp, [%2, %1] \n\t"
+
+	/* start loading its CALLEE reg file */
+
+ "add sp, sp, %4 \n\t" /* skip gutter at top */
+
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "ld.ab r25, [sp, 4] \n\t"
+#endif
+ "ld.ab r24, [sp, 4] \n\t"
+ "ld.ab r23, [sp, 4] \n\t"
+ "ld.ab r22, [sp, 4] \n\t"
+ "ld.ab r21, [sp, 4] \n\t"
+ "ld.ab r20, [sp, 4] \n\t"
+ "ld.ab r19, [sp, 4] \n\t"
+ "ld.ab r18, [sp, 4] \n\t"
+ "ld.ab r17, [sp, 4] \n\t"
+ "ld.ab r16, [sp, 4] \n\t"
+ "ld.ab r15, [sp, 4] \n\t"
+ "ld.ab r14, [sp, 4] \n\t"
+ "ld.ab r13, [sp, 4] \n\t"
+
+	/* last (ret value) = prev : although for ARC it is mov r0, r0 */
+ "mov %0, %3 \n\t"
+
+	/* FP/BLINK restore generated by gcc (standard func epilogue) */
+
+ : "=r"(tmp)
+ : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
+ "n"(num_words_to_skip * 4)
+ : "blink"
+ );
+
+ return (struct task_struct *)tmp;
+}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644
index 000000000000..d8972345e4c2
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -Moved core context switch macro out of entry.S into this file.
+ * -This is the more "natural" hand written assembler
+ */
+
+#include <asm/entry.h> /* For the SAVE_* macros */
+#include <asm/asm-offsets.h>
+#include <asm/linkage.h>
+
+;################### Low Level Context Switch ##########################
+
+ .section .sched.text,"ax",@progbits
+ .align 4
+ .global __switch_to
+ .type __switch_to, @function
+__switch_to:
+
+ /* Save regs on kernel mode stack of task */
+ st.a blink, [sp, -4]
+ st.a fp, [sp, -4]
+ SAVE_CALLEE_SAVED_KERNEL
+
+	/* Save the current kernel mode SP in task->thread.ksp */
+ st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
+
+ /*
+ * Return last task in r0 (return reg)
+ * On ARC, Return reg = First Arg reg = r0.
+ * Since we already have last task in r0,
+ * don't need to do anything special to return it
+ */
+
+ /* hardware memory barrier */
+ sync
+
+ /*
+ * switch to new task, contained in r1
+ * Temp reg r3 is required to get the ptr to store val
+ */
+ SET_CURR_TASK_ON_CPU r1, r3
+
+ /* reload SP with kernel mode stack pointer in task->thread.ksp */
+ ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
+
+ /* restore the registers */
+ RESTORE_CALLEE_SAVED_KERNEL
+ ld.ab fp, [sp, 4]
+ ld.ab blink, [sp, 4]
+ j [blink]
+
+ARC_EXIT __switch_to
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
new file mode 100644
index 000000000000..bdee3a812052
--- /dev/null
+++ b/arch/arc/kernel/devtree.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Based on reduced version of METAG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <asm/prom.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+/* called from unflatten_device_tree() to bootstrap devicetree itself */
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt: virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+ struct boot_param_header *devtree = dt;
+ struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
+ unsigned int score, mdesc_score = ~1;
+ unsigned long dt_root;
+ const char *model, *compat;
+ void *clk;
+ char manufacturer[16];
+ unsigned long len;
+
+ /* check device tree validity */
+ if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ return NULL;
+
+ initial_boot_params = devtree;
+ dt_root = of_get_flat_dt_root();
+
+ /*
+ * The kernel could be multi-platform enabled, thus could have many
+ * "baked-in" machine descriptors. Search thru all for the best
+ * "compatible" string match.
+ */
+ for_each_machine_desc(mdesc) {
+ score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+ if (score > 0 && score < mdesc_score) {
+ mdesc_best = mdesc;
+ mdesc_score = score;
+ }
+ }
+ if (!mdesc_best) {
+ const char *prop;
+ long size;
+
+ pr_err("\n unrecognized device tree list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ if (prop) {
+ while (size > 0) {
+ printk("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ }
+ printk("]\n\n");
+
+ machine_halt();
+ }
+
+ /* compat = "<manufacturer>,<model>" */
+ compat = mdesc_best->dt_compat[0];
+
+ model = strchr(compat, ',');
+ if (model)
+ model++;
+
+ strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
+
+ pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
+ if (clk)
+ arc_set_core_freq(of_read_ulong(clk, len/4));
+
+ return mdesc_best;
+}
+
+/*
+ * Copy the flattened DT out of .init since unflattening doesn't copy strings
+ * and the normal DT APIs refs them from orig flat DT
+ */
+void __init copy_devtree(void)
+{
+ void *alloc = early_init_dt_alloc_memory_arch(
+ be32_to_cpu(initial_boot_params->totalsize), 64);
+ if (alloc) {
+ memcpy(alloc, initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+ initial_boot_params = alloc;
+ }
+}
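
For context, setup_machine_fdt() above matches the DT root "compatible" property against the dt_compat lists of the machine descriptors baked into the kernel. A hedged sketch of such a descriptor follows; the MACHINE_START/MACHINE_END helpers are assumed to come from <asm/mach_desc.h>, and "snps,arc-angel4" is an assumed board compatible string — neither is defined in this file.

/* Hypothetical board descriptor consumed by setup_machine_fdt() */
static const char *aa4_compat[] __initdata = {
	"snps,arc-angel4",	/* must match the DT root "compatible" */
	NULL,
};

MACHINE_START(ANGEL4, "ARC Angel4")
	.dt_compat	= aa4_compat,
MACHINE_END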
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
new file mode 100644
index 000000000000..2f390289a792
--- /dev/null
+++ b/arch/arc/kernel/disasm.c
@@ -0,0 +1,538 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <asm/disasm.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
+ defined(CONFIG_KPROBES)
+
+/* disasm_instr: Analyses instruction at addr, stores
+ * findings in *state
+ */
+void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs)
+{
+ int fieldA = 0;
+ int fieldC = 0, fieldCisReg = 0;
+ uint16_t word1 = 0, word0 = 0;
+ int subopcode, is_linked, op_format;
+ uint16_t *ins_ptr;
+ uint16_t ins_buf[4];
+ int bytes_not_copied = 0;
+
+ memset(state, 0, sizeof(struct disasm_state));
+
+ /* This fetches the upper part of the 32 bit instruction
+ * in both the cases of Little Endian or Big Endian configurations. */
+ if (userspace) {
+ bytes_not_copied = copy_from_user(ins_buf,
+ (const void __user *) addr, 8);
+ if (bytes_not_copied > 6)
+ goto fault;
+ ins_ptr = ins_buf;
+ } else {
+ ins_ptr = (uint16_t *) addr;
+ }
+
+ word1 = *((uint16_t *)addr);
+
+ state->major_opcode = (word1 >> 11) & 0x1F;
+
+ /* Check if the instruction is 32 bit or 16 bit instruction */
+ if (state->major_opcode < 0x0B) {
+ if (bytes_not_copied > 4)
+ goto fault;
+ state->instr_len = 4;
+ word0 = *((uint16_t *)(addr+2));
+ state->words[0] = (word1 << 16) | word0;
+ } else {
+ state->instr_len = 2;
+ state->words[0] = word1;
+ }
+
+ /* Read the second word in case of limm */
+ word1 = *((uint16_t *)(addr + state->instr_len));
+ word0 = *((uint16_t *)(addr + state->instr_len + 2));
+ state->words[1] = (word1 << 16) | word0;
+
+ switch (state->major_opcode) {
+ case op_Bcc:
+ state->is_branch = 1;
+
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 16)) ?
+ FIELD_s25(state->words[0]) :
+ FIELD_s21(state->words[0]);
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->flow = direct_jump;
+ break;
+
+ case op_BLcc:
+ if (IS_BIT(state->words[0], 16)) {
+ /* Branch and Link*/
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 17)) ?
+ (FIELD_s25(state->words[0]) & ~0x3) :
+ FIELD_s21(state->words[0]);
+
+ state->flow = direct_call;
+ } else {
+ /*Branch On Compare */
+ fieldA = FIELD_s9(state->words[0]) & ~0x3;
+ state->flow = direct_jump;
+ }
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->is_branch = 1;
+ break;
+
+ case op_LD: /* LD<zz> a,[b,s9] */
+ state->write = 0;
+ state->di = BITS(state->words[0], 11, 11);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 7, 8);
+ state->aa = BITS(state->words[0], 9, 10);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->aa = 0;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src2 = FIELD_s9(state->words[0]);
+ state->dest = FIELD_A(state->words[0]);
+ state->pref = (state->dest == REG_LIMM);
+ break;
+
+ case op_ST:
+ state->write = 1;
+ state->di = BITS(state->words[0], 5, 5);
+ if (state->di)
+ break;
+ state->aa = BITS(state->words[0], 3, 4);
+ state->zz = BITS(state->words[0], 1, 2);
+ state->src1 = FIELD_C(state->words[0]);
+ if (state->src1 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->src1, regs, cregs);
+ }
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->aa = 0;
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src3 = FIELD_s9(state->words[0]);
+ break;
+
+ case op_MAJOR_4:
+ subopcode = MINOR_OPCODE(state->words[0]);
+ switch (subopcode) {
+ case 32: /* Jcc */
+ case 33: /* Jcc.D */
+ case 34: /* JLcc */
+ case 35: /* JLcc.D */
+ is_linked = 0;
+
+ if (subopcode == 33 || subopcode == 35)
+ state->delay_slot = 1;
+
+ if (subopcode == 34 || subopcode == 35)
+ is_linked = 1;
+
+ fieldCisReg = 0;
+ op_format = BITS(state->words[0], 22, 23);
+ if (op_format == 0 || ((op_format == 3) &&
+ (!IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+
+ if (fieldC == REG_LIMM) {
+ fieldC = state->words[1];
+ state->instr_len += 4;
+ } else {
+ fieldCisReg = 1;
+ }
+ } else if (op_format == 1 || ((op_format == 3)
+ && (IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+ } else {
+ /* op_format == 2 */
+ fieldC = FIELD_s12(state->words[0]);
+ }
+
+ if (!fieldCisReg) {
+ state->target = fieldC;
+ state->flow = is_linked ?
+ direct_call : direct_jump;
+ } else {
+ state->target = get_reg(fieldC, regs, cregs);
+ state->flow = is_linked ?
+ indirect_call : indirect_jump;
+ }
+ state->is_branch = 1;
+ break;
+
+ case 40: /* LPcc */
+ if (BITS(state->words[0], 22, 23) == 3) {
+ /* Conditional LPcc u7 */
+ fieldC = FIELD_C(state->words[0]);
+
+ fieldC = fieldC << 1;
+ fieldC += (addr & ~0x03);
+ state->is_branch = 1;
+ state->flow = direct_jump;
+ state->target = fieldC;
+ }
+ /* For Unconditional lp, next pc is the fall through
+ * which is updated */
+ break;
+
+ case 48 ... 55: /* LD a,[b,c] */
+ state->di = BITS(state->words[0], 15, 15);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 16, 16);
+ state->zz = BITS(state->words[0], 17, 18);
+ state->aa = BITS(state->words[0], 22, 23);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs,
+ cregs);
+ }
+ state->src2 = FIELD_C(state->words[0]);
+ if (state->src2 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->src2, regs,
+ cregs);
+ }
+ state->dest = FIELD_A(state->words[0]);
+ if (state->dest == REG_LIMM)
+ state->pref = 1;
+ break;
+
+ case 10: /* MOV */
+ /* still need to check for limm to extract instr len */
+ /* MOV is special case because it only takes 2 args */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if (FIELD_C(state->words[0]) == REG_LIMM)
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+
+
+ default:
+ /* Not a Load, Jump or Loop instruction */
+ /* still need to check for limm to extract instr len */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM)))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+ }
+ break;
+
+ /* 16 Bit Instructions */
+ case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
+ state->zz = BITS(state->words[0], 3, 4);
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->dest = FIELD_S_A(state->words[0]);
+ break;
+
+ case op_ADD_MOV_CMP:
+ /* check for limm, ignore mov_s h,b (== mov_s 0,b) */
+ if ((BITS(state->words[0], 3, 4) < 3) &&
+ (FIELD_S_H(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+
+ case op_S:
+ subopcode = BITS(state->words[0], 5, 7);
+ switch (subopcode) {
+ case 0: /* j_s */
+ case 1: /* j_s.d */
+ case 2: /* jl_s */
+ case 3: /* jl_s.d */
+ state->target = get_reg(FIELD_S_B(state->words[0]),
+ regs, cregs);
+ state->delay_slot = subopcode & 1;
+ state->flow = (subopcode >= 2) ?
+ direct_call : indirect_jump;
+ break;
+ case 7:
+ switch (BITS(state->words[0], 8, 10)) {
+ case 4: /* jeq_s [blink] */
+ case 5: /* jne_s [blink] */
+ case 6: /* j_s [blink] */
+ case 7: /* j_s.d [blink] */
+ state->delay_slot = (subopcode == 7);
+ state->flow = indirect_jump;
+ state->target = get_reg(31, regs, cregs);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case op_LD_S: /* LD_S c, [b, u7] */
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_LDB_S:
+ case op_STB_S:
+ /* no further handling required as byte accesses should not
+ * cause an unaligned access exception */
+ state->zz = 1;
+ break;
+
+ case op_LDWX_S: /* LDWX_S c, [b, u6] */
+ state->x = 1;
+ /* intentional fall-through */
+
+ case op_LDW_S: /* LDW_S c, [b, u6] */
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u6(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_ST_S: /* ST_S c, [b, u7] */
+ state->write = 1;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ break;
+
+ case op_STW_S: /* STW_S c,[b,u6] */
+ state->write = 1;
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u6(state->words[0]);
+ break;
+
+ case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
+ /* note: we are ignoring possibility of:
+ * ADD_S, SUB_S, PUSH_S, POP_S as these should not
+	 * cause an unaligned access exception anyway */
+ state->write = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 5, 5);
+ if (state->zz)
+ break; /* byte accesses should not come here */
+ if (!state->write) {
+ state->src1 = get_reg(28, regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ } else {
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
+ cregs);
+ state->src2 = get_reg(28, regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ }
+ break;
+
+ case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
+ /* note: ADD_S r0, gp, s11 is ignored */
+ state->zz = BITS(state->words[0], 9, 10);
+ state->src1 = get_reg(26, regs, cregs);
+ state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
+ FIELD_S_s11(state->words[0]);
+ state->dest = 0;
+ break;
+
+ case op_Pcl: /* LD_S b,[pcl,u10] */
+ state->src1 = regs->ret & ~3;
+ state->src2 = FIELD_S_u10(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ break;
+
+ case op_BR_S:
+ state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_B_S:
+ fieldA = (BITS(state->words[0], 9, 10) == 3) ?
+ FIELD_S_s7(state->words[0]) :
+ FIELD_S_s10(state->words[0]);
+ state->target = fieldA + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_BL_S:
+ state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_call;
+ state->is_branch = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bytes_not_copied <= (8 - state->instr_len))
+ return;
+
+fault: state->fault = 1;
+}
+
+long __kprobes get_reg(int reg, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ if (reg <= 12) {
+ p = &regs->r0;
+ return p[-reg];
+ }
+
+ if (cregs && (reg <= 25)) {
+ p = &cregs->r13;
+ return p[13-reg];
+ }
+
+ if (reg == 26)
+ return regs->r26;
+ if (reg == 27)
+ return regs->fp;
+ if (reg == 28)
+ return regs->sp;
+ if (reg == 31)
+ return regs->blink;
+
+ return 0;
+}
+
+void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ switch (reg) {
+ case 0 ... 12:
+ p = &regs->r0;
+ p[-reg] = val;
+ break;
+ case 13 ... 25:
+ if (cregs) {
+ p = &cregs->r13;
+ p[13-reg] = val;
+ }
+ break;
+ case 26:
+ regs->r26 = val;
+ break;
+ case 27:
+ regs->fp = val;
+ break;
+ case 28:
+ regs->sp = val;
+ break;
+ case 31:
+ regs->blink = val;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Disassembles the insn at @pc and sets @next_pc to next PC (which could be
+ * @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns)).
+ *
+ * If @pc is a branch
+ * -@tgt_if_br is set to branch target.
+ * -If branch has delay slot, @next_pc updated with actual next PC.
+ */
+int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
+ struct callee_regs *cregs,
+ unsigned long *next_pc, unsigned long *tgt_if_br)
+{
+ struct disasm_state instr;
+
+ memset(&instr, 0, sizeof(struct disasm_state));
+ disasm_instr(pc, &instr, 0, regs, cregs);
+
+ *next_pc = pc + instr.instr_len;
+
+	/* Instructions with two possible targets: branch, jump and loop */
+ if (instr.is_branch)
+ *tgt_if_br = instr.target;
+
+ /* For the instructions with delay slots, the fall through is the
+ * instruction following the instruction in delay slot.
+ */
+ if (instr.delay_slot) {
+ struct disasm_state instr_d;
+
+ disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
+
+ *next_pc += instr_d.instr_len;
+ }
+
+ /* Zero Overhead Loop - end of the loop */
+ if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
+ && (regs->lp_count > 1)) {
+ *next_pc = regs->lp_start;
+ }
+
+ return instr.is_branch;
+}
+
+#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */
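
As a usage note, disasm_next_pc() is what a single-stepping client (kprobes or kgdb style) would build on: it returns the linear successor plus, for branches, the taken target, and the client arms a breakpoint at each candidate. The sketch below is illustrative only; place_breakpoint() is a hypothetical helper standing in for whatever breakpoint mechanism the client really uses.

#include <asm/disasm.h>		/* disasm_next_pc() */
#include <asm/ptrace.h>		/* struct pt_regs, struct callee_regs */

/* hypothetical helper: installs a software breakpoint at @addr */
extern void place_breakpoint(unsigned long addr);

static void arm_single_step(unsigned long pc, struct pt_regs *regs,
			    struct callee_regs *cregs)
{
	unsigned long fall_thru = 0, br_tgt = 0;
	int is_branch = disasm_next_pc(pc, regs, cregs, &fall_thru, &br_tgt);

	/* linear successor (already adjusted for a delay slot, if any) */
	place_breakpoint(fall_thru);

	/* a taken branch/jump/loop may land somewhere else entirely */
	if (is_branch && br_tgt != fall_thru)
		place_breakpoint(br_tgt);
}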
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
new file mode 100644
index 000000000000..ef6800ba2f03
--- /dev/null
+++ b/arch/arc/kernel/entry.S
@@ -0,0 +1,839 @@
+/*
+ * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Userspace unaligned access emulation
+ *
+ * vineetg: Feb 2011 (ptrace low level code fixes)
+ * -traced syscall return code (r0) was not saved into pt_regs for restoring
+ *  into user reg-file when traced task rets to user space.
+ * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
+ * were not invoking post-syscall trace hook (jumping directly into
+ * ret_from_system_call)
+ *
+ * vineetg: Nov 2010:
+ * -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
+ * -To maintain the slot size of 8 bytes/vector, added nop, which is
+ * not executed at runtime.
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ *  -do_signal() invoked upon TIF_RESTORE_SIGMASK as well
+ *  -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
+ * need ptregs anymore
+ *
+ * Vineetg: Oct 2009
+ * -In a rare scenario, Process gets a Priv-V exception and gets scheduled
+ *  out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
+ * active (AE bit enabled). This causes a double fault for a subseq valid
+ * exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
+ * Instr Error could also cause similar scenario, so same there as well.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *  was being "CLEARED" rather than "SET". Since it is Loop INHIBIT Bit,
+ * setting it and not clearing it clears ZOL context
+ *
+ * Vineetg: May 16th, 2008
+ * - r25 now contains the Current Task when in kernel
+ *
+ * Vineetg: Dec 22, 2007
+ * Minor Surgery of Low Level ISR to make it SMP safe
+ * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
+ * - _current_task is made an array of NR_CPUS
+ * - Access of _current_task wrapped inside a macro so that if hardware
+ * team agrees for a dedicated reg, no other code is touched
+ *
+ * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
+ */
+
+/*------------------------------------------------------------------
+ * Function ABI
+ *------------------------------------------------------------------
+ *
+ * Arguments r0 - r7
+ * Caller Saved Registers r0 - r12
+ * Callee Saved Registers r13- r25
+ * Global Pointer (gp) r26
+ * Frame Pointer (fp) r27
+ * Stack Pointer (sp) r28
+ * Interrupt link register (ilink1) r29
+ * Interrupt link register (ilink2) r30
+ * Branch link register (blink) r31
+ *------------------------------------------------------------------
+ */
+
+ .cpu A7
+
+;############################ Vector Table #################################
+
+.macro VECTOR lbl
+#if 1 /* Just in case, build breaks */
+ j \lbl
+#else
+ b \lbl
+ nop
+#endif
+.endm
+
+ .section .vector, "ax",@progbits
+ .align 4
+
+/* Each entry in the vector table must occupy 2 words. Since it is a jump
+ * across sections (.vector to .text) we are guaranteed that 'j somewhere'
+ * will use the 'j limm' form of the instruction as long as somewhere is in
+ * a section other than .vector.
+ */
+
+; ********* Critical System Events **********************
+VECTOR res_service ; 0x0, Restart Vector (0x0)
+VECTOR mem_service ; 0x8, Mem exception (0x1)
+VECTOR instr_service ; 0x10, Instrn Error (0x2)
+
+; ******************** Device ISRs **********************
+#ifdef CONFIG_ARC_IRQ3_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+VECTOR handle_interrupt_level1
+
+#ifdef CONFIG_ARC_IRQ5_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+#ifdef CONFIG_ARC_IRQ6_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+.rept 25
+VECTOR handle_interrupt_level1 ; Other devices
+.endr
+
+/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
+
+; ******************** Exceptions **********************
+VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
+VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
+VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
+VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
+ ; or Misaligned Access
+VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
+VECTOR EV_Trap ; 0x128, Trap exception (0x25)
+VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
+
+.rept 24
+VECTOR reserved ; Reserved Exceptions
+.endr
+
+#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
+#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+
+;##################### Scratch Mem for IRQ stack switching #############
+
+ARCFP_DATA int1_saved_reg
+ .align 32
+ .type int1_saved_reg, @object
+ .size int1_saved_reg, 4
+int1_saved_reg:
+ .zero 4
+
+/* Each Interrupt level needs its own scratch */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ARCFP_DATA int2_saved_reg
+ .type int2_saved_reg, @object
+ .size int2_saved_reg, 4
+int2_saved_reg:
+ .zero 4
+
+#endif
+
+; ---------------------------------------------
+ .section .text, "ax",@progbits
+
+res_service: ; processor restart
+ flag 0x1 ; not implemented
+ nop
+ nop
+
+reserved: ; processor restart
+ rtie ; jump to processor initializations
+
+;##################### Interrupt Handling ##############################
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+; ---------------------------------------------
+; Level 2 ISR: Can interrupt a Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level2
+
+	; TODO-vineetg for SMP this won't work
+ ; free up r9 as scratchpad
+ st r9, [@int2_saved_reg]
+
+	;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l2]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT2
+
+ ;------------------------------------------------------
+ ; if L2 IRQ interrupted a L1 ISR, disable preemption
+ ;------------------------------------------------------
+
+	ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; bump thread_info->preempt_count (Disable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+ add r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+1:
+ ;------------------------------------------------------
+ ; setup params for Linux common ISR and invoke it
+ ;------------------------------------------------------
+ lr r0, [icause2]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x2
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+
+ARC_EXIT handle_interrupt_level2
+
+#endif
+
+; ---------------------------------------------
+; Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level1
+
+ /* free up r9 as scratchpad */
+#ifdef CONFIG_SMP
+ sr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ st r9, [@int1_saved_reg]
+#endif
+
+	;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l1]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT1
+
+ lr r0, [icause1]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x1
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+ARC_EXIT handle_interrupt_level1
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Instruction Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY instr_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_insterror_or_kprobe
+ b ret_from_exception
+ARC_EXIT instr_service
+
+; ---------------------------------------------
+; Memory Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY mem_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_memory_error
+ b ret_from_exception
+ARC_EXIT mem_service
+
+; ---------------------------------------------
+; Machine Check Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_MachineCheck
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ brne r0, 0x200100, 1f
+ bl do_tlb_overlap_fault
+ b ret_from_exception
+
+1:
+ ; DEAD END: can't do much, display Regs and HALT
+ SAVE_CALLEE_SAVED_USER
+
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ j do_machine_check_fault
+
+ARC_EXIT EV_MachineCheck
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_TLBProtV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+	;Which mode (user/kernel) was the system in when Exception occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ ;---------(3) Save some more regs-----------------
+ ; vineetg: Mar 6th: Random Seg Fault issue #1
+ ; ecr and efa were not saved in case an Intr sneaks in
+ ; after fake rtie
+ ;
+ lr r3, [ecr]
+ lr r4, [efa]
+
+ ; --------(4) Return from CPU Exception Mode ---------
+ ; Fake a rtie, but rtie to next label
+ ; That way, subsequently, do_page_fault ( ) executes in pure kernel
+ ; mode with further Exceptions enabled
+
+ FAKE_RET_FROM_EXCPN r9
+
+ ;------ (5) Type of Protection Violation? ----------
+ ;
+ ; ProtV Hardware Exception is triggered for Access Faults of 2 types
+	; -Access Violation (WRITE to READ ONLY Page) - for linux COW
+ ; -Unaligned Access (READ/WRITE on odd boundary)
+ ;
+ cmp r3, 0x230400 ; Misaligned data access ?
+ beq 4f
+
+ ;========= (6a) Access Violation Processing ========
+ cmp r3, 0x230100
+ mov r1, 0x0 ; if LD exception ? write = 0
+ mov.ne r1, 0x1 ; else write = 1
+
+ mov r2, r4 ; faulting address
+ mov r0, sp ; pt_regs
+ bl do_page_fault
+ b ret_from_exception
+
+ ;========== (6b) Non aligned access ============
+4:
+ mov r0, r3 ; cause code
+ mov r1, r4 ; faulting address
+ mov r2, sp ; pt_regs
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ SAVE_CALLEE_SAVED_USER
+ mov r3, sp ; callee_regs
+#endif
+
+ bl do_misaligned_access
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ DISCARD_CALLEE_SAVED_USER
+#endif
+
+ b ret_from_exception
+
+ARC_EXIT EV_TLBProtV
+
+; ---------------------------------------------
+; Privilege Violation Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_PrivilegeV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_privilege_fault
+ b ret_from_exception
+ARC_EXIT EV_PrivilegeV
+
+; ---------------------------------------------
+; Extension Instruction Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_Extension
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_extension_fault
+ b ret_from_exception
+ARC_EXIT EV_Extension
+
+;######################### System Call Tracing #########################
+
+tracesys:
+ ; save EFA in case tracer wants the PC of traced task
+ ; using ERET won't work since next-PC has already committed
+ lr r12, [efa]
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
+ st r12, [r11, THREAD_FAULT_ADDR]
+
+ ; PRE Sys Call Ptrace hook
+ mov r0, sp ; pt_regs needed
+ bl @syscall_trace_entry
+
+ ; Tracing code now returns the syscall num (orig or modif)
+ mov r8, r0
+
+ ; Do the Sys Call as we normally would.
+ ; Validate the Sys Call number
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi tracesys_exit
+
+ ; Restore the sys-call args. Mere invocation of the hook abv could have
+ ; clobbered them (since they are in scratch regs). The tracer could also
+ ; have deliberately changed the syscall args: r0-r7
+ ld r0, [sp, PT_r0]
+ ld r1, [sp, PT_r1]
+ ld r2, [sp, PT_r2]
+ ld r3, [sp, PT_r3]
+ ld r4, [sp, PT_r4]
+ ld r5, [sp, PT_r5]
+ ld r6, [sp, PT_r6]
+ ld r7, [sp, PT_r7]
+ ld.as r9, [sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+tracesys_exit:
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ;POST Sys Call Ptrace Hook
+ bl @syscall_trace_exit
+	b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
+	; we'd already done before calling the post hook above
+
+;################### Break Point TRAP ##########################
+
+ ; ======= (5b) Trap is due to Break-Point =========
+
+trap_with_param:
+
+ ; stop_pc info by gdb needs this info
+ stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+
+ mov r0, r12
+ lr r1, [efa]
+ mov r2, sp
+
+	; Now that we have read EFA, it's safe to do "fake" rtie
+ ; and get out of CPU exception mode
+ FAKE_RET_FROM_EXCPN r11
+
+ ; Save callee regs in case gdb wants to have a look
+ ; SP will grow up by size of CALLEE Reg-File
+ ; NOTE: clobbers r12
+ SAVE_CALLEE_SAVED_USER
+
+ ; save location of saved Callee Regs @ thread_struct->pc
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ ; Call the trap handler
+ bl do_non_swi_trap
+
+ ; unwind stack to discard Callee saved Regs
+ DISCARD_CALLEE_SAVED_USER
+
+ b ret_from_exception
+
+;##################### Trap Handling ##############################
+;
+; EV_Trap caused by TRAP_S and TRAP0 instructions.
+;------------------------------------------------------------------
+; (1) System Calls
+; :parameters in r0-r7.
+; :r8 has the system call number
+; (2) Break Points
+;------------------------------------------------------------------
+
+ARC_ENTRY EV_Trap
+
+ ; Need at least 1 reg to code the early exception prolog
+ EXCPN_PROLOG_FREEUP_REG r9
+
+	;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_TRAP
+
+ ;------- (4) What caused the Trap --------------
+ lr r12, [ecr]
+ and.f 0, r12, ECR_PARAM_MASK
+ bnz trap_with_param
+
+ ; ======= (5a) Trap is due to System Call ========
+
+ ; Before doing anything, return from CPU Exception Mode
+ FAKE_RET_FROM_EXCPN r11
+
+ ; If syscall tracing ongoing, invoke pre-pos-hooks
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys ; this never comes back
+
+ ;============ This is normal System Call case ==========
+ ; Sys-call num shd not exceed the total system calls avail
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi ret_from_system_call
+
+ ; Offset into the syscall_table and call handler
+ ld.as r9,[sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+ ; fall through to ret_from_system_call
+ARC_EXIT EV_Trap
+
+ARC_ENTRY ret_from_system_call
+
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ; fall through yet again to ret_from_exception
+
+;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
+;
+; If ret to user mode do we need to handle signals, schedule() et al.
+
+ARC_ENTRY ret_from_exception
+
+ ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
+ ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
+
+#ifdef CONFIG_PREEMPT
+ bbit0 r8, STATUS_U_BIT, resume_kernel_mode
+#else
+ bbit0 r8, STATUS_U_BIT, restore_regs
+#endif
+
+ ; Before returning to User mode check-for-and-complete any pending work
+ ; such as rescheduling/signal-delivery etc.
+resume_user_mode_begin:
+
+	; Disable IRQs to ensure that the chk for pending work itself is atomic
+ ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
+ ; interim IRQ).
+ IRQ_DISABLE r10
+
+ ; Fast Path return to user mode if no pending work
+ GET_CURR_THR_INFO_FLAGS r9
+ and.f 0, r9, _TIF_WORK_MASK
+ bz restore_regs
+
+ ; --- (Slow Path #1) task preemption ---
+ bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
+ mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
+ b @schedule ; BTST+Bnz causes relo error in link
+
+.Lchk_pend_signals:
+ IRQ_ENABLE r10
+
+ ; --- (Slow Path #2) pending signal ---
+ mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
+
+ bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
+
+ ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
+ ; in pt_reg since the "C" ABI (kernel code) will automatically
+ ; save/restore callee-saved regs.
+ ;
+ ; However, here we need to explicitly save callee regs because
+ ; (i) If this signal causes coredump - full regfile needed
+ ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
+ ; tracer might call PEEKUSR(CALLEE reg)
+ ;
+ ; NOTE: SP will grow up by size of CALLEE Reg-File
+ SAVE_CALLEE_SAVED_USER ; clobbers r12
+
+ ; save location of saved Callee Regs @ thread_struct->callee
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ bl @do_signal
+
+ ; Ideally we want to discard the Callee reg above, however if this was
+ ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
+ RESTORE_CALLEE_SAVED_USER
+
+ b resume_user_mode_begin ; loop back to start of U mode ret
+
+ ; --- (Slow Path #3) notify_resume ---
+.Lchk_notify_resume:
+ btst r9, TIF_NOTIFY_RESUME
+ blnz @do_notify_resume
+ b resume_user_mode_begin ; unconditionally back to U mode ret chks
+ ; for single exit point from this block
+
+#ifdef CONFIG_PREEMPT
+
+resume_kernel_mode:
+
+ ; Can't preempt if preemption disabled
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
+ brne r8, 0, restore_regs
+
+ ; check if this task's NEED_RESCHED flag set
+ ld r9, [r10, THREAD_INFO_FLAGS]
+ bbit0 r9, TIF_NEED_RESCHED, restore_regs
+
+ IRQ_DISABLE r9
+
+ ; Invoke PREEMPTION
+ bl preempt_schedule_irq
+
+ ; preempt_schedule_irq() always returns with IRQ disabled
+#endif
+
+ ; fall through
+
+;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ shd definitely not happen between now and rtie
+
+restore_regs :
+
+ ; Disable Interrupts while restoring reg-file back
+ ; XXX can this be optimised out
+	IRQ_DISABLE_SAVE r9, r10 ;@r10 has pristine (pre-disable) copy
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Restore User R25
+ ; Earlier this used to be only for returning to user mode
+ ; However with 2 levels of IRQ this can also happen even if
+ ; in kernel mode
+ ld r9, [sp, PT_sp]
+ brhs r9, VMALLOC_START, 8f
+ RESTORE_USER_R25
+8:
+#endif
+
+ ; Restore REG File. In case multiple Events outstanding,
+	; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
+ ; Note that we use realtime STATUS32 (not pt_regs->status32) to
+ ; decide that.
+
+ ; if Returning from Exception
+ bbit0 r10, STATUS_AE_BIT, not_exception
+ RESTORE_ALL_SYS
+ rtie
+
+ ; Not Exception so maybe Interrupts (Level 1 or 2)
+
+not_exception:
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
+
+ ;------------------------------------------------------------------
+	; if L2 IRQ interrupted a L1 ISR, we'd disabled preemption earlier
+	; so that sched doesn't move to a new task, causing L1 to be delayed
+	; non-deterministically. Now that we've achieved that, let's reset
+ ; things to what they were, before returning from L2 context
+ ;----------------------------------------------------------------
+
+ ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
+	brne r9, orig_r8_IS_IRQ2, 149f ; in fact a L2 ISR ret path
+
+	ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; decrement thread_info->preempt_count (re-enable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+ ; paranoid check, given A1 was active when A2 happened, preempt count
+	; must not be 0 because we would have incremented it.
+ ; If this does happen we simply HALT as it means a BUG !!!
+ cmp r9, 0
+ bnz 2f
+ flag 1
+
+2:
+ sub r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+149:
+ ;return from level 2
+ RESTORE_ALL_INT2
+debug_marker_l2:
+ rtie
+
+not_level2_interrupt:
+
+#endif
+
+ bbit0 r10, STATUS_A1_BIT, not_level1_interrupt
+
+ ;return from level 1
+
+ RESTORE_ALL_INT1
+debug_marker_l1:
+ rtie
+
+not_level1_interrupt:
+
+ ;this case is for syscalls or Exceptions (with fake rtie)
+
+ RESTORE_ALL_SYS
+debug_marker_syscall:
+ rtie
+
+ARC_EXIT ret_from_exception
+
+ARC_ENTRY ret_from_fork
+ ; when the forked child comes here from the __switch_to function
+ ; r0 has the last task pointer.
+ ; put last task in scheduler queue
+ bl @schedule_tail
+
+	; If kernel thread, jump to its entry-point
+ ld r9, [sp, PT_status32]
+ brne r9, 0, 1f
+
+ jl.d [r14]
+ mov r0, r13 ; arg to payload
+
+1:
+ ; special case of kernel_thread entry point returning back due to
+ ; kernel_execve() - pretend return from syscall to ret to userland
+ b ret_from_exception
+ARC_EXIT ret_from_fork
+
+;################### Special Sys Call Wrappers ##########################
+
+; TBD: call do_fork directly from here
+ARC_ENTRY sys_fork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_fork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_fork_wrapper
+
+ARC_ENTRY sys_vfork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_vfork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_vfork_wrapper
+
+ARC_ENTRY sys_clone_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_clone
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_clone_wrapper
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+; Workaround for bug 94179 (STAR ):
+; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
+; section (.debug_frame) as loadable. So we force it here.
+; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
+; would not work after a clean build due to kernel build system dependencies.
+.section .debug_frame, "wa",@progbits
+#endif
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644
index 000000000000..f352e512cbd1
--- /dev/null
+++ b/arch/arc/kernel/fpu.c
@@ -0,0 +1,55 @@
+/*
+ * fpu.c - save/restore of Floating Point Unit Registers on task switch
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+/*
+ * To save/restore FPU regs, simplest scheme would use LR/SR insns.
+ * However since SR serializes the pipeline, an alternate "hack" can be used
+ * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
+ *
+ * Store to 64bit dpfp1 reg from a pair of core regs:
+ * dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
+ *
+ * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
+ * mov_s r3, 0
+ * daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
+ * dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
+ * dexcl1 0, r1, r0 ; restore dpfp1 to orig value
+ *
+ * However we can tweak the read, so that read-out of outgoing task's FPU regs
+ * and write of incoming task's regs happen in one shot. So all the work is
+ * done before context switch
+ */
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+ unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
+ unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
+
+ const unsigned int zero = 0;
+
+ __asm__ __volatile__(
+ "daddh11 %0, %2, %2\n"
+ "dexcl1 %1, %3, %4\n"
+	: "=&r" (*(saveto + 1)), /* early clobber must be here */
+ "=&r" (*(saveto))
+ : "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
+ );
+
+ __asm__ __volatile__(
+ "daddh22 %0, %2, %2\n"
+ "dexcl2 %1, %3, %4\n"
+	: "=&r"(*(saveto + 3)), /* early clobber must be here */
+ "=&r"(*(saveto + 2))
+ : "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
+ );
+}
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
new file mode 100644
index 000000000000..006dec3fc353
--- /dev/null
+++ b/arch/arc/kernel/head.S
@@ -0,0 +1,111 @@
+/*
+ * ARC CPU startup Code
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Dec 2007
+ * -Check if we are running on Simulator or on real hardware
+ * to skip certain things during boot on simulator
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+#include <linux/linkage.h>
+#include <asm/arcregs.h>
+
+ .cpu A7
+
+ .section .init.text, "ax",@progbits
+ .type stext, @function
+ .globl stext
+stext:
+ ;-------------------------------------------------------------------
+	; Don't clobber r0-r4 yet. They might hold bootloader provided info
+ ;-------------------------------------------------------------------
+
+#ifdef CONFIG_SMP
+ ; Only Boot (Master) proceeds. Others wait in platform dependent way
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (cpu-id) ^^^ => Zero for UP ARC700
+ ; => #Core-ID if SMP (Master 0)
+ GET_CPU_ID r5
+ cmp r5, 0
+ jnz arc_platform_smp_wait_to_boot
+#endif
+ ; Clear BSS before updating any globals
+ ; XXX: use ZOL here
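+	; (store zero a word at a time, post-incrementing r5 until it passes __bss_stop)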
+ mov r5, __bss_start
+ mov r6, __bss_stop
+1:
+ st.ab 0, [r5,4]
+ brlt r5, r6, 1b
+
+#ifdef CONFIG_CMDLINE_UBOOT
+ ; support for bootloader provided cmdline
+ ; If cmdline passed by u-boot, then
+ ; r0 = 1 (because ATAGS parsing, now retired, used to use 0)
+ ; r1 = magic number (board identity)
+ ; r2 = addr of cmdline string (somewhere in memory/flash)
+
+ brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline
+ breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL
+
+ mov r5, @command_line
+1:
+ ldb.ab r6, [r2, 1]
+ breq r6, 0, .Lother_bootup_chores
+ b.d 1b
+ stb.ab r6, [r5, 1]
+#endif
+
+.Lother_bootup_chores:
+
+ ; Identify if running on ISS vs Silicon
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (chip-id) ^^^^^ ==> 0xffff for ISS
+ lr r0, [identity]
+ lsr r3, r0, 16
+ cmp r3, 0xffff
+ mov.z r4, 0
+ mov.nz r4, 1
+ st r4, [@running_on_hw]
+
+ ; setup "current" tsk and optionally cache it in dedicated r25
+ mov r9, @init_task
+ SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+	; tsk->thread_info is really a PAGE, whose bottom hosts the stack
+ GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
+
+ j start_kernel ; "C" entry point
+
+#ifdef CONFIG_SMP
+;----------------------------------------------------------------
+; First lines of code run by secondary before jumping to 'C'
+;----------------------------------------------------------------
+ .section .init.text, "ax",@progbits
+ .type first_lines_of_secondary, @function
+ .globl first_lines_of_secondary
+
+first_lines_of_secondary:
+
+ ; setup per-cpu idle task as "current" on this CPU
+ ld r0, [@secondary_idle_tsk]
+ SET_CURR_TASK_ON_CPU r0, r1
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+	; set its stack base to tsk->thread_info bottom
+ GET_TSK_STACK_BASE r0, sp
+
+ j start_kernel_secondary
+
+#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
new file mode 100644
index 000000000000..551c10dff481
--- /dev/null
+++ b/arch/arc/kernel/irq.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <asm/sections.h>
+#include <asm/irq.h>
+#include <asm/mach_desc.h>
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ * -Platform Independent (must for any ARC700)
+ * -Needed for each CPU (hence not foldable into init_IRQ)
+ *
+ * What it does:
+ * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
+ * -Disable all IRQs (on CPU side)
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs
+ */
+void __init arc_init_IRQ(void)
+{
+ int level_mask = 0;
+
+ write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
+
+ /* Disable all IRQs: enable them as devices request */
+ write_aux_reg(AUX_IENABLE, 0);
+
+ /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
+#ifdef CONFIG_ARC_IRQ3_LV2
+ level_mask |= (1 << 3);
+#endif
+#ifdef CONFIG_ARC_IRQ5_LV2
+ level_mask |= (1 << 5);
+#endif
+#ifdef CONFIG_ARC_IRQ6_LV2
+ level_mask |= (1 << 6);
+#endif
+
+ if (level_mask) {
+ pr_info("Level-2 interrupts bitset %x\n", level_mask);
+ write_aux_reg(AUX_IRQ_LEV, level_mask);
+ }
+}
+
+/*
+ * ARC700 core includes a simple on-chip intc supporting
+ * -per IRQ enable/disable
+ * -2 levels of interrupts (high/low)
+ * -all interrupts being level triggered
+ *
+ * To reduce platform code, we assume all IRQs directly hooked-up into intc.
+ * Platforms with external intc, hence cascaded IRQs, are free to over-ride
+ * below, per IRQ.
+ */
+
+static void arc_mask_irq(struct irq_data *data)
+{
+ arch_mask_irq(data->irq);
+}
+
+static void arc_unmask_irq(struct irq_data *data)
+{
+ arch_unmask_irq(data->irq);
+}
+
+static struct irq_chip onchip_intc = {
+ .name = "ARC In-core Intc",
+ .irq_mask = arc_mask_irq,
+ .irq_unmask = arc_unmask_irq,
+};
+
+static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (irq == TIMER0_IRQ)
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
+ else
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops arc_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = arc_intc_domain_map,
+};
+
+static struct irq_domain *root_domain;
+
+void __init init_onchip_IRQ(void)
+{
+ struct device_node *intc = NULL;
+
+ intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
+	if (!intc)
+ panic("DeviceTree Missing incore intc\n");
+
+ root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+ &arc_intc_domain_ops, NULL);
+
+ if (!root_domain)
+ panic("root irq domain not avail\n");
+
+ /* with this we don't need to export root_domain */
+ irq_set_default_host(root_domain);
+}
+
+/*
+ * Late Interrupt system init called from start_kernel for Boot CPU only
+ *
+ * Since slab must already be initialized, platforms can start doing any
+ * needed request_irq( )s
+ */
+void __init init_IRQ(void)
+{
+ init_onchip_IRQ();
+
+ /* Any external intc can be setup here */
+ if (machine_desc->init_irq)
+ machine_desc->init_irq();
+
+#ifdef CONFIG_SMP
+	/* Master CPU can initialize its side of IPI */
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+#endif
+}
+
+/*
+ * "C" Entry point for any ARC ISR, called from low level vector handler
+ * @irq is the vector number read from ICAUSE reg of on-chip intc
+ */
+void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ generic_handle_irq(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+int __init get_hw_config_num_irq(void)
+{
+ uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
+
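+	/* lower 2 bits of the VECBASE build config register encode how many
+	 * interrupt lines the core was built with
+	 */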
+ switch (val & 0x03) {
+ case 0:
+ return 16;
+ case 1:
+ return 32;
+ case 2:
+ return 8;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * arch_local_irq_enable - Enable interrupts.
+ *
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ *    which may be in hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ * e.g. suppose TIMER is high priority (Level 2) IRQ
+ *  Timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
+ *  Here local_irq_enable( ) should not re-enable lower priority interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ *  soft ISRs are low priority jobs which can be very slow, thus all IRQs
+ * must be enabled while they run.
+ * Now hardware context wise we may still be in L2 ISR (not done rtie)
+ * still we must re-enable both L1 and L2 IRQs
+ * Another twist is prev scenario with flow being
+ * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
+ * here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
+ * over-written (this is a deficiency in the ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
+
+void arch_local_irq_enable(void)
+{
+
+ unsigned long flags;
+ flags = arch_local_save_flags();
+
+ /* Allow both L1 and L2 at the onset */
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+ /* Called from hard ISR (between irq_enter and irq_exit) */
+ if (in_irq()) {
+
+ /* If in L2 ISR, don't re-enable any further IRQs as this can
+ * cause IRQ priorities to get upside down. e.g. it could allow
+		 * L1 to be taken while in L2 hard ISR, which is wrong not only
+		 * in theory, it can also cause the dreaded L1-L2-L1 scenario
+ */
+ if (flags & STATUS_A2_MASK)
+ flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+
+		/* Even if in L1 ISR, allow higher prio L2 IRQs */
+ else if (flags & STATUS_A1_MASK)
+ flags &= ~(STATUS_E1_MASK);
+ }
+
+ /* called from soft IRQ, ideally we want to re-enable all levels */
+
+ else if (in_softirq()) {
+
+		/* However if this is the case of L1 interrupted by L2,
+		 * re-enabling both may cause the whacky L1-L2-L1 scenario
+		 * because ARC700 allows level 1 to interrupt an active L2 ISR.
+		 * Thus we disable both.
+		 * However some code executing in soft ISR wants some IRQs
+		 * to be enabled, so we re-enable L2 only.
+		 *
+		 * How do we determine L1 interrupted by L2?
+ * -A2 is set (means in L2 ISR)
+ * -E1 is set in this ISR's pt_regs->status32 which is
+ * saved copy of status32_l2 when l2 ISR happened
+ */
+ struct pt_regs *pt = get_irq_regs();
+ if ((flags & STATUS_A2_MASK) && pt &&
+ (pt->status32 & STATUS_A1_MASK)) {
+ /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
+ flags &= ~(STATUS_E1_MASK);
+ }
+ }
+
+ arch_local_irq_restore(flags);
+}
+
+#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
+
+/*
+ * Simpler version for only 1 level of interrupt
+ * Here we only worry about Level 1 bits
+ */
+void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+
+ /*
+	 * The ARC IDE driver tries to re-enable interrupts from hard-isr
+	 * context, which is simply wrong
+ */
+ if (in_irq()) {
+ WARN_ONCE(1, "IRQ enabled from hard-isr");
+ return;
+ }
+
+ flags = arch_local_save_flags();
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+ arch_local_irq_restore(flags);
+}
+#endif
+EXPORT_SYMBOL(arch_local_irq_enable);
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
new file mode 100644
index 000000000000..2888ba5be47e
--- /dev/null
+++ b/arch/arc/kernel/kgdb.c
@@ -0,0 +1,205 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kgdb.h>
+#include <asm/disasm.h>
+#include <asm/cacheflush.h>
+
+static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
+
+ for (regno = 27; regno < GDB_MAX_REGS; regno++)
+ gdb_regs[regno] = 0;
+
+ gdb_regs[_FP] = kernel_regs->fp;
+ gdb_regs[__SP] = kernel_regs->sp;
+ gdb_regs[_BLINK] = kernel_regs->blink;
+ gdb_regs[_RET] = kernel_regs->ret;
+ gdb_regs[_STATUS32] = kernel_regs->status32;
+ gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
+ gdb_regs[_LP_END] = kernel_regs->lp_end;
+ gdb_regs[_LP_START] = kernel_regs->lp_start;
+ gdb_regs[_BTA] = kernel_regs->bta;
+ gdb_regs[_STOP_PC] = kernel_regs->ret;
+}
+
+static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
+
+ kernel_regs->fp = gdb_regs[_FP];
+ kernel_regs->sp = gdb_regs[__SP];
+ kernel_regs->blink = gdb_regs[_BLINK];
+ kernel_regs->ret = gdb_regs[_RET];
+ kernel_regs->status32 = gdb_regs[_STATUS32];
+ kernel_regs->lp_count = gdb_regs[_LP_COUNT];
+ kernel_regs->lp_end = gdb_regs[_LP_END];
+ kernel_regs->lp_start = gdb_regs[_LP_START];
+ kernel_regs->bta = gdb_regs[_BTA];
+}
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+ struct task_struct *task)
+{
+ if (task)
+ to_gdb_regs(gdb_regs, task_pt_regs(task),
+ (struct callee_regs *) task->thread.callee_reg);
+}
+
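+/* Bookkeeping for s/w single-step: up to two trap locations (fall-through
+ * and branch target), their original opcodes, and whether traps are planted
+ */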
+struct single_step_data_t {
+ uint16_t opcode[2];
+ unsigned long address[2];
+ int is_branch;
+ int armed;
+} single_step_data;
+
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (single_step_data.armed) {
+ int i;
+
+ for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
+ memcpy((void *) single_step_data.address[i],
+ &single_step_data.opcode[i],
+ BREAK_INSTR_SIZE);
+
+ flush_icache_range(single_step_data.address[i],
+ single_step_data.address[i] +
+ BREAK_INSTR_SIZE);
+ }
+ single_step_data.armed = 0;
+ }
+}
+
+static void place_trap(unsigned long address, void *save)
+{
+ memcpy(save, (void *) address, BREAK_INSTR_SIZE);
+ memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ flush_icache_range(address, address + BREAK_INSTR_SIZE);
+}
+
+static void do_single_step(struct pt_regs *regs)
+{
+ single_step_data.is_branch = disasm_next_pc((unsigned long)
+ regs->ret, regs, (struct callee_regs *)
+ current->thread.callee_reg,
+ &single_step_data.address[0],
+ &single_step_data.address[1]);
+
+ place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
+
+ if (single_step_data.is_branch) {
+ place_trap(single_step_data.address[1],
+ &single_step_data.opcode[1]);
+ }
+
+ single_step_data.armed++;
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ char *remcomInBuffer, char *remcomOutBuffer,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+ char *ptr;
+
+ undo_single_step(regs);
+
+ switch (remcomInBuffer[0]) {
+ case 's':
+ case 'c':
+ ptr = &remcomInBuffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->ret = addr;
+
+ case 'D':
+ case 'k':
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+ if (remcomInBuffer[0] == 's') {
+ do_single_step(regs);
+ atomic_set(&kgdb_cpu_doing_single_step,
+ smp_processor_id());
+ }
+
+ return 0;
+ }
+ return -1;
+}
+
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int kgdb_arch_init(void)
+{
+ single_step_data.armed = 0;
+ return 0;
+}
+
+void kgdb_trap(struct pt_regs *regs, int param)
+{
+ /* trap_s 3 is used for breakpoints that overwrite existing
+ * instructions, while trap_s 4 is used for compiled breakpoints.
+ *
+ * with trap_s 3 breakpoints the original instruction needs to be
+ * restored and continuation needs to start at the location of the
+ * breakpoint.
+ *
+ * with trap_s 4 (compiled) breakpoints, continuation needs to
+ * start after the breakpoint.
+ */
+ if (param == 3)
+ instruction_pointer(regs) -= BREAK_INSTR_SIZE;
+
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+}
+
+void kgdb_arch_exit(void)
+{
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ instruction_pointer(regs) = ip;
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+ /* breakpoint instruction: TRAP_S 0x3 */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ .gdb_bpt_instr = {0x78, 0x7e},
+#else
+ .gdb_bpt_instr = {0x7e, 0x78},
+#endif
+};
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
new file mode 100644
index 000000000000..3bfeacb674de
--- /dev/null
+++ b/arch/arc/kernel/kprobes.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/current.h>
+#include <asm/disasm.h>
+
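+/* Bytes of stack to save/restore around a jprobe: the smaller of
+ * MAX_STACK_SIZE and what lies between @addr and the top of the kernel stack
+ */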
+#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
+ (unsigned long)current_thread_info() + THREAD_SIZE - (addr))
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ /* Attempt to probe at unaligned address */
+ if ((unsigned long)p->addr & 0x01)
+ return -EINVAL;
+
+ /* Address should not be in exception handling code */
+
+ p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
+ p->opcode = *p->addr;
+
+ return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = UNIMP_S_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ arch_disarm_kprobe(p);
+
+ /* Can we remove the kprobe in the middle of kprobe handling? */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static inline void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __get_cpu_var(current_kprobe) = p;
+}
+
+static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
+ struct pt_regs *regs)
+{
+ /* Remove the trap instructions inserted for single step and
+ * restore the original instructions
+ */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+
+ return;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long next_pc;
+ unsigned long tgt_if_br = 0;
+ int is_branch;
+ unsigned long bta;
+
+ /* Copy the opcode back to the kprobe location and execute the
+ * instruction. Because of this we will not be able to get into the
+ * same kprobe until this kprobe is done
+ */
+ *(p->addr) = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+
+ /* Now we insert the trap at the next location after this instruction to
+ * single step. If it is a branch we insert the trap at possible branch
+ * targets
+ */
+
+ bta = regs->bta;
+
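+	/* 0x40 is the STATUS32 DE bit: the faulting PC sits in a branch
+	 * delay slot
+	 */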
+ if (regs->status32 & 0x40) {
+ /* We are in a delay slot with the branch taken */
+
+ next_pc = bta & ~0x01;
+
+ if (!p->ainsn.is_short) {
+ if (bta & 0x01)
+ regs->blink += 2;
+ else {
+ /* Branch not taken */
+ next_pc += 2;
+
+ /* next pc is taken from bta after executing the
+ * delay slot instruction
+ */
+ regs->bta += 2;
+ }
+ }
+
+ is_branch = 0;
+ } else
+ is_branch =
+ disasm_next_pc((unsigned long)p->addr, regs,
+ (struct callee_regs *) current->thread.callee_reg,
+ &next_pc, &tgt_if_br);
+
+ p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
+ p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
+ *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ if (is_branch) {
+ p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
+ p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
+ *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+ }
+}
+
+int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ preempt_disable();
+
+ kcb = get_kprobe_ctlblk();
+ p = get_kprobe((unsigned long *)addr);
+
+ if (p) {
+ /*
+		 * We have re-entered the kprobe_handler: another kprobe was
+		 * hit while within the handler, so we save the original
+		 * kprobe and single step on the instruction of the new probe
+		 * without calling any user handlers, to avoid recursive
+		 * kprobes.
+ */
+ if (kprobe_running()) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /* If we have no pre-handler or it returned 0, we continue with
+ * normal processing. If we have a pre-handler and it returned
+		 * non-zero (which is expected from setjmp_pre_handler for a
+		 * jprobe), we return without single stepping and leave that to
+		 * the break-handler, which is invoked by a kprobe from
+ * jprobe_return
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ return 1;
+ } else if (kprobe_running()) {
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+ }
+ }
+
+ /* no_kprobe: */
+ preempt_enable_no_resched();
+ return 0;
+}
+
+static int __kprobes arc_post_kprobe_handler(unsigned long addr,
+ struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ resume_execution(cur, addr, regs);
+
+ /* Rearm the kprobe */
+ arch_arm_kprobe(cur);
+
+ /*
+	 * When we return from the trap instruction we go to the next
+	 * instruction. We restored the actual instruction in resume_execution()
+	 * and we need to return to the same address and execute it
+ */
+ regs->ret = addr;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ reset_current_kprobe();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+/*
+ * Fault can be for the instruction being single stepped or for the
+ * pre/post handlers in the module.
+ * This is applicable for applications like user probes, where we have the
+ * probe in user space and the handlers in the kernel
+ */
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single stepped
+ * caused the fault. We reset the current kprobe and allow the
+		 * exception handler as if it were a regular exception. In our
+ * case it doesn't matter because the system will be halted
+ */
+ resume_execution(cur, (unsigned long)cur->addr, regs);
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ preempt_enable_no_resched();
+ break;
+
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We are here because the instructions in the pre/post handler
+ * caused the fault.
+ */
+
+ /* We increment the nmissed count for accounting,
+		 * we can also use npre/npostfault count for accounting
+ * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned zero,
+ * try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ unsigned long addr = args->err;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_IERR:
+ if (arc_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ case DIE_TRAP:
+ if (arc_post_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr = regs->sp;
+
+ kcb->jprobe_saved_regs = *regs;
+ memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
+ regs->ret = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ __asm__ __volatile__("unimp_s");
+ return;
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr;
+
+ *regs = kcb->jprobe_saved_regs;
+ sp_addr = regs->sp;
+ memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static void __used kretprobe_trampoline_holder(void)
+{
+ __asm__ __volatile__(".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n" "nop\n");
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+
+ ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&kretprobe_trampoline;
+}
+
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address) {
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ regs->ret = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ /* By returning a non zero value, we are telling the kprobe handler
+ * that we don't want the post_handler to run
+ */
+ return 1;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ /* Registering the trampoline code for the kret probe */
+ return register_kprobe(&trampoline_p);
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
+}
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
new file mode 100644
index 000000000000..cdd359352c0a
--- /dev/null
+++ b/arch/arc/kernel/module.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <asm/unwind.h>
+
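+/* ARC 32-bit immediates are stored "middle-endian": the more significant
+ * half-word first, followed by the less significant one
+ */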
+static inline void arc_write_me(unsigned short *addr, unsigned long value)
+{
+ *addr = (value & 0xffff0000) >> 16;
+ *(addr + 1) = (value & 0xffff);
+}
+
+/* ARC specific section quirks - before relocation loop in generic loader
+ *
+ * For dwarf unwinding out of modules, this needs to
+ * 1. Ensure the .debug_frame is allocatable (ARC Linker bug: despite
+ * -fasynchronous-unwind-tables it doesn't).
+ * 2. Since we are iterating thru sec hdr tbl anyways, make a note of
+ * the exact section index, for later use.
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstr, struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int i;
+
+ mod->arch.unw_sec_idx = 0;
+ mod->arch.unw_info = NULL;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
+ sechdrs[i].sh_flags |= SHF_ALLOC;
+ mod->arch.unw_sec_idx = i;
+ break;
+ }
+ }
+#endif
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ if (mod->arch.unw_info)
+ unwind_remove_table(mod->arch.unw_info, 0);
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex, /* sec index for sym tbl */
+ unsigned int relsec, /* sec index for relo sec */
+ struct module *module)
+{
+ int i, n;
+ Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym_entry, *sym_sec;
+ Elf32_Addr relocation;
+ Elf32_Addr location;
+ Elf32_Addr sec_to_patch;
+ int relo_type;
+
+ sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
+ n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
+
+ pr_debug("\n========== Module Sym reloc ===========================\n");
+ pr_debug("Section to fixup %x\n", sec_to_patch);
+ pr_debug("=========================================================\n");
+ pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+ pr_debug("=========================================================\n");
+
+ /* Loop thru entries in relocation section */
+ for (i = 0; i < n; i++) {
+
+ /* This is where to make the change */
+ location = sec_to_patch + rel_entry[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
+
+ relocation = sym_entry->st_value + rel_entry[i].r_addend;
+
+ pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation,
+ strtab + sym_entry->st_name);
+
+ /* This assumes modules are built with -mlong-calls
+		 * so any branches/jumps are absolute 32 bit jmps;
+		 * global data access again is abs 32 bit.
+		 * Both of these are handled by the same relocation type
+ */
+ relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
+
+ if (likely(R_ARC_32_ME == relo_type))
+ arc_write_me((unsigned short *)location, relocation);
+ else if (R_ARC_32 == relo_type)
+ *((Elf32_Addr *) location) = relocation;
+ else
+ goto relo_err;
+
+ }
+ return 0;
+
+relo_err:
+ pr_err("%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel_entry[i].r_info));
+ return -ENOEXEC;
+
+}
+
+/* Just before lift off: After sections have been relocated, we add the
+ * dwarf section to the unwinder table pool.
+ * This couldn't be done in module_frob_arch_sections() because
+ * relocations had not been applied by then
+ */
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ void *unw;
+ int unwsec = mod->arch.unw_sec_idx;
+
+ if (unwsec) {
+ unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
+ sechdrs[unwsec].sh_size);
+ mod->arch.unw_info = unw;
+ }
+#endif
+ return 0;
+}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
new file mode 100644
index 000000000000..0a7531d99294
--- /dev/null
+++ b/arch/arc/kernel/process.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/elf.h>
+#include <linux/tick.h>
+
+SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
+{
+ task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
+ return 0;
+}
+
+/*
+ * We return the user space TLS data ptr as sys-call return code
+ * Ideally it should be a copy-to-user.
+ * However we can cheat by the fact that some sys-calls do return
+ * absurdly high values
+ * Since the TLS data ptr is not going to be in the range of 0xFFFF_xxxx
+ * it won't be considered a sys-call error
+ * and it will be loads better than copy-to-user, which is a definite
+ * D-TLB Miss
+ */
+SYSCALL_DEFINE0(arc_gettls)
+{
+ return task_thread_info(current)->thr_ptr;
+}
+
+static inline void arch_idle(void)
+{
+ /* sleep, but enable all interrupts before committing */
+ __asm__("sleep 0x3");
+}
+
+void cpu_idle(void)
+{
+ /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
+
+doze:
+ local_irq_disable();
+ if (!need_resched()) {
+ arch_idle();
+ goto doze;
+ } else {
+ local_irq_enable();
+ }
+
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
+
+ schedule_preempt_disabled();
+ }
+}
+
+asmlinkage void ret_from_fork(void);
+
+/* Layout of Child kernel mode stack as setup at the end of this function is
+ *
+ * | ... |
+ * | ... |
+ * | unused |
+ * | |
+ * ------------------ <==== top of Stack (thread.ksp)
+ * | UNUSED 1 word|
+ * ------------------
+ * | r25 |
+ * ~ ~
+ * | --to-- | (CALLEE Regs of user mode)
+ * | r13 |
+ * ------------------
+ * | fp |
+ * | blink | @ret_from_fork
+ * ------------------
+ * | |
+ * ~ ~
+ * ~ ~
+ * | |
+ * ------------------
+ * | r12 |
+ * ~ ~
+ * | --to-- | (scratch Regs of user mode)
+ * | r0 |
+ * ------------------
+ * | UNUSED 1 word|
+ * ------------------ <===== END of PAGE
+ */
+int copy_thread(unsigned long clone_flags,
+ unsigned long usp, unsigned long arg,
+ struct task_struct *p)
+{
+ struct pt_regs *c_regs; /* child's pt_regs */
+ unsigned long *childksp; /* to unwind out of __switch_to() */
+ struct callee_regs *c_callee; /* child's callee regs */
+	struct callee_regs *parent_callee;	/* parent's callee */
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Mark the specific anchors to begin with (see pic above) */
+ c_regs = task_pt_regs(p);
+ childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
+ c_callee = ((struct callee_regs *)childksp) - 1;
+
+ /*
+ * __switch_to() uses thread.ksp to start unwinding stack
+ * For kernel threads we don't need to create callee regs, the
+ * stack layout nevertheless needs to remain the same.
+ * Also, since __switch_to anyways unwinds callee regs, we use
+ * this to populate kernel thread entry-pt/args into callee regs,
+ * so that ret_from_kernel_thread() becomes simpler.
+ */
+ p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top */
+ childksp[0] = 0; /* fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* blink */
+
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(c_regs, 0, sizeof(struct pt_regs));
+
+ c_callee->r13 = arg; /* argument to kernel thread */
+ c_callee->r14 = usp; /* function */
+
+ return 0;
+ }
+
+ /*--------- User Task Only --------------*/
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
+ childksp[0] = 0; /* for POP fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
+
+ /* Copy parents pt regs on child's kernel mode stack */
+ *c_regs = *regs;
+
+ if (usp)
+ c_regs->sp = usp;
+
+ c_regs->r0 = 0; /* fork returns 0 in child */
+
+ parent_callee = ((struct callee_regs *)regs) - 1;
+ *c_callee = *parent_callee;
+
+ if (unlikely(clone_flags & CLONE_SETTLS)) {
+ /*
+ * set task's userland tls data ptr from 4th arg
+		 * clone C-lib call is different from clone sys-call
+ */
+ task_thread_info(p)->thr_ptr = regs->r3;
+ } else {
+ /* Normal fork case: set parent's TLS ptr in child */
+ task_thread_info(p)->thr_ptr =
+ task_thread_info(current)->thr_ptr;
+ }
+
+ return 0;
+}
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void)
+{
+}
+
+/*
+ * Free any architecture-specific thread data structures, etc.
+ */
+void exit_thread(void)
+{
+}
+
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ return 0;
+}
+
+/*
+ * API expected by scheduler code: if a thread is sleeping, where is it?
+ * What is this good for? It will always be the scheduler or ret_from_fork.
+ * So we hard code that anyways.
+ */
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ struct pt_regs *regs = task_pt_regs(t);
+ unsigned long blink = 0;
+
+ /*
+	 * If the thread being queried for is not itself calling this, then it
+	 * implies it is not executing, which in turn implies it is sleeping,
+	 * which in turn implies it got switched OUT by the scheduler.
+	 * In that case, its kernel mode blink can be reliably retrieved as per
+ * the picture above (right above pt_regs).
+ */
+ if (t != current && t->state != TASK_RUNNING)
+ blink = *((unsigned int *)regs - 1);
+
+ return blink;
+}
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+ unsigned int eflags;
+
+ if (x->e_machine != EM_ARCOMPACT)
+ return 0;
+
+ eflags = x->e_flags;
+ if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+ pr_err("ABI mismatch - you need newer toolchain\n");
+ force_sigsegv(SIGSEGV, current);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
new file mode 100644
index 000000000000..c6a81c58d0f3
--- /dev/null
+++ b/arch/arc/kernel/ptrace.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/unistd.h>
+#include <linux/elf.h>
+
+static struct callee_regs *task_callee_regs(struct task_struct *tsk)
+{
+ struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
+ return tmp;
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+ unsigned int stop_pc_val;
+
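+/* Helpers: copy out a contiguous run [START..END) of user_regs_struct
+ * members, or a single member, from the given in-kernel source @PTR
+ */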
+#define REG_O_CHUNK(START, END, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, START), \
+ offsetof(struct user_regs_struct, END));
+
+#define REG_O_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ REG_O_CHUNK(scratch, callee, ptregs);
+ REG_O_CHUNK(callee, efa, cregs);
+ REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
+
+ if (!ret) {
+ if (in_brkpt_trap(ptregs)) {
+ stop_pc_val = target->thread.fault_address;
+ pr_debug("\t\tstop_pc (brk-pt)\n");
+ } else {
+ stop_pc_val = ptregs->ret;
+ pr_debug("\t\tstop_pc (others)\n");
+ }
+
+ REG_O_ONE(stop_pc, &stop_pc_val);
+ }
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+
+#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, FIRST), \
+ offsetof(struct user_regs_struct, NEXT));
+
+#define REG_IN_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+#define REG_IGNORE_ONE(LOC) \
+ if (!ret) \
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ /* TBD: disallow updates to STATUS32, orig_r8 etc*/
+ REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */
+ REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
+ REG_IGNORE_ONE(efa); /* efa update invalid */
+ REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+
+ return ret;
+}
+
+enum arc_getset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset arc_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = genregs_get,
+ .set = genregs_set,
+ }
+};
+
+static const struct user_regset_view user_arc_view = {
+ .name = UTS_MACHINE,
+ .e_machine = EM_ARCOMPACT,
+ .regsets = arc_regsets,
+ .n = ARRAY_SIZE(arc_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_arc_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EIO;
+
+ pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace_entry(struct pt_regs *regs)
+{
+ if (tracehook_report_syscall_entry(regs))
+ return ULONG_MAX;
+
+ return regs->r8;
+}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
new file mode 100644
index 000000000000..e227a2b1c943
--- /dev/null
+++ b/arch/arc/kernel/reset.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+
+void machine_halt(void)
+{
+ /* Halt the processor */
+ __asm__ __volatile__("flag 1\n");
+}
+
+void machine_restart(char *__unused)
+{
+ /* Soft reset : jump to reset vector */
+ pr_info("Put your restart handler here\n");
+ machine_halt();
+}
+
+void machine_power_off(void)
+{
+ /* FIXME :: power off ??? */
+ machine_halt();
+}
+
+void (*pm_power_off) (void) = NULL;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
new file mode 100644
index 000000000000..dc0f968dae0a
--- /dev/null
+++ b/arch/arc/kernel/setup.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/of_fdt.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+#include <asm/cache.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/prom.h>
+#include <asm/unwind.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
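+/* Empty asm with a read-write operand: forces @x into a register and keeps
+ * the compiler from re-deriving or reordering accesses through it
+ */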
+#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
+
+int running_on_hw = 1; /* vs. on ISS */
+
+char __initdata command_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;
+
+struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
+
+struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+
+
+void __init read_arc_build_cfg_regs(void)
+{
+ struct bcr_perip uncached_space;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ FIX_PTR(cpu);
+
+ READ_BCR(AUX_IDENTITY, cpu->core);
+
+ cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
+
+ cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
+ if (cpu->vec_base == 0)
+ cpu->vec_base = (unsigned int)_int_vec_base_lds;
+
+ READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
+ cpu->uncached_base = uncached_space.start << 24;
+
+ cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR);
+ cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
+ cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
+ cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
+ cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
+ READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
+
+ cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR);
+ cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR);
+
+	/* Note that we read the CCM BCRs independent of kernel config.
+	 * This is to catch the cases where the user doesn't know that
+	 * CCMs are present in the hardware build
+ */
+ {
+ struct bcr_iccm iccm;
+ struct bcr_dccm dccm;
+ struct bcr_dccm_base dccm_base;
+ unsigned int bcr_32bit_val;
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
+ if (bcr_32bit_val) {
+ iccm = *((struct bcr_iccm *)&bcr_32bit_val);
+ cpu->iccm.base_addr = iccm.base << 16;
+ cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
+ }
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
+ if (bcr_32bit_val) {
+ dccm = *((struct bcr_dccm *)&bcr_32bit_val);
+ cpu->dccm.sz = 0x800 << (dccm.sz);
+
+ READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
+ cpu->dccm.base_addr = dccm_base.addr << 8;
+ }
+ }
+
+ READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+
+ read_decode_mmu_bcr();
+ read_decode_cache_bcr();
+
+ READ_BCR(ARC_REG_FP_BCR, cpu->fp);
+ READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp);
+}
+
+static const struct cpuinfo_data arc_cpu_tbl[] = {
+ { {0x10, "ARCTangent A5"}, 0x1F},
+ { {0x20, "ARC 600" }, 0x2F},
+ { {0x30, "ARC 700" }, 0x33},
+ { {0x34, "ARC 700 R4.10"}, 0x34},
+ { {0x00, NULL } }
+};
+
+char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+ struct bcr_identity *core = &cpu->core;
+ const struct cpuinfo_data *tbl;
+ int be = 0;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ be = 1;
+#endif
+ FIX_PTR(cpu);
+
+ n += scnprintf(buf + n, len - n,
+ "\nARC IDENTITY\t: Family [%#02x]"
+ " Cpu-id [%#02x] Chip-id [%#4x]\n",
+ core->family, core->cpu_id,
+ core->chip_id);
+
+ for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
+ if ((core->family >= tbl->info.id) &&
+ (core->family <= tbl->up_range)) {
+ n += scnprintf(buf + n, len - n,
+ "processor\t: %s %s\n",
+ tbl->info.str,
+ be ? "[Big Endian]" : "");
+ break;
+ }
+ }
+
+ if (tbl->info.id == 0)
+ n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+
+ n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
+ (unsigned int)(arc_get_core_freq() / 1000000),
+ (unsigned int)(arc_get_core_freq() / 10000) % 100);
+
+ n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n",
+ (cpu->timers & 0x200) ? "TIMER1" : "",
+ (cpu->timers & 0x100) ? "TIMER0" : "");
+
+ n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n",
+ cpu->vec_base);
+
+ n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n",
+ cpu->uncached_base);
+
+ return buf;
+}
+
+static const struct id_to_str mul_type_nm[] = {
+ { 0x0, "N/A"},
+ { 0x1, "32x32 (spl Result Reg)" },
+ { 0x2, "32x32 (ANY Result Reg)" }
+};
+
+static const struct id_to_str mac_mul_nm[] = {
+ {0x0, "N/A"},
+ {0x1, "N/A"},
+ {0x2, "Dual 16 x 16"},
+ {0x3, "N/A"},
+ {0x4, "32x16"},
+ {0x5, "N/A"},
+ {0x6, "Dual 16x16 and 32x16"}
+};
+
+char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+
+ FIX_PTR(cpu);
+#define IS_AVAIL1(var, str) ((var) ? str : "")
+#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
+#define IS_USED(var) ((var) ? "(in-use)" : "(not used)")
+
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-Base]\t: %s %s %s %s %s %s\n",
+ IS_AVAIL2(cpu->extn.norm, "norm,"),
+ IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"),
+ IS_AVAIL1(cpu->extn.swap, "swap,"),
+ IS_AVAIL2(cpu->extn.minmax, "minmax,"),
+ IS_AVAIL1(cpu->extn.crc, "crc,"),
+ IS_AVAIL2(cpu->extn.ext_arith, "ext-arith"));
+
+ n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s",
+ mul_type_nm[cpu->extn.mul].str);
+
+ n += scnprintf(buf + n, len - n, " MAC MPY: %s\n",
+ mac_mul_nm[cpu->extn_mac_mul.type].str);
+
+ if (cpu->core.family == 0x34) {
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
+ IS_USED(__CONFIG_ARC_HAS_LLSC_VAL),
+ IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL),
+ IS_USED(__CONFIG_ARC_HAS_RTSC_VAL));
+ }
+
+ n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
+ !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");
+
+ if (cpu->dccm.sz)
+ n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
+ cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));
+
+ if (cpu->iccm.sz)
+ n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
+ cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+
+ n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
+ !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");
+
+ if (cpu->fp.ver)
+ n += scnprintf(buf + n, len - n, "SP [v%d] %s",
+ cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");
+
+ if (cpu->dpfp.ver)
+ n += scnprintf(buf + n, len - n, "DP [v%d] %s",
+ cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");
+
+ n += scnprintf(buf + n, len - n, "\n");
+
+#ifdef _ASM_GENERIC_UNISTD_H
+ n += scnprintf(buf + n, len - n,
+ "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
+#endif
+
+ return buf;
+}
+
+void __init arc_chk_ccms(void)
+{
+#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ /*
+	 * DCCM can be placed arbitrarily in hardware.
+	 * Make sure its placement/size matches what Linux is built with
+ */
+ if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
+ panic("Linux built with incorrect DCCM Base address\n");
+
+ if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
+ panic("Linux built with incorrect DCCM Size\n");
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
+ panic("Linux built with incorrect ICCM Size\n");
+#endif
+#endif
+}
+
+/*
+ * Ensure that FP hardware and kernel config match
+ * -If hardware contains DPFP, kernel needs to save/restore FPU state
+ * across context switches
+ * -If hardware lacks DPFP, but kernel is configured to save FPU state, then
+ *  the kernel trying to access non-existent DPFP regs will crash
+ *
+ * We only check for Dbl precision Floating Point, because only DPFP
+ * hardware has dedicated regs which need to be saved/restored on ctx-sw
+ * (Single Precision uses core regs), thus kernel is kind of oblivious to it
+ */
+void __init arc_chk_fpu(void)
+{
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+ if (cpu->dpfp.ver) {
+#ifndef CONFIG_ARC_FPU_SAVE_RESTORE
+ pr_warn("DPFP support broken in this kernel...\n");
+#endif
+ } else {
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ panic("H/w lacks DPFP support, apps won't work\n");
+#endif
+ }
+}
+
+/*
+ * Initialize and setup the processor core
+ * This is called by all the CPUs, thus it should not do special-case stuff
+ * such as only-for-boot-CPU work etc
+ */
+
+void __init setup_processor(void)
+{
+ char str[512];
+ int cpu_id = smp_processor_id();
+
+ read_arc_build_cfg_regs();
+ arc_init_IRQ();
+
+ printk(arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
+
+ arc_mmu_init();
+ arc_cache_init();
+ arc_chk_ccms();
+
+ printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
+
+#ifdef CONFIG_SMP
+ printk(arc_platform_smp_cpuinfo());
+#endif
+
+ arc_chk_fpu();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef CONFIG_CMDLINE_UBOOT
+	/* Make sure that a whitespace is inserted before the appended cmdline */
+ strlcat(command_line, " ", sizeof(command_line));
+#endif
+ /*
+ * Append .config cmdline to base command line, which might already
+ * contain u-boot "bootargs" (handled by head.S, if so configured)
+ */
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ machine_desc = setup_machine_fdt(__dtb_start);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+
+ /* To force early parsing of things like mem=xxx */
+ parse_early_param();
+
+ /* Platform/board specific: e.g. early console registration */
+ if (machine_desc->init_early)
+ machine_desc->init_early();
+
+ setup_processor();
+
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
+ setup_arch_memory();
+
+ /* copy flat DT out of .init and then unflatten it */
+ copy_devtree();
+ unflatten_device_tree();
+
+	/* Can be an issue if someone passes the cmdline arg "ro",
+	 * but that is unlikely, so keep it as it is
+	 */
+ root_mountflags &= ~MS_RDONLY;
+
+ console_verbose();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+ arc_unwind_init();
+ arc_unwind_setup();
+}
+
+static int __init customize_machine(void)
+{
+ /* Add platform devices */
+ if (machine_desc->init_machine)
+ machine_desc->init_machine();
+
+ return 0;
+}
+arch_initcall(customize_machine);
+
+static int __init init_late_machine(void)
+{
+ if (machine_desc->init_late)
+ machine_desc->init_late();
+
+ return 0;
+}
+late_initcall(init_late_machine);
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
+#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *str;
+ int cpu_id = ptr_to_cpu(v);
+
+ str = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!str)
+ goto done;
+
+ seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+#ifdef CONFIG_SMP
+ seq_printf(m, arc_platform_smp_cpuinfo());
+#endif
+
+ free_page((unsigned long)str);
+done:
+ seq_printf(m, "\n\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ /*
+ * Callback returns cpu-id to iterator for show routine, NULL to stop.
+ * However since NULL is also a valid cpu-id (0), we use a round-about
+ * way to pass it w/o having to kmalloc/free a 2 byte string.
+	 * Encode cpu-id as 0xFFFFcccc, which is decoded by the show routine.
+ */
+ return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+}
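+
+/*
+ * For instance, cpu-id 2 travels through the iterator as the pointer
+ * (void *)0xFFFF0002; ptr_to_cpu() masks off the 0xFFFF0000 tag to recover 2,
+ * while cpu-id 0 becomes the non-NULL 0xFFFF0000, so the iterator can still
+ * tell "cpu 0" apart from "stop".
+ */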
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_topology);
+
+static int __init topology_init(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ register_cpu(&per_cpu(cpu_topology, cpu), cpu);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
new file mode 100644
index 000000000000..ee6ef2f60a28
--- /dev/null
+++ b/arch/arc/kernel/signal.c
@@ -0,0 +1,360 @@
+/*
+ * Signal Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2010 (Restarting of timer related syscalls)
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal() supports TIF_RESTORE_SIGMASK
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard the arch implementation
+ * -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
+ * -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
+ * the job to do_signal()
+ *
+ * vineetg: July 2009
+ * -Modified Code to support the uClibc provided userland sigreturn stub
+ *  to avoid kernel synthesizing it on user stack at runtime, costing TLB
+ * probes and Cache line flushes.
+ *
+ * vineetg: July 2009
+ * -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
+ *  is done as a block copy rather than one word at a time.
+ * This saves around 2K of code and improves LMBench lat_sig <catch>
+ *
+ * rajeshwarr: Feb 2009
+ * - Support for Realtime Signals
+ *
+ * vineetg: Aug 11th 2008: Bug #94183
+ * -ViXS were still seeing crashes when using insmod to load drivers.
+ *  It turned out that the code to change Execute permissions for TLB entries
+ *  of user was not guarded against interrupts (mod_tlb_permission)
+ *  This was causing TLB entries to be overwritten on unrelated indexes
+ *
+ * Vineetg: July 15th 2008: Bug #94183
+ * -Exception happens in Delay slot of a JMP, and before user space resumes,
+ *  Signal is delivered (Ctrl + C) => SIGINT.
+ * setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
+ * to run, but doesn't clear the Delay slot bit from status32. As a result,
+ * on resuming user mode, signal handler branches off to BTA of orig JMP
+ * -FIX: clear the DE bit from status32 in setup_frame( )
+ *
+ * Rahul Trivedi, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <asm/ucontext.h>
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+#define MAGIC_SIGALTSTK 0x07302004
+ unsigned int sigret_magic;
+};
+
+static int
+stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+{
+ int err;
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+ return err;
+}
+
+static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (!err)
+ set_current_blocked(&set);
+
+ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ return err;
+}
+
+static inline int is_do_ss_needed(unsigned int magic)
+{
+ if (MAGIC_SIGALTSTK == magic)
+ return 1;
+ else
+ return 0;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic;
+ int err;
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /* Since we stacked the signal on a word boundary,
+ * then 'sp' should be word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->sp & 3)
+ goto badframe;
+
+ sf = (struct rt_sigframe __force __user *)(regs->sp);
+
+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ goto badframe;
+
+ err = restore_usr_regs(regs, sf);
+ err |= __get_user(magic, &sf->sigret_magic);
+ if (err)
+ goto badframe;
+
+ if (unlikely(is_do_ss_needed(magic)))
+ if (restore_altstack(&sf->uc.uc_stack))
+ goto badframe;
+
+ /* Don't restart from sigreturn */
+ syscall_wont_restart(regs);
+
+ return regs->r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs,
+ unsigned long framesize)
+{
+ unsigned long sp = regs->sp;
+ void __user *frame;
+
+ /* This is the X/Open sanctioned signal stack switching */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /* No matter what happens, 'sp' must be word
+ * aligned otherwise nasty things could happen
+ */
+
+ /* ATPCS B01 mandates 8-byte alignment */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /* Check that we can actually write to the signal frame */
+ if (!access_ok(VERIFY_WRITE, frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
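+
+/*
+ * A rough illustration of the math above: with a user sp of, say, 0x5ffc8c2c
+ * and framesize == sizeof(struct rt_sigframe), (sp - framesize) & ~7 clears
+ * the low 3 bits so the frame always starts on an 8-byte boundary at or
+ * below (sp - framesize).
+ */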
+
+/*
+ * translate the signal
+ */
+static inline int map_sig(int sig)
+{
+ struct thread_info *thread = current_thread_info();
+ if (thread->exec_domain && thread->exec_domain->signal_invmap
+ && sig < 32)
+ sig = thread->exec_domain->signal_invmap[sig];
+ return sig;
+}
+
+static int
+setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic = 0;
+ int err = 0;
+
+ sf = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
+ if (!sf)
+ return 1;
+
+ /*
+ * SA_SIGINFO requires 3 args to signal handler:
+ * #1: sig-no (common to any handler)
+ * #2: struct siginfo
+ * #3: struct ucontext (completely populated)
+ */
+ if (unlikely(ka->sa.sa_flags & SA_SIGINFO)) {
+ err |= copy_siginfo_to_user(&sf->info, info);
+ err |= __put_user(0, &sf->uc.uc_flags);
+ err |= __put_user(NULL, &sf->uc.uc_link);
+ err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
+
+ /* setup args 2 and 3 for user mode handler */
+ regs->r1 = (unsigned long)&sf->info;
+ regs->r2 = (unsigned long)&sf->uc;
+
+ /*
+		 * small optimization to avoid unconditionally calling
+		 * do_sigaltstack in the sigreturn path, now that we only have
+		 * rt_sigreturn
+ */
+ magic = MAGIC_SIGALTSTK;
+ }
+
+ /*
+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+ * during signal handler execution. This works for SA_SIGINFO as well
+ * although the semantics are now overloaded (the same reg state can be
+	 * inspected by userland: but are they allowed to fiddle with it ?)
+ */
+ err |= stash_usr_regs(sf, regs, set);
+ err |= __put_user(magic, &sf->sigret_magic);
+ if (err)
+ return err;
+
+ /* #1 arg to the user Signal handler */
+ regs->r0 = map_sig(signo);
+
+ /* setup PC of user space signal handler */
+ regs->ret = (unsigned long)ka->sa.sa_handler;
+
+ /*
+	 * handler returns using sigreturn stub provided already by userspace
+ */
+ BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
+ regs->blink = (unsigned long)ka->sa.sa_restorer;
+
+ /* User Stack for signal handler will be above the frame just carved */
+ regs->sp = (unsigned long)sf;
+
+ /*
+ * Bug 94183, Clear the DE bit, so that when signal handler
+ * starts to run, it doesn't use BTA
+ */
+ regs->status32 &= ~STATUS_DE_MASK;
+ regs->status32 |= STATUS_L_MASK;
+
+ return err;
+}
+
+static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
+{
+ switch (regs->r0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ /*
+ * ERESTARTNOHAND means that the syscall should
+ * only be restarted if there was no handler for
+ * the signal, and since we only get here if there
+ * is a handler, we don't restart
+ */
+ regs->r0 = -EINTR; /* ERESTART_xxx is internal */
+ break;
+
+ case -ERESTARTSYS:
+ /*
+ * ERESTARTSYS means to restart the syscall if
+ * there is no handler or the handler was
+ * registered with SA_RESTART
+ */
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+
+ case -ERESTARTNOINTR:
+ /*
+ * ERESTARTNOINTR means that the syscall should
+ * be called again after the signal handler returns.
+ * Setup reg state just as it was before doing the trap
+ * r0 has been clobbered with sys call ret code thus it
+ * needs to be reloaded with orig first arg to syscall
+ * in orig_r0. Rest of relevant reg-file:
+ * r8 (syscall num) and (r1 - r7) will be reset to
+ * their orig user space value when we ret from kernel
+ */
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ break;
+ }
+}
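+
+/*
+ * As a concrete illustration of the cases above: a blocking read() that is
+ * interrupted by a signal whose handler lacks SA_RESTART comes back with
+ * -EINTR in r0, whereas with SA_RESTART set, r0 is rewound to orig_r0 and
+ * ret is backed up by 4 so the trap insn that issued the syscall re-executes
+ * once the handler returns.
+ */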
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+ struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ret)
+ force_sigsegv(sig, current);
+ else
+ signal_delivered(sig, info, ka, regs, 0);
+}
+
+void do_signal(struct pt_regs *regs)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+ int restart_scall;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+ restart_scall = in_syscall(regs) && syscall_restartable(regs);
+
+ if (signr > 0) {
+ if (restart_scall) {
+ arc_restart_syscall(&ka, regs);
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+ handle_signal(signr, &ka, &info, regs);
+ return;
+ }
+
+ if (restart_scall) {
+ /* No handler for syscall: restart it */
+ if (regs->r0 == -ERESTARTNOHAND ||
+ regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+ regs->r8 = __NR_restart_syscall;
+ regs->ret -= 4;
+ }
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+
+ /* If there's no signal to deliver, restore the saved sigmask back */
+ restore_saved_sigmask();
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+ /*
+	 * ASM glue guarantees that this is only called when returning to
+ * user mode
+ */
+ if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644
index 000000000000..3af3e06dcf02
--- /dev/null
+++ b/arch/arc/kernel/smp.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RajeshwarR: Dec 11, 2007
+ * -- Added support for Inter Processor Interrupts
+ *
+ * Vineetg: Nov 1st, 2007
+ * -- Initial Write (Borrowed heavily from ARM)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock_types.h>
+#include <linux/reboot.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/mach_desc.h>
+
+arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+struct plat_smp_ops plat_smp_ops;
+
+/* XXX: per cpu ? Only needed once in early secondary boot */
+struct task_struct *secondary_idle_tsk;
+
+/* Called from start_kernel */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpu_possible(i, true);
+}
+
+/* called from init ( ) => process 1 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+
+}
+
+/*
+ * After power-up, a non Master CPU needs to wait for Master to kick start it
+ *
+ * The default implementation halts
+ *
+ * This relies on platform specific support allowing Master to directly set
+ * this CPU's PC (to be @first_lines_of_secondary) and kick start it.
+ *
+ * Lacking such h/w assist, platforms can override this function
+ * - make this function busy-spin on a token, eventually set by Master
+ * (from arc_platform_smp_wakeup_cpu())
+ * - Once token is available, jump to @first_lines_of_secondary
+ * (using inline asm).
+ *
+ * Alert: can NOT use stack here as it has not been determined/setup for CPU.
+ * If it turns out to be elaborate, it's better to code it in assembly
+ *
+ */
+void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+{
+ /*
+ * As a hack for debugging - since debugger will single-step over the
+	 * FLAG insn - wrap the halt itself in a self loop
+ */
+ __asm__ __volatile__(
+ "1: \n"
+ " flag 1 \n"
+ " b 1b \n");
+}
+
+const char *arc_platform_smp_cpuinfo(void)
+{
+ return plat_smp_ops.info;
+}
+
+/*
+ * The very first "C" code executed by secondary
+ * Called from asm stub in head.S
+ * "current"/R25 already setup by low level boot code
+ */
+void __cpuinit start_kernel_secondary(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ /* MMU, Caches, Vector Table, Interrupts etc */
+ setup_processor();
+
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+
+ pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+
+ arc_local_timer_setup(cpu);
+
+ local_irq_enable();
+ preempt_disable();
+ cpu_idle();
+}
+
+/*
+ * Called from kernel_init( ) -> smp_init( ) - for each CPU
+ *
+ * At this point, the Secondary Processor is "HALT"ed:
+ * -either it booted, but was halted in head.S
+ * -or it was configured to halt-on-reset
+ * So we need to wake it up.
+ *
+ * Essential requirements being where to run from (PC) and stack (SP)
+*/
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long wait_till;
+
+ secondary_idle_tsk = idle;
+
+ pr_info("Idle Task [%d] %p", cpu, idle);
+ pr_info("Trying to bring up CPU%u ...\n", cpu);
+
+ if (plat_smp_ops.cpu_kick)
+ plat_smp_ops.cpu_kick(cpu,
+ (unsigned long)first_lines_of_secondary);
+
+ /* wait for 1 sec after kicking the secondary */
+ wait_till = jiffies + HZ;
+ while (time_before(jiffies, wait_till)) {
+ if (cpu_online(cpu))
+ break;
+ }
+
+ if (!cpu_online(cpu)) {
+		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
+ return -1;
+ }
+
+ secondary_idle_tsk = NULL;
+
+ return 0;
+}
+
+/*
+ * not supported here
+ */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+/*****************************************************************************/
+/* Inter Processor Interrupt Handling */
+/*****************************************************************************/
+
+/*
+ * structures for inter-processor calls
+ * A Collection of single bit ipi messages
+ *
+ */
+
+/*
+ * TODO_rajesh investigate tlb message types.
+ * IPI Timer not needed because each ARC has an individual Interrupting Timer
+ */
+enum ipi_msg_type {
+ IPI_NOP = 0,
+ IPI_RESCHEDULE = 1,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_STOP
+};
+
+struct ipi_data {
+ unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+ unsigned long flags;
+ unsigned int cpu;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, callmap) {
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ set_bit(msg, &ipi->bits);
+ }
+
+ /* Call the platform specific cross-CPU call function */
+ if (plat_smp_ops.ipi_send)
+ plat_smp_ops.ipi_send((void *)callmap);
+
+ local_irq_restore(flags);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ ipi_send_msg(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ ipi_send_msg(mask, IPI_CALL_FUNC);
+}
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ machine_halt();
+}
+
+static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
+{
+ unsigned long msg = 0;
+
+ do {
+ msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+
+ switch (msg) {
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+
+}
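+
+/*
+ * For example, if ipi_send_msg() set both IPI_RESCHEDULE (bit 1) and
+ * IPI_CALL_FUNC (bit 2), ops arrives here as 0x6: find_next_bit() yields 1
+ * (scheduler_ipi), then 2 (generic_smp_call_function_interrupt), then runs
+ * past BITS_PER_LONG and the loop terminates.
+ */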
+
+/*
+ * arch-common ISR to handle for inter-processor interrupts
+ * Has hooks for platform specific IPI
+ */
+irqreturn_t do_IPI(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned long ops;
+
+ if (plat_smp_ops.ipi_clear)
+ plat_smp_ops.ipi_clear(cpu, irq);
+
+ /*
+ * XXX: is this loop really needed
+	 * And do we need to move ipi_clear inside
+ */
+ while ((ops = xchg(&ipi->bits, 0)) != 0)
+ __do_IPI(&ops, ipi, cpu);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ */
+static DEFINE_PER_CPU(int, ipi_dev);
+int smp_ipi_irq_setup(int cpu, int irq)
+{
+ int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
+ return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
new file mode 100644
index 000000000000..a63ff842564b
--- /dev/null
+++ b/arch/arc/kernel/stacktrace.c
@@ -0,0 +1,254 @@
+/*
+ * stacktrace.c : stacktracing APIs needed by rest of kernel
+ * (wrappers over ARC dwarf based unwinder)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: aug 2009
+ * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
+ * for displaying task's kernel mode call stack in /proc/<pid>/stack
+ * -Iterator based approach to have single copy of unwinding core and APIs
+ * needing unwinding, implement the logic in iterator regarding:
+ * = which frame onwards to start capture
+ * = which frame to stop capturing (wchan)
+ * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
+ *
+ * vineetg: March 2009
+ * -Implemented correct versions of thread_saved_pc() and get_wchan()
+ *
+ * rajeshwarr: 2008
+ * -Initial implementation
+ */
+
+#include <linux/ptrace.h>
+#include <linux/export.h>
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <asm/arcregs.h>
+#include <asm/unwind.h>
+#include <asm/switch_to.h>
+
+/*-------------------------------------------------------------------------
+ * Unwinder Iterator
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+static void seed_unwind_frame_info(struct task_struct *tsk,
+ struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
+{
+ if (tsk == NULL && regs == NULL) {
+ unsigned long fp, sp, blink, ret;
+ frame_info->task = current;
+
+ __asm__ __volatile__(
+ "mov %0,r27\n\t"
+ "mov %1,r28\n\t"
+ "mov %2,r31\n\t"
+ "mov %3,r63\n\t"
+ : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
+ );
+
+ frame_info->regs.r27 = fp;
+ frame_info->regs.r28 = sp;
+ frame_info->regs.r31 = blink;
+ frame_info->regs.r63 = ret;
+ frame_info->call_frame = 0;
+ } else if (regs == NULL) {
+
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = KSTK_FP(tsk);
+ frame_info->regs.r28 = KSTK_ESP(tsk);
+ frame_info->regs.r31 = KSTK_BLINK(tsk);
+ frame_info->regs.r63 = (unsigned int)__switch_to;
+
+ /* In the prologue of __switch_to, first FP is saved on stack
+ * and then SP is copied to FP. Dwarf assumes cfa as FP based
+ * but we didn't save FP. The value retrieved above is FP's
+ * state in previous frame.
+ * As a work around for this, we unwind from __switch_to start
+ * and adjust SP accordingly. The other limitation is that
+		 * dwarf rules are not generated for the inline assembly code
+		 * in the __switch_to macro
+ */
+ frame_info->regs.r27 = 0;
+ frame_info->regs.r28 += 64;
+ frame_info->call_frame = 0;
+
+ } else {
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ }
+}
+
+#endif
+
+static noinline unsigned int
+arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *), void *arg)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int ret = 0;
+ unsigned int address;
+ struct unwind_frame_info frame_info;
+
+ seed_unwind_frame_info(tsk, regs, &frame_info);
+
+ while (1) {
+ address = UNW_PC(&frame_info);
+
+ if (address && __kernel_text_address(address)) {
+ if (consumer_fn(address, arg) == -1)
+ break;
+ }
+
+ ret = arc_unwind(&frame_info);
+
+ if (ret == 0) {
+ frame_info.regs.r63 = frame_info.regs.r31;
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return address; /* return the last address it saw */
+#else
+	/* On ARC, only the Dwarf based unwinder works. fp based backtracing
+	 * is not possible (even with -fno-omit-frame-pointer) because of the
+	 * way the function prologue is setup (callee regs saved and then fp
+	 * set, not the other way around)
+	 */
+ pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ return 0;
+
+#endif
+}
+
+/*-------------------------------------------------------------------------
+ * callbacks called by unwinder iterator to implement kernel APIs
+ *
+ * The callback can return -1 to force the iterator to stop, which by default
+ * keeps going till the bottom-most frame.
+ *-------------------------------------------------------------------------
+ */
+
+/* Call-back which plugs into unwinding core to dump the stack in
+ * case of panic/OOPs/BUG etc
+ */
+static int __print_sym(unsigned int address, void *unused)
+{
+ __print_symbol(" %s\n", address);
+ return 0;
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/* Call-back which plugs into unwinding core to capture the
+ * traces needed by kernel on /proc/<pid>/stack
+ */
+static int __collect_all(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+static int __collect_all_but_sched(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (in_sched_functions(address))
+ return 0;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+#endif
+
+static int __get_first_nonsched(unsigned int address, void *unused)
+{
+ if (in_sched_functions(address))
+ return 0;
+
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * APIs expected by various kernel sub-systems
+ *-------------------------------------------------------------------------
+ */
+
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+{
+ pr_info("\nStack Trace:\n");
+ arc_unwind_core(tsk, regs, __print_sym, NULL);
+}
+EXPORT_SYMBOL(show_stacktrace);
+
+/* Expected by sched Code */
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ show_stacktrace(tsk, NULL);
+}
+
+/* Expected by Rest of kernel code */
+void dump_stack(void)
+{
+ show_stacktrace(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+/* Another API expected by the scheduler, shows up in "ps" as Wait Channel
+ * Of course just returning schedule( ) would be pointless so unwind until
+ * the function is outside scheduler code
+ */
+unsigned int get_wchan(struct task_struct *tsk)
+{
+ return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/*
+ * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
+ * A typical use is when /proc/<pid>/stack is queried by userland
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ arc_unwind_core(current, NULL, __collect_all, trace);
+}
+#endif
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
new file mode 100644
index 000000000000..f6bdd07583f3
--- /dev/null
+++ b/arch/arc/kernel/sys.c
@@ -0,0 +1,18 @@
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#define sys_clone sys_clone_wrapper
+#define sys_fork sys_fork_wrapper
+#define sys_vfork sys_vfork_wrapper
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[NR_syscalls] = {
+ [0 ... NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
new file mode 100644
index 000000000000..f13f72807aa5
--- /dev/null
+++ b/arch/arc/kernel/time.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2011
+ * -sched_clock( ) no longer jiffies based. Uses the same clocksource
+ * as gtod
+ *
+ * Rajeshwarr/Vineetg: Mar 2008
+ * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
+ * for arch independent gettimeofday()
+ * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
+ *
+ * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
+ */
+
+/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
+ * Each can be programmed to go from @count to @limit and optionally
+ * interrupt when that happens.
+ * A write to Control Register clears the Interrupt
+ *
+ * We've designated TIMER0 for events (clockevents)
+ * while TIMER1 for free running (clocksource)
+ *
+ * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
+ */
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define ARC_TIMER_MAX 0xFFFFFFFF
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_HAS_RTSC
+
+int __cpuinit arc_counter_setup(void)
+{
+ /* RTSC insn taps into cpu clk, needs no setup */
+
+ /* For SMP, only allowed if cross-core-sync, hence usable as cs */
+ return 1;
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ struct { u32 high, low; };
+#else
+ struct { u32 low, high; };
+#endif
+ cycle_t full;
+ } stamp;
+
+ flags = arch_local_irq_save();
+
+ __asm__ __volatile(
+ " .extCoreRegister tsch, 58, r, cannot_shortcut \n"
+ " rtsc %0, 0 \n"
+ " mov %1, 0 \n"
+ : "=r" (stamp.low), "=r" (stamp.high));
+
+ arch_local_irq_restore(flags);
+
+ return stamp.full;
+}
+
+static struct clocksource arc_counter = {
+ .name = "ARC RTSC",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#else /* !CONFIG_ARC_HAS_RTSC */
+
+static bool is_usable_as_clocksource(void)
+{
+#ifdef CONFIG_SMP
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/*
+ * set 32bit TIMER1 to keep counting monotonically and wraparound
+ */
+int __cpuinit arc_counter_setup(void)
+{
+ write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
+ write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+ return is_usable_as_clocksource();
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter = {
+ .name = "ARC Timer1",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#endif
+
+/********** Clock Event Device *********/
+
+/*
+ * Arm the timer to interrupt after @limit cycles
+ * The distinction for oneshot/periodic is done in arc_timer_event_ack() below
+ */
+static void arc_timer_event_setup(unsigned int limit)
+{
+ write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
+ write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
+
+ write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+/*
+ * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
+ * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
+ * -Rearming is done by setting the IE bit
+ *
+ * Small optimisation: Normal code would have been
+ * if (irq_reenable)
+ * CTRL_REG = (IE | NH);
+ * else
+ * CTRL_REG = NH;
+ * However since IE is BIT0 we can fold the branch
+ */
+static void arc_timer_event_ack(unsigned int irq_reenable)
+{
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+}
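+
+/*
+ * i.e. for the periodic case irq_reenable is 1, which (IE being BIT0) is the
+ * IE bit itself, so the single write above is effectively (IE | NH); for
+ * oneshot it is 0 and the write is just NH, which still acks the interrupt
+ * without re-arming the timer.
+ */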
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ arc_timer_event_setup(delta);
+ return 0;
+}
+
+static void arc_clkevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ arc_timer_event_setup(arc_get_core_freq() / HZ);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .mode = CLOCK_EVT_MODE_UNUSED,
+ .rating = 300,
+ .irq = TIMER0_IRQ, /* hardwired, no need for resources */
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_mode = arc_clkevent_set_mode,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+
+ arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
+ clk->event_handler(clk);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction arc_timer_irq = {
+ .name = "Timer0 (clock-evt-dev)",
+ .flags = IRQF_TIMER | IRQF_PERCPU,
+ .handler = timer_irq_handler,
+};
+
+/*
+ * Setup the local event timer for @cpu
+ * N.B. weak so that some exotic ARC SoCs can completely override it
+ */
+void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
+{
+ struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
+
+ clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
+
+ clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
+ clk->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(clk);
+
+ /*
+ * setup the per-cpu timer IRQ handler - for all cpus
+ * For non boot CPU explicitly unmask at intc
+ * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
+ */
+ if (!cpu)
+ setup_irq(TIMER0_IRQ, &arc_timer_irq);
+ else
+ arch_unmask_irq(TIMER0_IRQ);
+}
+
+/*
+ * Called from start_kernel() - boot CPU only
+ *
+ * -Sets up h/w timers as applicable on boot cpu
+ * -Also sets up any global state needed for timer subsystem:
+ * - for "counting" timer, registers a clocksource, usable across CPUs
+ * (provided that underlying counter h/w is synchronized across cores)
+ * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
+ */
+void __init time_init(void)
+{
+ /*
+ * sets up the timekeeping free-flowing counter which also returns
+ * whether the counter is usable as clocksource
+ */
+ if (arc_counter_setup())
+ /*
+		 * CLK up to 4.29 GHz can be safely represented in 32 bits
+ * because Max 32 bit number is 4,294,967,295
+ */
+ clocksource_register_hz(&arc_counter, arc_get_core_freq());
+
+ /* sets up the periodic event timer */
+ arc_local_timer_setup(smp_processor_id());
+
+ if (machine_desc->init_time)
+ machine_desc->init_time();
+}
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
new file mode 100644
index 000000000000..7496995371e8
--- /dev/null
+++ b/arch/arc/kernel/traps.c
@@ -0,0 +1,170 @@
+/*
+ * Traps/Non-MMU Exception handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -user-space unaligned access emulation
+ *
+ * Rahul Trivedi: Codito Technologies 2004
+ */
+
+#include <linux/sched.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/kprobes.h>
+#include <asm/unaligned.h>
+#include <asm/kgdb.h>
+
+void __init trap_init(void)
+{
+ return;
+}
+
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg)
+{
+ show_kernel_fault_diag(str, regs, address, cause_reg);
+
+ /* DEAD END */
+ __asm__("flag 1");
+}
+
+/*
+ * Helper called for bulk of exceptions NOT needing specific handling
+ * -for user faults enqueues requested signal
+ * -for kernel, chk if due to copy_(to|from)_user, otherwise die()
+ */
+static noinline int handle_exception(unsigned long cause, char *str,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ if (user_mode(regs)) {
+ struct task_struct *tsk = current;
+
+ tsk->thread.fault_address = (__force unsigned int)info->si_addr;
+ tsk->thread.cause_code = cause;
+
+ force_sig_info(info->si_signo, info, tsk);
+
+ } else {
+ /* If not due to copy_(to|from)_user, we are doomed */
+ if (fixup_exception(regs))
+ return 0;
+
+ die(str, regs, (unsigned long)info->si_addr, cause);
+ }
+
+ return 1;
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode) \
+int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
+{ \
+ siginfo_t info = { \
+ .si_signo = signr, \
+ .si_errno = 0, \
+ .si_code = sicode, \
+ .si_addr = (void __user *)address, \
+ }; \
+ return handle_exception(cause, str, regs, &info);\
+}
+
+/*
+ * Entry points for exceptions NOT needing specific handling
+ */
+DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
+DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
+DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
+DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
+DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+/*
+ * Entry Point for Misaligned Data access Exception, for emulating in software
+ */
+int do_misaligned_access(unsigned long cause, unsigned long address,
+ struct pt_regs *regs, struct callee_regs *cregs)
+{
+ if (misaligned_fixup(address, regs, cause, cregs) != 0) {
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)address;
+ return handle_exception(cause, "Misaligned Access", regs,
+ &info);
+ }
+ return 0;
+}
+
+#else
+DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
+#endif
+
+/*
+ * Entry point for miscellaneous errors such as Nested Exceptions
+ * -Duplicate TLB entry is handled separately though
+ */
+void do_machine_check_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ die("Machine Check Exception", regs, address, cause);
+}
+
+
+/*
+ * Entry point for traps induced by ARCompact TRAP_S <n> insn
+ * This is same family as TRAP0/SWI insn (use the same vector).
+ * The only difference being SWI insn takes no operand, while TRAP_S does,
+ * which reflects in the ECR Reg as an 8 bit param.
+ * Thus TRAP_S <n> can be used for specific purposes
+ * -1 used for software breakpointing (gdb)
+ * -2 used by kprobes
+ */
+void do_non_swi_trap(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ unsigned int param = cause & 0xff;
+
+ switch (param) {
+ case 1:
+ trap_is_brkpt(cause, address, regs);
+ break;
+
+ case 2:
+ trap_is_kprobe(param, address, regs);
+ break;
+
+ case 3:
+ case 4:
+ kgdb_trap(regs, param);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Entry point for Instruction Error Exception
+ * -For a corner case, ARC kprobes implementation resorts to using
+ * this exception, hence the check
+ */
+void do_insterror_or_kprobe(unsigned long cause,
+ unsigned long address,
+ struct pt_regs *regs)
+{
+ /* Check if this exception is caused by kprobes */
+ if (notify_die(DIE_IERR, "kprobe_ierr", regs, address,
+ cause, SIGILL) == NOTIFY_STOP)
+ return;
+
+ insterror_is_error(cause, address, regs);
+}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
new file mode 100644
index 000000000000..7c10873c311f
--- /dev/null
+++ b/arch/arc/kernel/troubleshoot.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ */
+
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/fs_struct.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <asm/arcregs.h>
+
+/*
+ * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
+ * -Prints 3 regs per line and a CR.
+ * -To continue, callee regs right after scratch, special handling of CR
+ */
+static noinline void print_reg_file(long *reg_rev, int start_num)
+{
+ unsigned int i;
+ char buf[512];
+ int n = 0, len = sizeof(buf);
+
+	/* weird loop because pt_regs stores regs reversed: r12..r0, r25..r13 */
+ for (i = start_num; i < start_num + 13; i++) {
+ n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
+ i, (unsigned long)*reg_rev);
+
+ if (((i + 1) % 3) == 0)
+ n += scnprintf(buf + n, len - n, "\n");
+
+ reg_rev--;
+ }
+
+ if (start_num != 0)
+ n += scnprintf(buf + n, len - n, "\n\n");
+
+ pr_info("%s", buf);
+}
+
+static void show_callee_regs(struct callee_regs *cregs)
+{
+ print_reg_file(&(cregs->r13), 13);
+}
+
+void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+{
+ struct path path;
+ char *path_nm = NULL;
+ struct mm_struct *mm;
+ struct file *exe_file;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ mmput(mm);
+
+ if (exe_file) {
+ path = exe_file->f_path;
+ path_get(&exe_file->f_path);
+ fput(exe_file);
+ path_nm = d_path(&path, buf, 255);
+ path_put(&path);
+ }
+
+done:
+ pr_info("%s, TGID %u\n", path_nm, tsk->tgid);
+}
+EXPORT_SYMBOL(print_task_path_n_nm);
+
+static void show_faulting_vma(unsigned long address, char *buf)
+{
+ struct vm_area_struct *vma;
+ struct inode *inode;
+ unsigned long ino = 0;
+ dev_t dev = 0;
+ char *nm = buf;
+
+ vma = find_vma(current->active_mm, address);
+
+ /* check against the find_vma( ) behaviour which returns the next VMA
+	 * if the containing VMA is not found
+ */
+ if (vma && (vma->vm_start <= address)) {
+ struct file *file = vma->vm_file;
+ if (file) {
+ struct path *path = &file->f_path;
+ nm = d_path(path, buf, PAGE_SIZE - 1);
+ inode = vma->vm_file->f_path.dentry->d_inode;
+ dev = inode->i_sb->s_dev;
+ ino = inode->i_ino;
+ }
+ pr_info(" @off 0x%lx in [%s]\n"
+ " VMA: 0x%08lx to 0x%08lx\n\n",
+ address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
+ } else
+ pr_info(" @No matching VMA found\n");
+}
+
+static void show_ecr_verbose(struct pt_regs *regs)
+{
+ unsigned int vec, cause_code, cause_reg;
+ unsigned long address;
+
+ cause_reg = current->thread.cause_code;
+ pr_info("\n[ECR]: 0x%08x => ", cause_reg);
+
+ /* For Data fault, this is data address not instruction addr */
+ address = current->thread.fault_address;
+
+ vec = cause_reg >> 16;
+ cause_code = (cause_reg >> 8) & 0xFF;
+
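+	/*
+	 * e.g. a cause_reg of 0x00230100 decodes with the shifts above to
+	 * vec 0x23 and cause_code 0x01; which ECR_V_* vector that corresponds
+	 * to is sorted out by the checks below.
+	 */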
+ /* For DTLB Miss or ProtV, display the memory involved too */
+ if (vec == ECR_V_DTLB_MISS) {
+ pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
+ (cause_code == 0x01) ? "Read From" :
+ ((cause_code == 0x02) ? "Write to" : "EX"),
+ address, regs->ret);
+ } else if (vec == ECR_V_ITLB_MISS) {
+ pr_cont("Insn could not be fetched\n");
+ } else if (vec == ECR_V_MACH_CHK) {
+ pr_cont("%s\n", (cause_code == 0x0) ?
+ "Double Fault" : "Other Fatal Err");
+
+ } else if (vec == ECR_V_PROTV) {
+ if (cause_code == ECR_C_PROTV_INST_FETCH)
+ pr_cont("Execute from Non-exec Page\n");
+ else if (cause_code == ECR_C_PROTV_LOAD)
+ pr_cont("Read from Non-readable Page\n");
+ else if (cause_code == ECR_C_PROTV_STORE)
+ pr_cont("Write to Non-writable Page\n");
+ else if (cause_code == ECR_C_PROTV_XCHG)
+ pr_cont("Data exchange protection violation\n");
+ else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
+ } else if (vec == ECR_V_INSN_ERR) {
+ pr_cont("Illegal Insn\n");
+ } else {
+ pr_cont("Check Programmer's Manual\n");
+ }
+}
+
+/************************************************************************
+ * API called by rest of kernel
+ ***********************************************************************/
+
+void show_regs(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct callee_regs *cregs;
+ char *buf;
+
+ buf = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!buf)
+ return;
+
+ print_task_path_n_nm(tsk, buf);
+
+ if (current->thread.cause_code)
+ show_ecr_verbose(regs);
+
+ pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
+ pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
+
+ show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+
+ /* can't use print_vma_addr() yet as it doesn't check for
+ * non-inclusive vma
+ */
+
+ /* print special regs */
+ pr_info("status32: 0x%08lx\n", regs->status32);
+ pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
+ pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
+ regs->bta, regs->blink);
+ pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
+
+ /* print regs->r0 thru regs->r12
+ * Sequential printing was generating horrible code
+ */
+ print_reg_file(&(regs->r0), 0);
+
+ /* If Callee regs were saved, display them too */
+ cregs = (struct callee_regs *)current->thread.callee_reg;
+ if (cregs)
+ show_callee_regs(cregs);
+
+ free_page((unsigned long)buf);
+}
+
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg)
+{
+ current->thread.fault_address = address;
+ current->thread.cause_code = cause_reg;
+
+ /* Caller and Callee regs */
+ show_regs(regs);
+
+ /* Show stack trace if this Fatality happened in kernel mode */
+ if (!user_mode(regs))
+ show_stacktrace(current, regs);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/debugfs.h>
+
+static struct dentry *test_dentry;
+static struct dentry *test_dir;
+static struct dentry *test_u32_dentry;
+
+static u32 clr_on_read = 1;
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+u32 numitlb, numdtlb, num_pte_not_present;
+
+static int fill_display_data(char *kbuf)
+{
+ size_t num = 0;
+ num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
+ num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
+ num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
+
+ if (clr_on_read)
+ numitlb = numdtlb = num_pte_not_present = 0;
+
+ return num;
+}
+
+static int tlb_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = (void *)__get_free_page(GFP_KERNEL);
+ return 0;
+}
+
+/* called on user read(): display the counters */
+static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
+ char __user *user_buf, /* user buffer */
+ size_t len, /* length of buffer */
+ loff_t *offset) /* offset in the file */
+{
+ size_t num;
+ char *kbuf = (char *)file->private_data;
+
+	/* All of the data can be shoved in one iteration */
+ if (*offset != 0)
+ return 0;
+
+ num = fill_display_data(kbuf);
+
+	/* simple_read_from_buffer() is a helper for copying to user space.
+ It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
+ @3 (offset) into the user space address starting at @1 (user_buf).
+ @5 (len) is max size of user buffer
+ */
+ return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
+}
+
+/* called on user write : clears the counters */
+static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
+ size_t length, loff_t *offset)
+{
+ numitlb = numdtlb = num_pte_not_present = 0;
+ return length;
+}
+
+static int tlb_stats_close(struct inode *inode, struct file *file)
+{
+ free_page((unsigned long)(file->private_data));
+ return 0;
+}
+
+static const struct file_operations tlb_stats_file_ops = {
+ .read = tlb_stats_output,
+ .write = tlb_stats_clear,
+ .open = tlb_stats_open,
+ .release = tlb_stats_close
+};
+#endif
+
+static int __init arc_debugfs_init(void)
+{
+ test_dir = debugfs_create_dir("arc", NULL);
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
+ &tlb_stats_file_ops);
+#endif
+
+ test_u32_dentry =
+ debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
+
+ return 0;
+}
+
+module_init(arc_debugfs_init);
+
+static void __exit arc_debugfs_exit(void)
+{
+ debugfs_remove(test_u32_dentry);
+ debugfs_remove(test_dentry);
+ debugfs_remove(test_dir);
+}
+module_exit(arc_debugfs_exit);
+
+#endif
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
new file mode 100644
index 000000000000..4cd81633febd
--- /dev/null
+++ b/arch/arc/kernel/unaligned.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg : May 2011
+ * -Adapted (from .26 to .35)
+ * -original contribution by Tim.yao@amlogic.com
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/disasm.h>
+
+#define __get8_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.ab %1, [%2, 1]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, 1\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 3b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v ; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define get32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v << 0; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
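+/*
+ * e.g. assuming a little-endian configuration, a 32-bit word 0x11223344
+ * stored at an odd address is fetched above as the bytes 0x44, 0x33, 0x22,
+ * 0x11 and reassembled by the shifts into 0x11223344; the put macros below
+ * do the reverse, emitting one byte at a time.
+ */
+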
+#define put16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ \
+ __asm__( \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb %1, [%2]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: mov %0, 1\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ __asm__( \
+ \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "3: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "4: stb %1, [%2]\n" \
+ "5:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "6: mov %0, 1\n" \
+ " b 5b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
+
+static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ int val;
+
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
+
+ if (state->aa == 2)
+ state->src2 = 0;
+ }
+
+ if (state->zz == 0) {
+ get32_unaligned_check(val, state->src1 + state->src2);
+ } else {
+ get16_unaligned_check(val, state->src1 + state->src2);
+
+ if (state->x)
+ val = (val << 16) >> 16;
+ }
+
+ if (state->pref == 0)
+ set_reg(state->dest, val, regs, cregs);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
+
+ if (state->aa == 3)
+ state->src3 = 0;
+ } else if (state->aa == 3) {
+ if (state->zz == 2) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
+ regs, cregs);
+ } else if (!state->zz) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
+ regs, cregs);
+ } else {
+ goto fault;
+ }
+ }
+
+ /* write fix-up */
+ if (!state->zz)
+ put32_unaligned_check(state->src1, state->src2 + state->src3);
+ else
+ put16_unaligned_check(state->src1, state->src2 + state->src3);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+/*
+ * Handle an unaligned access
+ * Returns 0 if successfully handled, 1 if some error happened
+ */
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ struct disasm_state state;
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+ if (!user_mode(regs) || !unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
+ pr_warn_once("%s(%d) made unaligned access which was emulated"
+			     " by kernel assist.\nThis can degrade application"
+			     " performance significantly.\nTo enable further"
+			     " logging of such instances, please\n"
+ " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
+ get_task_comm(buf, current), task_pid_nr(current));
+ } else {
+ /* Add rate limiting if it gets down to it */
+ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
+ get_task_comm(buf, current), task_pid_nr(current),
+ address, regs->ret);
+
+ }
+
+ disasm_instr(regs->ret, &state, 1, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ /* ldb/stb should not have unaligned exception */
+ if ((state.zz == 1) || (state.di))
+ goto fault;
+
+ if (!state.write)
+ fixup_load(&state, regs, cregs);
+ else
+ fixup_store(&state, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ if (delay_mode(regs)) {
+ regs->ret = regs->bta;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
+ }
+
+ return 0;
+
+fault:
+ pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
+ state.words[0], address);
+
+ return 1;
+}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
new file mode 100644
index 000000000000..a8d02223da44
--- /dev/null
+++ b/arch/arc/kernel/unwind.c
@@ -0,0 +1,1329 @@
+/*
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/unwind.h>
+
+extern char __start_unwind[], __end_unwind[];
+/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
+
+/* #define UNWIND_DEBUG */
+
+#ifdef UNWIND_DEBUG
+int dbg_unw;
+#define unw_debug(fmt, ...) \
+do { \
+ if (dbg_unw) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+#else
+#define unw_debug(fmt, ...)
+#endif
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+ % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ + offsetof(struct unwind_frame_info, f) \
+ / FIELD_SIZEOF(struct unwind_frame_info, f), \
+ FIELD_SIZEOF(struct unwind_frame_info, f) \
+ }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+ unsigned offs:BITS_PER_LONG / 2;
+ unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+	UNW_REGISTER_INFO
+};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_GNU_window_save 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+#define DW_EH_PE_FORM 0x07
+#define DW_EH_PE_native 0x00
+#define DW_EH_PE_leb128 0x01
+#define DW_EH_PE_data2 0x02
+#define DW_EH_PE_data4 0x03
+#define DW_EH_PE_data8 0x04
+#define DW_EH_PE_signed 0x08
+#define DW_EH_PE_ADJUST 0x70
+#define DW_EH_PE_abs 0x00
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit 0xff
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+
+static struct unwind_table {
+ struct {
+ unsigned long pc;
+ unsigned long range;
+ } core, init;
+ const void *address;
+ unsigned long size;
+ const unsigned char *header;
+ unsigned long hdrsz;
+ struct unwind_table *link;
+ const char *name;
+} root_table;
+
+struct unwind_item {
+ enum item_location {
+ Nowhere,
+ Memory,
+ Register,
+ Value
+ } where;
+ uleb128_t value;
+};
+
+struct unwind_state {
+ uleb128_t loc, org;
+ const u8 *cieStart, *cieEnd;
+ uleb128_t codeAlign;
+ sleb128_t dataAlign;
+ struct cfa {
+ uleb128_t reg, offs;
+ } cfa;
+ struct unwind_item regs[ARRAY_SIZE(reg_info)];
+ unsigned stackDepth:8;
+ unsigned version:8;
+ const u8 *label;
+ const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+static struct unwind_table *find_table(unsigned long pc)
+{
+ struct unwind_table *table;
+
+ for (table = &root_table; table; table = table->link)
+ if ((pc >= table->core.pc
+ && pc < table->core.pc + table->core.range)
+ || (pc >= table->init.pc
+ && pc < table->init.pc + table->init.range))
+ break;
+
+ return table;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+ const void *end, signed ptrType);
+
+static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+ const void *table_start, unsigned long table_size,
+ const u8 *header_start, unsigned long header_size)
+{
+ const u8 *ptr = header_start + 4;
+ const u8 *end = header_start + header_size;
+
+ table->core.pc = (unsigned long)core_start;
+ table->core.range = core_size;
+ table->init.pc = (unsigned long)init_start;
+ table->init.range = init_size;
+ table->address = table_start;
+ table->size = table_size;
+
+ /* See if the linker provided table looks valid. */
+ if (header_size <= 4
+ || header_start[0] != 1
+ || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
+ || header_start[2] == DW_EH_PE_omit
+ || read_pointer(&ptr, end, header_start[2]) <= 0
+ || header_start[3] == DW_EH_PE_omit)
+ header_start = NULL;
+
+ table->hdrsz = header_size;
+ smp_wmb();
+ table->header = header_start;
+ table->link = NULL;
+ table->name = name;
+}
+
+void __init arc_unwind_init(void)
+{
+ init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
+ __start_unwind, __end_unwind - __start_unwind,
+ NULL, 0);
+ /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+}
+
+static const u32 bad_cie, not_fde;
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
+static signed fde_pointer_type(const u32 *cie);
+
+struct eh_frame_hdr_table_entry {
+ unsigned long start, fde;
+};
+
+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
+{
+ const struct eh_frame_hdr_table_entry *e1 = p1;
+ const struct eh_frame_hdr_table_entry *e2 = p2;
+
+ return (e1->start > e2->start) - (e1->start < e2->start);
+}
+
+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
+{
+ struct eh_frame_hdr_table_entry *e1 = p1;
+ struct eh_frame_hdr_table_entry *e2 = p2;
+ unsigned long v;
+
+ v = e1->start;
+ e1->start = e2->start;
+ e2->start = v;
+ v = e1->fde;
+ e1->fde = e2->fde;
+ e2->fde = v;
+}
+
+static void __init setup_unwind_table(struct unwind_table *table,
+ void *(*alloc) (unsigned long))
+{
+ const u8 *ptr;
+ unsigned long tableSize = table->size, hdrSize;
+ unsigned n;
+ const u32 *fde;
+ struct {
+ u8 version;
+ u8 eh_frame_ptr_enc;
+ u8 fde_count_enc;
+ u8 table_enc;
+ unsigned long eh_frame_ptr;
+ unsigned int fde_count;
+ struct eh_frame_hdr_table_entry table[];
+ } __attribute__ ((__packed__)) *header;
+
+ if (table->header)
+ return;
+
+ if (table->hdrsz)
+ pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
+ table->name);
+
+ if (tableSize & (sizeof(*fde) - 1))
+ return;
+
+ for (fde = table->address, n = 0;
+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ const u32 *cie = cie_for_fde(fde, table);
+ signed ptrType;
+
+ if (cie == &not_fde)
+ continue;
+ if (cie == NULL || cie == &bad_cie)
+ return;
+ ptrType = fde_pointer_type(cie);
+ if (ptrType < 0)
+ return;
+
+ ptr = (const u8 *)(fde + 2);
+ if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
+ ptrType)) {
+ /* FIXME_Rajesh We have 4 instances of null addresses
+ * instead of the initial loc addr
+ * return;
+ */
+ }
+ ++n;
+ }
+
+ if (tableSize || !n)
+ return;
+
+ hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ + 2 * n * sizeof(unsigned long);
+ header = alloc(hdrSize);
+ if (!header)
+ return;
+ header->version = 1;
+ header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
+ header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
+ BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
+ % __alignof(typeof(header->fde_count)));
+ header->fde_count = n;
+
+ BUILD_BUG_ON(offsetof(typeof(*header), table)
+ % __alignof(typeof(*header->table)));
+ for (fde = table->address, tableSize = table->size, n = 0;
+ tableSize;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ /* const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); */
+ const u32 *cie = (const u32 *)(fde[1]);
+
+ if (fde[1] == 0xffffffff)
+ continue; /* this is a CIE */
+ ptr = (const u8 *)(fde + 2);
+ header->table[n].start = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde,
+ fde_pointer_type(cie));
+ header->table[n].fde = (unsigned long)fde;
+ ++n;
+ }
+ WARN_ON(n != header->fde_count);
+
+ sort(header->table,
+ n,
+ sizeof(*header->table),
+ cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
+
+ table->hdrsz = hdrSize;
+ smp_wmb();
+ table->header = (const void *)header;
+}
+
+static void *__init balloc(unsigned long sz)
+{
+ return __alloc_bootmem_nopanic(sz,
+ sizeof(unsigned int),
+ __pa(MAX_DMA_ADDRESS));
+}
+
+void __init arc_unwind_setup(void)
+{
+ setup_unwind_table(&root_table, balloc);
+}
+
+#ifdef CONFIG_MODULES
+
+static struct unwind_table *last_table;
+
+/* Must be called with module_mutex held. */
+void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size)
+{
+ struct unwind_table *table;
+
+ if (table_size <= 0)
+ return NULL;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ init_unwind_table(table, module->name,
+ module->module_core, module->core_size,
+ module->module_init, module->init_size,
+ table_start, table_size,
+ NULL, 0);
+
+#ifdef UNWIND_DEBUG
+ unw_debug("Table added for [%s] %lx %lx\n",
+ module->name, table->core.pc, table->core.range);
+#endif
+ if (last_table)
+ last_table->link = table;
+ else
+ root_table.link = table;
+ last_table = table;
+
+ return table;
+}
+
+struct unlink_table_info {
+ struct unwind_table *table;
+ int init_only;
+};
+
+static int unlink_table(void *arg)
+{
+ struct unlink_table_info *info = arg;
+ struct unwind_table *table = info->table, *prev;
+
+ for (prev = &root_table; prev->link && prev->link != table;
+ prev = prev->link)
+ ;
+
+ if (prev->link) {
+ if (info->init_only) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ info->table = NULL;
+ } else {
+ prev->link = table->link;
+ if (!prev->link)
+ last_table = prev;
+ }
+ } else
+ info->table = NULL;
+
+ return 0;
+}
+
+/* Must be called with module_mutex held. */
+void unwind_remove_table(void *handle, int init_only)
+{
+ struct unwind_table *table = handle;
+ struct unlink_table_info info;
+
+ if (!table || table == &root_table)
+ return;
+
+ if (init_only && table == last_table) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ return;
+ }
+
+ info.table = table;
+ info.init_only = init_only;
+
+ unlink_table(&info); /* XXX: SMP */
+ kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ uleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (uleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur++ & 0x80))
+ break;
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ sleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (sleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur & 0x80)) {
+ value |= -(*cur++ & 0x40) << shift;
+ break;
+ }
+ }
+ *pcur = cur;
+
+ return value;
+}
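+
+/*
+ * Worked example (illustrative, not part of the original source): the byte
+ * sequence 0xe5 0x8e 0x26 decodes as an unsigned LEB128 value
+ *
+ *	(0xe5 & 0x7f)       =    0x65
+ *	(0x8e & 0x7f) <<  7 =   0x700
+ *	(0x26 & 0x7f) << 14 = 0x98000
+ *
+ * i.e. 0x98765 (624485). The top bit of every byte but the last means
+ * "more bytes follow", which is what the loops above test via (*cur & 0x80).
+ */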
+
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
+{
+ const u32 *cie;
+
+ if (!*fde || (*fde & (sizeof(*fde) - 1)))
+ return &bad_cie;
+
+ if (fde[1] == 0xffffffff)
+ return &not_fde; /* this is a CIE */
+
+ if ((fde[1] & (sizeof(*fde) - 1)))
+/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
+ return NULL; /* this is not a valid FDE */
+
+ /* cie = fde + 1 - fde[1] / sizeof(*fde); */
+ cie = (u32 *) fde[1];
+
+ if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
+ || (*cie & (sizeof(*cie) - 1))
+ || (cie[1] != 0xffffffff))
+ return NULL; /* this is not a (valid) CIE */
+ return cie;
+}
+
+static unsigned long read_pointer(const u8 **pLoc, const void *end,
+ signed ptrType)
+{
+ unsigned long value = 0;
+ union {
+ const u8 *p8;
+ const u16 *p16u;
+ const s16 *p16s;
+ const u32 *p32u;
+ const s32 *p32s;
+ const unsigned long *pul;
+ } ptr;
+
+ if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+ return 0;
+ ptr.p8 = *pLoc;
+ switch (ptrType & DW_EH_PE_FORM) {
+ case DW_EH_PE_data2:
+ if (end < (const void *)(ptr.p16u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned((u16 *) ptr.p16s++);
+ else
+ value = get_unaligned((u16 *) ptr.p16u++);
+ break;
+ case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+ if (end < (const void *)(ptr.p32u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned(ptr.p32s++);
+ else
+ value = get_unaligned(ptr.p32u++);
+ break;
+ case DW_EH_PE_data8:
+ BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+ BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+ case DW_EH_PE_native:
+ if (end < (const void *)(ptr.pul + 1))
+ return 0;
+ value = get_unaligned((unsigned long *)ptr.pul++);
+ break;
+ case DW_EH_PE_leb128:
+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+ value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
+ : get_uleb128(&ptr.p8, end);
+ if ((const void *)ptr.p8 > end)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ switch (ptrType & DW_EH_PE_ADJUST) {
+ case DW_EH_PE_abs:
+ break;
+ case DW_EH_PE_pcrel:
+ value += (unsigned long)*pLoc;
+ break;
+ default:
+ return 0;
+ }
+ if ((ptrType & DW_EH_PE_indirect)
+ && __get_user(value, (unsigned long __user *)value))
+ return 0;
+ *pLoc = ptr.p8;
+
+ return value;
+}
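+
+/*
+ * Illustrative note (not part of the original source): an encoding byte
+ * combines a form (DW_EH_PE_FORM bits) with an adjustment (DW_EH_PE_ADJUST
+ * bits). For example, a pc-relative signed 32-bit pointer is encoded as
+ * 0x1b (DW_EH_PE_pcrel | DW_EH_PE_data4 | DW_EH_PE_signed) and could be
+ * fetched with
+ *
+ *	unsigned long target = read_pointer(&p, p + 4, 0x1b);
+ *
+ * which reads the s32 and then adds the address it was read from, per the
+ * DW_EH_PE_pcrel case above.
+ */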
+
+static signed fde_pointer_type(const u32 *cie)
+{
+ const u8 *ptr = (const u8 *)(cie + 2);
+ unsigned version = *ptr;
+
+ if (version != 1)
+ return -1; /* unsupported */
+
+ if (*++ptr) {
+ const char *aug;
+ const u8 *end = (const u8 *)(cie + 1) + *cie;
+ uleb128_t len;
+
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr != 'z')
+ return -1;
+
+ /* check if augmentation string is nul-terminated */
+ aug = (const void *)ptr;
+ ptr = memchr(aug, 0, end - ptr);
+ if (ptr == NULL)
+ return -1;
+
+ ++ptr; /* skip terminator */
+ get_uleb128(&ptr, end); /* skip code alignment */
+ get_sleb128(&ptr, end); /* skip data alignment */
+ /* skip return address column */
+ version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
+ len = get_uleb128(&ptr, end); /* augmentation length */
+
+ if (ptr + len < ptr || ptr + len > end)
+ return -1;
+
+ end = ptr + len;
+ while (*++aug) {
+ if (ptr >= end)
+ return -1;
+ switch (*aug) {
+ case 'L':
+ ++ptr;
+ break;
+ case 'P':{
+ signed ptrType = *ptr++;
+
+ if (!read_pointer(&ptr, end, ptrType)
+ || ptr > end)
+ return -1;
+ }
+ break;
+ case 'R':
+ return *ptr;
+ default:
+ return -1;
+ }
+ }
+ }
+ return DW_EH_PE_native | DW_EH_PE_abs;
+}
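+
+/*
+ * Illustrative note (not part of the original source): CIEs in .debug_frame,
+ * which this unwinder consumes (see __start_unwind in vmlinux.lds.S), usually
+ * carry an empty augmentation string, so the "if (*++ptr)" body is skipped
+ * and the default DW_EH_PE_native | DW_EH_PE_abs is returned. A "zR"
+ * augmentation (common in .eh_frame) would instead make the loop above
+ * return the 'R' datum, e.g. DW_EH_PE_pcrel | DW_EH_PE_data4.
+ */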
+
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+ state->loc += delta * state->codeAlign;
+
+ /* FIXME_Rajesh: Probably we are defining for the initial range as well;
+ return delta > 0;
+ */
+ unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
+ return 1;
+}
+
+static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
+ struct unwind_state *state)
+{
+ if (reg < ARRAY_SIZE(state->regs)) {
+ state->regs[reg].where = where;
+ state->regs[reg].value = value;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("r%lu: ", reg);
+ switch (where) {
+ case Nowhere:
+ unw_debug("s ");
+ break;
+ case Memory:
+ unw_debug("c(%lu) ", value);
+ break;
+ case Register:
+ unw_debug("r(%lu) ", value);
+ break;
+ case Value:
+ unw_debug("v(%lu) ", value);
+ break;
+ default:
+ break;
+ }
+#endif
+ }
+}
+
+static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
+ signed ptrType, struct unwind_state *state)
+{
+ union {
+ const u8 *p8;
+ const u16 *p16;
+ const u32 *p32;
+ } ptr;
+ int result = 1;
+ u8 opcode;
+
+ if (start != state->cieStart) {
+ state->loc = state->org;
+ result =
+ processCFI(state->cieStart, state->cieEnd, 0, ptrType,
+ state);
+ if (targetLoc == 0 && state->label == NULL)
+ return result;
+ }
+ for (ptr.p8 = start; result && ptr.p8 < end;) {
+ switch (*ptr.p8 >> 6) {
+ uleb128_t value;
+
+ case 0:
+ opcode = *ptr.p8++;
+
+ switch (opcode) {
+ case DW_CFA_nop:
+ unw_debug("cfa nop ");
+ break;
+ case DW_CFA_set_loc:
+ state->loc = read_pointer(&ptr.p8, end,
+ ptrType);
+ if (state->loc == 0)
+ result = 0;
+ unw_debug("cfa_set_loc: 0x%lx ", state->loc);
+ break;
+ case DW_CFA_advance_loc1:
+ unw_debug("\ncfa advance loc1:");
+ result = ptr.p8 < end
+ && advance_loc(*ptr.p8++, state);
+ break;
+ case DW_CFA_advance_loc2:
+ value = *ptr.p8++;
+ value += *ptr.p8++ << 8;
+ unw_debug("\ncfa advance loc2:");
+ result = ptr.p8 <= end + 2
+ /* && advance_loc(*ptr.p16++, state); */
+ && advance_loc(value, state);
+ break;
+ case DW_CFA_advance_loc4:
+ unw_debug("\ncfa advance loc4:");
+ result = ptr.p8 <= end + 4
+ && advance_loc(*ptr.p32++, state);
+ break;
+ case DW_CFA_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_offset_extended: ");
+ set_rule(value, Memory,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_offset_extended_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Memory,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_restore_extended:
+ unw_debug("cfa_restore_extended: ");
+ case DW_CFA_undefined:
+ unw_debug("cfa_undefined: ");
+ case DW_CFA_same_value:
+ unw_debug("cfa_same_value: ");
+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
+ state);
+ break;
+ case DW_CFA_register:
+ unw_debug("cfa_register: ");
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Register,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_remember_state:
+ unw_debug("cfa_remember_state: ");
+ if (ptr.p8 == state->label) {
+ state->label = NULL;
+ return 1;
+ }
+ if (state->stackDepth >= MAX_STACK_DEPTH)
+ return 0;
+ state->stack[state->stackDepth++] = ptr.p8;
+ break;
+ case DW_CFA_restore_state:
+ unw_debug("cfa_restore_state: ");
+ if (state->stackDepth) {
+ const uleb128_t loc = state->loc;
+ const u8 *label = state->label;
+
+ state->label =
+ state->stack[state->stackDepth - 1];
+ memcpy(&state->cfa, &badCFA,
+ sizeof(state->cfa));
+ memset(state->regs, 0,
+ sizeof(state->regs));
+ state->stackDepth = 0;
+ result =
+ processCFI(start, end, 0, ptrType,
+ state);
+ state->loc = loc;
+ state->label = label;
+ } else
+ return 0;
+ break;
+ case DW_CFA_def_cfa:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
+ /*nobreak*/
+ case DW_CFA_def_cfa_offset:
+ state->cfa.offs = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa_offset: 0x%lx ",
+ state->cfa.offs);
+ break;
+ case DW_CFA_def_cfa_sf:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ /*nobreak */
+ case DW_CFA_def_cfa_offset_sf:
+ state->cfa.offs = get_sleb128(&ptr.p8, end)
+ * state->dataAlign;
+ break;
+ case DW_CFA_def_cfa_register:
+				unw_debug("cfa_def_cfa_register: ");
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ break;
+ /*todo case DW_CFA_def_cfa_expression: */
+ /*todo case DW_CFA_expression: */
+ /*todo case DW_CFA_val_expression: */
+ case DW_CFA_GNU_args_size:
+ get_uleb128(&ptr.p8, end);
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Memory,
+ (uleb128_t) 0 - get_uleb128(&ptr.p8,
+ end),
+ state);
+ break;
+ case DW_CFA_GNU_window_save:
+ default:
+				unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
+ result = 0;
+ break;
+ }
+ break;
+ case 1:
+ unw_debug("\ncfa_adv_loc: ");
+ result = advance_loc(*ptr.p8++ & 0x3f, state);
+ break;
+ case 2:
+ unw_debug("cfa_offset: ");
+ value = *ptr.p8++ & 0x3f;
+ set_rule(value, Memory, get_uleb128(&ptr.p8, end),
+ state);
+ break;
+ case 3:
+ unw_debug("cfa_restore: ");
+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+ break;
+ }
+
+ if (ptr.p8 > end)
+ result = 0;
+ if (result && targetLoc != 0 && targetLoc < state->loc)
+ return 1;
+ }
+
+ return result && ptr.p8 == end && (targetLoc == 0 || (
+ /*todo While in theory this should apply, gcc in practice omits
+ everything past the function prolog, and hence the location
+ never reaches the end of the function.
+ targetLoc < state->loc && */ state->label == NULL));
+}
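+
+/*
+ * Worked example (illustrative, not part of the original source): with a CIE
+ * that specifies codeAlign = 1 and dataAlign = -4, the instruction bytes
+ *
+ *	0x0c 0x1c 0x08		DW_CFA_def_cfa:     CFA = r28 + 8
+ *	0x44			DW_CFA_advance_loc: loc += 4 * codeAlign
+ *	0x9f 0x01		DW_CFA_offset:      r31 saved at c(1)
+ *
+ * flow through the "case 0", "case 1" and "case 2" arms above. The factored
+ * rule c(1) only becomes an address (CFA + 1 * dataAlign, i.e. CFA - 4) later
+ * in arc_unwind(), when the Memory rule is applied. Register numbers here
+ * assume the usual ARC mapping of r28 = SP and r31 = BLINK.
+ */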
+
+/* Unwind to the previous frame.  Returns 0 if successful, a negative
+ * number in case of an error. */
+int arc_unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+ const u32 *fde = NULL, *cie = NULL;
+ const u8 *ptr = NULL, *end = NULL;
+ unsigned long pc = UNW_PC(frame) - frame->call_frame;
+ unsigned long startLoc = 0, endLoc = 0, cfa;
+ unsigned i;
+ signed ptrType = -1;
+ uleb128_t retAddrReg = 0;
+ const struct unwind_table *table;
+ struct unwind_state state;
+ unsigned long *fptr;
+ unsigned long addr;
+
+ unw_debug("\n\nUNWIND FRAME:\n");
+	unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%lx\n",
+ UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
+ UNW_FP(frame));
+
+ if (UNW_PC(frame) == 0)
+ return -EINVAL;
+
+#ifdef UNWIND_DEBUG
+ {
+ unsigned long *sptr = (unsigned long *)UNW_SP(frame);
+ unw_debug("\nStack Dump:\n");
+ for (i = 0; i < 20; i++, sptr++)
+ unw_debug("0x%p: 0x%lx\n", sptr, *sptr);
+ unw_debug("\n");
+ }
+#endif
+
+ table = find_table(pc);
+ if (table != NULL
+ && !(table->size & (sizeof(*fde) - 1))) {
+ const u8 *hdr = table->header;
+ unsigned long tableSize;
+
+ smp_rmb();
+ if (hdr && hdr[0] == 1) {
+ switch (hdr[3] & DW_EH_PE_FORM) {
+ case DW_EH_PE_native:
+ tableSize = sizeof(unsigned long);
+ break;
+ case DW_EH_PE_data2:
+ tableSize = 2;
+ break;
+ case DW_EH_PE_data4:
+ tableSize = 4;
+ break;
+ case DW_EH_PE_data8:
+ tableSize = 8;
+ break;
+ default:
+ tableSize = 0;
+ break;
+ }
+ ptr = hdr + 4;
+ end = hdr + table->hdrsz;
+ if (tableSize && read_pointer(&ptr, end, hdr[1])
+ == (unsigned long)table->address
+ && (i = read_pointer(&ptr, end, hdr[2])) > 0
+ && i == (end - ptr) / (2 * tableSize)
+ && !((end - ptr) % (2 * tableSize))) {
+ do {
+ const u8 *cur =
+ ptr + (i / 2) * (2 * tableSize);
+
+ startLoc = read_pointer(&cur,
+ cur + tableSize,
+ hdr[3]);
+ if (pc < startLoc)
+ i /= 2;
+ else {
+ ptr = cur - tableSize;
+ i = (i + 1) / 2;
+ }
+ } while (startLoc && i > 1);
+ if (i == 1
+ && (startLoc = read_pointer(&ptr,
+ ptr + tableSize,
+ hdr[3])) != 0
+ && pc >= startLoc)
+ fde = (void *)read_pointer(&ptr,
+ ptr +
+ tableSize,
+ hdr[3]);
+ }
+ }
+
+ if (fde != NULL) {
+ cie = cie_for_fde(fde, table);
+ ptr = (const u8 *)(fde + 2);
+ if (cie != NULL
+ && cie != &bad_cie
+ && cie != &not_fde
+ && (ptrType = fde_pointer_type(cie)) >= 0
+ && read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType) == startLoc) {
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= endLoc)
+ fde = NULL;
+ } else
+ fde = NULL;
+ }
+ if (fde == NULL) {
+ for (fde = table->address, tableSize = table->size;
+ cie = NULL, tableSize > sizeof(*fde)
+ && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde,
+ fde += 1 + *fde / sizeof(*fde)) {
+ cie = cie_for_fde(fde, table);
+ if (cie == &bad_cie) {
+ cie = NULL;
+ break;
+ }
+ if (cie == NULL
+ || cie == &not_fde
+ || (ptrType = fde_pointer_type(cie)) < 0)
+ continue;
+ ptr = (const u8 *)(fde + 2);
+ startLoc = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde, ptrType);
+ if (!startLoc)
+ continue;
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= startLoc && pc < endLoc)
+ break;
+ }
+ }
+ }
+ if (cie != NULL) {
+ memset(&state, 0, sizeof(state));
+ state.cieEnd = ptr; /* keep here temporarily */
+ ptr = (const u8 *)(cie + 2);
+ end = (const u8 *)(cie + 1) + *cie;
+ frame->call_frame = 1;
+ if ((state.version = *ptr) != 1)
+ cie = NULL; /* unsupported version */
+ else if (*++ptr) {
+ /* check if augmentation size is first (thus present) */
+ if (*ptr == 'z') {
+ while (++ptr < end && *ptr) {
+ switch (*ptr) {
+					/* check for ignorable or already
+					 * handled entries in the nul-terminated
+					 * augmentation string */
+ case 'L':
+ case 'P':
+ case 'R':
+ continue;
+ case 'S':
+ frame->call_frame = 0;
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ if (ptr >= end || *ptr)
+ cie = NULL;
+ }
+ ++ptr;
+ }
+ if (cie != NULL) {
+		/* get code alignment factor */
+		state.codeAlign = get_uleb128(&ptr, end);
+		/* get data alignment factor */
+ state.dataAlign = get_sleb128(&ptr, end);
+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+ cie = NULL;
+ else {
+ retAddrReg =
+ state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
+ end);
+ unw_debug("CIE Frame Info:\n");
+ unw_debug("return Address register 0x%lx\n",
+ retAddrReg);
+ unw_debug("data Align: %ld\n", state.dataAlign);
+ unw_debug("code Align: %lu\n", state.codeAlign);
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ ptr += augSize;
+ }
+ if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(retAddrReg)
+ || reg_info[retAddrReg].width !=
+ sizeof(unsigned long))
+ cie = NULL;
+ }
+ }
+ if (cie != NULL) {
+ state.cieStart = ptr;
+ ptr = state.cieEnd;
+ state.cieEnd = end;
+ end = (const u8 *)(fde + 1) + *fde;
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ if ((ptr += augSize) > end)
+ fde = NULL;
+ }
+ }
+ if (cie == NULL || fde == NULL) {
+#ifdef CONFIG_FRAME_POINTER
+ unsigned long top, bottom;
+
+ top = STACK_TOP_UNW(frame->task);
+ bottom = STACK_BOTTOM_UNW(frame->task);
+#if FRAME_RETADDR_OFFSET < 0
+ if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
+ && bottom < UNW_FP(frame)
+#else
+ if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
+ && bottom > UNW_FP(frame)
+#endif
+ && !((UNW_SP(frame) | UNW_FP(frame))
+ & (sizeof(unsigned long) - 1))) {
+ unsigned long link;
+
+ if (!__get_user(link, (unsigned long *)
+ (UNW_FP(frame) + FRAME_LINK_OFFSET))
+#if FRAME_RETADDR_OFFSET < 0
+ && link > bottom && link < UNW_FP(frame)
+#else
+ && link > UNW_FP(frame) && link < bottom
+#endif
+ && !(link & (sizeof(link) - 1))
+ && !__get_user(UNW_PC(frame),
+ (unsigned long *)(UNW_FP(frame)
+ + FRAME_RETADDR_OFFSET)))
+ {
+ UNW_SP(frame) =
+ UNW_FP(frame) + FRAME_RETADDR_OFFSET
+#if FRAME_RETADDR_OFFSET < 0
+ -
+#else
+ +
+#endif
+ sizeof(UNW_PC(frame));
+ UNW_FP(frame) = link;
+ return 0;
+ }
+ }
+#endif
+ return -ENXIO;
+ }
+ state.org = startLoc;
+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+
+ unw_debug("\nProcess instructions\n");
+
+	/* process instructions
+	 * For ARC, we optimize by letting blink (retAddrReg) keep its
+	 * same value in leaf functions, so we should not check
+	 * state.regs[retAddrReg].where == Nowhere
+	 */
+ if (!processCFI(ptr, end, pc, ptrType, &state)
+ || state.loc > endLoc
+/* || state.regs[retAddrReg].where == Nowhere */
+ || state.cfa.reg >= ARRAY_SIZE(reg_info)
+ || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+ || state.cfa.offs % sizeof(unsigned long))
+ return -EIO;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("\n");
+
+ unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+
+ if (REG_INVALID(i))
+ continue;
+
+ switch (state.regs[i].where) {
+ case Nowhere:
+ break;
+ case Memory:
+ unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
+ break;
+ case Register:
+ unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
+ break;
+ case Value:
+ unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
+ break;
+ }
+ }
+
+ unw_debug("\n");
+#endif
+
+ /* update frame */
+#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
+ if (frame->call_frame
+ && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
+ frame->call_frame = 0;
+#endif
+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+ startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
+ endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+ startLoc = min(STACK_LIMIT(cfa), cfa);
+ endLoc = max(STACK_LIMIT(cfa), cfa);
+ }
+
+ unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx => 0x%lx\n",
+ state.cfa.reg, state.cfa.offs, cfa);
+
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i)) {
+ if (state.regs[i].where == Nowhere)
+ continue;
+ return -EIO;
+ }
+ switch (state.regs[i].where) {
+ default:
+ break;
+ case Register:
+ if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(state.regs[i].value)
+ || reg_info[i].width >
+ reg_info[state.regs[i].value].width)
+ return -EIO;
+ switch (reg_info[state.regs[i].value].width) {
+ case sizeof(u8):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u8);
+ break;
+ case sizeof(u16):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u16);
+ break;
+ case sizeof(u32):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u32);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u64);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ }
+ }
+
+	unw_debug("\nRegister state after evaluation against the live stack:\n");
+ fptr = (unsigned long *)(&frame->regs);
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
+
+ if (REG_INVALID(i))
+ continue;
+ switch (state.regs[i].where) {
+ case Nowhere:
+ if (reg_info[i].width != sizeof(UNW_SP(frame))
+ || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+ != &UNW_SP(frame))
+ continue;
+ UNW_SP(frame) = cfa;
+ break;
+ case Register:
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ FRAME_REG(i, u8) = state.regs[i].value;
+ break;
+ case sizeof(u16):
+ FRAME_REG(i, u16) = state.regs[i].value;
+ break;
+ case sizeof(u32):
+ FRAME_REG(i, u32) = state.regs[i].value;
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ FRAME_REG(i, u64) = state.regs[i].value;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ case Value:
+ if (reg_info[i].width != sizeof(unsigned long))
+ return -EIO;
+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+ * state.dataAlign;
+ break;
+ case Memory:
+ addr = cfa + state.regs[i].value * state.dataAlign;
+
+ if ((state.regs[i].value * state.dataAlign)
+ % sizeof(unsigned long)
+ || addr < startLoc
+ || addr + sizeof(unsigned long) < addr
+ || addr + sizeof(unsigned long) > endLoc)
+ return -EIO;
+
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ __get_user(FRAME_REG(i, u8),
+ (u8 __user *)addr);
+ break;
+ case sizeof(u16):
+ __get_user(FRAME_REG(i, u16),
+ (u16 __user *)addr);
+ break;
+ case sizeof(u32):
+ __get_user(FRAME_REG(i, u32),
+ (u32 __user *)addr);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ __get_user(FRAME_REG(i, u64),
+ (u64 __user *)addr);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ break;
+ }
+ unw_debug("r%d: 0x%lx ", i, *fptr);
+ }
+
+ return 0;
+#undef FRAME_REG
+}
+EXPORT_SYMBOL(arc_unwind);
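+
+/*
+ * Usage sketch (illustrative, not part of the original source): a caller such
+ * as arch/arc/kernel/stacktrace.c seeds an unwind_frame_info from a task's
+ * registers and walks frames until arc_unwind() fails or the PC hits zero.
+ * Field names below follow the UNW_* accessors used above and are otherwise
+ * assumptions about asm/unwind.h:
+ *
+ *	struct unwind_frame_info frame;
+ *
+ *	frame.task = tsk;
+ *	frame.regs = *regs;
+ *	frame.call_frame = 0;
+ *
+ *	while (arc_unwind(&frame) == 0 && UNW_PC(&frame))
+ *		pr_info("  [<%08lx>]\n", UNW_PC(&frame));
+ */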
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d3c92f52d444
--- /dev/null
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(arc)
+ENTRY(_stext)
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+ /*
+	 * ICCM starts at 0x8000_0000. So if the kernel is relocated to some
+	 * other address, make sure no peripheral mapped at 0x8xxx_xxxx clashes
+	 * with ICCM. Note that the vector table also lives in ICCM.
+ */
+
+ . = CONFIG_LINUX_LINK_BASE;
+
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
+ . = ALIGN(PAGE_SIZE);
+ }
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ .text.arcfp : {
+ *(.text.arcfp)
+ . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
+ }
+#endif
+
+ /*
+	 * The reason for having a separate subsection .init.ramfs is to
+	 * prevent objdump from including it in kernel dumps
+ *
+ * Reason for having .init.ramfs above .init is to make sure that the
+ * binary blob is tucked away to one side, reducing the displacement
+ * between .init.text and .text, avoiding any possible relocation
+ * errors because of calls from .init.text to .text
+ * Yes such calls do exist. e.g.
+ * decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
+ */
+
+ __init_begin = .;
+
+ .init.ramfs : { INIT_RAM_FS }
+
+ . = ALIGN(PAGE_SIZE);
+ _stext = .;
+
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(L1_CACHE_BYTES)
+
+ /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
+ .init.data : {
+ INIT_DATA
+ INIT_SETUP(L1_CACHE_BYTES)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ }
+
+ .init.arch.info : {
+ __arch_info_begin = .;
+ *(.arch.info.init)
+ __arch_info_end = .;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ /*
+	 * .exit.text is discarded at runtime, not link time, to deal with
+	 * references from .debug_frame
+	 * It will be freed along with other init memory, being inside
+	 * [__init_begin : __init_end]
+ */
+ .exit.text : { EXIT_TEXT }
+ .exit.data : { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+ .text : {
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ }
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+ _etext = .;
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+
+ /*
+ * 1. this is .data essentially
+ * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
+ */
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+ . = ALIGN(PAGE_SIZE);
+ .debug_frame : {
+ __start_unwind = .;
+ *(.debug_frame)
+ __end_unwind = .;
+ }
+#else
+ /DISCARD/ : { *(.debug_frame) }
+#endif
+
+ NOTES
+
+ . = ALIGN(PAGE_SIZE);
+ _end = . ;
+
+ STABS_DEBUG
+ DISCARDS
+
+ .arcextmap 0 : {
+ *(.gnu.linkonce.arcextmap.*)
+ *(.arcextmap.*)
+ }
+
+	/* open-coded because we need .debug_frame separately for unwinding */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ . = CONFIG_ARC_DCCM_BASE;
+ __arc_dccm_base = .;
+ .data.arcfp : {
+ *(.data.arcfp)
+ }
+ . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
+#endif
+}