author     Linus Torvalds <torvalds@linux-foundation.org>   2013-03-02 16:58:56 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-03-02 16:58:56 +0100
commit     e23b62256a361611cbd45cd1456638f1a5106b5c (patch)
tree       472968c961432a1d9d0c3634ca20433f1d9cd29b /arch/arc/include/asm
parent     Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus (diff)
parent     ARC: split elf.h into uapi and export it for userspace (diff)
download   linux-e23b62256a361611cbd45cd1456638f1a5106b5c.tar.xz
           linux-e23b62256a361611cbd45cd1456638f1a5106b5c.zip
Merge tag 'arc-v3.9-rc1-late' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull new ARC architecture from Vineet Gupta:
 "Initial ARC Linux port with some fixes on top for 3.9-rc1:

  I would like to introduce the Linux port to ARC Processors (from
  Synopsys) for 3.9-rc1.

  The patch-set has been discussed on the public lists since Nov and has
  received a fair bit of review, specially from Arnd, tglx, Al and other
  subsystem maintainers for DeviceTree, kgdb...

  The arch bits are in arch/arc, some asm-generic changes (acked by
  Arnd), a minor change to PARISC (acked by Helge).

  The series is a touch bigger for a new port for 2 main reasons:

   1. It enables a basic kernel in first sub-series and adds
      ptrace/kgdb/.. later

   2. Some of the fallout of review (DeviceTree support, multi-platform-
      image support) were added on top of orig series, primarily to
      record the revision history.

  This updated pull request additionally contains

   - fixes due to our GNU tools catching up with the new syscall/ptrace
     ABI

   - some (minor) cross-arch Kconfig updates."

* tag 'arc-v3.9-rc1-late' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (82 commits)
  ARC: split elf.h into uapi and export it for userspace
  ARC: Fixup the current ABI version
  ARC: gdbserver using regset interface possibly broken
  ARC: Kconfig cleanup tracking cross-arch Kconfig pruning in merge window
  ARC: make a copy of flat DT
  ARC: [plat-arcfpga] DT arc-uart bindings change: "baud" => "current-speed"
  ARC: Ensure CONFIG_VIRT_TO_BUS is not enabled
  ARC: Fix pt_orig_r8 access
  ARC: [3.9] Fallout of hlist iterator update
  ARC: 64bit RTSC timestamp hardware issue
  ARC: Don't fiddle with non-existent caches
  ARC: Add self to MAINTAINERS
  ARC: Provide a default serial.h for uart drivers needing BASE_BAUD
  ARC: [plat-arcfpga] defconfig for fully loaded ARC Linux
  ARC: [Review] Multi-platform image #8: platform registers SMP callbacks
  ARC: [Review] Multi-platform image #7: SMP common code to use callbacks
  ARC: [Review] Multi-platform image #6: cpu-to-dma-addr optional
  ARC: [Review] Multi-platform image #5: NR_IRQS defined by ARC core
  ARC: [Review] Multi-platform image #4: Isolate platform headers
  ARC: [Review] Multi-platform image #3: switch to board callback
  ...
Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--  arch/arc/include/asm/Kbuild | 49
-rw-r--r--  arch/arc/include/asm/arcregs.h | 433
-rw-r--r--  arch/arc/include/asm/asm-offsets.h | 9
-rw-r--r--  arch/arc/include/asm/atomic.h | 232
-rw-r--r--  arch/arc/include/asm/barrier.h | 42
-rw-r--r--  arch/arc/include/asm/bitops.h | 516
-rw-r--r--  arch/arc/include/asm/bug.h | 37
-rw-r--r--  arch/arc/include/asm/cache.h | 75
-rw-r--r--  arch/arc/include/asm/cacheflush.h | 67
-rw-r--r--  arch/arc/include/asm/checksum.h | 101
-rw-r--r--  arch/arc/include/asm/clk.h | 22
-rw-r--r--  arch/arc/include/asm/cmpxchg.h | 143
-rw-r--r--  arch/arc/include/asm/current.h | 32
-rw-r--r--  arch/arc/include/asm/defines.h | 56
-rw-r--r--  arch/arc/include/asm/delay.h | 68
-rw-r--r--  arch/arc/include/asm/disasm.h | 116
-rw-r--r--  arch/arc/include/asm/dma-mapping.h | 221
-rw-r--r--  arch/arc/include/asm/dma.h | 14
-rw-r--r--  arch/arc/include/asm/elf.h | 78
-rw-r--r--  arch/arc/include/asm/entry.h | 724
-rw-r--r--  arch/arc/include/asm/exec.h | 15
-rw-r--r--  arch/arc/include/asm/futex.h | 151
-rw-r--r--  arch/arc/include/asm/io.h | 105
-rw-r--r--  arch/arc/include/asm/irq.h | 25
-rw-r--r--  arch/arc/include/asm/irqflags.h | 153
-rw-r--r--  arch/arc/include/asm/kdebug.h | 19
-rw-r--r--  arch/arc/include/asm/kgdb.h | 61
-rw-r--r--  arch/arc/include/asm/kprobes.h | 62
-rw-r--r--  arch/arc/include/asm/linkage.h | 63
-rw-r--r--  arch/arc/include/asm/mach_desc.h | 87
-rw-r--r--  arch/arc/include/asm/mmu.h | 23
-rw-r--r--  arch/arc/include/asm/mmu_context.h | 213
-rw-r--r--  arch/arc/include/asm/module.h | 28
-rw-r--r--  arch/arc/include/asm/mutex.h | 18
-rw-r--r--  arch/arc/include/asm/page.h | 109
-rw-r--r--  arch/arc/include/asm/perf_event.h | 13
-rw-r--r--  arch/arc/include/asm/pgalloc.h | 134
-rw-r--r--  arch/arc/include/asm/pgtable.h | 405
-rw-r--r--  arch/arc/include/asm/processor.h | 151
-rw-r--r--  arch/arc/include/asm/prom.h | 14
-rw-r--r--  arch/arc/include/asm/ptrace.h | 130
-rw-r--r--  arch/arc/include/asm/sections.h | 18
-rw-r--r--  arch/arc/include/asm/segment.h | 24
-rw-r--r--  arch/arc/include/asm/serial.h | 25
-rw-r--r--  arch/arc/include/asm/setup.h | 37
-rw-r--r--  arch/arc/include/asm/smp.h | 130
-rw-r--r--  arch/arc/include/asm/spinlock.h | 144
-rw-r--r--  arch/arc/include/asm/spinlock_types.h | 35
-rw-r--r--  arch/arc/include/asm/string.h | 40
-rw-r--r--  arch/arc/include/asm/switch_to.h | 41
-rw-r--r--  arch/arc/include/asm/syscall.h | 72
-rw-r--r--  arch/arc/include/asm/syscalls.h | 29
-rw-r--r--  arch/arc/include/asm/thread_info.h | 121
-rw-r--r--  arch/arc/include/asm/timex.h | 18
-rw-r--r--  arch/arc/include/asm/tlb-mmu1.h | 104
-rw-r--r--  arch/arc/include/asm/tlb.h | 58
-rw-r--r--  arch/arc/include/asm/tlbflush.h | 28
-rw-r--r--  arch/arc/include/asm/uaccess.h | 751
-rw-r--r--  arch/arc/include/asm/unaligned.h | 29
-rw-r--r--  arch/arc/include/asm/unwind.h | 163
60 files changed, 6881 insertions, 0 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 000000000000..48af742f8b5a
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,49 @@
+generic-y += auxvec.h
+generic-y += bugs.h
+generic-y += bitsperlong.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 000000000000..1b907c465666
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+#ifdef __KERNEL__
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_CRC_BCR 0x62
+#define ARC_REG_DVFB_BCR 0x64
+#define ARC_REG_EXTARITH_BCR 0x65
+#define ARC_REG_VECBASE_BCR 0x68
+#define ARC_REG_PERIBASE_BCR 0x69
+#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
+#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
+#define ARC_REG_MMU_BCR 0x6f
+#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR 0x75
+#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_XY_MEM_BCR 0x79
+#define ARC_REG_MAC_BCR 0x7a
+#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_SWAP_BCR 0x7c
+#define ARC_REG_NORM_BCR 0x7d
+#define ARC_REG_MIXMAX_BCR 0x7e
+#define ARC_REG_BARREL_BCR 0x7f
+#define ARC_REG_D_UNCACH_BCR 0x6A
+
+/* status32 Bits Positions */
+#define STATUS_H_BIT 0 /* CPU Halted */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+#define STATUS_AE_BIT 5 /* Exception active */
+#define STATUS_DE_BIT 6 /* PC is in delay slot */
+#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_L_BIT 12 /* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_H_MASK (1<<STATUS_H_BIT)
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
+#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_L_MASK (1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK 0xff0000
+#define ECR_CODE_MASK 0x00ff00
+#define ECR_PARAM_MASK 0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x20
+#define ECR_V_ITLB_MISS 0x21
+#define ECR_V_DTLB_MISS 0x22
+#define ECR_V_PROTV 0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH 0x00
+#define ECR_C_PROTV_LOAD 0x01
+#define ECR_C_PROTV_STORE 0x02
+#define ECR_C_PROTV_XCHG 0x03
+#define ECR_C_PROTV_MISALIG_DATA 0x04
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS 8
+#define ECR_C_BIT_DTLB_ST_MISS 9
+
+
+/* Auxiliary registers */
+#define AUX_IDENTITY 4
+#define AUX_INTR_VEC_BASE 0x25
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
+#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
+#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
+
+#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR 0x80000000
+
+/* TLB Commands */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI /* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIL 0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_IC_PTAG 0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_DC_PTAG 0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH 0x40
+#define DC_CTRL_FLUSH_STATUS 0x100
+
+/* MMU Management regs */
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT 0x300
+#define ARC_AUX_DPFP_1L 0x301
+#define ARC_AUX_DPFP_1H 0x302
+#define ARC_AUX_DPFP_2L 0x303
+#define ARC_AUX_DPFP_2H 0x304
+#define ARC_AUX_DPFP_STAT 0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ * Inline ASM macros to read/write AUX Regs
+ * Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg) __builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val) \
+ __builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " lr %0, [%1]" \
+ : "=r"(__ret) \
+ : "i"(reg)); \
+ __ret; \
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ * write_aux_reg(0x69, some_val);
+ * This generates the tightest code.
+ */
+#define write_aux_reg(reg_imm, val) \
+({ \
+ __asm__ __volatile__( \
+ " sr %0, [%1] \n" \
+ : \
+ : "ir"(val), "i"(reg_imm)); \
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ * e.g.
+ * reg_num = 0x69
+ * write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ * memory to a reg hence not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val) \
+({ \
+ unsigned int tmp; \
+ \
+ __asm__ __volatile__( \
+ " ld %0, [%2] \n\t" \
+ " sr %1, [%0] \n\t" \
+ : "=&r"(tmp) \
+ : "r"(val), "memory"(&reg_in_var)); \
+})
+
+#endif
+
+#define READ_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ tmp = read_aux_reg(reg); \
+ if (sizeof(tmp) == sizeof(into)) { \
+ into = *((typeof(into) *)&tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+#define WRITE_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ if (sizeof(tmp) == sizeof(into)) { \
+ tmp = (*(unsigned int *)(into)); \
+ write_aux_reg(reg, tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+/* Helpers */
+#define TO_KB(bytes) ((bytes) >> 10)
+#define TO_MB(bytes) (TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+ struct {
+ unsigned int l, h;
+ } aux_dpfp[2];
+};
+#endif
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+ unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+ unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+ ways:4, ver:8;
+#endif
+};
+
+#define EXTN_SWAP_VALID 0x1
+#define EXTN_NORM_VALID 0x2
+#define EXTN_MINMAX_VALID 0x2
+#define EXTN_BARREL_VALID 0x2
+
+struct bcr_extn {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
+ norm:2, swap:1;
+#else
+ unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
+ crc:1, pad:20;
+#endif
+};
+
+/* DSP Options Ref Manual */
+struct bcr_extn_mac_mul {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, type:8, ver:8;
+#else
+ unsigned int ver:8, type:8, pad:16;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+ unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+ unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int addr:24, ver:8;
+#else
+ unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int res:21, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* Both SP and DP FPU BCRs have same format */
+struct bcr_fp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int fast:1, ver:8;
+#else
+ unsigned int ver:8, fast:1;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+ unsigned int has_aliasing, sz, line_len, assoc, ver;
+};
+
+struct cpuinfo_arc_ccm {
+ unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+ struct cpuinfo_arc_cache icache, dcache;
+ struct cpuinfo_arc_mmu mmu;
+ struct bcr_identity core;
+ unsigned int timers;
+ unsigned int vec_base;
+ unsigned int uncached_base;
+ struct cpuinfo_arc_ccm iccm, dccm;
+ struct bcr_extn extn;
+ struct bcr_extn_xymem extn_xymem;
+ struct bcr_extn_mac_mul extn_mac_mul;
+ struct bcr_fp fp, dpfp;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
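To make the READ_BCR()/bitfield convention concrete, here is a standalone host-side sketch (not part of the patch): it overlays a raw 32-bit aux-register word onto the bcr_identity layout the same way READ_BCR() does, assuming a little-endian build and an invented IDENTITY value.

#include <stdio.h>
#include <string.h>

/* little-endian field order, as in the !CONFIG_CPU_BIG_ENDIAN case above */
struct bcr_identity {
    unsigned int family:8, cpu_id:8, chip_id:16;
};

int main(void)
{
    unsigned int raw = 0x00340720;      /* made-up AUX_IDENTITY value */
    struct bcr_identity id;

    memcpy(&id, &raw, sizeof(id));      /* same overlay trick as READ_BCR() */
    printf("family=0x%02x cpu_id=0x%02x chip_id=0x%04x\n",
           (unsigned)id.family, (unsigned)id.cpu_id, (unsigned)id.chip_id);
    return 0;
}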
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..dad18768fe43
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 000000000000..83f03ca6caf6
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v) ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp) /* Early clobber, to prevent reg reuse */
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
+
+/* add and also return the new value */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bic %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(addr), "ir"(mask)
+ : "cc");
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to make sure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence
+ *
+ * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+ * requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter = i;
+ atomic_ops_unlock(flags);
+}
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter += i;
+ atomic_ops_unlock(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter -= i;
+ atomic_ops_unlock(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp += i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp -= i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ *addr &= ~mask;
+ atomic_ops_unlock(flags);
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+ c = old; \
+ c; \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i) { (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
+
+#endif
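As a side note, __atomic_add_unless() above is the classic cmpxchg retry loop. Below is a user-space sketch of the same loop (names and values are illustrative only), with GCC's __sync_val_compare_and_swap() standing in for atomic_cmpxchg().

#include <stdio.h>

/* returns the old value, like the kernel macro */
static int add_unless(int *v, int a, int u)
{
    int c = *v, old;

    while (c != u && (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
        c = old;    /* lost a race: retry with the value the CAS observed */

    return c;
}

int main(void)
{
    int counter = 3;
    int old = add_unless(&counter, 1, 0);

    printf("old=%d new=%d\n", old, counter);    /* old=3, counter becomes 4 */

    counter = 0;
    old = add_unless(&counter, 1, 0);
    printf("old=%d new=%d\n", old, counter);    /* counter == u: stays 0 */
    return 0;
}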
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 000000000000..f6cb7c4ffb35
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends() mb()
+
+/* TODO-vineetg verify the correctness of macros here */
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#define smp_read_barrier_depends() do { } while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 000000000000..647a83a8e756
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+ * The Kconfig glue ensures that in SMP, this is only set if the container
+ * SoC/platform has cross-core coherent LLOCK/SCOND
+ */
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bset %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bclr %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bxor %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+/*
+ * Semantically:
+ * Test the bit
+ * if clear
+ * set it and return 0 (old value)
+ * else
+ * return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of bit is returned
+ */
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bset %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bclr %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bxor %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#include <asm/smp.h>
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ * This avoids extra code being generated for pointer arithmetic, since
+ * the compiler is "not sure" that the index is NOT -ve
+ * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
+ * only consider bottom 5 bits of @nr, so NO need to mask them off.
+ * (GCC Quirk: however for constant @nr we still need to do the masking
+ * at compile time)
+ */
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+}
+
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+}
+
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int
+__test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ unsigned long mask;
+
+ addr += nr >> 5;
+
+ /* ARC700 only considers 5 bits in bit-fiddling insn */
+ mask = 1 << nr;
+
+ return ((mask & *addr) != 0);
+}
+
+#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr), (addr)) : \
+ __test_bit((nr), (addr)))
+
+/*
+ * Count the number of zeros, starting from MSB
+ * Helper for fls( ) friends
+ * This is a pure count, so (1-32) or (0-31) doesn't apply
+ * It could be 0 to 32, based on num of 0's in there
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+ unsigned int res;
+
+ __asm__ __volatile__(
+ " norm.f %0, %1 \n"
+ " mov.n %0, 0 \n"
+ " add.p %0, %0, 1 \n"
+ : "=r"(res)
+ : "r"(x)
+ : "cc");
+
+ return res;
+}
+
+static inline int constant_fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+ if (__builtin_constant_p(x))
+ return constant_fls(x);
+
+ return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+ if (!x)
+ return 0;
+ else
+ return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+ if (!word)
+ return word;
+
+ return ffs(word) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x) __ffs(~(x))
+
+/* TODO does this affect uni-processor code */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
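The fls()/ffs() conventions documented above (fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0, ffs built from x & -x) can be checked with a small host-side sketch that reuses the constant_fls() fallback logic, so no ARC NORM instruction is needed. This is illustrative code, not part of the patch.

#include <stdio.h>

static int my_fls(unsigned int x)   /* 1..32, 0 for x == 0 */
{
    int r = 32;

    if (!x)
        return 0;
    if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
    if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
    if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
    if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
    if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
    return r;
}

static int my_ffs(unsigned int x)   /* same x & -x trick as the ffs() macro */
{
    return my_fls(x & -x);
}

int main(void)
{
    printf("fls(1)=%d fls(0x80000000)=%d fls(0)=%d\n",
           my_fls(1), my_fls(0x80000000u), my_fls(0));
    printf("ffs(0x40)=%d ffs(0)=%d\n", my_ffs(0x40), my_ffs(0));
    return 0;
}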
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 000000000000..2ad8f9b1c54b
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg);
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg);
+
+#define BUG() do { \
+ dump_stack(); \
+ pr_warn("Kernel BUG in %s: %s: %d!\n", \
+ __FILE__, __func__, __LINE__); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 000000000000..6632273861fd
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* In case caches ($$) are not configured, set up a dummy L1_CACHE_SHIFT for rest of kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT 6
+#else
+#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARC_ICACHE_WAYS 2
+#define ARC_DCACHE_WAYS 4
+
+/* Helpers */
+#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
+#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
+
+#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
+#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
+
+#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
+#error "Need to fix some code as I/D cache lines not same"
+#else
+#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK))
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " ld.di %0, [%1] \n" \
+ : "=r"(__ret) \
+ : "r"(ptr)); \
+ __ret; \
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({ \
+ __asm__ __volatile__( \
+ " st.di %0, [%1] \n" \
+ : \
+ : "r"(data), "r"(ptr)); \
+})
+
+/* used to give SHMLBA a value to avoid Cache Aliasing */
+extern unsigned int ARC_shmlba;
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * ARC700 doesn't cache any access in top 256M.
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void __init read_decode_cache_bcr(void);
+#endif
+
+#endif /* __ARC_ASM_CACHE_H */
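A quick standalone sketch of the line-mask arithmetic above (not part of the patch), assuming the default L1_CACHE_SHIFT of 6, i.e. 64-byte lines, and made-up addresses:

#include <stdio.h>

#define L1_CACHE_SHIFT          6
#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
#define DCACHE_LINE_MASK        (~(L1_CACHE_BYTES - 1))
#define is_not_cache_aligned(p) ((unsigned long)(p) & (~DCACHE_LINE_MASK))

int main(void)
{
    unsigned long a = 0x80001040;   /* made-up, 64-byte aligned */
    unsigned long b = 0x80001044;   /* made-up, off by 4 */

    printf("0x%lx misaligned bits: 0x%lx\n", a, is_not_cache_aligned(a));
    printf("0x%lx misaligned bits: 0x%lx\n", b, is_not_cache_aligned(b));
    return 0;
}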
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 000000000000..97ee96f26505
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache, the following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ * vineetg: April 2008
+ * -Added a critical CacheLine flush to copy_to_user_page( ) which
+ * was causing gdbserver to not setup breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+ int len);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+/*
+ * VM callbacks when entire/range of user-space V-P mappings are
+ * torn-down/get-invalidated
+ *
+ * Currently we don't support D$ aliasing configs for our VIPT caches
+ * NOPS for VIPT Cache with non-aliasing D$ configurations only
+ */
+#define flush_cache_dup_mm(mm) /* called on fork */
+#define flush_cache_mm(mm) /* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) \
+ flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len); \
+
+#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 000000000000..10957298b7a3
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
+ * -Insn Scheduling improvements to csum core routines.
+ * = csum_fold( ) largely derived from ARM version.
+ * = ip_fast_csum( ) to have module scheduling
+ * -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ * worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ * -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ * Fold a partial checksum
+ *
+ * The 2 half-words (swords) comprising the 32-bit sum are added, any carry
+ * out of bit 15 is added back, and the final 16-bit result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+ unsigned r = s << 16 | s >> 16; /* ror */
+ s = ~s;
+ s -= r;
+ return s >> 16;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ const void *ptr = iph;
+ unsigned int tmp, tmp2, sum;
+
+ __asm__(
+ " ld.ab %0, [%3, 4] \n"
+ " ld.ab %2, [%3, 4] \n"
+ " sub %1, %4, 2 \n"
+ " lsr.f lp_count, %1, 1 \n"
+ " bcc 0f \n"
+ " add.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ "0: lp 1f \n"
+ " ld.ab %1, [%3, 4] \n"
+ " adc.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ " adc.f %0, %0, %1 \n"
+ "1: adc.f %0, %0, %2 \n"
+ " add.cs %0,%0,1 \n"
+ : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+ : "r"(ihl)
+ : "cc", "lp_count", "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__ __volatile__(
+ " add.f %0, %0, %1 \n"
+ " adc.f %0, %0, %2 \n"
+ " adc.f %0, %0, %3 \n"
+ " adc.f %0, %0, %4 \n"
+ " adc %0, %0, 0 \n"
+ : "+&r"(sum)
+ : "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ "r"(len),
+#else
+ "r"(len << 8),
+#endif
+ "r"(htons(proto))
+ : "cc");
+
+ return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
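The rotate-and-subtract trick used by csum_fold() above is equivalent to the plain "add the two half-words, fold the carry, invert" recipe. A host-side sketch (not part of the patch) comparing the two on a few made-up partial sums:

#include <stdio.h>
#include <stdint.h>

static uint16_t fold_trick(uint32_t s)      /* same steps as csum_fold() above */
{
    uint32_t r = (s << 16) | (s >> 16);     /* rotate by 16 */

    s = ~s;
    s -= r;
    return s >> 16;
}

static uint16_t fold_plain(uint32_t s)      /* the textbook version */
{
    s = (s & 0xffff) + (s >> 16);           /* add the two half-words */
    s = (s & 0xffff) + (s >> 16);           /* fold any carry back in */
    return ~s;
}

int main(void)
{
    uint32_t samples[] = { 0x00000000, 0x0001fffe, 0x1234abcd, 0xffff0001 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("0x%08x -> trick 0x%04x, plain 0x%04x\n",
               samples[i], (unsigned)fold_trick(samples[i]),
               (unsigned)fold_plain(samples[i]));
    return 0;
}

Both versions agree for every input; for example 0xffff0001 folds to 0xfffe either way.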
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644
index 000000000000..bf9d29f5bd53
--- /dev/null
+++ b/arch/arc/include/asm/clk.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still a better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+ return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..03cd6894855d
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " brne %0, %2, 2f \n"
+ " scond %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "ir"(expected),
+ "r"(new) /* can't be "ir". scond can't take limm for "b" */
+ : "cc");
+
+ return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long flags;
+ int prev;
+ volatile unsigned long *p = ptr;
+
+ atomic_ops_lock(flags);
+ prev = *p;
+ if (prev == expected)
+ *p = new;
+ atomic_ops_unlock(flags);
+ return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to guarantee semantics.
+ * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
+ * which also happen to be atomic_ops_lock.
+ *
+ * Thus, despite being semantically different, the implementation of
+ * atomic_cmpxchg() is the same as cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ int size)
+{
+ extern unsigned long __xchg_bad_pointer(void);
+
+ switch (size) {
+ case 4:
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r"(val)
+ : "r"(ptr)
+ : "memory");
+
+ return val;
+ }
+ return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+ sizeof(*(ptr))))
+
+/*
+ * On ARC700, the EX insn is inherently atomic, so by default a "vanilla" xchg()
+ * doesn't require any locking. However there's a quirk.
+ * ARC lacks native CMPXCHG, so it is emulated (see above) using external locking -
+ * incidentally it "reuses" the same atomic_ops_lock used by the atomic APIs.
+ * Now, the llist code uses cmpxchg() and xchg() on the same data, so xchg() needs
+ * to abide by the same serializing rules, thus ending up using atomic_ops_lock too.
+ *
+ * This however is only relevant if SMP and/or ARC lacks LLSC
+ * if (UP or LLSC)
+ * xchg doesn't need serialization
+ * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
+ * xchg needs serialization
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with) \
+({ \
+ unsigned long flags; \
+ typeof(*(ptr)) old_val; \
+ \
+ atomic_ops_lock(flags); \
+ old_val = _xchg(ptr, with); \
+ atomic_ops_unlock(flags); \
+ old_val; \
+})
+
+#else
+
+#define xchg(ptr, with) _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that the following definition
+ * is incorrect. But here's the rationale:
+ * SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ * LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
+ * is natively "SMP safe", no serialization required).
+ * UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
+ * could clobber them. atomic_xchg() itself would be 1 insn, so it
+ * can't be clobbered by others. Thus no serialization required when
+ * atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
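The comment above notes that llist-style code uses cmpxchg() and xchg() on the same location, which is what forces xchg() onto atomic_ops_lock in the SMP-without-LLSC case. For context, here is a user-space sketch of the cmpxchg() retry pattern involved (a hypothetical lock-free push, not the kernel's llist code), with GCC's __sync_val_compare_and_swap() standing in for cmpxchg().

#include <stdio.h>

struct node {
    struct node *next;
    int val;
};

static struct node *head;       /* shared list head */

static void push(struct node *n)
{
    struct node *old;

    do {                        /* classic cmpxchg retry loop */
        old = head;
        n->next = old;
    } while (__sync_val_compare_and_swap(&head, old, n) != old);
}

int main(void)
{
    struct node a = { .val = 1 }, b = { .val = 2 };

    push(&a);
    push(&b);
    for (struct node *p = head; p; p = p->next)
        printf("%d\n", p->val); /* prints 2 then 1 */
    return 0;
}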
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 000000000000..87b918585c4a
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ * - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
new file mode 100644
index 000000000000..6097bb439cc5
--- /dev/null
+++ b/arch/arc/include/asm/defines.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_DEFINES_H__
+#define __ARC_ASM_DEFINES_H__
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+#ifdef CONFIG_ARC_HAS_LLSC
+#define __CONFIG_ARC_HAS_LLSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_LLSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_SWAPE
+#define __CONFIG_ARC_HAS_SWAPE_VAL 1
+#else
+#define __CONFIG_ARC_HAS_SWAPE_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_RTSC
+#define __CONFIG_ARC_HAS_RTSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_RTSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_MMU_SASID
+#define __CONFIG_ARC_MMU_SASID_VAL 1
+#else
+#define __CONFIG_ARC_MMU_SASID_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+#define __CONFIG_ARC_HAS_ICACHE 1
+#else
+#define __CONFIG_ARC_HAS_ICACHE 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+#define __CONFIG_ARC_HAS_DCACHE 1
+#else
+#define __CONFIG_ARC_HAS_DCACHE 0
+#endif
+
+#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 000000000000..442ce5d0f709
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using pre computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ * -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ * -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h> /* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__(
+ "1: sub.f %0, %0, 1 \n"
+ " jpnz 1b \n"
+ : "+r"(loops)
+ :
+ : "cc");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ * -we have precomputed @loops_per_jiffy
+ * -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ * -Mathematically if we multiply and divide a number by same value the
+ * result remains unchanged: In this case, we use 2^32
+ * -> (loops_per_N_usec * 2^32 ) / 2^32
+ * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ * -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ * -Divide by 2^32 is very simply right shift by 32
+ * -We simply need to ensure that the multiply per above eqn happens in
+ * 64-bit precision (if CPU doesn't support it - gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long loops;
+
+ /* (long long) cast ensures 64 bit MPY - real or emulated
+ * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+ */
+ loops = ((long long)(usecs * 4295 * HZ) *
+ (long long)(loops_per_jiffy)) >> 32;
+
+ __delay(loops);
+}
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+ : __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
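The derivation above can be checked numerically: 2^32 / 10^6 is roughly 4295, so the divide-by-10^6 becomes a multiply by 4295 plus a right shift by 32. A standalone sketch (not part of the patch) with made-up HZ and loops_per_jiffy values:

#include <stdio.h>

#define HZ 100UL

int main(void)
{
    unsigned long loops_per_jiffy = 50000;  /* made-up calibration result */
    unsigned long usecs = 250;

    /* same multiply-and-shift as __udelay() above */
    unsigned long approx = ((unsigned long long)(usecs * 4295 * HZ) *
                            loops_per_jiffy) >> 32;

    /* the exact figure the approximation replaces */
    unsigned long exact = (unsigned long long)loops_per_jiffy * HZ * usecs
                            / 1000000;

    printf("approx=%lu exact=%lu\n", approx, exact);    /* 1250 and 1250 here */
    return 0;
}

Since 4295 rounds 2^32 / 10^6 up, the approximation never comes out below the exact loop count, i.e. it errs on the side of a slightly longer delay.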
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 000000000000..f1cce3d059a1
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+ op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+ op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+ op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+ op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+ op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+ op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+ op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+ noflow,
+ direct_jump,
+ direct_call,
+ indirect_jump,
+ indirect_call,
+ invalid_instr
+};
+
+#define IS_BIT(word, n) ((word) & (1<<n))
+#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
+
+#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
+#define MINOR_OPCODE(word) (BITS((word), 16, 21))
+#define FIELD_A(word) (BITS((word), 0, 5))
+#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
+ (BITS((word), 24, 26)))
+#define FIELD_C(word) (BITS((word), 6, 11))
+#define FIELD_u6(word) FIELD_C(word)
+#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
+ BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macros need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
+ BITS(word, 16, 23)), 9)
+#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
+ (BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
+ BITS((word), 8, 10))
+#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word) (BITS((word), 0, 4))
+#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L 0x00000100
+#define REG_LIMM 62
+
+struct disasm_state {
+ /* generic info */
+ unsigned long words[2];
+ int instr_len;
+ int major_opcode;
+ /* info for branch/jump */
+ int is_branch;
+ int target;
+ int delay_slot;
+ enum flow flow;
+ /* info for load/store */
+ int src1, src2, src3, dest, wb_reg;
+ int zz, aa, x, pref, di;
+ int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+ if (IS_BIT(value, (bits - 1)))
+ value |= (0xffffffff << bits);
+
+ return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+ uint16_t word = *((uint16_t *)addr);
+ int opcode = (word >> 11) & 0x1F;
+ return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+ *cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs);
+
+#endif /* __ARC_DISASM_H__ */
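To make the field-extraction macros above concrete, here is a standalone sketch of the same BITS()/sign_extend() logic in plain C; the instruction word used is arbitrary, not a decoded ARC opcode:

    #include <stdio.h>
    #include <stdint.h>

    /* mask out bits s..e of word, like the BITS() macro above */
    static uint32_t bits(uint32_t word, int s, int e)
    {
            return (word >> s) & ~(~0u << (e - s + 1));
    }

    static int32_t sext(uint32_t value, int width)
    {
            if (value & (1u << (width - 1)))
                    value |= ~0u << width;
            return (int32_t)value;
    }

    int main(void)
    {
            uint32_t word = 0x2730110f;     /* arbitrary example word */

            printf("major opcode: %u\n", bits(word, 27, 31));
            /* s12 immediate: high 6 bits in [0:5], low 6 bits in [6:11] */
            printf("s12 field   : %d\n",
                   sext((bits(word, 0, 5) << 6) | bits(word, 6, 11), 12));
            return 0;
    }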
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..31f77aec0823
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,221 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
+/*
+ * The dma_map_* API takes cpu addresses, i.e. kernel logical addresses in the
+ * untranslated (0x8000_0000 based) address space. The dma address (bus addr)
+ * ideally needs to be 0x0000_0000 based, hence these glue routines.
+ * However, given that intermediate bus bridges can ignore the high bit, we can
+ * get away with these routines being no-ops.
+ * If a platform/device comes up which strictly requires 0 based bus addresses
+ * (e.g. AHB-PCI bridge on Angel4 board), then it can provide its own versions
+ */
+#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
+#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
+
+#else
+#include <plat/dma_addr.h>
+#endif
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * CPU accesses the page via its normal paddr, thus it needs to be explicitly
+ * made consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir) \
+do { \
+ if (__builtin_constant_p(dir)) \
+ __inline_dma_cache_sync(addr, sz, dir); \
+ else \
+ __arc_dma_cache_sync(addr, sz, dir); \
+} \
+while (0);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ _dma_cache_sync((unsigned long)cpu_addr, size, dir);
+ return plat_kernel_addr_to_dma(dev, cpu_addr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+#endif
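A hypothetical driver fragment illustrating the intended call pattern for the streaming API above (kernel context assumed; the device and the my_hw_kick() helper are made-up names, a usage sketch rather than code from the port):

    extern void my_hw_kick(dma_addr_t addr, size_t len);    /* hypothetical */

    static int my_dev_send(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* write back the CPU's dirty cache lines before the device reads */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -EIO;

            my_hw_kick(handle, len);        /* device DMAs from @handle */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }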
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 000000000000..ca7c45181de9
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 000000000000..f4c8d36ebecb
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT 93
+
+/* ARC Relocations (kernel Modules only) */
+#define R_ARC_32 0x4
+#define R_ARC_32_ME 0x1B
+#define R_ARC_S25H_PCREL 0x10
+#define R_ARC_S25W_PCREL 0x11
+
+/* To set parameters in the core dumps */
+#define ELF_ARCH EM_ARCOMPACT
+#define ELF_CLASS ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ * - we don't load something for the wrong architecture
+ * - userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#endif
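For reference, the elf_check_arch() declared above boils down to a machine-type (and syscall ABI) check implemented in arch/arc/kernel; a hedged sketch of the minimal idea only, not the actual implementation:

    /* sketch only: the real check also validates the syscall ABI version
     * recorded in the ELF header's e_flags
     */
    static int elf_check_arch_sketch(const struct elf32_hdr *x)
    {
            return x->e_machine == EM_ARCOMPACT;
    }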
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 000000000000..23daa326fc9b
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ * Stack switching code can no longer rely on the fact that
+ * if we are NOT in user mode, the stack is already switched to kernel mode.
+ * e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
+ * its prologue, including the stack switch from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we also need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather than "SET". Actually "SET" clears the ZOL context
+ *
+ * Vineetg: May 5th 2008
+ * -Modified CALLEE_REG save/restore macros to handle the fact that
+ * r25 contains the kernel current task ptr
+ * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ * address write-back load ld.ab instead of separate ld/add instructions
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#ifdef __ASSEMBLY__
+#include <asm/unistd.h> /* For NR_syscalls definition */
+#include <asm/asm-offsets.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h> /* For VMALLOC_START */
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+/*--------------------------------------------------------------
+ * Save caller saved registers (scratch registers) ( r0 - r12 )
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLER_SAVED
+ st.a r0, [sp, -4]
+ st.a r1, [sp, -4]
+ st.a r2, [sp, -4]
+ st.a r3, [sp, -4]
+ st.a r4, [sp, -4]
+ st.a r5, [sp, -4]
+ st.a r6, [sp, -4]
+ st.a r7, [sp, -4]
+ st.a r8, [sp, -4]
+ st.a r9, [sp, -4]
+ st.a r10, [sp, -4]
+ st.a r11, [sp, -4]
+ st.a r12, [sp, -4]
+.endm
+
+/*--------------------------------------------------------------
+ * Restore caller saved registers (scratch registers)
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLER_SAVED
+ ld.ab r12, [sp, 4]
+ ld.ab r11, [sp, 4]
+ ld.ab r10, [sp, 4]
+ ld.ab r9, [sp, 4]
+ ld.ab r8, [sp, 4]
+ ld.ab r7, [sp, 4]
+ ld.ab r6, [sp, 4]
+ ld.ab r5, [sp, 4]
+ ld.ab r4, [sp, 4]
+ ld.ab r3, [sp, 4]
+ ld.ab r2, [sp, 4]
+ ld.ab r1, [sp, 4]
+ ld.ab r0, [sp, 4]
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * on kernel stack.
+ * User mode callee regs need to be saved in case of
+ * -fork and friends for replicating from parent to child
+ * -before going into do_signal( ) for ptrace/core-dump
+ * Special case handling is required for r25 in case it is used by kernel
+ * for caching the task ptr. The low level exception/ISR code saves user mode
+ * r25 into task->thread.user_r25. So it needs to be retrieved from there and
+ * saved onto the kernel stack with the rest of the callee reg-file
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Retrieve orig r25 and save it on stack
+ ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
+ st.a r12, [sp, -4]
+#else
+ st.a r25, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" callee_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * kernel mode callee regs need to be saved in case of a context switch
+ * If r25 is used for caching the task pointer then it need not be saved
+ * as it can be re-created from the current task global
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ sub sp, sp, 8
+#else
+ st.a r25, [sp, -4]
+ sub sp, sp, 4
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_KERNEL:
+ * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
+ * This is reverse of SAVE_CALLEE_SAVED,
+ *
+ * NOTE:
+ * Ideally this should only be called in switch_to for loading
+ * switched-IN task's CALLEE Reg File.
+ * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
+ * which simply pops the stack w/o touching regs.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */
+#else
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_USER:
+ * This is called after do_signal where tracer might have changed callee regs
+ * thus we need to restore the reg file.
+ * Special case handling is required for r25 in case it is used by kernel
+ * for caching the task ptr. Ptrace would have modified the on-kernel-stack
+ * value of r25, which needs to be shoved back into task->thread.user_r25,
+ * from where the low level exception/ISR return code will retrieve it along
+ * with the rest of the callee reg-file.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld.ab r12, [sp, 4]
+ st r12, [r25, TASK_THREAD + THREAD_USER_R25]
+#else
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+ add sp, sp, 14 * 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore User mode r25 saved in task_struct->thread.user_r25
+ *-------------------------------------------------------------*/
+.macro RESTORE_USER_R25
+ ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of its kernel mode stack
+ * tsk->thread_info is really a PAGE: thread_info sits at its bottom while
+ * the stack starts at the far end and grows back towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+ /* Get task->thread_info (this is essentially start of a PAGE) */
+ ld \out, [\tsk, TASK_THREAD_INFO]
+
+ /* Go to end of page where stack begins (grows upwards) */
+ add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */
+
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit : SP is set to kernel mode stack pointer
+ * If CURR_IN_REG, r25 set to "current" task pointer
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+ /* User Mode when this happened ? Yes: Proceed to switch stack */
+ bbit1 r9, STATUS_U_BIT, 88f
+
+ /* OK we were already in kernel mode when this event happened, thus can
+ * assume SP is kernel mode SP. _NO_ need to do any stack switching
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ /* However....
+ * If Level 2 Interrupts are enabled, we may end up with a corner case:
+ * 1. User Task executing
+ * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+ * 3. But before it could switch SP from USER to KERNEL stack
+ * a L2 IRQ "Interrupts" L1
+ * That way although the L2 IRQ happened in Kernel mode, the stack is still
+ * not switched.
+ * To handle this, we may need to switch stack even if in kernel mode
+ * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+ */
+ brlo sp, VMALLOC_START, 88f
+
+ /* TODO: vineetg:
+ * We need to be a bit more cautious here. What if a kernel bug in
+ * the L1 ISR caused SP to go wacko (some small value which looks like
+ * a USER stk) and then we take an L2 ISR.
+ * The brlo above alone would treat it as a valid L1-L2 scenario
+ * instead of shouting aloud.
+ * The only feasible way is to make sure this L2 happened in the
+ * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
+ * L1 ISR before it switches stack
+ */
+
+#endif
+
+ /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
+ * safe-keeping not really needed, but it keeps the epilogue code
+ * (SP restore) simpler/uniform.
+ */
+ b.d 77f
+
+ st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+ GET_CURR_TASK_ON_CPU r9
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+ /* If current task pointer cached in r25, time to
+ * -safekeep USER r25 in task->thread_struct->user_r25
+ * -load r25 with current task ptr
+ */
+ st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
+ mov r25, r9
+#endif
+
+ /* With current tsk in r9, get its kernel mode stack base */
+ GET_TSK_STACK_BASE r9, r9
+
+#ifdef PT_REGS_CANARY
+ st 0xabcdabcd, [r9, 0]
+#endif
+
+ /* Save Pre Intr/Exception User SP on kernel stack */
+ st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
+
+ /* CAUTION:
+ * SP should be set at the very end when we are done with everything
+ * In case of 2 levels of interrupt we depend on value of SP to assume
+ * that everything else is done (loading r25 etc)
+ */
+
+ /* set SP to point to kernel mode stack */
+ mov sp, r9
+
+77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN reg
+
+ ld \reg, [sp, PT_status32]
+ bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
+ bset \reg, \reg, STATUS_L_BIT
+ sr \reg, [erstatus]
+ mov \reg, 55f
+ sr \reg, [eret]
+
+ rtie
+55:
+.endm
+
+/*
+ * @reg [OUT] &thread_info of "current"
+ */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ and \reg, sp, ~(THREAD_SIZE - 1)
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS reg
+ GET_CURR_THR_INFO_FROM_SP \reg
+ ld \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception Prologue, a core reg is temporarily needed to
+ * code the rest of the prologue (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of ptregs.
+ *-------------------------------------------------------------*/
+.macro EXCPN_PROLOG_FREEUP_REG reg
+#ifdef CONFIG_SMP
+ sr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ st \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+.macro EXCPN_PROLOG_RESTORE_REG reg
+#ifdef CONFIG_SMP
+ lr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
+ * Requires SP to be already switched to kernel mode Stack
+ * sp points to the next free element on the stack at exit of this macro.
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ * Note that syscalls are implemented via TRAP which is also an exception
+ * from CPU's point of view
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_EXCEPTION marker
+
+ st \marker, [sp, 8]
+ st r0, [sp, 4] /* orig_r0, needed only for sys calls */
+
+ /* Restore r9 used to code the early prologue */
+ EXCPN_PROLOG_RESTORE_REG r9
+
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ lr r9, [eret]
+ st.a r9, [sp, -4]
+ lr r9, [erstatus]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [erbta]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbeef
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for exceptions
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_SYS
+ SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for sys calls
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_TRAP
+ /*
+ * Setup pt_regs->orig_r8.
+ * Encode syscall number (r8) in upper short word of event type (r9)
+ * N.B. #1: This is already endian safe (see ptrace.h)
+ * #2: Only r9 can be used as scratch as it is already clobbered
+ * and its contents are no longer needed by the latter part
+ * of exception prologue
+ */
+ lsl r9, r8, 16
+ or r9, r9, orig_r8_IS_SCALL
+
+ SAVE_ALL_EXCEPTION r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way, interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro RESTORE_ALL_SYS
+
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [erbta]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [erstatus]
+ ld.ab r9, [sp, 4]
+ sr r9, [eret]
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save all registers used by interrupt handlers.
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_INT1
+
+ /* restore original r9 , saved in int1_saved_reg
+ * It will be saved on stack in macro: SAVE_CALLER_SAVED
+ */
+#ifdef CONFIG_SMP
+ lr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld r9, [@int1_saved_reg]
+#endif
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink1, [sp, -4]
+ lr r9, [status32_l1]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l1]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee1
+ st r9, [sp, -4]
+#endif
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+.macro SAVE_ALL_INT2
+
+ /* TODO-vineetg: on SMP we can't use a global, nor can we use
+ * SCRATCH0 as we do for int1, because while int1 is using
+ * it, an int2 can come in
+ */
+ /* restore original r9, saved in int2_saved_reg */
+ ld r9, [@int2_saved_reg]
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink2, [sp, -4]
+ lr r9, [status32_l2]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l2]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee2
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way, interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+
+.macro RESTORE_ALL_INT1
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4] /* Actual reg file */
+ sr r9, [bta_l1]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l1]
+ ld.ab r9, [sp, 4]
+ mov ilink1, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+.macro RESTORE_ALL_INT2
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [bta_l2]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l2]
+ ld.ab r9, [sp, 4]
+ mov ilink2, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+
+.endm
+
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ lsr \reg, \reg, 8
+ bmsk \reg, \reg, 7
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro GET_CURR_TASK_ON_CPU reg
+ GET_CPU_ID \reg
+ ld.as \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ GET_CPU_ID \tmp
+ add2 \tmp, @_current_task, \tmp
+ st \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+
+.endm
+
+
+#else /* Uniprocessor implementation of macros */
+
+.macro GET_CURR_TASK_ON_CPU reg
+ ld \reg, [@_current_task]
+.endm
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ st \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ * -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ GET_CURR_TASK_ON_CPU \reg
+ add \reg, \reg, \off
+.endm
+
+#endif /* CONFIG_ARC_CURR_IN_REG */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARC_ENTRY_H */
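The GET_CURR_THR_INFO_FROM_SP macro above has a well-known C-level counterpart: because the kernel stack and thread_info share one THREAD_SIZE-aligned block, masking the stack pointer recovers thread_info. A sketch under kernel-context assumptions (read_sp() is a hypothetical stand-in for however the current stack pointer is fetched):

    extern unsigned long read_sp(void);         /* hypothetical helper */

    static inline struct thread_info *current_thread_info_sketch(void)
    {
            unsigned long sp = read_sp();

            /* thread_info lives at the base of the aligned stack block */
            return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }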
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 000000000000..28abc6905e07
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16b */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 000000000000..4dc64ddebece
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ __asm__ __volatile__( \
+ "1: ld %1, [%2] \n" \
+ insn "\n" \
+ "2: st %0, [%2] \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+/* Compare-xchg with preemption disabled.
+ * Notes:
+ * -Best-Effort: the exchange happens only if the compare succeeds.
+ * If the compare fails, it simply returns, leaving retry/looping to upper layers
+ * -successful cmp-xchg: returns orig value in @addr (same as cmp val)
+ * -compare fails: returns orig value in @addr
+ * -user access r/w fails: returns -EFAULT
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ u32 newval)
+{
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ /* TBD : can use llock/scond */
+ __asm__ __volatile__(
+ "1: ld %0, [%3] \n"
+ " brne %0, %1, 3f \n"
+ "2: st %2, [%3] \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: mov %0, %4 \n"
+ " b 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 2b, 4b \n"
+ " .previous\n"
+ : "=&r"(val)
+ : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ *uval = val;
+ return val;
+}
+
+#endif
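To see how futex_atomic_op_inuser() above unpacks its argument, here is a standalone decode of the encoded_op word: a 3-bit op (plus the oparg-shift flag in bit 31), a 4-bit cmp, and two 12-bit sign-extended operands. The sample value is arbitrary:

    #include <stdio.h>

    int main(void)
    {
            int encoded_op = (1 << 28) | (3 << 24) | (0x010 << 12) | 0x004;

            int op     = (encoded_op >> 28) & 7;   /* FUTEX_OP_ADD here */
            int cmp    = (encoded_op >> 24) & 15;  /* FUTEX_OP_CMP_GE here */
            int oparg  = (encoded_op << 8) >> 20;  /* bits 12..23, sign extended */
            int cmparg = (encoded_op << 20) >> 20; /* bits 0..11, sign extended */

            printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
            return 0;
    }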
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 000000000000..473424d7528b
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+#define PCI_IOBASE ((void __iomem *)0)
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz) ioremap(phy, sz)
+#define ioremap_wc(phy, sz) ioremap(phy, sz)
+
+/* Change struct page to physical address */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 b;
+
+ __asm__ __volatile__(
+ " ldb%U1 %0, %1 \n"
+ : "=r" (b)
+ : "m" (*(volatile u8 __force *)addr)
+ : "memory");
+
+ return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 s;
+
+ __asm__ __volatile__(
+ " ldw%U1 %0, %1 \n"
+ : "=r" (s)
+ : "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+ return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 w;
+
+ __asm__ __volatile__(
+ " ld%U1 %0, %1 \n"
+ : "=r" (w)
+ : "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+ return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stb%U1 %0, %1 \n"
+ :
+ : "r" (b), "m" (*(volatile u8 __force *)addr)
+ : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stw%U1 %0, %1 \n"
+ :
+ : "r" (s), "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " st%U1 %0, %1 \n"
+ :
+ : "r" (w), "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+}
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
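A hypothetical usage sketch for the accessors above (kernel context assumed): map a peripheral's register window with ioremap() and access registers through the __raw_* helpers. The physical base address and register offsets are made up for illustration:

    static void mmio_poke_example(void)
    {
            void __iomem *regs = ioremap(0xf0000000, 0x1000);   /* assumed base */

            if (!regs)
                    return;

            __raw_writel(0x1, regs + 0x10);         /* hypothetical control reg */
            (void)__raw_readl(regs + 0x14);         /* hypothetical status reg */

            iounmap(regs);
    }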
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 000000000000..4c588f9820cf
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_IRQS 32
+
+/* Platform Independent IRQs */
+#define TIMER0_IRQ 3
+#define TIMER1_IRQ 4
+
+#include <asm-generic/irq.h>
+
+extern void __init arc_init_IRQ(void);
+extern int __init get_hw_config_num_irq(void);
+
+void __cpuinit arc_local_timer_setup(unsigned int cpu);
+
+#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 000000000000..ccd84806b62f
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ * -Remove explicit mov of current status32 into reg, that is not needed
+ * -Use BIC insn instead of INVERTED + AND
+ * -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#ifdef __KERNEL__
+
+#include <asm/arcregs.h>
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long temp, flags;
+
+ __asm__ __volatile__(
+ " lr %1, [status32] \n"
+ " bic %0, %1, %2 \n"
+ " and.f 0, %1, %2 \n"
+ " flag.nz %0 \n"
+ : "=r"(temp), "=r"(flags)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "cc");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+ __asm__ __volatile__(
+ " flag %0 \n"
+ :
+ : "r"(flags));
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " and %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp));
+
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ | STATUS_E2_MASK
+#endif
+ ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_mask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static inline void arch_unmask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb |= (1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+#else
+
+.macro IRQ_DISABLE scratch
+ lr \scratch, [status32]
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_DISABLE_SAVE scratch, save
+ lr \scratch, [status32]
+ mov \save, \scratch /* Make a copy */
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_ENABLE scratch
+ lr \scratch, [status32]
+ or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* KERNEL */
+
+#endif
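The primitives above follow the usual pattern: save-and-disable around a short critical section, then restore whatever interrupt state was in effect before. A sketch of a direct caller (generic code would normally go through the local_irq_save()/local_irq_restore() wrappers):

    static void critical_section_sketch(void)
    {
            unsigned long flags;

            flags = arch_local_irq_save();      /* disable E1/E2, remember state */
            /* ... touch data shared with an interrupt handler ... */
            arch_local_irq_restore(flags);      /* put STATUS32 flags back */
    }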
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 000000000000..3fbe6c472c0a
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_IERR,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 000000000000..f3c4934f0ca9
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,61 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/user.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS 39
+
+#define BREAK_INSTR_SIZE 2
+#define CACHE_FLUSH_IS_SAFE 1
+#define NUMREGBYTES (GDB_MAX_REGS * 4)
+#define BUFMAX 2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ("trap_s 0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs, int param);
+
+enum arc700_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+ _BTA = 27,
+ _LP_START = 28,
+ _LP_END = 29,
+ _LP_COUNT = 30,
+ _STATUS32 = 31,
+ _BLINK = 32,
+ _FP = 33,
+ __SP = 34,
+ _EFA = 35,
+ _RET = 36,
+ _ORIG_R8 = 37,
+ _STOP_PC = 38
+};
+
+#else
+static inline void kgdb_trap(struct pt_regs *regs, int param)
+{
+}
+#endif
+
+#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 000000000000..4d9c211fce70
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE 8
+#define MAX_STACK_SIZE 64
+
+struct arch_specific_insn {
+ int is_short;
+ kprobe_opcode_t *t1_addr, *t2_addr;
+ kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p) do { } while (0)
+
+#define kretprobe_blacklist_size 0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+ struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs);
+#else
+static void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 000000000000..0283e9e44e0d
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+/* Can't use the ENTRY macro in linux/linkage.h
+ * gas considers ';' as a comment char rather than a statement separator
+ */
+.macro ARC_ENTRY name
+ .global \name
+ .align 4
+ \name:
+.endm
+
+.macro ARC_EXIT name
+#define ASM_PREV_SYM_ADDR(name) .-##name
+ .size \ name, ASM_PREV_SYM_ADDR(\name)
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+ .section .data.arcfp
+#else
+ .section .data
+#endif
+ .global \nm
+.endm
+
+/* annotation for code we want in ICCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+ .section .text.arcfp, "ax",@progbits
+#else
+ .section .text, "ax",@progbits
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
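An illustrative use of the C-side annotations above: placing a hot routine and its data into the ICCM/DCCM-backed sections when the corresponding configs are enabled (the names are made up):

    static int fast_hits __arcfp_data;          /* .data.arcfp when DCCM enabled */

    static void __arcfp_code fast_path(void)    /* .text.arcfp when ICCM enabled */
    {
            fast_hits++;
    }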
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 000000000000..9998dc846ebb
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ * Provided by each ARC board using MACHINE_START()/MACHINE_END(), so that
+ * a multi-platform kernel is built with an array of such descriptors.
+ * We extend the early DT scan to also match the DT's "compatible" string
+ * against the @dt_compat of all such descriptors, and the one with the
+ * highest "DT score" is selected as the global @machine_desc.
+ *
+ * @name: Board/SoC name
+ * @dt_compat: Array of device tree 'compatible' strings
+ * (XXX: although only 1st entry is looked at)
+ * @init_early: Very early callback [called from setup_arch()]
+ * @init_irq: setup external IRQ controllers [called from init_IRQ()]
+ * @init_smp: for each CPU (e.g. setup IPI)
+ * [(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_time: platform specific clocksource/clockevent registration
+ * [called from time_init()]
+ * @init_machine: arch initcall level callback (e.g. populate static
+ * platform devices or parse Devicetree)
+ * @init_late: Late initcall level callback
+ *
+ */
+struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+
+ void (*init_early)(void);
+ void (*init_irq)(void);
+#ifdef CONFIG_SMP
+ void (*init_smp)(unsigned int);
+#endif
+ void (*init_time)(void);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+ /* the default machine is the last one linked in */
+ if (__arch_info_end - 1 < __arch_info_begin)
+ return NULL;
+ return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name) \
+static const struct machine_desc __mach_desc_##_type \
+__used \
+__attribute__((__section__(".arch.info.init"))) = { \
+ .name = _name,
+
+#define MACHINE_END \
+};
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void __init copy_devtree(void);
+
+#endif
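A sketch of how a board file is expected to use the machine_desc machinery above; the compatible string and callback are illustrative, not tied to a particular shipping platform:

    static const char *my_board_compat[] = { "snps,my-board", NULL };

    static void __init my_board_init_irq(void)  /* hypothetical */
    {
            /* register the board's interrupt controller(s) here */
    }

    MACHINE_START(MY_BOARD, "my-board")
            .dt_compat  = my_board_compat,
            .init_irq   = my_board_init_irq,
    MACHINE_END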
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 000000000000..56b02320f1a9
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long asid; /* Pvt Addr-Space ID for mm */
+#ifdef CONFIG_ARC_TLB_DBG
+ struct task_struct *tsk;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..0d71fb11b57c
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Refactored get_new_mmu_context( ) to only handle live-mm.
+ * retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ * -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/* ARC700 ASID Management
+ *
+ * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
+ * with the same vaddr (different tasks) to co-exist. This provides for
+ * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cache.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * For book-keeping, Linux uses a couple of data-structures:
+ * -mm_struct has an @asid field to keep a note of the task's ASID (needed,
+ * say, at the time of switch_mm( ))
+ * -An array of mm structs @asid_mm_map[] for the reverse asid->mm mapping:
+ * given an ASID, find the associated mm struct.
+ *
+ * The round-robin allocation algorithm allows for ASID stealing.
+ * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
+ * already assigned to another (switched-out) task. Obviously the prev owner
+ * is marked with an invalid ASID to make it request a new ASID when it
+ * gets scheduled next time. However its TLB entries (with ASID "x") could
+ * exist, which must be cleared before the same ASID is used by the new owner.
+ * Flushing them would be a plausible but costly solution. Instead we enforce
+ * an allocation policy quirk, which ensures that a stolen ASID won't have any
+ * TLB entries associated with it, alleviating the need to flush.
+ * The quirk essentially is to not allow an ASID allocated in a prev cycle
+ * to be used past a roll-over in the next cycle.
+ * When this happens (i.e. task ASID > asid tracker), the task needs to refresh
+ * its ASID, aligning it to the current value of the tracker. If the task
+ * doesn't get scheduled past a roll-over, and hence its ASID is not yet
+ * realigned with the tracker, such an ASID is anyway safely reusable because
+ * it is guaranteed that TLB entries with that ASID won't exist.
+ */
+
+#define FIRST_ASID 0
+#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
+#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
+#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
+
+/* ASID to mm struct mapping */
+extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+extern int asid_cache;
+
+/*
+ * Assign a new ASID to task. If the task already has an ASID, it is
+ * relinquished.
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+ struct mm_struct *prev_owner;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * Relinquish the currently owned ASID (if any).
+ * Doing unconditionally saves a cmp-n-branch; for already unused
+ * ASID slot, the value was/remains NULL
+ */
+ asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+
+ /* move to new ASID */
+ if (++asid_cache > MAX_ASID) { /* ASID roll-over */
+ asid_cache = FIRST_ASID;
+ flush_tlb_all();
+ }
+
+ /*
+ * Is next ASID already owned by some-one else (we are stealing it).
+ * If so, let the orig owner be aware of this, so when it runs, it
+ * asks for a brand new ASID. This would only happen for a long-lived
+ * task with ASID from prev allocation cycle (before ASID roll-over).
+ *
+ * This might look wrong - if we are re-using some other task's ASID,
+ * won't we use its stale TLB entries too. Actually switch_mm( ) takes
+ * care of such a case: it ensures that a task with an ASID from a prev alloc
+ * cycle, when scheduled, will refresh its ASID: see switch_mm( ) below.
+ * The stealing scenario described here will only happen if that task
+ * didn't get a chance to refresh its ASID - implying stale entries
+ * won't exist.
+ */
+ prev_owner = asid_mm_map[asid_cache];
+ if (prev_owner)
+ prev_owner->context.asid = NO_ASID;
+
+ /* Assign new ASID to tsk */
+ asid_mm_map[asid_cache] = mm;
+ mm->context.asid = asid_cache;
+
+#ifdef CONFIG_ARC_TLB_DBG
+ pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
+ " pid:%u, assigned asid:%lu\n",
+ (unsigned int)mm, (unsigned int)prev_owner,
+ (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
+ (mm->context.tsk)->pid, mm->context.asid);
+#endif
+
+ write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context.asid = NO_ASID;
+#ifdef CONFIG_ARC_TLB_DBG
+ mm->context.tsk = tsk;
+#endif
+ return 0;
+}
+
+/* Prepare the MMU for the task: setup the PID reg with the allocated ASID.
+ * If the task doesn't have an ASID (never allocated, or stolen), get a new
+ * ASID.
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+#ifndef CONFIG_SMP
+ /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /*
+ * Get a new ASID if task doesn't have a valid one. Possible when
+ * -task never had an ASID (fresh after fork)
+	 * -its ASID was stolen - past an ASID roll-over.
+	 * -There's a third obscure scenario (if this task is running for the
+	 *  first time after an ASID rollover), where despite having a valid
+	 *  ASID, we force a get for a new ASID - see comments at top.
+ *
+	 * All three scenarios are caught by the single check below: the first
+	 * two leave the task with NO_ASID = 256 while asid_cache is always a
+	 * valid ASID value (0-255); the third leaves it with an ASID from the
+	 * prev cycle, numerically above the restarted asid_cache.
+ */
+ if (next->context.asid > asid_cache) {
+ get_new_mmu_context(next);
+ } else {
+ /*
+		 * XXX: This will never happen given the checks above
+ * BUG_ON(next->context.asid > MAX_ASID);
+ */
+ write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
+ }
+
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ asid_mm_map[mm->context.asid] = NULL;
+ mm->context.asid = NO_ASID;
+
+ local_irq_restore(flags);
+}
+
+/* It seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for a retiring mm. However destroy_context( ) still needs to do that
+ * because between mm_release( ) => deactivate_mm( ) and
+ * mmput( ) => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that the task gets sched-out/in, making its ASID
+ * valid again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifndef CONFIG_SMP
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /* Unconditionally get a new ASID */
+ get_new_mmu_context(next);
+
+}
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 000000000000..518222bb3f8e
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+ void *unw_info;
+ int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 000000000000..a2f88ff9f506
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
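+
+/*
+ * Illustrative comparison of the two generic fast paths, starting from an
+ * unlocked count of 1 (numbers are hypothetical):
+ *   dec based : CPU0 dec -> 0 (owner), CPU1 dec -> -1, CPU2 dec -> -2
+ *               the increasingly negative value roughly tracks contenders
+ *   xchg based: CPU0 xchg(0) sees 1 (owner), CPU1/CPU2 xchg(0) see 0
+ *               the word only ever says "locked", the contender count is lost
+ */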
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 000000000000..bdf546104551
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+/* TBD: for now don't worry about VIPT D$ aliasing */
+#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(addr, vaddr, pg) clear_page(addr)
+#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+#define __pte(x) (x)
+#define __pgprot(x) (x)
+#define pte_pgprot(x) (x)
+
+#endif
+
+#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed
+ * virt here means link-address/program-address as embedded in object code.
+ * So if the kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
+ * the 128th page, and virt_to_page( ) will return the struct page
+ * corresponding to it. mem_map[ ] is an array of struct page for each page
+ * frame in the system.
+ *
+ * Independent of where linux is linked at, link-addr = physical address.
+ * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong in case the kernel is not linked at 0x8000_0000.
+ */
+#define __pa(vaddr) ((unsigned long)vaddr)
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr) \
+ (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
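+
+/*
+ * Worked example (assuming CONFIG_LINUX_LINK_BASE = 0x8000_0000, 8K pages
+ * and flatmem):
+ *   kaddr = 0x8010_0000
+ *   __pa(kaddr)         = 0x8010_0000 (identity, as noted above)
+ *   virt_to_page(kaddr) = mem_map + (0x0010_0000 >> 13) = mem_map + 128
+ *   page_to_pfn() of that page = ARCH_PFN_OFFSET + 128 = 0x40080
+ */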
+
+/* Default Permissions for page, used in mmap.c */
+#ifdef CONFIG_ARC_STACK_NONEXEC
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#else
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif
+
+#define WANT_PAGE_VIRTUAL 1
+
+#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 000000000000..115ad96480e6
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 000000000000..36a9f20c21a3
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ * -"/proc/meminfo | grep PageTables" kept on increasing
+ * Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ * -Variable pg-sz means that Page Tables could be variable sized themselves
+ * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ * -Page Table size capped to max 1 page to save memory - hence verified.
+ * -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ * -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ * -Switched pgtable_t from being struct page * to unsigned long
+ *  =Needed so that the Page Table allocator (pte_alloc_one) is not forced
+ *   to deal with struct page. That way in future we can make it allocate
+ *   multiple Pg Tbls in one Page Frame
+ *  =sweet side effect is avoiding calls to ugly page_address( ) from the
+ *   pg-tbl allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+ pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+ return get_order(PTRS_PER_PGD * 4);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ int num, num2;
+ pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+ if (ret) {
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+ memzero(ret, num * sizeof(pgd_t));
+
+ num2 = VMALLOC_SIZE / PGDIR_SIZE;
+ memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+ memzero(ret + num + num2,
+ (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
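+
+/*
+ * Worked example of the copy layout above (assuming the default 8K page,
+ * i.e. PGDIR_SIZE = 2M, with TASK_SIZE = 0x6000_0000 and USER_KERNEL_GUTTER
+ * and VMALLOC_SIZE of 0x1000_0000 each, as in processor.h):
+ *   num  = (0x6000_0000 + 0x1000_0000) / 2M = 896 user + gutter slots zeroed
+ *   num2 =  0x1000_0000 / 2M                = 128 vmalloc slots copied from
+ *                                                 swapper_pg_dir (kernel maps)
+ *   remaining 2048 - 896 - 128              = 1024 slots zeroed
+ */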
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+ return get_order(PTRS_PER_PTE * 4);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ __get_order_pte());
+
+ return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pgtable_t pte_pg;
+
+ pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ if (pte_pg) {
+ memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+ pgtable_page_ctor(virt_to_page(pte_pg));
+ }
+
+ return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+ pgtable_page_dtor(virt_to_page(ptep));
+ free_pages(ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache() do { } while (0)
+#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+
+#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 000000000000..b7e36684c091
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ * They are semantically the same although in different contexts
+ * VALID marks a TLB entry exists and it will only happen if PRESENT
+ * - Utilise some unused free bits to confine PTE flags to 12 bits
+ * This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ * -TLB Locking never really existed, except for initial specs
+ * -SILENT_xxx not needed for our port
+ * -Per my request, MMU V3 changes the layout of some of the bits
+ * to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ * -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ * -Switched from 8:11:13 split for page table lookup to 11:8:13
+ * -this speeds up page table allocation itself as we now have to memset 1K
+ * instead of 8k per page table.
+ *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
+ *   need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have a different value in the TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
+ *      in separate PD0 and PD1, which combined form a translation entry)
+ *      while from the PTE perspective, they are 8 and 9 respectively
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ *      (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
+#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
+#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
+#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
+#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
+
+#else
+
+/* PD1 */
+#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
+#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
+
+/* PD0 */
+#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
+#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
+ usable for shared TLB entries (H) */
+
+#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
+
+#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
+#endif
+
+/* Kernel allowed all permissions for all pages */
+#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
+#else
+#define _PAGE_DEF_CACHEABLE (0)
+#endif
+
+/* Helper for every "user" page
+ * -kernel can R/W/X
+ * -by default cached, unless config otherwise
+ * -present in memory
+ */
+#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE __pgprot(___DEF)
+#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_EXECUTE)
+
+#define PAGE_SHARED PAGE_U_W_R
+
+/* While the kernel itself runs out of untranslated address space,
+ * vmalloc/modules use a chunk of translated kernel vaddr space - visible in
+ * all addr spaces, but kernel mode only.
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
+ _PAGE_GLOBAL)
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ * which directly corresponds to PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ * 1. Although ARC700 can do exclusive execute/write protection (meaning R
+ *    can be tracked independent of X/W unlike some other CPUs), still to
+ * keep things consistent with other archs:
+ * -Write implies Read: W => R
+ * -Execute implies Read: X => R
+ *
+ * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ * This is to enable COW mechanism
+ */
+ /* xwr */
+#define __P000 PAGE_U_NONE
+#define __P001 PAGE_U_R
+#define __P010 PAGE_U_R /* Pvt-W => !W */
+#define __P011 PAGE_U_R /* Pvt-W => !W */
+#define __P100 PAGE_U_X_R /* X => R */
+#define __P101 PAGE_U_X_R
+#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
+#define __P111 PAGE_U_X_R /* Pvt-W => !W */
+
+#define __S000 PAGE_U_NONE
+#define __S001 PAGE_U_R
+#define __S010 PAGE_U_W_R /* W => R */
+#define __S011 PAGE_U_W_R
+#define __S100 PAGE_U_X_R /* X => R */
+#define __S101 PAGE_U_X_R
+#define __S110 PAGE_U_X_W_R /* X => R */
+#define __S111 PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ * 32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
+ * -------------------------------------------------------
+ * | | |
+ * | | --> off in page frame
+ * | |
+ * | ---> index into Page Table
+ * |
+ * ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE 9
+#endif
+
+#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#ifdef __ASSEMBLY__
+#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
+#else
+#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
+#endif
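+
+/*
+ * Worked example (assuming the default 8K page, i.e. BITS_IN_PAGE = 13):
+ *   BITS_FOR_PTE = 8  -> PTRS_PER_PTE = 256 entries per page table
+ *   BITS_FOR_PGD = 11 -> PTRS_PER_PGD = 2048 entries in the page directory
+ *   PGDIR_SHIFT  = 21 -> each PGD slot spans PGDIR_SIZE = 2M of vaddr
+ * e.g. vaddr 0x8010_2004 splits as pgd index 0x400, pte index 0x81, offset 0x4
+ */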
+/*
+ * Number of PGD entries a userland program can use.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirements for the lowest virtual address we permit any user
+ * space mapping to be mapped at.
+ */
+#define FIRST_USER_ADDRESS 0
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2 level sys, setup the PGD entry with PTE value */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+ (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+
+#define mk_pte(page, pgprot) \
+({ \
+ pte_t pte; \
+ pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
+ pte; \
+})
+
+/* TBD: Non linear mapping stuff */
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_FILE;
+}
+
+#define PTE_FILE_MAX_BITS 30
+#define pgoff_to_pte(x) __pte(x)
+#define pte_to_pgoff(x) (pte_val(x) >> 2)
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+ __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
+#define pte_offset_map(dir, addr) pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
+#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte) (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arch may cache the pointer to the Page Directory of the "current" task
+ * in an MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc) simply
+ * becomes a register read
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with some mm_struct of a NON "current" task.
+ * Thus use this macro only when you are certain that "current" is current
+ * e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr) \
+({ \
+ pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
+ pgd_base + pgd_index(addr); \
+})
+#else
+#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
+#endif
+
+extern void paging_init(void);
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off) ((swp_entry_t) { \
+ ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
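+
+/*
+ * e.g. (hypothetical) swap type 2, offset 0x1234:
+ *   __swp_entry(2, 0x1234) = 0x0246_8002   ((0x1234 << 13) | 2)
+ *   __swp_type( )   -> 0x0246_8002 & 0x1f  = 2
+ *   __swp_offset( ) -> 0x0246_8002 >> 13   = 0x1234
+ */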
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define kern_addr_valid(addr) (1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 000000000000..5f26b2c1cba0
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ * -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/arcregs.h>	/* for STATUS_E1_MASK et al */
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important so as to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+ unsigned long ksp; /* kernel mode stack pointer */
+ unsigned long callee_reg; /* pointer to callee regs */
+ unsigned long fault_address; /* dbls as brkpt holder as well */
+ unsigned long cause_code; /* Exception Cause Code (ECR) */
+#ifdef CONFIG_ARC_CURR_IN_REG
+ unsigned long user_r25;
+#endif
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *t);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based on non-volatile data which would
+ * otherwise get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
+#else
+#define cpu_relax() do { } while (0)
+#endif
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+
+/*
+ * Whereabouts of a task's sp, fp, blink when it was last seen in kernel mode.
+ * These can't be derived from pt_regs as that would give the corresponding
+ * user-mode values
+ */
+#define KSTK_ESP(tsk) (tsk->thread.ksp)
+#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
+#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * E1,E2 so that Interrupts are enabled in user mode
+ * L set, so Loop inhibited to begin with
+ * lp_start and lp_end seeded with bogus non-zero values so as to easily catch
+ * the ARC700 "sr to lp_start" hardware bug
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->ret = (_pc); \
+ /* Interrupts enabled in User Mode */ \
+ (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
+ | STATUS_E1_MASK | STATUS_E2_MASK; \
+ (_regs)->sp = (_usp); \
+ /* bogus seed values for debugging */ \
+ (_regs)->lp_start = 0x10; \
+ (_regs)->lp_end = 0x80; \
+} while (0)
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead? This macro does not seem to
+ * be used in many places, so this won't be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/* Kernel's Virtual memory area.
+ * Unlike other architectures (MIPS, sh, cris), ARC700 does not have a
+ * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
+ * of the translated bottom 2GB for kernel virtual memory and protect
+ * these pages from user accesses by disabling Ru, Eu and Wu.
+ */
+#define VMALLOC_SIZE (0x10000000) /* 256M */
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+#define VMALLOC_END (PAGE_OFFSET)
+
+/* Most of the architectures seem to be keeping some kind of padding between
+ * userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
+ */
+#define USER_KERNEL_GUTTER 0x10000000
+
+/* User address space:
+ * On ARC700, CPU allows the entire lower half of 32 bit address space to be
+ * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
+ * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
+ * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
+ * Thus total User vaddr space is (0:0x5FFF_FFFF)
+ */
+#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
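+
+/* i.e. with the typical PAGE_OFFSET of 0x8000_0000 and the sizes above:
+ * 0x8000_0000 - 0x1000_0000 - 0x1000_0000 = 0x6000_0000, matching the user
+ * vaddr span (0:0x5FFF_FFFF) noted in the comment above
+ */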
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARC_PROCESSOR_H */
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
new file mode 100644
index 000000000000..692d0d0789a7
--- /dev/null
+++ b/arch/arc/include/asm/prom.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_PROM_H_
+#define _ASM_ARC_PROM_H_
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 000000000000..8ae783d20a81
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+struct pt_regs {
+ /*
+ * 1 word gutter after reg-file has been saved
+	 * Technically not needed, since SP always points to a "full" location
+ * (vs. "empty"). But pt_regs is shared with tools....
+ */
+ long res;
+
+ /* Real registers */
+ long bta; /* bta_l1, bta_l2, erbta */
+ long lp_start;
+ long lp_end;
+ long lp_count;
+ long status32; /* status32_l1, status32_l2, erstatus */
+ long ret; /* ilink1, ilink2 or eret */
+ long blink;
+ long fp;
+ long r26; /* gp */
+ long r12;
+ long r11;
+ long r10;
+ long r9;
+ long r8;
+ long r7;
+ long r6;
+ long r5;
+ long r4;
+ long r3;
+ long r2;
+ long r1;
+ long r0;
+ long sp; /* user/kernel sp depending on where we came from */
+ long orig_r0;
+
+	/* to distinguish between exception, syscall and irq */
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ /* so that assembly code is same for LE/BE */
+ unsigned long orig_r8:16, event:16;
+#else
+ unsigned long event:16, orig_r8:16;
+#endif
+ long orig_r8_word;
+ };
+};
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+ long res; /* Again this is not needed */
+ long r25;
+ long r24;
+ long r23;
+ long r22;
+ long r21;
+ long r20;
+ long r19;
+ long r18;
+ long r17;
+ long r16;
+ long r15;
+ long r14;
+ long r13;
+};
+
+#define instruction_pointer(regs) ((regs)->ret)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({ unsigned int sp; \
+ if (user_mode(regs)) \
+ sp = (regs)->sp;\
+ else \
+ sp = -1; \
+ sp; \
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL)
+#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
+
+#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED)
+
+#define current_pt_regs() \
+({ \
+ /* open-coded current_thread_info() */ \
+ register unsigned long sp asm ("sp"); \
+ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
+ (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define orig_r8_IS_SCALL 0x0001
+#define orig_r8_IS_SCALL_RESTARTED 0x0002
+#define orig_r8_IS_BRKPT 0x0004
+#define orig_r8_IS_EXCPN 0x0004
+#define orig_r8_IS_IRQ1 0x0010
+#define orig_r8_IS_IRQ2 0x0020
+
+#endif /* __ASM_PTRACE_H */
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 000000000000..6fc1159dfefe
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _int_vec_base_lds[];
+extern char __arc_dccm_base[];
+extern char __dtb_start[];
+
+#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 000000000000..da2c45979817
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b) ((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 000000000000..4dff5a1e4128
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early-8250 requires BASE_BAUD to be defined and includes this header.
+ * We put in a typical value:
+ * (core clk / 16) - i.e. the 8250 over-samples each bit 16 times.
+ * Although in a multi-platform image this might not work, especially if the
+ * clk driving the UART is different.
+ * We can't use DeviceTree as this is typically for early serial.
+ */
+
+#include <asm/clk.h>
+
+#define BASE_BAUD (arc_get_core_freq() / 16)
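+
+/*
+ * e.g. a (hypothetical) 80 MHz core clock gives BASE_BAUD = 5,000,000;
+ * the 8250 driver then derives its divisor as BASE_BAUD / baud-rate,
+ * i.e. ~43 for 115200.
+ */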
+
+#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 000000000000..229e50681497
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASMARC_SETUP_H
+#define __ASMARC_SETUP_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map an ID to a string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+ int id;
+ const char *str;
+};
+
+struct cpuinfo_data {
+ struct id_to_str info;
+ int up_range;
+};
+
+extern int root_mountflags, end_mem;
+extern int running_on_hw;
+
+void __init setup_processor(void);
+void __init setup_arch_memory(void);
+
+#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 000000000000..c4fb211dcd25
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this Forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void __init first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+ *
+ * @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send: To send IPI to a @cpumask
+ * @ipi_clear:	To clear IPI received by @cpu at @irq
+ */
+struct plat_smp_ops {
+ const char *info;
+ void (*cpu_kick)(int cpu, unsigned long pc);
+ void (*ipi_send)(void *callmap);
+ void (*ipi_clear)(int cpu, int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops plat_smp_ops;
+
+#endif /* CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops,
+ * based on retry-if-irq-in-atomic (with hardware assist).
+ * However despite these, we provide the IRQ disabling variant
+ *
+ * (1) These insns were introduced only in the 4.10 release of the core. So
+ *     support is needed for older releases.
+ *
+ * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which the core handles).
+ *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ *     disabling for atomicity.
+ *
+ * However exported spinlock API is not usable due to cyclic hdr deps
+ * (even after system.h disintegration upstream)
+ * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ * So the workaround is to use the lowest level arch spinlock API.
+ * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
+ * but same is not true for ARCH backend, hence the need for 2 variants
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_atomic_ops_lock); \
+} while (0)
+
+#define atomic_ops_unlock(flags) do { \
+ arch_spin_unlock(&smp_atomic_ops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#define bitops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_bitops_lock); \
+} while (0)
+
+#define bitops_unlock(flags) do { \
+ arch_spin_unlock(&smp_bitops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags) local_irq_save(flags)
+#define atomic_ops_unlock(flags) local_irq_restore(flags)
+
+#define bitops_lock(flags) local_irq_save(flags)
+#define bitops_unlock(flags) local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
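+
+/*
+ * Illustrative sketch (not the definitive implementation): a !LLSC atomic
+ * op in atomic.h is expected to be built on the helpers above roughly as:
+ *
+ *	static inline void atomic_add(int i, atomic_t *v)
+ *	{
+ *		unsigned long flags;
+ *
+ *		atomic_ops_lock(flags);		// IRQs off (+ spinlock on SMP)
+ *		v->counter += i;		// plain RMW, now race free
+ *		atomic_ops_unlock(flags);
+ *	}
+ */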
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#endif
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 000000000000..f158197ac5b0
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ " breq %0, %2, 1b \n"
+ : "+&r" (tmp)
+ : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory");
+}
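+
+/*
+ * Illustrative trace of the EX (atomic exchange) based lock above:
+ * slock = 0 (unlocked); CPU0 and CPU1 both execute "ex" with tmp = 1.
+ * Whichever exchange lands first reads back 0 and falls through (owner);
+ * the other reads back 1 and keeps looping at 1b until the owner stores
+ * 0 again in arch_spin_unlock( ).
+ */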
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * The rwlock state itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ *
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+/* Would read_trylock() succeed? */
+#define arch_read_can_lock(x) ((x)->counter > 0)
+
+/* Would write_trylock() succeed? */
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ */
+ if (rw->counter > 0) {
+ rw->counter--;
+ ret = 1;
+ }
+
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ smp_mb();
+ return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ */
+ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ rw->counter = 0;
+ ret = 1;
+ }
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (!arch_read_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (!arch_write_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter++;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..8276bfd61704
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
+#define __ARCH_SPIN_LOCK_LOCKED__ 1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked:     0x0100_0000
+ * Read lock(s): 0x00FF_FFFF down to 0x01 (each reader decrements the counter)
+ * Write lock:   0x0, but only possible if prior value "unlocked" 0x0100_0000
+ */
+typedef struct {
+ volatile unsigned int counter;
+ arch_spinlock_t lock_mutex;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
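+
+/*
+ * Illustrative counter transitions:
+ *   unlocked          0x0100_0000
+ *   one reader        0x00FF_FFFF
+ *   second reader     0x00FF_FFFE
+ *   both released     0x0100_0000
+ *   writer holds it   0x0          (only possible from 0x0100_0000)
+ */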
+
+#endif
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 000000000000..87676c8f1412
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -We had half-optimised memset/memcpy, got better versions of those
+ * -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 000000000000..1b171ab5fec0
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last) \
+do { \
+ ARC_FPU_PREV(prev, next); \
+ last = __switch_to(prev, next);\
+ ARC_FPU_NEXT(next); \
+ mb(); \
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 000000000000..33ab3048e9b2
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H 1
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h> /* in_syscall() */
+
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ if (user_mode(regs) && in_syscall(regs))
+ return regs->orig_r8;
+ else
+ return -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
+ regs->r8 = regs->orig_r8;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->r0 = (long) error ?: val;
+}
+
+/*
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ unsigned long *inside_ptregs = &(regs->r0);
+ inside_ptregs -= i;
+
+ BUG_ON((i + n) > 6);
+
+ while (n--) {
+ args[i++] = (*inside_ptregs);
+ inside_ptregs--;
+ }
+}
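+
+/*
+ * e.g. i = 0, n = 3 fills args[] with regs->r0, r1, r2: pt_regs lays out
+ * r1, r2, ... at successively lower addresses than r0, so decrementing the
+ * pointer walks the syscall arguments in order.
+ */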
+
+#endif
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 000000000000..e53a5340ba4f
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H 1
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_fork_wrapper(void);
+int sys_vfork_wrapper(void);
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 000000000000..2d50a4cdd7f3
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Oct 2009
+ * No need for ARC specific thread_info allocator (kmalloc/free). This is
+ * anyway a one-page allocation, thus slab alloc can be short-circuited and
+ * the generic version (get_free_page) would be loads better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ struct task_struct *task; /* main task structure */
+ mm_segment_t addr_limit; /* thread address space */
+ struct exec_domain *exec_domain;/* execution domain */
+ __u32 cpu; /* current CPU */
+ unsigned long thr_ptr; /* TLS ptr */
+ struct restart_block restart_block;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
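+
+/*
+ * e.g. with the default 8K THREAD_SIZE and a hypothetical kernel sp of
+ * 0x8001_3c40: masking with ~0x1fff yields 0x8001_2000, the base of the
+ * task's kernel stack, where thread_info lives (the stack grows down
+ * from the top of that region).
+ */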
+
+#endif /* !__ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+
+/* task is terminating due to the OOM killer */
+#define TIF_MEMDIE		16
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_MEMDIE (1<<TIF_MEMDIE)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
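
For illustration only, the decision the assembly return path makes with these masks boils down to a single test; a tiny sketch (flag values mirror the definitions above, the real check lives in entry.S):

    #define TIF_NOTIFY_RESUME	1
    #define TIF_SIGPENDING	2
    #define TIF_NEED_RESCHED	3
    #define _TIF_WORK_MASK	((1 << TIF_NEED_RESCHED) | \
    			 (1 << TIF_SIGPENDING) | (1 << TIF_NOTIFY_RESUME))

    /* non-zero: take the slow path (reschedule / signals / notify-resume work) */
    static inline int return_needs_slowpath(unsigned long thread_flags)
    {
    	return (thread_flags & _TIF_WORK_MASK) != 0;
    }
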
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 000000000000..0a82960a75e9
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 000000000000..a5ff961b1efc
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+#include <asm/tlb.h>
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+; and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_set not cleared on startup
+; didn't want to change setup.c
+; hence extra instruction to clean
+;
+; -- should be in cache since in same line
+; as r0/r1 saves above
+;
+ld r0,[jh_ex_way_sel] ; victim pointer
+and r0,r0,1 ; clean
+xor.f r0,r0,1 ; flip
+st r0,[jh_ex_way_sel] ; store back
+asr r0,r1,12 ; get set # <<1, note bit 12=R=0
+or.nz r0,r0,1 ; set way bit
+and r0,r0,0xff ; clean
+sr r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+; Slower in thrash case (where it matters) because more code is executed
+; Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr r0,[eret] /* instruction address */
+xor r0,r0,r1 /* compare set # */
+and.f r0,r0,0x000fe000 /* 2-way MMU mask */
+bne 88f /* not in same set - no need to probe */
+
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
+and r1,r1,0xff /* Data ASID */
+or r0,r0,r1 /* Instruction address + Data ASID */
+
+lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+xor r0,r0,1 /* flip bottom bit of data index */
+b.d 89f
+sr r0,[ARC_REG_TLBINDEX] /* and put it back */
+88:
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+bmsk r1,r3,7 /* Data ASID, bits 7-0 */
+or_s r0,r0,r1 /* Instruction address + Data ASID */
+
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
+cmp r0,r1 /* if no match on indices, go around */
+xor.eq r1,r1,1 /* flip bottom bit of data index */
+sr r1,[ARC_REG_TLBINDEX] /* and put it back */
+#endif
+
+.endm
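
Restated as C-like pseudocode, the JH_HACK1 path above amounts to the following; tlb_probe_index(), tlb_get_victim_index() and tlb_set_index() are hypothetical stand-ins for the TLBProbe/TLBGetIndex/TLBINDEX auxiliary-register sequences, so this is a sketch of the idea rather than drop-in kernel code:

    #define PAGE_MASK	(~0xfffUL)	/* assumed 4 KB pages */

    /* hypothetical wrappers around the TLBProbe / TLBGetIndex / TLBINDEX ops */
    extern long tlb_probe_index(unsigned long pd0);
    extern long tlb_get_victim_index(void);
    extern void tlb_set_index(long idx);

    /*
     * Before committing the reloaded data entry into a 2-way set, make sure
     * the victim slot chosen by the MMU is not the one currently holding the
     * translation of the faulting instruction's own page.
     */
    static void pick_safe_victim(unsigned long data_pd0, unsigned long insn_vaddr)
    {
    	/* instruction VPN combined with the data access's ASID (PD0 bits 7:0) */
    	unsigned long insn_pd0 = (insn_vaddr & PAGE_MASK) | (data_pd0 & 0xff);
    	long insn_idx = tlb_probe_index(insn_pd0);	/* slot of the I-side entry, if any */
    	long victim = tlb_get_victim_index();		/* slot the MMU picked for the data entry */

    	if (victim == insn_idx)
    		victim ^= 1;				/* use the other way of the set */

    	tlb_set_index(victim);
    }
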
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 000000000000..3eb2ce0bdfa3
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#ifdef __KERNEL__
+
+#include <asm/pgtable.h>
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
+ _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifndef __ASSEMBLY__
+
+#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+
+/*
+ * This pair is called at time of munmap/exit to flush cache and TLB entries
+ * for mappings being torn down.
+ * 1) cache-flush part - implemented via tlb_start_vma( ), can be NOP (for now)
+ *    as we don't support aliasing configs in our VIPT D$.
+ * 2) tlb-flush part - implemented via tlb_end_vma( ), can be NOP as well -
+ *    albeit for different reasons - it's better handled by moving to a new ASID
+ *
+ * Note, read http://lkml.org/lkml/2004/1/15/6
+ */
+#define tlb_start_vma(tlb, vma)
+#define tlb_end_vma(tlb, vma)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void __init read_decode_mmu_bcr(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 000000000000..b2f9bc7f68c8
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+/* XXX: Revisit for SMP */
+#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 000000000000..32420824375b
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ * -__clear_user( ), called multiple times during elf load, was a byte loop;
+ *  converted to do as much word-sized clearing as possible.
+ *
+ * vineetg: Dec 2009
+ * -Hand crafted constant propagation for "constant" copy sizes
+ * -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ * -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h> /* for generic string functions */
+
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+
+/*
+ * Algorithmically, for __user_ok() we want to do:
+ * (start < TASK_SIZE) && (start+len < TASK_SIZE)
+ * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
+ * emitted directly in code.
+ *
+ * This can however be rewritten as follows:
+ * (len <= TASK_SIZE) && (start+len < TASK_SIZE)
+ *
+ * Because it essentially checks if the buffer end is within limit and @len is
+ * non-negative, which implies that the buffer start will be within limit too.
+ *
+ * The reason for rewriting is that, for the majority of cases, @len is a
+ * compile time constant, causing the first sub-expression to be subsumed at
+ * compile time.
+ *
+ * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
+ * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
+ * would already have been done at this call site for __kernel_ok()
+ *
+ */
+#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
+ (((addr)+(sz)) <= get_fs()))
+#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
+ likely(__user_ok((addr), (sz))))
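
The effect of the rewritten check can be seen with a small stand-alone sketch; the TASK_SIZE value of 0x60000000 is an assumption, picked only because the comment above mentions a limit in that range:

    #include <stdint.h>
    #include <stdio.h>

    #define TASK_SIZE 0x60000000UL	/* assumed value for the sketch */

    /*
     * Mirrors __user_ok(): the first term rejects huge/negative lengths, so a
     * wrap-around in addr + sz cannot slip past the second term.
     */
    static int user_ok(unsigned long addr, unsigned long sz)
    {
    	return (sz <= TASK_SIZE) && (addr + sz <= TASK_SIZE);
    }

    int main(void)
    {
    	printf("%d\n", user_ok(0x1000, 64));		/* 1: inside user range */
    	printf("%d\n", user_ok(0x5fffffffUL, 64));	/* 0: end crosses the limit */
    	printf("%d\n", user_ok(0x5fffffffUL, (unsigned long)-8)); /* 0: huge len */
    	return 0;
    }
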
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
+ case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
+ case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
+ case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret) \
+ __asm__ __volatile__( \
+ "1: ld %1,[%2]\n" \
+ "4: ld %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
+ case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
+ case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
+ case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+#define __arc_put_user_one(src, dst, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret) \
+ __asm__ __volatile__( \
+ "1: st %1,[%2]\n" \
+ "4: st %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__ (
+ " mov.f lp_count, %0 \n"
+ " lpnz 2f \n"
+ "1: ldb.ab %1, [%3, 1] \n"
+ " stb.ab %1, [%2, 1] \n"
+ " sub %0,%0,1 \n"
+ "2: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 2b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 3b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /*
+ * Note the '&' earlyclobber modifier: it makes sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ /*
+ * Hand-crafted constant propagation to reduce code sz of the
+ * laddered copy 16x,8,4,2,1
+ */
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ "1: ld.ab %3, [%2, 4] \n"
+ "11: ld.ab %4, [%2, 4] \n"
+ "12: ld.ab %5, [%2, 4] \n"
+ "13: ld.ab %6, [%2, 4] \n"
+ " st.ab %3, [%1, 4] \n"
+ " st.ab %4, [%1, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ "14: ld.ab %3, [%2,4] \n"
+ "15: ld.ab %4, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " st.ab %4, [%1,4] \n"
+ " sub %0,%0,8 \n"
+ "31: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ "16: ld.ab %3, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " sub %0,%0,4 \n"
+ "32: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ "17: ldw.ab %3, [%2,2] \n"
+ " stw.ab %3, [%1,2] \n"
+ " sub %0,%0,2 \n"
+ "33: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ "18: ldb.ab %3, [%2,2] \n"
+ " stb.ab %3, [%1,2] \n"
+ " sub %0,%0,1 \n"
+ "34: ; nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ "1: ld.ab %5, [%2, 4] \n"
+ "11: ld.ab %6, [%2, 4] \n"
+ "12: ld.ab %7, [%2, 4] \n"
+ "13: ld.ab %8, [%2, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " st.ab %7, [%1, 4] \n"
+ " st.ab %8, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ "14: ld.ab %5, [%2,4] \n"
+ "15: ld.ab %6, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " st.ab %6, [%1,4] \n"
+ " sub.f %0,%0,8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ "16: ld.ab %5, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " sub.f %0,%0,4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ "17: ldw.ab %5, [%2,2] \n"
+ " stw.ab %5, [%1,2] \n"
+ " sub.f %0,%0,2 \n"
+ "33: bbit0 %3,0,34f \n"
+ "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ " stb.ab %5, [%1,1] \n"
+ " sub.f %0,%0,1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
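
Stripped of the fault-fixup plumbing, both the constant-size and variable-size paths implement the same 16/8/4/2/1 ladder; a plain-C model of just that structure (the real routine additionally returns the number of bytes left uncopied when a fault hits):

    #include <stddef.h>
    #include <string.h>

    /*
     * Peel off 16-byte chunks first, then 8, 4, 2 and finally 1 byte,
     * mirroring the asm blocks above (after the 16-byte loop the remainder
     * is below 16, so the smaller steps each run at most once).
     */
    size_t ladder_copy(void *to, const void *from, size_t n)
    {
    	size_t left = n;
    	unsigned char *d = to;
    	const unsigned char *s = from;
    	static const size_t steps[] = { 16, 8, 4, 2, 1 };

    	for (size_t i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
    		while (left >= steps[i]) {
    			memcpy(d, s, steps[i]);
    			d += steps[i];
    			s += steps[i];
    			left -= steps[i];
    		}
    	}
    	return left;	/* 0: everything copied (no faults modelled here) */
    }
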
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__(
+ " mov.f lp_count, %0 \n"
+ " lpnz 3f \n"
+ " ldb.ab %1, [%3, 1] \n"
+ "1: stb.ab %1, [%2, 1] \n"
+ " sub %0, %0, 1 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /* Note the '&' earlyclobber modifier: it makes sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ " ld.ab %3, [%2, 4] \n"
+ " ld.ab %4, [%2, 4] \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ "1: st.ab %3, [%1, 4] \n"
+ "11: st.ab %4, [%1, 4] \n"
+ "12: st.ab %5, [%1, 4] \n"
+ "13: st.ab %6, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ " ld.ab %4, [%2,4] \n"
+ "14: st.ab %3, [%1,4] \n"
+ "15: st.ab %4, [%1,4] \n"
+ " sub %0, %0, 8 \n"
+ "31:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ "16: st.ab %3, [%1,4] \n"
+ " sub %0, %0, 4 \n"
+ "32:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ " ldw.ab %3, [%2,2] \n"
+ "17: stw.ab %3, [%1,2] \n"
+ " sub %0, %0, 2 \n"
+ "33:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ " ldb.ab %3, [%2,1] \n"
+ "18: stb.ab %3, [%1,1] \n"
+ " sub %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ " ld.ab %7, [%2, 4] \n"
+ " ld.ab %8, [%2, 4] \n"
+ "1: st.ab %5, [%1, 4] \n"
+ "11: st.ab %6, [%1, 4] \n"
+ "12: st.ab %7, [%1, 4] \n"
+ "13: st.ab %8, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ " ld.ab %6, [%2,4] \n"
+ "14: st.ab %5, [%1,4] \n"
+ "15: st.ab %6, [%1,4] \n"
+ " sub.f %0, %0, 8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ "16: st.ab %5, [%1,4] \n"
+ " sub.f %0, %0, 4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ " ldw.ab %5, [%2,2] \n"
+ "17: stw.ab %5, [%1,2] \n"
+ " sub.f %0, %0, 2 \n"
+ "33: bbit0 %3,0,34f \n"
+ " ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ "18: stb.ab %5, [%1,1] \n"
+ " sub.f %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+ long res = n;
+ unsigned char *d_char = to;
+
+ __asm__ __volatile__(
+ " bbit0 %0, 0, 1f \n"
+ "75: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "1: bbit0 %0, 1, 2f \n"
+ "76: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "2: asr.f lp_count, %1, 2 \n"
+ " lpnz 3f \n"
+ "77: st.ab %2, [%0,4] \n"
+ " sub %1, %1, 4 \n"
+ "3: bbit0 %1, 1, 4f \n"
+ "78: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "4: bbit0 %1, 0, 5f \n"
+ "79: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "5: \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 5b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 75b, 3b \n"
+ " .word 76b, 3b \n"
+ " .word 77b, 3b \n"
+ " .word 78b, 3b \n"
+ " .word 79b, 3b \n"
+ " .previous \n"
+ : "+r"(d_char), "+r"(res)
+ : "i"(0)
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return res;
+}
+
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res = count;
+ char val;
+ unsigned int hw_count;
+
+ if (count == 0)
+ return 0;
+
+ __asm__ __volatile__(
+ " lp 2f \n"
+ "1: ldb.ab %3, [%2, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " stb.ab %3, [%1, 1] \n"
+ "2: sub %0, %6, %4 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
+ : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
+ : "memory");
+
+ return res;
+}
+
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+ long res, tmp1, cnt;
+ char val;
+
+ __asm__ __volatile__(
+ " mov %2, %1 \n"
+ "1: ldb.ab %3, [%0, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " sub.f %2, %2, 1 \n"
+ " bnz 1b \n"
+ " sub %2, %2, 1 \n"
+ "2: sub %0, %1, %2 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, 0 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+ : "0"(s), "1"(n)
+ : "memory");
+
+ return res;
+}
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
+#define __clear_user(d, n) __arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
+#else
+extern long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n);
+extern long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n);
+extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+ long count);
+extern long arc_strnlen_user_noinline(const char __user *src, long n);
+
+#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
+#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
+#define __clear_user(d, n) arc_clear_user_noinline(d, n)
+#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
+#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
+
+#endif
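
The out-of-line variants are presumably thin wrappers, defined in a C file, around the inline __arc_*() implementations, so that -Os builds pay one call instead of expanding the copy loops at every call site. A sketch of what one such wrapper might look like (names follow the externs above, the body is an assumption):

    long arc_copy_from_user_noinline(void *to, const void __user *from,
    				 unsigned long n)
    {
    	return __arc_copy_from_user(to, from, n);
    }
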
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 000000000000..5dbe63f17b66
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs);
+#else
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 000000000000..7ca628b6ee2a
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+struct arc700_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27; /* fp */
+ unsigned long r28; /* sp */
+ unsigned long r29;
+ unsigned long r30;
+ unsigned long r31; /* blink */
+ unsigned long r63; /* pc */
+};
+
+struct unwind_frame_info {
+ struct arc700_regs regs;
+ struct task_struct *task;
+ unsigned call_frame:1;
+};
+
+#define UNW_PC(frame) ((frame)->regs.r63)
+#define UNW_SP(frame) ((frame)->regs.r28)
+#define UNW_BLINK(frame) ((frame)->regs.r31)
+
+/* Rajesh FIXME */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) ((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
+#endif
+
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(r0), \
+ PTREGS_INFO(r1), \
+ PTREGS_INFO(r2), \
+ PTREGS_INFO(r3), \
+ PTREGS_INFO(r4), \
+ PTREGS_INFO(r5), \
+ PTREGS_INFO(r6), \
+ PTREGS_INFO(r7), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(r16), \
+ PTREGS_INFO(r17), \
+ PTREGS_INFO(r18), \
+ PTREGS_INFO(r19), \
+ PTREGS_INFO(r20), \
+ PTREGS_INFO(r21), \
+ PTREGS_INFO(r22), \
+ PTREGS_INFO(r23), \
+ PTREGS_INFO(r24), \
+ PTREGS_INFO(r25), \
+ PTREGS_INFO(r26), \
+ PTREGS_INFO(r27), \
+ PTREGS_INFO(r28), \
+ PTREGS_INFO(r29), \
+ PTREGS_INFO(r30), \
+ PTREGS_INFO(r31), \
+ PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void arc_unwind_setup(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
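
As a rough illustration of how a consumer might drive the unwinder, the loop below steps a pre-seeded unwind_frame_info until arc_unwind() reports it cannot go further; the stop condition and logging are assumptions, the real user being the arch stack-trace code:

    #include <linux/printk.h>

    static void walk_frames_sketch(struct unwind_frame_info *frame)
    {
    	while (UNW_PC(frame)) {
    		pr_info("  pc: %lx  sp: %lx\n", UNW_PC(frame), UNW_SP(frame));

    		/* arc_unwind() advances @frame to the caller; non-zero => stop */
    		if (arc_unwind(frame))
    			break;
    	}
    }
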
+
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+ int (*callback) (struct unwind_frame_info *info,
+ void *arg),
+ void *arg)
+{
+ return 0;
+}
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+ return 0;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ return;
+}
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ struct pt_regs *regs)
+{
+ return;
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+static inline void arc_unwind_setup(void)
+{
+}
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */