author		Tejun Heo <tj@kernel.org>	2010-01-05 01:17:33 +0100
committer	Tejun Heo <tj@kernel.org>	2010-01-05 01:17:33 +0100
commit		32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree		b1ce838a37044bb38dfc128e2116ca35630e629a /arch/arm/include/asm
parent		percpu: refactor the code in pcpu_[de]populate_chunk() (diff)
parent		Merge branch 'limits_cleanup' of git://decibel.fi.muni.cz/~xslaby/linux (diff)
Merge branch 'master' into percpu
Conflicts:
	arch/powerpc/platforms/pseries/hvCall.S
	include/linux/percpu.h
Diffstat (limited to 'arch/arm/include/asm')
 arch/arm/include/asm/asm-offsets.h            |   1
 arch/arm/include/asm/bitops.h                 |   6
 arch/arm/include/asm/cacheflush.h             |  45
 arch/arm/include/asm/dma-mapping.h            |  26
 arch/arm/include/asm/elf.h                    |   4
 arch/arm/include/asm/glue.h                   |  26
 arch/arm/include/asm/hardware/cache-tauros2.h |  11
 arch/arm/include/asm/hardware/coresight.h     | 165
 arch/arm/include/asm/hardware/iop3xx.h        |  19
 arch/arm/include/asm/kmap_types.h             |   6
 arch/arm/include/asm/mach-types.h             |   1
 arch/arm/include/asm/mach/irq.h               |   4
 arch/arm/include/asm/memory.h                 |  16
 arch/arm/include/asm/mman.h                   |   3
 arch/arm/include/asm/pgtable.h                |  14
 arch/arm/include/asm/proc-fns.h               | 374
 arch/arm/include/asm/smp_plat.h               |  16
 arch/arm/include/asm/socket.h                 |   2
 arch/arm/include/asm/spinlock.h               |  40
 arch/arm/include/asm/spinlock_types.h         |   8
 arch/arm/include/asm/swab.h                   |  19
 arch/arm/include/asm/system.h                 |  19
 arch/arm/include/asm/thread_notify.h          |   2
 arch/arm/include/asm/tlbflush.h               |   3
 arch/arm/include/asm/unistd.h                 |  10
 25 files changed, 579 insertions(+), 261 deletions(-)
diff --git a/arch/arm/include/asm/asm-offsets.h b/arch/arm/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/arm/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 63a481fbbed4..338ff19ae447 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -84,7 +84,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
*p = res | mask;
raw_local_irq_restore(flags);
- return res & mask;
+ return (res & mask) != 0;
}
static inline int
@@ -101,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
*p = res & ~mask;
raw_local_irq_restore(flags);
- return res & mask;
+ return (res & mask) != 0;
}
static inline int
@@ -118,7 +118,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
*p = res ^ mask;
raw_local_irq_restore(flags);
- return res & mask;
+ return (res & mask) != 0;
}
#include <asm-generic/bitops/non-atomic.h>
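A note on the bitops change above: returning the raw "res & mask" value means a test of bit 31 comes back as 0x80000000, which turns negative when converted to the int return type; normalizing with "!= 0" always yields a clean 0 or 1. A minimal userspace sketch of the difference, with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long res  = 0xffffffffUL; /* hypothetical word read back */
        unsigned long mask = 1UL << 31;    /* testing bit 31 */

        int raw  = res & mask;        /* 0x80000000 as int: negative on common compilers */
        int norm = (res & mask) != 0; /* always 0 or 1 */

        printf("raw=%d norm=%d\n", raw, norm);
        return 0;
    }
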
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index fd03fb63a332..730aefcfbee3 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
- void (*flush_kern_dcache_page)(void *);
+ void (*flush_kern_dcache_area)(void *, size_t);
void (*dma_inv_range)(const void *, const void *);
void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
}
static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
}
static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
}
static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len, int write)
{
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
__cpuc_coherent_kern_range(addr, addr + len);
}
}
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+ vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+ vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+ vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+ vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -408,15 +418,19 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
static inline void __flush_icache_all(void)
{
+#ifdef CONFIG_ARM_ERRATA_411920
+ extern void v6_icache_inval_all(void);
+ v6_icache_inval_all();
+#else
asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
:
: "r" (0));
+#endif
}
#define ARCH_HAS_FLUSH_ANON_PAGE
@@ -434,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
{
/* highmem pages are always flushed upon kunmap already */
if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
#define flush_dcache_mmap_lock(mapping) \
@@ -451,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
*/
#define flush_icache_page(vma,page) do { } while (0)
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
- unsigned offset, size_t size)
-{
- const void *start = (void __force *)virt + offset;
- dmac_inv_range(start, start + size);
-}
-
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ff46dfa68a97..a96300bf83fd 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -15,20 +15,15 @@
* must not be used by drivers.
*/
#ifndef __arch_page_to_dma
-
-#if !defined(CONFIG_HIGHMEM)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
- return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+ return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
- return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+ return pfn_to_page(__bus_to_pfn(addr));
}
-#else
-#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
-#endif
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
@@ -45,6 +40,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
return __arch_page_to_dma(dev, page);
}
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+{
+ return __arch_dma_to_page(dev, addr);
+}
+
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
return __arch_dma_to_virt(dev, addr);
@@ -257,9 +257,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
*/
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
/*
@@ -352,7 +354,6 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
{
/* nothing to do */
}
-#endif /* CONFIG_DMABOUNCE */
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -371,8 +372,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- dma_unmap_single(dev, handle, size, dir);
+ /* nothing to do */
}
+#endif /* CONFIG_DMABOUNCE */
/**
* dma_sync_single_range_for_cpu
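
With the split above, dma_unmap_page() is no longer a wrapper around dma_unmap_single(): each mapping call must be paired with its own unmap variant. A hedged driver-style sketch of the pairing (example_tx and the DMA_TO_DEVICE direction are illustrative, not from this patch):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_tx(struct device *dev, struct page *page)
    {
        dma_addr_t handle;

        /* map one page for the device to read from */
        handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
            return -ENOMEM;

        /* ... start the transfer and wait for it to complete ... */

        /* unmap with the page variant, matching the mapping call */
        dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
        return 0;
    }
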
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c3b911ee9151..a399bb5730f1 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -98,7 +98,9 @@ extern int elf_check_arch(const struct elf32_hdr *);
extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
-#define USE_ELF_CORE_DUMP
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index a0e39d5d00c9..234a3fc1c78e 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -120,25 +120,39 @@
#endif
/*
- * Prefetch abort handler. If the CPU has an IFAR use that, otherwise
- * use the address of the aborted instruction
+ * Prefetch Abort Model
+ * ================
+ *
+ * We have the following to choose from:
+ * legacy - no IFSR, no IFAR
+ * v6 - ARMv6: IFSR, no IFAR
+ * v7 - ARMv7: IFSR and IFAR
*/
+
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
-#ifdef CONFIG_CPU_PABRT_IFAR
+#ifdef CONFIG_CPU_PABRT_LEGACY
+# ifdef CPU_PABORT_HANDLER
+# define MULTI_PABORT 1
+# else
+# define CPU_PABORT_HANDLER legacy_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
-# define CPU_PABORT_HANDLER(reg, insn) mrc p15, 0, reg, cr6, cr0, 2
+# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
-#ifdef CONFIG_CPU_PABRT_NOIFAR
+#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
-# define CPU_PABORT_HANDLER(reg, insn) mov reg, insn
+# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
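
The selection pattern above is worth spelling out: the first configured abort model claims CPU_PABORT_HANDLER, and any later model that finds the name already defined turns on MULTI_PABORT so the kernel dispatches at runtime. A standalone sketch of the same preprocessor logic, compilable with any C compiler (the two CONFIG defines are forced here purely for illustration):

    #include <stdio.h>

    #define CONFIG_CPU_PABRT_V6
    #define CONFIG_CPU_PABRT_V7

    #ifdef CONFIG_CPU_PABRT_V6
    # ifdef CPU_PABORT_HANDLER
    #  define MULTI_PABORT 1
    # else
    #  define CPU_PABORT_HANDLER v6_pabort  /* first model claims the name */
    # endif
    #endif

    #ifdef CONFIG_CPU_PABRT_V7
    # ifdef CPU_PABORT_HANDLER
    #  define MULTI_PABORT 1                /* second model: runtime dispatch */
    # else
    #  define CPU_PABORT_HANDLER v7_pabort
    # endif
    #endif

    int main(void)
    {
    #ifdef MULTI_PABORT
        puts("MULTI_PABORT: dispatch through a function pointer");
    #else
        puts("single model: CPU_PABORT_HANDLER is called directly");
    #endif
        return 0;
    }
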
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
new file mode 100644
index 000000000000..538f17ca905b
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -0,0 +1,11 @@
+/*
+ * arch/arm/include/asm/hardware/cache-tauros2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+extern void __init tauros2_init(void);
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
new file mode 100644
index 000000000000..f82b25d4f73e
--- /dev/null
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -0,0 +1,165 @@
+/*
+ * linux/arch/arm/include/asm/hardware/coresight.h
+ *
+ * CoreSight components' registers
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ * Alexander Shishkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_HARDWARE_CORESIGHT_H
+#define __ASM_HARDWARE_CORESIGHT_H
+
+#define TRACER_ACCESSED_BIT 0
+#define TRACER_RUNNING_BIT 1
+#define TRACER_CYCLE_ACC_BIT 2
+#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
+#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
+#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
+
+struct tracectx {
+ unsigned int etb_bufsz;
+ void __iomem *etb_regs;
+ void __iomem *etm_regs;
+ unsigned long flags;
+ int ncmppairs;
+ int etm_portsz;
+ struct device *dev;
+ struct clk *emu_clk;
+ struct mutex mutex;
+};
+
+#define TRACER_TIMEOUT 10000
+
+#define etm_writel(t, v, x) \
+ (__raw_writel((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+
+/* CoreSight Management Registers */
+#define CSMR_LOCKACCESS 0xfb0
+#define CSMR_LOCKSTATUS 0xfb4
+#define CSMR_AUTHSTATUS 0xfb8
+#define CSMR_DEVID 0xfc8
+#define CSMR_DEVTYPE 0xfcc
+/* CoreSight Component Registers */
+#define CSCR_CLASS 0xff4
+
+#define CSCR_PRSR 0x314
+
+#define UNLOCK_MAGIC 0xc5acce55
+
+/* ETM control register, "ETM Architecture", 3.3.1 */
+#define ETMR_CTRL 0
+#define ETMCTRL_POWERDOWN 1
+#define ETMCTRL_PROGRAM (1 << 10)
+#define ETMCTRL_PORTSEL (1 << 11)
+#define ETMCTRL_DO_CONTEXTID (3 << 14)
+#define ETMCTRL_PORTMASK1 (7 << 4)
+#define ETMCTRL_PORTMASK2 (1 << 21)
+#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
+#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
+#define ETMCTRL_DO_CPRT (1 << 1)
+#define ETMCTRL_DATAMASK (3 << 2)
+#define ETMCTRL_DATA_DO_DATA (1 << 2)
+#define ETMCTRL_DATA_DO_ADDR (1 << 3)
+#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
+#define ETMCTRL_BRANCH_OUTPUT (1 << 8)
+#define ETMCTRL_CYCLEACCURATE (1 << 12)
+
+/* ETM configuration code register */
+#define ETMR_CONFCODE (0x04)
+
+/* ETM trace start/stop resource control register */
+#define ETMR_TRACESSCTRL (0x18)
+
+/* ETM trigger event register */
+#define ETMR_TRIGEVT (0x08)
+
+/* address access type register bits, "ETM architecture",
+ * table 3-27 */
+/* - access type */
+#define ETMAAT_IFETCH 0
+#define ETMAAT_IEXEC 1
+#define ETMAAT_IEXECPASS 2
+#define ETMAAT_IEXECFAIL 3
+#define ETMAAT_DLOADSTORE 4
+#define ETMAAT_DLOAD 5
+#define ETMAAT_DSTORE 6
+/* - comparison access size */
+#define ETMAAT_JAVA (0 << 3)
+#define ETMAAT_THUMB (1 << 3)
+#define ETMAAT_ARM (3 << 3)
+/* - data value comparison control */
+#define ETMAAT_NOVALCMP (0 << 5)
+#define ETMAAT_VALMATCH (1 << 5)
+#define ETMAAT_VALNOMATCH (3 << 5)
+/* - exact match */
+#define ETMAAT_EXACTMATCH (1 << 7)
+/* - context id comparator control */
+#define ETMAAT_IGNCONTEXTID (0 << 8)
+#define ETMAAT_VALUE1 (1 << 8)
+#define ETMAAT_VALUE2 (2 << 8)
+#define ETMAAT_VALUE3 (3 << 8)
+/* - security level control */
+#define ETMAAT_IGNSECURITY (0 << 10)
+#define ETMAAT_NSONLY (1 << 10)
+#define ETMAAT_SONLY (2 << 10)
+
+#define ETMR_COMP_VAL(x) (0x40 + (x) * 4)
+#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4)
+
+/* ETM status register, "ETM Architecture", 3.3.2 */
+#define ETMR_STATUS (0x10)
+#define ETMST_OVERFLOW (1 << 0)
+#define ETMST_PROGBIT (1 << 1)
+#define ETMST_STARTSTOP (1 << 2)
+#define ETMST_TRIGGER (1 << 3)
+
+#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
+#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
+#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
+
+#define ETMR_TRACEENCTRL2 0x1c
+#define ETMR_TRACEENCTRL 0x24
+#define ETMTE_INCLEXCL (1 << 24)
+#define ETMR_TRACEENEVT 0x20
+#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
+ ETMCTRL_DATA_DO_ADDR | \
+ ETMCTRL_BRANCH_OUTPUT | \
+ ETMCTRL_DO_CONTEXTID)
+
+/* ETB registers, "CoreSight Components TRM", 9.3 */
+#define ETBR_DEPTH 0x04
+#define ETBR_STATUS 0x0c
+#define ETBR_READMEM 0x10
+#define ETBR_READADDR 0x14
+#define ETBR_WRITEADDR 0x18
+#define ETBR_TRIGGERCOUNT 0x1c
+#define ETBR_CTRL 0x20
+#define ETBR_FORMATTERCTRL 0x304
+#define ETBFF_ENFTC 1
+#define ETBFF_ENFCONT (1 << 1)
+#define ETBFF_FONFLIN (1 << 4)
+#define ETBFF_MANUAL_FLUSH (1 << 6)
+#define ETBFF_TRIGIN (1 << 8)
+#define ETBFF_TRIGEVT (1 << 9)
+#define ETBFF_TRIGFL (1 << 10)
+
+#define etb_writel(t, v, x) \
+ (__raw_writel((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+
+#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t) \
+ do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etb_unlock(t) \
+ do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#endif /* __ASM_HARDWARE_CORESIGHT_H */
+
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 4b8e7f559929..5daea2961d48 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -215,6 +215,7 @@ extern int iop3xx_get_init_atu(void);
* IOP3XX I/O and Mem space regions for PCI autoconfiguration
*/
#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000
+#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000
#define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000
#define IOP3XX_PCI_LOWER_IO_PA 0x90000000
@@ -233,7 +234,13 @@ extern int iop3xx_get_init_atu(void);
void iop3xx_map_io(void);
void iop_init_cp6_handler(void);
void iop_init_time(unsigned long tickrate);
-unsigned long iop_gettimeoffset(void);
+
+static inline u32 read_tmr0(void)
+{
+ u32 val;
+ asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
+ return val;
+}
static inline void write_tmr0(u32 val)
{
@@ -252,6 +259,11 @@ static inline u32 read_tcr0(void)
return val;
}
+static inline void write_tcr0(u32 val)
+{
+ asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
+}
+
static inline u32 read_tcr1(void)
{
u32 val;
@@ -259,6 +271,11 @@ static inline u32 read_tcr1(void)
return val;
}
+static inline void write_tcr1(u32 val)
+{
+ asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
+}
+
static inline void write_trr0(u32 val)
{
asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index d16ec97ec9a9..c019949a5189 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -22,4 +22,10 @@ enum km_type {
KM_TYPE_NR
};
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define KM_NMI (-1)
+#define KM_NMI_PTE (-1)
+#define KM_IRQ_PTE (-1)
+#endif
+
#endif
diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h
new file mode 100644
index 000000000000..948178cc6ba8
--- /dev/null
+++ b/arch/arm/include/asm/mach-types.h
@@ -0,0 +1 @@
+#include <generated/mach-types.h>
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index acac5302e4ea..8920b2d6e3b8 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
*/
#define do_bad_IRQ(irq,desc) \
do { \
- spin_lock(&desc->lock); \
+ raw_spin_lock(&desc->lock); \
handle_bad_irq(irq, desc); \
- spin_unlock(&desc->lock); \
+ raw_spin_unlock(&desc->lock); \
} while(0)
#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index cefedf062138..5421d82a2572 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -125,8 +125,10 @@
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
+#ifndef __virt_to_phys
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
/*
* Convert a physical address to a Page Frame Number and back
@@ -134,6 +136,12 @@
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
#ifndef __ASSEMBLY__
/*
@@ -194,7 +202,8 @@ static inline void *phys_to_virt(unsigned long x)
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
-#define __pfn_to_bus(x) ((x) << PAGE_SHIFT)
+#define __pfn_to_bus(x) __pfn_to_phys(x)
+#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
static inline __deprecated unsigned long virt_to_bus(void *x)
@@ -293,11 +302,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#endif /* !CONFIG_DISCONTIGMEM */
/*
- * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
- */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
*/
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index 8eebf89f5ab1..41f99c573b93 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1 +1,4 @@
#include <asm-generic/mman.h>
+
+#define arch_mmap_check(addr, len, flags) \
+ (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
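
The hook above makes the kernel refuse MAP_FIXED requests below FIRST_USER_ADDRESS (the first page on ARM). A hedged userspace illustration of the visible effect; note that vm.mmap_min_addr can produce a similar refusal on its own:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* ask for a fixed mapping at address 0 */
        void *p = mmap((void *)0, 4096, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED)
            printf("rejected: %s\n", strerror(errno));  /* expect EINVAL */
        return 0;
    }
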
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 201ccaa11f61..11397687f42c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#if __LINUX_ARM_ARCH__ >= 7
+#define pgprot_dmacoherent(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#else
+#define pgprot_dmacoherent(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+#endif
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 3976412685f8..8fdae9bc9abb 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -24,206 +24,228 @@
* CPU_NAME - the prefix for CPU related functions
*/
-#ifdef CONFIG_CPU_32
-# ifdef CONFIG_CPU_ARM610
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm6
-# endif
+#ifdef CONFIG_CPU_ARM610
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm6
# endif
-# ifdef CONFIG_CPU_ARM7TDMI
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm7tdmi
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM7TDMI
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm7tdmi
# endif
-# ifdef CONFIG_CPU_ARM710
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm7
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM710
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm7
# endif
-# ifdef CONFIG_CPU_ARM720T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm720
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM720T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm720
# endif
-# ifdef CONFIG_CPU_ARM740T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm740
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM740T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm740
# endif
-# ifdef CONFIG_CPU_ARM9TDMI
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm9tdmi
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM9TDMI
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm9tdmi
# endif
-# ifdef CONFIG_CPU_ARM920T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm920
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM920T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm920
# endif
-# ifdef CONFIG_CPU_ARM922T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm922
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM922T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm922
# endif
-# ifdef CONFIG_CPU_FA526
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_fa526
-# endif
+#endif
+
+#ifdef CONFIG_CPU_FA526
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_fa526
# endif
-# ifdef CONFIG_CPU_ARM925T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm925
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm925
# endif
-# ifdef CONFIG_CPU_ARM926T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm926
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm926
# endif
-# ifdef CONFIG_CPU_ARM940T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm940
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm940
# endif
-# ifdef CONFIG_CPU_ARM946E
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm946
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm946
# endif
-# ifdef CONFIG_CPU_SA110
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_sa110
-# endif
+#endif
+
+#ifdef CONFIG_CPU_SA110
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_sa110
# endif
-# ifdef CONFIG_CPU_SA1100
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_sa1100
-# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_sa1100
# endif
-# ifdef CONFIG_CPU_ARM1020
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1020
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1020
# endif
-# ifdef CONFIG_CPU_ARM1020E
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1020e
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1020e
# endif
-# ifdef CONFIG_CPU_ARM1022
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1022
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1022
# endif
-# ifdef CONFIG_CPU_ARM1026
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1026
-# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1026
# endif
-# ifdef CONFIG_CPU_XSCALE
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_xscale
-# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_xscale
# endif
-# ifdef CONFIG_CPU_XSC3
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_xsc3
-# endif
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_xsc3
# endif
-# ifdef CONFIG_CPU_MOHAWK
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_mohawk
-# endif
+#endif
+
+#ifdef CONFIG_CPU_MOHAWK
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_mohawk
# endif
-# ifdef CONFIG_CPU_FEROCEON
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_feroceon
-# endif
+#endif
+
+#ifdef CONFIG_CPU_FEROCEON
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_feroceon
# endif
-# ifdef CONFIG_CPU_V6
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_v6
-# endif
+#endif
+
+#ifdef CONFIG_CPU_V6
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v6
# endif
-# ifdef CONFIG_CPU_V7
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_v7
-# endif
+#endif
+
+#ifdef CONFIG_CPU_V7
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v7
# endif
#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
new file mode 100644
index 000000000000..59303e200845
--- /dev/null
+++ b/arch/arm/include/asm/smp_plat.h
@@ -0,0 +1,16 @@
+/*
+ * ARM specific SMP header, this contains our implementation
+ * details.
+ */
+#ifndef __ASMARM_SMP_PLAT_H
+#define __ASMARM_SMP_PLAT_H
+
+#include <asm/cputype.h>
+
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+ return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
+#endif
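
tlb_ops_need_broadcast() reads the maintenance-broadcast field in bits [15:12] of ID_MMFR3; a value of 2 or more means cache, branch predictor and TLB maintenance are all broadcast in hardware, so no software IPIs are needed. A worked decode with a hypothetical register value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mmfr3 = 0x00002000;          /* hypothetical ID_MMFR3 */
        unsigned int bcast = (mmfr3 >> 12) & 0xf; /* maintenance broadcast field: 2 */

        /* field < 2: TLB ops are not broadcast, other CPUs need an IPI */
        printf("need software broadcast: %s\n", bcast < 2 ? "yes" : "no");
        return 0;
    }
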
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
index 92ac61d294fd..90ffd04b8e74 100644
--- a/arch/arm/include/asm/socket.h
+++ b/arch/arm/include/asm/socket.h
@@ -60,4 +60,6 @@
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
+#define SO_RXQ_OVFL 40
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ede..c91c64cab922 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
* Locked value: 1
*/
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
@@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
/*
* Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "cc");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
@@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
}
/* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee5..d14d197ae04a 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index ca2bf2f6d6ea..9997ad20eff1 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -22,6 +22,24 @@
# define __SWAB_64_THRU_32__
#endif
+#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ __asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#else
+
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__u32 t;
@@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
#endif
+#endif
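
The new ARMv6+ paths above do the swap in a single rev/rev16 instruction; whatever the implementation, the result must agree with the generic shift-and-mask form. A small self-check in portable C (portable_swab32 is illustrative, not a kernel helper):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t portable_swab32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    int main(void)
    {
        /* rev on 0x12345678 must produce 0x78563412 */
        assert(portable_swab32(0x12345678u) == 0x78563412u);
        puts("swab32 ok");
        return 0;
    }
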
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index d65b2f5bf41f..058e7e90881d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -138,21 +138,26 @@ extern unsigned int user_debug;
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
-#ifndef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb() dmb()
+#define rmb() dmb()
+#define wmb() dmb()
+#else
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
-#define mb() dmb()
-#define rmb() dmb()
-#define wmb() dmb()
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
#endif
+
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index f27379d7f72a..c4391ba20350 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -41,7 +41,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
* These are the reason codes for the thread notifier.
*/
#define THREAD_NOTIFY_FLUSH 0
-#define THREAD_NOTIFY_RELEASE 1
+#define THREAD_NOTIFY_EXIT 1
#define THREAD_NOTIFY_SWITCH 2
#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index a45ab5dd8255..c2f1605de359 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+ if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
if (tlb_flag(TLB_V3_FULL))
asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
if (tlb_flag(TLB_V4_U_FULL))
@@ -360,6 +360,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_V4_I_FULL))
asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
}
+ put_cpu();
if (tlb_flag(TLB_V6_U_ASID))
asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 89f7eade20af..4e506d09e5f9 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -403,6 +403,15 @@
#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
/*
+ * *NOTE*: This is a ghost syscall private to the kernel. Only the
+ * __kuser_cmpxchg code in entry-armv.S should be aware of its
+ * existence. Don't ever use this from user code.
+ */
+#ifdef __KERNEL__
+#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
+#endif
+
+/*
* The following syscalls are obsolete and no longer available for EABI.
*/
#if defined(__ARM_EABI__) && !defined(__KERNEL__)
@@ -456,6 +465,7 @@
* Unimplemented (or alternatively implemented) syscalls
*/
#define __IGNORE_fadvise64_64 1
+#define __IGNORE_migrate_pages 1
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */