Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/assembler.h   | 133
-rw-r--r-- | arch/arm/include/asm/elf.h          |   3
-rw-r--r-- | arch/arm/include/asm/ftrace.h       |  35
-rw-r--r-- | arch/arm/include/asm/futex.h        |   1
-rw-r--r-- | arch/arm/include/asm/mach/mmc.h     |   2
-rw-r--r-- | arch/arm/include/asm/memory.h       |  23
-rw-r--r-- | arch/arm/include/asm/mmu_context.h  |   2
-rw-r--r-- | arch/arm/include/asm/page-nommu.h   |   3
-rw-r--r-- | arch/arm/include/asm/page.h         |   4
-rw-r--r-- | arch/arm/include/asm/pgalloc.h      |  16
-rw-r--r-- | arch/arm/include/asm/pgtable.h      |  37
-rw-r--r-- | arch/arm/include/asm/ptrace.h       |   8
-rw-r--r-- | arch/arm/include/asm/socket.h       |   3
-rw-r--r-- | arch/arm/include/asm/thread_info.h  |   5
-rw-r--r-- | arch/arm/include/asm/uaccess.h      |   7
-rw-r--r-- | arch/arm/include/asm/unified.h      | 126
-rw-r--r-- | arch/arm/include/asm/unistd.h       |   7
17 files changed, 366 insertions, 49 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 15f8a092b700..00f46d9ce299 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -74,23 +74,56 @@
  * Enable and disable interrupts
  */
 #if __LINUX_ARM_ARCH__ >= 6
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	cpsid	i
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	cpsie	i
 	.endm
 #else
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	msr	cpsr_c, #SVC_MODE
 	.endm
 #endif
 
+	.macro asm_trace_hardirqs_off
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	stmdb	sp!, {r0-r3, ip, lr}
+	bl	trace_hardirqs_off
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on_cond, cond
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	/*
+	 * actually the registers should be pushed and pop'd conditionally, but
+	 * after bl the flags are certainly clobbered
+	 */
+	stmdb	sp!, {r0-r3, ip, lr}
+	bl\cond	trace_hardirqs_on
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on
+	asm_trace_hardirqs_on_cond al
+	.endm
+
+	.macro disable_irq
+	disable_irq_notrace
+	asm_trace_hardirqs_off
+	.endm
+
+	.macro enable_irq
+	asm_trace_hardirqs_on
+	enable_irq_notrace
+	.endm
 /*
  * Save the current IRQ state and disable IRQs.  Note that this macro
  * assumes FIQs are enabled, and that the processor is in SVC mode.
@@ -104,10 +137,16 @@
  * Restore interrupt state previously stored in a register.  We don't
  * guarantee that this will preserve the flags.
  */
-	.macro	restore_irqs, oldcpsr
+	.macro	restore_irqs_notrace, oldcpsr
 	msr	cpsr_c, \oldcpsr
 	.endm
 
+	.macro restore_irqs, oldcpsr
+	tst	\oldcpsr, #PSR_I_BIT
+	asm_trace_hardirqs_on_cond eq
+	restore_irqs_notrace \oldcpsr
+	.endm
+
 #define USER(x...)				\
9999:	x;					\
 	.section __ex_table,"a";		\
@@ -127,3 +166,87 @@
 #endif
 #endif
 	.endm
+
+#ifdef CONFIG_THUMB2_KERNEL
+	.macro	setmode, mode, reg
+	mov	\reg, #\mode
+	msr	cpsr_c, \reg
+	.endm
+#else
+	.macro	setmode, mode, reg
+	msr	cpsr_c, #\mode
+	.endm
+#endif
+
+/*
+ * STRT/LDRT access macros with ARM and Thumb-2 variants
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+
+	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort
+9999:
+	.if	\inc == 1
+	\instr\cond\()bt \reg, [\ptr, #\off]
+	.elseif	\inc == 4
+	\instr\cond\()t \reg, [\ptr, #\off]
+	.else
+	.error	"Unsupported inc macro argument"
+	.endif
+
+	.section __ex_table,"a"
+	.align	3
+	.long	9999b, \abort
+	.previous
+	.endm
+
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
+	@ explicit IT instruction needed because of the label
+	@ introduced by the USER macro
+	.ifnc	\cond,al
+	.if	\rept == 1
+	itt	\cond
+	.elseif	\rept == 2
+	ittt	\cond
+	.else
+	.error	"Unsupported rept macro argument"
+	.endif
+	.endif
+
+	@ Slightly optimised to avoid incrementing the pointer twice
+	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
+	.if	\rept == 2
+	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+	.endif
+
+	add\cond \ptr, #\rept * \inc
+	.endm
+
+#else	/* !CONFIG_THUMB2_KERNEL */
+
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
+	.rept	\rept
+9999:
+	.if	\inc == 1
+	\instr\cond\()bt \reg, [\ptr], #\inc
+	.elseif	\inc == 4
+	\instr\cond\()t \reg, [\ptr], #\inc
+	.else
+	.error	"Unsupported inc macro argument"
+	.endif
+
+	.section __ex_table,"a"
+	.align	3
+	.long	9999b, \abort
+	.previous
+	.endr
+	.endm
+
+#endif	/* CONFIG_THUMB2_KERNEL */
+
+	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
+	.endm
+
+	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
+	.endm
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c207504de84d..c3b911ee9151 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -55,6 +55,9 @@ typedef struct user_fp elf_fpregset_t;
 #define R_ARM_MOVW_ABS_NC	43
 #define R_ARM_MOVT_ABS		44
 
+#define R_ARM_THM_CALL		10
+#define R_ARM_THM_JUMP24	30
+
 /*
  * These are used to set parameters in the core dumps.
  */
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 39c8bc1a006a..103f7ee97313 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -7,8 +7,43 @@
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
+extern void __gnu_mcount_nc(void);
 #endif
 
 #endif
 
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+/*
+ * return_address uses walk_stackframe to do it's work.  If both
+ * CONFIG_FRAME_POINTER=y and CONFIG_ARM_UNWIND=y walk_stackframe uses unwind
+ * information.  For this to work in the function tracer many functions would
+ * have to be marked with __notrace.  So for now just depend on
+ * !CONFIG_ARM_UNWIND.
+ */
+
+void *return_address(unsigned int);
+
+#else
+
+extern inline void *return_address(unsigned int level)
+{
+	return NULL;
+}
+
+#endif
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 ((unsigned long)return_address(1))
+#define CALLER_ADDR2 ((unsigned long)return_address(2))
+#define CALLER_ADDR3 ((unsigned long)return_address(3))
+#define CALLER_ADDR4 ((unsigned long)return_address(4))
+#define CALLER_ADDR5 ((unsigned long)return_address(5))
+#define CALLER_ADDR6 ((unsigned long)return_address(6))
+
+#endif /* ifndef __ASSEMBLY__ */
+
 #endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 9ee743b95de8..bfcc15929a7f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -99,6 +99,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrt	%0, [%3]\n"
 	"	teq	%0, %1\n"
+	"	it	eq	@ explicit IT needed for the 2b label\n"
 	"2:	streqt	%2, [%3]\n"
 	"3:\n"
 	"	.section __ex_table,\"a\"\n"
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index 4da332b03144..b490ecc79def 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -10,6 +10,8 @@ struct mmc_platform_data {
 	unsigned int ocr_mask;			/* available voltages */
 	u32 (*translate_vdd)(struct device *, unsigned int);
 	unsigned int (*status)(struct device *);
+	int	gpio_wp;
+	int	gpio_cd;
 };
 
 #endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 85763db87449..cefedf062138 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,7 +44,13 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
+#ifndef CONFIG_THUMB2_KERNEL
 #define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
+#else
+/* smaller range for Thumb-2 symbols relocation (2^24)*/
+#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024)
+#endif
+
 #if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif
@@ -212,7 +218,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  *
  *  page_to_pfn(page)	convert a struct page * to a PFN number
  *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
- *  pfn_valid(pfn)	indicates whether a PFN number is valid
  *
  *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
  *  virt_addr_valid(k)	indicates whether a virtual address is valid
@@ -221,10 +226,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#ifndef CONFIG_SPARSEMEM
-#define pfn_valid(pfn)		((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
-#endif
-
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
@@ -241,18 +242,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define arch_pfn_to_nid(pfn)	PFN_TO_NID(pfn)
 #define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)
 
-#define pfn_valid(pfn)						\
-	({							\
-		unsigned int nid = PFN_TO_NID(pfn);		\
-		int valid = nid < MAX_NUMNODES;			\
-		if (valid) {					\
-			pg_data_t *node = NODE_DATA(nid);	\
-			valid = (pfn - node->node_start_pfn) <	\
-				node->node_spanned_pages;	\
-		}						\
-		valid;						\
-	})
-
 #define virt_to_page(kaddr)					\
 	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 263fed05ea33..bcdb9291ef0c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -62,8 +62,10 @@ static inline void check_context(struct mm_struct *mm)
 
 static inline void check_context(struct mm_struct *mm)
 {
+#ifdef CONFIG_MMU
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+#endif
 }
 
 #define init_new_context(tsk,mm)	0
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 3574c0deb37f..d1b162a18dcb 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -43,7 +43,4 @@ typedef unsigned long pgprot_t;
 #define __pmd(x)        (x)
 #define __pgprot(x)     (x)
 
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
 #endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 9c746af1bf6e..3a32af4cce30 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -194,6 +194,10 @@ typedef unsigned long pgprot_t;
 
 typedef struct page *pgtable_t;
 
+#ifndef CONFIG_SPARSEMEM
+extern int pfn_valid(unsigned long);
+#endif
+
 #include <asm/memory.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 3dcd64bf1824..b12cc98bbe04 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -36,6 +36,8 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)			get_pgd_slow(mm)
 #define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
 
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
 /*
  * Allocate one PTE table.
  *
@@ -57,7 +59,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *pte;
 
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
 	if (pte) {
 		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
 		pte += PTRS_PER_PTE;
@@ -71,10 +73,16 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *pte;
 
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#ifdef CONFIG_HIGHPTE
+	pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
+#else
+	pte = alloc_pages(PGALLOC_GFP, 0);
+#endif
 	if (pte) {
-		void *page = page_address(pte);
-		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+		if (!PageHighMem(pte)) {
+			void *page = page_address(pte);
+			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+		}
 		pgtable_page_ctor(pte);
 	}
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index c433c6c73112..201ccaa11f61 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -162,10 +162,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * entries are stored 1024 bytes below.
  */
 #define L_PTE_PRESENT		(1 << 0)
-#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
 #define L_PTE_YOUNG		(1 << 1)
-#define L_PTE_BUFFERABLE	(1 << 2)	/* obsolete, matches PTE */
-#define L_PTE_CACHEABLE		(1 << 3)	/* obsolete, matches PTE */
+#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
 #define L_PTE_DIRTY		(1 << 6)
 #define L_PTE_WRITE		(1 << 7)
 #define L_PTE_USER		(1 << 8)
@@ -264,10 +262,19 @@ extern struct page *empty_zero_page;
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 #define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_offset_map(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
+
+#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
+#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
+#define pte_unmap(pte)			__pte_unmap(pte, KM_PTE0)
+#define pte_unmap_nested(pte)		__pte_unmap(pte, KM_PTE1)
+
+#ifndef CONFIG_HIGHPTE
+#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte,km)	do { } while (0)
+#else
+#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
+#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
+#endif
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
@@ -381,13 +388,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <--------------- offset --------------------> <--- type --> 0 0
+ *   <--------------- offset --------------------> <- type --> 0 0 0
 *
- * This gives us up to 127 swap files and 32GB per swap file.  Note that
+ * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
-#define __SWP_TYPE_SHIFT	2
-#define __SWP_TYPE_BITS		7
+#define __SWP_TYPE_SHIFT	3
+#define __SWP_TYPE_BITS		6
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
@@ -411,13 +418,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <------------------------ offset -------------------------> 1 0
+ *   <----------------------- offset ------------------------> 1 0 0
 */
 #define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
-#define pte_to_pgoff(x)		(pte_val(x) >> 2)
-#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)
+#define pte_to_pgoff(x)		(pte_val(x) >> 3)
+#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)
 
-#define PTE_FILE_MAX_BITS	30
+#define PTE_FILE_MAX_BITS	29
 
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 /* FIXME: this is not correct */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 67b833c9b6b9..bbecccda76d0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -82,6 +82,14 @@
 #define PSR_ENDSTATE	0
 #endif
 
+/*
+ * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
+ * process is located in memory.
+ */
+#define PT_TEXT_ADDR		0x10000
+#define PT_DATA_ADDR		0x10004
+#define PT_TEXT_END_ADDR	0x10008
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
index 537de4e0ef50..92ac61d294fd 100644
--- a/arch/arm/include/asm/socket.h
+++ b/arch/arm/include/asm/socket.h
@@ -57,4 +57,7 @@
 #define SO_TIMESTAMPING		37
 #define SCM_TIMESTAMPING	SO_TIMESTAMPING
 
+#define SO_PROTOCOL		38
+#define SO_DOMAIN		39
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 73394e50cbca..2dfb7d7a66e9 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -130,23 +130,28 @@ extern void vfp_sync_state(struct thread_info *thread);
  * TIF_SYSCALL_TRACE	- syscall trace active
  * TIF_SIGPENDING	- signal pending
  * TIF_NEED_RESCHED	- rescheduling necessary
+ * TIF_NOTIFY_RESUME	- callback before returning to user
  * TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  * TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
+#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18
 #define TIF_FREEZE		19
+#define TIF_RESTORE_SIGMASK	20
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
+#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 
 /*
  * Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0da9bc9b3b1d..1d6bd40a4322 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -17,6 +17,7 @@
 #include <asm/memory.h>
 #include <asm/domain.h>
 #include <asm/system.h>
+#include <asm/unified.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -365,8 +366,10 @@ do {									\
 
 #define __put_user_asm_dword(x,__pu_addr,err)			\
 	__asm__ __volatile__(					\
- 	"1:	strt	" __reg_oper1 ", [%1], #4\n"		\
- 	"2:	strt	" __reg_oper0 ", [%1]\n"		\
+ ARM(	"1:	strt	" __reg_oper1 ", [%1], #4\n"	)	\
+ ARM(	"2:	strt	" __reg_oper0 ", [%1]\n"	)	\
+ THUMB(	"1:	strt	" __reg_oper1 ", [%1]\n"	)	\
+ THUMB(	"2:	strt	" __reg_oper0 ", [%1, #4]\n"	)	\
 	"3:\n"							\
 	" .section .fixup,\"ax\"\n"				\
 	" .align	2\n"					\
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
new file mode 100644
index 000000000000..073e85b9b961
--- /dev/null
+++ b/arch/arm/include/asm/unified.h
@@ -0,0 +1,126 @@
+/*
+ * include/asm-arm/unified.h - Unified Assembler Syntax helper macros
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_UNIFIED_H
+#define __ASM_UNIFIED_H
+
+#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
+	.syntax unified
+#endif
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+#if __GNUC__ < 4
+#error Thumb-2 kernel requires gcc >= 4
+#endif
+
+/* The CPSR bit describing the instruction set (Thumb) */
+#define PSR_ISETSTATE	PSR_T_BIT
+
+#define ARM(x...)
+#define THUMB(x...)	x
+#define W(instr)	instr.w
+#define BSYM(sym)	sym + 1
+
+#else	/* !CONFIG_THUMB2_KERNEL */
+
+/* The CPSR bit describing the instruction set (ARM) */
+#define PSR_ISETSTATE	0
+
+#define ARM(x...)	x
+#define THUMB(x...)
+#define W(instr)	instr
+#define BSYM(sym)	sym
+
+#endif	/* CONFIG_THUMB2_KERNEL */
+
+#ifndef CONFIG_ARM_ASM_UNIFIED
+
+/*
+ * If the unified assembly syntax isn't used (in ARM mode), these
+ * macros expand to an empty string
+ */
+#ifdef __ASSEMBLY__
+	.macro	it, cond
+	.endm
+	.macro	itt, cond
+	.endm
+	.macro	ite, cond
+	.endm
+	.macro	ittt, cond
+	.endm
+	.macro	itte, cond
+	.endm
+	.macro	itet, cond
+	.endm
+	.macro	itee, cond
+	.endm
+	.macro	itttt, cond
+	.endm
+	.macro	ittte, cond
+	.endm
+	.macro	ittet, cond
+	.endm
+	.macro	ittee, cond
+	.endm
+	.macro	itett, cond
+	.endm
+	.macro	itete, cond
+	.endm
+	.macro	iteet, cond
+	.endm
+	.macro	iteee, cond
+	.endm
+#else	/* !__ASSEMBLY__ */
+__asm__(
+"	.macro	it, cond\n"
+"	.endm\n"
+"	.macro	itt, cond\n"
+"	.endm\n"
+"	.macro	ite, cond\n"
+"	.endm\n"
+"	.macro	ittt, cond\n"
+"	.endm\n"
+"	.macro	itte, cond\n"
+"	.endm\n"
+"	.macro	itet, cond\n"
+"	.endm\n"
+"	.macro	itee, cond\n"
+"	.endm\n"
+"	.macro	itttt, cond\n"
+"	.endm\n"
+"	.macro	ittte, cond\n"
+"	.endm\n"
+"	.macro	ittet, cond\n"
+"	.endm\n"
+"	.macro	ittee, cond\n"
+"	.endm\n"
+"	.macro	itett, cond\n"
+"	.endm\n"
+"	.macro	itete, cond\n"
+"	.endm\n"
+"	.macro	iteet, cond\n"
+"	.endm\n"
+"	.macro	iteee, cond\n"
+"	.endm\n");
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* CONFIG_ARM_ASM_UNIFIED */
+
+#endif	/* !__ASM_UNIFIED_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 0e97b8cb77d5..9122c9ee18fb 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -360,8 +360,8 @@
 #define __NR_readlinkat			(__NR_SYSCALL_BASE+332)
 #define __NR_fchmodat			(__NR_SYSCALL_BASE+333)
 #define __NR_faccessat			(__NR_SYSCALL_BASE+334)
-					/* 335 for pselect6 */
-					/* 336 for ppoll */
+#define __NR_pselect6			(__NR_SYSCALL_BASE+335)
+#define __NR_ppoll			(__NR_SYSCALL_BASE+336)
 #define __NR_unshare			(__NR_SYSCALL_BASE+337)
 #define __NR_set_robust_list		(__NR_SYSCALL_BASE+338)
 #define __NR_get_robust_list		(__NR_SYSCALL_BASE+339)
@@ -372,7 +372,7 @@
 #define __NR_vmsplice			(__NR_SYSCALL_BASE+343)
 #define __NR_move_pages			(__NR_SYSCALL_BASE+344)
 #define __NR_getcpu			(__NR_SYSCALL_BASE+345)
-					/* 346 for epoll_pwait */
+#define __NR_epoll_pwait		(__NR_SYSCALL_BASE+346)
 #define __NR_kexec_load			(__NR_SYSCALL_BASE+347)
 #define __NR_utimensat			(__NR_SYSCALL_BASE+348)
 #define __NR_signalfd			(__NR_SYSCALL_BASE+349)
@@ -432,6 +432,7 @@
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
 #define __ARCH_WANT_SYS_TIME