Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--  arch/arc/include/asm/Kbuild         |   1
-rw-r--r--  arch/arc/include/asm/cache.h        |   8
-rw-r--r--  arch/arc/include/asm/irq.h          |   4
-rw-r--r--  arch/arc/include/asm/irqflags.h     |  22
-rw-r--r--  arch/arc/include/asm/mach_desc.h    |  17
-rw-r--r--  arch/arc/include/asm/mmu.h          |   2
-rw-r--r--  arch/arc/include/asm/mmu_context.h  |  61
-rw-r--r--  arch/arc/include/asm/perf_event.h   | 204
-rw-r--r--  arch/arc/include/asm/pgalloc.h      |  11
-rw-r--r--  arch/arc/include/asm/prom.h         |  14
-rw-r--r--  arch/arc/include/asm/setup.h        |   2
-rw-r--r--  arch/arc/include/asm/smp.h          |   2
-rw-r--r--  arch/arc/include/asm/spinlock.h     |   9
-rw-r--r--  arch/arc/include/asm/thread_info.h  |   2
-rw-r--r--  arch/arc/include/asm/tlbflush.h     |  11
-rw-r--r--  arch/arc/include/asm/uaccess.h      |   4
-rw-r--r--  arch/arc/include/asm/unaligned.h    |   3
17 files changed, 308 insertions(+), 69 deletions(-)
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index d8dd660898b9..5943f7f9d325 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -46,3 +46,4 @@ generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index e4abdaac6f9f..2fd3162ec4df 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -17,13 +17,7 @@
 #endif
 
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
-
-/* For a rare case where customers have differently config I/D */
-#define ARC_ICACHE_LINE_LEN	L1_CACHE_BYTES
-#define ARC_DCACHE_LINE_LEN	L1_CACHE_BYTES
-
-#define ICACHE_LINE_MASK	(~(ARC_ICACHE_LINE_LEN - 1))
-#define DCACHE_LINE_MASK	(~(ARC_DCACHE_LINE_LEN - 1))
+#define CACHE_LINE_MASK		(~(L1_CACHE_BYTES - 1))
 
 /*
  * ARC700 doesn't cache any access in top 256M.
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c0a72105ee0b..291a70db68b8 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -18,8 +18,8 @@
 
 #include <asm-generic/irq.h>
 
-extern void __init arc_init_IRQ(void);
-extern int __init get_hw_config_num_irq(void);
+extern void arc_init_IRQ(void);
+extern int get_hw_config_num_irq(void);
 
 void arc_local_timer_setup(unsigned int cpu);
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index b68b53f458d1..cb7efc29f16f 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -151,16 +151,38 @@ static inline void arch_unmask_irq(unsigned int irq)
 
 #else
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+	bl	trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+	bl	trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
 .macro IRQ_DISABLE  scratch
 	lr	\scratch, [status32]
 	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
 	flag	\scratch
+	TRACE_ASM_IRQ_DISABLE
 .endm
 
 .macro IRQ_ENABLE  scratch
 	lr	\scratch, [status32]
 	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
 	flag	\scratch
+	TRACE_ASM_IRQ_ENABLE
 .endm
 
 #endif	/* __ASSEMBLY__ */
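A note on the cache.h hunk above: since the I-cache and D-cache line lengths are both derived from L1_CACHE_SHIFT, the separate I/D masks were redundant and collapse into a single CACHE_LINE_MASK. A minimal sketch of how such a mask aligns an address down to its line boundary; the helper name is hypothetical and the 64-byte line size is an assumption for illustration:

    #define L1_CACHE_SHIFT	6		/* assumed: 64-byte lines */
    #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
    #define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))

    /* hypothetical helper: round a flush address down to its cache line */
    static inline unsigned long cache_line_base(unsigned long addr)
    {
    	return addr & CACHE_LINE_MASK;	/* e.g. 0x1234 -> 0x1200 */
    }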
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 9998dc846ebb..e8993a2be6c2 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -51,22 +51,12 @@ struct machine_desc {
 /*
  * Current machine - only accessible during boot.
  */
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
 
 /*
  * Machine type table - also only accessible during boot
  */
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
-#define for_each_machine_desc(p)		\
-	for (p = __arch_info_begin; p < __arch_info_end; p++)
-
-static inline struct machine_desc *default_machine_desc(void)
-{
-	/* the default machine is the last one linked in */
-	if (__arch_info_end - 1 < __arch_info_begin)
-		return NULL;
-	return __arch_info_end - 1;
-}
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
 
 /*
  * Set of macros to define architecture features.
@@ -81,7 +71,6 @@ __attribute__((__section__(".arch.info.init"))) = {	\
 #define MACHINE_END				\
 };
 
-extern struct machine_desc *setup_machine_fdt(void *dt);
-extern void __init copy_devtree(void);
+extern const struct machine_desc *setup_machine_fdt(void *dt);
 
 #endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index c2663b32866b..8c84ae98c337 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -48,7 +48,7 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
-	unsigned long asid;	/* 8 bit MMU PID + Generation cycle */
+	unsigned long asid[NR_CPUS];	/* 8 bit MMU PID + Generation cycle */
 } mm_context_t;
 
 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
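For context on the mach_desc.h constification: a board port declares its descriptor with the MACHINE_START/MACHINE_END macros from this header, and setup_machine_fdt() picks the entry whose compatible list matches the device tree. A sketch of typical usage; the board name, compatible string, and callback are made up, and the field names are assumed from the ARM-style machine-descriptor pattern this header follows:

    static void __init some_board_init_machine(void)
    {
    	/* hypothetical: register platform devices, etc. */
    }

    static const char *some_board_compat[] __initconst = {
    	"snps,some-board",		/* hypothetical DT compatible */
    	NULL,
    };

    MACHINE_START(SOME_BOARD, "some_board")
    	.dt_compat	= some_board_compat,
    	.init_machine	= some_board_init_machine,
    MACHINE_END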
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 43a1b51bb8cc..1fd467ef658f 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -30,13 +30,13 @@
  * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
  *
  * Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cache.
+ * of H/w ASID is done using software tracker @asid_cpu.
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
  * A new allocation cycle, post rollover, could potentially reassign an ASID
  * to a different task. Thus the rule is to refresh the ASID in a new cycle.
- * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
  * serve as cycle/generation indicator and natural 32 bit unsigned math
  * automagically increments the generation when lower 8 bits rollover.
  */
@@ -47,9 +47,11 @@
 #define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
 #define MM_CTXT_NO_ASID		0UL
 
-#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK)
+#define asid_mm(mm, cpu)	mm->context.asid[cpu]
+#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
 
-extern unsigned int asid_cache;
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)
 
 /*
  * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
@@ -57,6 +59,7 @@ extern unsigned int asid_cache;
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
+	const unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -71,28 +74,28 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	 * first need to destroy the context, setting it to invalid
 	 * value.
 	 */
-	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
+	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
 		goto set_hw;
 
 	/* move to new ASID and handle rollover */
-	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
+	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
 
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/*
		 * Above check is for rollover of 8 bit ASID in 32 bit container.
 		 * If the container itself wrapped around, set it to a non zero
 		 * "generation" to distinguish from no context
 		 */
-		if (!asid_cache)
-			asid_cache = MM_CTXT_FIRST_CYCLE;
+		if (!asid_cpu(cpu))
+			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
 	}
 
 	/* Assign new ASID to tsk */
-	mm->context.asid = asid_cache;
+	asid_mm(mm, cpu) = asid_cpu(cpu);
 
 set_hw:
-	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
+	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -104,16 +107,45 @@ set_hw:
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	mm->context.asid = MM_CTXT_NO_ASID;
+	int i;
+
+	for_each_possible_cpu(i)
+		asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
 	return 0;
 }
 
+static inline void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags;
+
+	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+	local_irq_save(flags);
+	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+	local_irq_restore(flags);
+}
+
 /* Prepare the MMU for task: setup PID reg with allocated ASID
    If task doesn't have an ASID (never alloc or stolen, get a new ASID)
 */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
 {
+	const int cpu = smp_processor_id();
+
+	/*
+	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
+	 * for the switched-out task, unlike some other arches.
+	 * It is used to enlist cpus for sending TLB flush IPIs; not sending
+	 * the IPI to CPUs where a task once ran could cause stale TLB entry
+	 * re-use, specially for a multi-threaded task.
+	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+	 *      For a non-aggregating mm_cpumask, IPI is not sent to C1, and
+	 *      if T1 were to re-migrate to C1, it could access the unmapped
+	 *      region via any existing stale TLB entries.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
 #ifndef CONFIG_SMP
 	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
@@ -131,11 +163,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  */
 #define activate_mm(prev, next)		switch_mm(prev, next, NULL)
 
-static inline void destroy_context(struct mm_struct *mm)
-{
-	mm->context.asid = MM_CTXT_NO_ASID;
-}
-
 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
  * for retiring-mm. However destroy_context( ) still needs to do that because
  * between mm_release( ) ==> deactivate_mm( ) and
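The mmu_context.h rework is easiest to follow from the bit layout: the low 8 bits of each per-CPU asid value are the hardware PID, the upper 24 bits are an allocation generation, and a plain unsigned increment carries PID overflow into the generation. A worked example with assumed mask values (the hunk context does not show MM_CTXT_CYCLE_MASK, so its definition here is an assumption):

    #define MM_CTXT_ASID_MASK	0x000000ffUL		/* assumed: 8-bit h/w PID */
    #define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)	/* assumed: 24-bit generation */

    unsigned int asid_cache = 0x000000ff;	/* last PID of generation 0 */

    asid_cache++;	/* -> 0x00000100: PID wraps to 0, generation becomes 1 */

    /* a task still holding 0x00000042 now differs in the MM_CTXT_CYCLE_MASK
     * bits, so get_new_mmu_context() re-allocates instead of reusing the
     * stale ASID */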
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 115ad96480e6..cbf755e32a03 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -1,5 +1,7 @@
 /*
- * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Linux performance counter support for ARC
+ *
+ * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -10,4 +12,204 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+/* real maximum varies per CPU, this is the maximum supported by the driver */
+#define ARC_PMU_MAX_HWEVENTS	64
+
+#define ARC_REG_CC_BUILD	0xF6
+#define ARC_REG_CC_INDEX	0x240
+#define ARC_REG_CC_NAME0	0x241
+#define ARC_REG_CC_NAME1	0x242
+
+#define ARC_REG_PCT_BUILD	0xF5
+#define ARC_REG_PCT_COUNTL	0x250
+#define ARC_REG_PCT_COUNTH	0x251
+#define ARC_REG_PCT_SNAPL	0x252
+#define ARC_REG_PCT_SNAPH	0x253
+#define ARC_REG_PCT_CONFIG	0x254
+#define ARC_REG_PCT_CONTROL	0x255
+#define ARC_REG_PCT_INDEX	0x256
+
+#define ARC_REG_PCT_CONTROL_CC	(1 << 16)	/* clear counts */
+#define ARC_REG_PCT_CONTROL_SN	(1 << 17)	/* snapshot */
+
+struct arc_reg_pct_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int m:8, c:8, r:6, s:2, v:8;
+#else
+	unsigned int v:8, s:2, r:6, c:8, m:8;
+#endif
+};
+
+struct arc_reg_cc_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int c:16, r:8, v:8;
+#else
+	unsigned int v:8, r:8, c:16;
+#endif
+};
+
+#define PERF_COUNT_ARC_DCLM	(PERF_COUNT_HW_MAX + 0)
+#define PERF_COUNT_ARC_DCSM	(PERF_COUNT_HW_MAX + 1)
+#define PERF_COUNT_ARC_ICM	(PERF_COUNT_HW_MAX + 2)
+#define PERF_COUNT_ARC_BPOK	(PERF_COUNT_HW_MAX + 3)
+#define PERF_COUNT_ARC_EDTLB	(PERF_COUNT_HW_MAX + 4)
+#define PERF_COUNT_ARC_EITLB	(PERF_COUNT_HW_MAX + 5)
+#define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 6)
+
+/*
+ * The "generalized" performance events seem to really be a copy
+ * of the available events on x86 processors; the mapping to ARC
+ * events is not always possible 1-to-1. Fortunately, there doesn't
+ * seem to be an exact definition for these events, so we can cheat
+ * a bit where necessary.
+ *
+ * In particular, the following PERF events may behave a bit differently
+ * compared to other architectures:
+ *
+ * PERF_COUNT_HW_CPU_CYCLES
+ *	Cycles not in halted state
+ *
+ * PERF_COUNT_HW_REF_CPU_CYCLES
+ *	Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES
+ *	for now as we don't do Dynamic Voltage/Frequency Scaling (yet)
+ *
+ * PERF_COUNT_HW_BUS_CYCLES
+ *	Unclear what this means, Intel uses 0x013c, which according to
+ *	their datasheet means "unhalted reference cycles". It sounds similar
+ *	to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it.
+ *
+ * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ *	The ARC 700 can either measure stalls per pipeline stage, or all stalls
+ *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
+ *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to
+ *	STALLED_CYCLES_FRONTEND.
+ *
+ *	We could start multiple performance counters and combine everything
+ *	afterwards, but that makes it complicated.
+ *
+ *	Note that I$ cache misses aren't counted by either of the two!
+ */
+
+static const char * const arc_pmu_ev_hw_map[] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
+	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
+	[PERF_COUNT_HW_BUS_CYCLES] = "crun",
+	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
+	[PERF_COUNT_ARC_DCLM] = "dclm",
+	[PERF_COUNT_ARC_DCSM] = "dcsm",
+	[PERF_COUNT_ARC_ICM] = "icm",
+	[PERF_COUNT_ARC_BPOK] = "bpok",
+	[PERF_COUNT_ARC_EDTLB] = "edtlb",
+	[PERF_COUNT_ARC_EITLB] = "eitlb",
+};
+
+#define C(_x)			PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED	0xffff
+
+static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCLM,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCSM,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_ICM,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EDTLB,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EITLB,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+			[C(RESULT_MISS)]	= PERF_COUNT_HW_BRANCH_MISSES,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
 #endif /* __ASM_PERF_EVENT_H */
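The string map above only makes sense together with the condition-name registers defined earlier: the PCT hardware exposes its countable conditions as 8-character names, and a driver matches those against arc_pmu_ev_hw_map at probe time. A sketch of that matching, assuming the register semantics implied by the defines (write an index to ARC_REG_CC_INDEX, then read the name back through ARC_REG_CC_NAME0/1); the function itself is hypothetical, not part of this patch:

    union cc_name {
    	struct { unsigned int word0, word1; } indiv;
    	char str[8];
    };

    /* hypothetical: find the h/w condition index backing a generic event */
    static int arc_pmu_ev_to_hw_idx(int generic_ev, int n_conditions)
    {
    	union cc_name cc;
    	int i;

    	for (i = 0; i < n_conditions; i++) {
    		write_aux_reg(ARC_REG_CC_INDEX, i);
    		cc.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
    		cc.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
    		if (!strncmp(cc.str, arc_pmu_ev_hw_map[generic_ev], 8))
    			return i;	/* program into ARC_REG_PCT_CONFIG */
    	}
    	return -1;	/* condition not countable on this CPU */
    }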
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 36a9f20c21a3..81208bfd9dcb 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -105,11 +105,16 @@ static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pgtable_t pte_pg;
+	struct page *page;
 
 	pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
-	if (pte_pg) {
-		memzero((void *)pte_pg, PTRS_PER_PTE * 4);
-		pgtable_page_ctor(virt_to_page(pte_pg));
+	if (!pte_pg)
+		return 0;
+	memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+	page = virt_to_page(pte_pg);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return 0;
 	}
 
 	return pte_pg;
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
deleted file mode 100644
index 692d0d0789a7..000000000000
--- a/arch/arc/include/asm/prom.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_PROM_H_
-#define _ASM_ARC_PROM_H_
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-#endif
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 229e50681497..e10f8cef56a8 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,7 +31,7 @@ struct cpuinfo_data {
 extern int root_mountflags, end_mem;
 extern int running_on_hw;
 
-void __init setup_processor(void);
+void setup_processor(void);
 void __init setup_arch_memory(void);
 
 #endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index c4fb211dcd25..eefc29f08cdb 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -30,7 +30,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
  * APIs provided by arch SMP code to rest of arch code
  */
 extern void __init smp_init_cpus(void);
-extern void __init first_lines_of_secondary(void);
+extern void first_lines_of_secondary(void);
 extern const char *arc_platform_smp_cpuinfo(void);
 
 /*
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	__asm__ __volatile__(
+	"	ex  %0, [%1]		\n"
+	: "+r" (tmp)
+	: "r"(&(lock->slock))
+	: "memory");
+
 	smp_mb();
 }
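On the spinlock.h hunk just above: the unlock path switches from a plain store to the atomic `ex` (exchange) instruction, so the release now uses the same primitive as the `ex`-based acquire loop. For reference, a sketch of the matching acquire side; arch_spin_lock() in this file already follows this shape, and __ARCH_SPIN_LOCK_LOCKED__ is assumed to be the counterpart constant to __ARCH_SPIN_LOCK_UNLOCKED__:

    static inline void arch_spin_lock_sketch(arch_spinlock_t *lock)
    {
    	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;	/* assumed constant */

    	smp_mb();

    	do {
    		__asm__ __volatile__(
    		"	ex  %0, [%1]	\n"	/* atomically swap val <-> lock->slock */
    		: "+r" (val)
    		: "r" (&(lock->slock))
    		: "memory");
    	} while (val == __ARCH_SPIN_LOCK_LOCKED__);	/* spin until we read UNLOCKED */

    	smp_mb();
    }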
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 2d50a4cdd7f3..45be21672011 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -80,8 +80,6 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 #endif /* !__ASSEMBLY__ */
 
-#define PREEMPT_ACTIVE      0x10000000
-
 /*
  * thread information flags
  * - these are process state flags that various assembly files may need to
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index b2f9bc7f68c8..71c7b2e4b874 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
 
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
 #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()			local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
 
 #endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-negative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
-				 (((addr)+(sz)) <= get_fs()))
+				 ((addr) <= (get_fs() - (sz))))
 
 #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 				 likely(__user_ok((addr), (sz))))
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
index 60702f3751d2..3e5f071bc00c 100644
--- a/arch/arc/include/asm/unaligned.h
+++ b/arch/arc/include/asm/unaligned.h
@@ -22,7 +22,8 @@ static inline int
 misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		 struct callee_regs *cregs)
 {
-	return 0;
+	/* Not fixed */
+	return 1;
 }
 
 #endif
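Finally, the uaccess.h rewrite of __user_ok() is an overflow fix, not just a style change: computing addr + sz can wrap in 32-bit arithmetic and wrongly pass the old check, whereas addr <= limit - sz cannot, given that the first clause has already bounded sz by TASK_SIZE. A worked example with hypothetical values:

    unsigned long limit = 0x80000000;	/* e.g. get_fs() == TASK_SIZE */
    unsigned long addr  = 0xffffff00;	/* bogus pointer near top of memory */
    unsigned long sz    = 0x200;

    /* old check: addr + sz wraps to 0x00000100 <= limit -> wrongly passes */
    /* new check: addr <= limit - sz == 0x7ffffe00       -> correctly fails */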