Diffstat (limited to 'include/asm-arm/system.h')
-rw-r--r--  include/asm-arm/system.h | 49
1 file changed, 18 insertions, 31 deletions
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index b13a8da4847b..2f44b2044214 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -85,7 +85,9 @@ struct pt_regs;
void die(const char *msg, struct pt_regs *regs, int err)
__attribute__((noreturn));
-void die_if_kernel(const char *str, struct pt_regs *regs, int err);
+struct siginfo;
+void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+ unsigned long err, unsigned long trap);
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
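
For context, the notify_die() hook declared above takes a siginfo and a trap number in addition to the arguments the removed die_if_kernel() took. The following is a minimal sketch of a caller, assuming only the declaration in this hunk; the helper name, signal values and trap number are illustrative placeholders, not taken from this patch.

#include <linux/signal.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/* Illustrative fault-reporting path; all values are placeholders. */
static void sketch_report_undef_instr(struct pt_regs *regs)
{
	struct siginfo info;

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = (void *)instruction_pointer(regs);

	/* str, regs, siginfo, error value, trap number */
	notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}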
@@ -104,6 +106,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
extern int cpu_architecture(void);
+extern void cpu_init(void);
#define set_cr(x) \
__asm__ __volatile__( \
@@ -144,34 +147,12 @@ extern unsigned int user_debug;
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
-#ifdef CONFIG_SMP
/*
- * Define our own context switch locking. This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency. The real problem area is switch_mm() which may
- * do a full cache flush.
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
*/
-#define prepare_arch_switch(rq,next) \
-do { \
- spin_lock(&(next)->switch_lock); \
- spin_unlock_irq(&(rq)->lock); \
-} while (0)
-
-#define finish_arch_switch(rq,prev) \
- spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p) \
- ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
-/*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented. This avoids
- * us needlessly decrementing and incrementing the preempt count.
- */
-#define prepare_arch_switch(rq,next) local_irq_enable()
-#define finish_arch_switch(rq,prev) spin_unlock(&(rq)->lock)
-#define task_running(rq,p) ((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
* switch_to(prev, next) should switch from task `prev' to `next'
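
The effect of defining __ARCH_WANT_INTERRUPTS_ON_CTXSW in the hunk above is that the core scheduler, rather than the architecture-private macros removed there, becomes responsible for running the architecture switch with interrupts enabled. Below is a rough conceptual sketch of that ordering, not the actual scheduler code: the runqueue type and function name are illustrative, and the kernel-thread (next->mm == NULL) case is ignored.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu_context.h>	/* switch_mm() */
#include <asm/system.h>		/* switch_to(), __ARCH_WANT_INTERRUPTS_ON_CTXSW */

/* Illustrative stand-in for the scheduler's per-CPU runqueue. */
struct sketch_rq {
	spinlock_t lock;
	struct task_struct *curr;
};

static void sketch_context_switch(struct sketch_rq *rq,
				  struct task_struct *prev,
				  struct task_struct *next)
{
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	/*
	 * Drop the runqueue lock and re-enable IRQs before the switch,
	 * so a slow cache flush in switch_mm() does not hold off
	 * interrupts for its whole duration.
	 */
	spin_unlock_irq(&rq->lock);
	switch_mm(prev->active_mm, next->mm, next);
	switch_to(prev, next, prev);
#else
	/*
	 * Otherwise the whole switch runs with IRQs disabled and the
	 * runqueue lock is only released afterwards.
	 */
	switch_mm(prev->active_mm, next->mm, next);
	switch_to(prev, next, prev);
	spin_unlock_irq(&rq->lock);
#endif
}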
@@ -307,11 +288,10 @@ do { \
({ \
unsigned long flags; \
local_save_flags(flags); \
- flags & PSR_I_BIT; \
+ (int)(flags & PSR_I_BIT); \
})
#ifdef CONFIG_SMP
-#error SMP not supported
#define smp_mb() mb()
#define smp_rmb() rmb()
@@ -325,6 +305,8 @@ do { \
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
+
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
@@ -337,9 +319,16 @@ do { \
*
* We choose (1) since it's the "easiest" to achieve here and is not
* dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
*/
+#ifdef CONFIG_SMP
+#error SMP is not supported on SA1100/SA110
+#else
#define swp_is_buggy
#endif
+#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
@@ -382,8 +371,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
-#endif /* CONFIG_SMP */
-
#endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
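
For reference, approach (1) described in the StrongARM comment above is what the swp_is_buggy paths inside __xchg() implement: the exchange is emulated with interrupts disabled instead of issuing swp. A simplified sketch of the 32-bit case follows, assuming a uniprocessor system; the helper name is illustrative, and the in-tree function additionally handles other sizes and reports bad ones.

#include <asm/system.h>		/* local_irq_save(), swp_is_buggy */

/*
 * Simplified, illustrative sketch of approach (1); not the exact body
 * of __xchg() in this file.
 */
static inline unsigned long sketch_xchg_u32(volatile unsigned long *ptr,
					    unsigned long x)
{
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;

	/*
	 * With IRQs off, the load/store pair cannot be interrupted,
	 * which is sufficient on a uniprocessor system.
	 */
	local_irq_save(flags);
	ret = *ptr;
	*ptr = x;
	local_irq_restore(flags);
#else
	/* Otherwise use the atomic swap instruction directly. */
	__asm__ __volatile__("swp %0, %1, [%2]"
			     : "=&r" (ret)
			     : "r" (x), "r" (ptr)
			     : "memory", "cc");
#endif
	return ret;
}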