Diffstat (limited to 'arch/blackfin/include')
-rw-r--r--  arch/blackfin/include/asm/atomic.h | 155
-rw-r--r--  arch/blackfin/include/asm/bfin-global.h | 7
-rw-r--r--  arch/blackfin/include/asm/bfin5xx_spi.h | 28
-rw-r--r--  arch/blackfin/include/asm/bfin_sdh.h | 19
-rw-r--r--  arch/blackfin/include/asm/bfin_sport.h | 3
-rw-r--r--  arch/blackfin/include/asm/bfrom.h | 5
-rw-r--r--  arch/blackfin/include/asm/bitops.h | 204
-rw-r--r--  arch/blackfin/include/asm/blackfin.h | 10
-rw-r--r--  arch/blackfin/include/asm/byteorder.h | 42
-rw-r--r--  arch/blackfin/include/asm/cache.h | 29
-rw-r--r--  arch/blackfin/include/asm/cacheflush.h | 20
-rw-r--r--  arch/blackfin/include/asm/checksum.h | 33
-rw-r--r--  arch/blackfin/include/asm/context.S | 47
-rw-r--r--  arch/blackfin/include/asm/cplb-mpu.h | 61
-rw-r--r--  arch/blackfin/include/asm/cplb.h | 25
-rw-r--r--  arch/blackfin/include/asm/cplbinit.h | 83
-rw-r--r--  arch/blackfin/include/asm/cpu.h | 41
-rw-r--r--  arch/blackfin/include/asm/delay.h | 35
-rw-r--r--  arch/blackfin/include/asm/dma.h | 220
-rw-r--r--  arch/blackfin/include/asm/entry.h | 11
-rw-r--r--  arch/blackfin/include/asm/gpio.h | 278
-rw-r--r--  arch/blackfin/include/asm/hardirq.h | 2
-rw-r--r--  arch/blackfin/include/asm/io.h | 14
-rw-r--r--  arch/blackfin/include/asm/ipipe.h | 278
-rw-r--r--  arch/blackfin/include/asm/ipipe_base.h | 80
-rw-r--r--  arch/blackfin/include/asm/irq.h | 296
-rw-r--r--  arch/blackfin/include/asm/kgdb.h | 53
-rw-r--r--  arch/blackfin/include/asm/l1layout.h | 5
-rw-r--r--  arch/blackfin/include/asm/mem_init.h | 364
-rw-r--r--  arch/blackfin/include/asm/mem_map.h | 75
-rw-r--r--  arch/blackfin/include/asm/mmu.h | 1
-rw-r--r--  arch/blackfin/include/asm/mmu_context.h | 27
-rw-r--r--  arch/blackfin/include/asm/mutex-dec.h | 112
-rw-r--r--  arch/blackfin/include/asm/mutex.h | 63
-rw-r--r--  arch/blackfin/include/asm/pda.h | 71
-rw-r--r--  arch/blackfin/include/asm/percpu.h | 12
-rw-r--r--  arch/blackfin/include/asm/pgtable.h | 1
-rw-r--r--  arch/blackfin/include/asm/processor.h | 28
-rw-r--r--  arch/blackfin/include/asm/reboot.h | 6
-rw-r--r--  arch/blackfin/include/asm/rwlock.h | 6
-rw-r--r--  arch/blackfin/include/asm/serial.h | 1
-rw-r--r--  arch/blackfin/include/asm/smp.h | 44
-rw-r--r--  arch/blackfin/include/asm/spinlock.h | 87
-rw-r--r--  arch/blackfin/include/asm/spinlock_types.h | 22
-rw-r--r--  arch/blackfin/include/asm/swab.h | 48
-rw-r--r--  arch/blackfin/include/asm/system.h | 185
-rw-r--r--  arch/blackfin/include/asm/thread_info.h | 5
-rw-r--r--  arch/blackfin/include/asm/uaccess.h | 89
-rw-r--r--  arch/blackfin/include/asm/xor.h | 1
49 files changed, 2441 insertions(+), 891 deletions(-)
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 7cf508718605..94b2a9b19451 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,6 +1,7 @@
#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__
+#include <linux/types.h>
#include <asm/system.h> /* local_irq_XXX() */
/*
@@ -13,108 +14,160 @@
* Tony Kou (tonyko@lineo.ca) Lineo Inc. 2001
*/
-typedef struct {
- int counter;
-} atomic_t;
#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
-static __inline__ void atomic_add(int i, atomic_t * v)
+#ifdef CONFIG_SMP
+
+#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
+
+asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
+
+asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ __raw_atomic_update_asm(&v->counter, i);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ __raw_atomic_update_asm(&v->counter, -i);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ return __raw_atomic_update_asm(&v->counter, i);
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ return __raw_atomic_update_asm(&v->counter, -i);
+}
+
+static inline void atomic_inc(volatile atomic_t *v)
+{
+ __raw_atomic_update_asm(&v->counter, 1);
+}
+
+static inline void atomic_dec(volatile atomic_t *v)
+{
+ __raw_atomic_update_asm(&v->counter, -1);
+}
+
+static inline void atomic_clear_mask(int mask, atomic_t *v)
+{
+ __raw_atomic_clear_asm(&v->counter, mask);
+}
+
+static inline void atomic_set_mask(int mask, atomic_t *v)
+{
+ __raw_atomic_set_asm(&v->counter, mask);
+}
+
+static inline int atomic_test_mask(int mask, atomic_t *v)
+{
+ return __raw_atomic_test_asm(&v->counter, mask);
+}
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#else /* !CONFIG_SMP */
+
+#define atomic_read(v) ((v)->counter)
+
+static inline void atomic_add(int i, atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter += i;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
-static __inline__ void atomic_sub(int i, atomic_t * v)
+static inline void atomic_sub(int i, atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter -= i;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
-static inline int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
int __temp = 0;
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter += i;
__temp = v->counter;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return __temp;
}
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
int __temp = 0;
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter -= i;
__temp = v->counter;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return __temp;
}
-static __inline__ void atomic_inc(volatile atomic_t * v)
+static inline void atomic_inc(volatile atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter++;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static __inline__ void atomic_dec(volatile atomic_t * v)
+static inline void atomic_dec(volatile atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter--;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter &= ~mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter |= mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Atomic operations are already serializing */
@@ -123,9 +176,25 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#endif /* !CONFIG_SMP */
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
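
Note on the relocated helpers: atomic_add_unless() loops on atomic_cmpxchg() until the add succeeds or the counter already holds the excluded value, and its result is nonzero only when the add was actually performed. A minimal usage sketch built on atomic_inc_not_zero() as defined above; the object and function names are illustrative, not from this patch:

    #include <asm/atomic.h>

    /* take a reference only while the count is still non-zero (hedged sketch) */
    struct my_obj {
            atomic_t refcnt;
    };

    static int my_obj_get(struct my_obj *obj)
    {
            /* nonzero iff the counter was incremented, i.e. it was not 0 */
            return atomic_inc_not_zero(&obj->refcnt);
    }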
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index 77295666c34b..daffc0684e75 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -47,6 +47,9 @@
# define DMA_UNCACHED_REGION (0)
#endif
+extern void bfin_setup_caches(unsigned int cpu);
+extern void bfin_setup_cpudata(unsigned int cpu);
+
extern unsigned long get_cclk(void);
extern unsigned long get_sclk(void);
extern unsigned long sclk_to_usecs(unsigned long sclk);
@@ -58,8 +61,6 @@ extern void dump_bfin_trace_buffer(void);
/* init functions only */
extern int init_arch_irq(void);
-extern void bfin_icache_init(void);
-extern void bfin_dcache_init(void);
extern void init_exception_vectors(void);
extern void program_IAR(void);
@@ -110,7 +111,7 @@ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
#ifdef CONFIG_BFIN_ICACHE_LOCK
extern void cache_grab_lock(int way);
-extern void cache_lock(int way);
+extern void bfin_cache_lock(int way);
#endif
#endif
diff --git a/arch/blackfin/include/asm/bfin5xx_spi.h b/arch/blackfin/include/asm/bfin5xx_spi.h
index 9fa19158e38d..1306e6b22946 100644
--- a/arch/blackfin/include/asm/bfin5xx_spi.h
+++ b/arch/blackfin/include/asm/bfin5xx_spi.h
@@ -1,22 +1,12 @@
-/************************************************************
-
-* Copyright (C) 2006-2008, Analog Devices. All Rights Reserved
-*
-* FILE bfin5xx_spi.h
-* PROGRAMMER(S): Luke Yang (Analog Devices Inc.)
-*
-*
-* DATE OF CREATION: March. 10th 2006
-*
-* SYNOPSIS:
-*
-* DESCRIPTION: header file for SPI controller driver for Blackfin5xx.
-**************************************************************
-
-* MODIFICATION HISTORY:
-* March 10, 2006 bfin5xx_spi.h Created. (Luke Yang)
-
-************************************************************/
+/*
+ * Blackfin On-Chip SPI Driver
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _SPI_CHANNEL_H_
#define _SPI_CHANNEL_H_
diff --git a/arch/blackfin/include/asm/bfin_sdh.h b/arch/blackfin/include/asm/bfin_sdh.h
new file mode 100644
index 000000000000..d61d5497c590
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_sdh.h
@@ -0,0 +1,19 @@
+/*
+ * bfin_sdh.h - Blackfin SDH definitions
+ *
+ * Copyright 2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_SDH_H__
+#define __BFIN_SDH_H__
+
+struct bfin_sd_host {
+ int dma_chan;
+ int irq_int0;
+ int irq_int1;
+ u16 pin_req[7];
+};
+
+#endif
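
struct bfin_sd_host is the platform data the SDH (SD/MMC host) driver expects from board code: the DMA channel, the controller's two interrupts, and the peripheral pin list. A hedged sketch of how a board file might populate it; the CH_SDH, IRQ_SDH_MASK* and P_SD_* macros are assumed to come from the machine headers and the values are placeholders, not taken from this patch:

    #include <asm/bfin_sdh.h>

    /* illustrative board platform data; macro names and values are assumptions */
    static struct bfin_sd_host bfin_sdh_data = {
            .dma_chan = CH_SDH,
            .irq_int0 = IRQ_SDH_MASK0,
            .irq_int1 = IRQ_SDH_MASK1,
            .pin_req  = {P_SD_D0, P_SD_D1, P_SD_D2, P_SD_D3,
                         P_SD_CLK, P_SD_CMD, 0},
    };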
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index c76ed8def302..fe88a2c19213 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -120,9 +120,6 @@ struct sport_register {
#define SPORT_IOC_MAGIC 'P'
#define SPORT_IOC_CONFIG _IOWR('P', 0x01, struct sport_config)
-/* Test purpose */
-#define ENABLE_AD73311 _IOWR('P', 0x02, int)
-
struct sport_dev {
struct cdev cdev; /* Char device structure */
diff --git a/arch/blackfin/include/asm/bfrom.h b/arch/blackfin/include/asm/bfrom.h
index cfe8024c3b2f..9e4be5e5e767 100644
--- a/arch/blackfin/include/asm/bfrom.h
+++ b/arch/blackfin/include/asm/bfrom.h
@@ -43,6 +43,11 @@ __attribute__((__noreturn__))
static inline void bfrom_SoftReset(void *new_stack)
{
while (1)
+ /*
+ * We don't declare the SP as clobbered on purpose, since
+ * it confuses the heck out of the compiler, and this function
+ * never returns
+ */
__asm__ __volatile__(
"sp = %[stack];"
"jump (%[bfrom_syscontrol]);"
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index b39a175c79c1..21b036eadab1 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -7,7 +7,6 @@
#include <linux/compiler.h>
#include <asm/byteorder.h> /* swab32 */
-#include <asm/system.h> /* save_flags */
#ifdef __KERNEL__
@@ -20,80 +19,107 @@
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
-static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+#ifdef CONFIG_SMP
+
+#include <linux/linkage.h>
+
+asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);
+
+static inline void set_bit(int nr, volatile unsigned long *addr)
{
- int *a = (int *)addr;
- int mask;
- unsigned long flags;
+ volatile unsigned long *a = addr + (nr >> 5);
+ __raw_bit_set_asm(a, nr & 0x1f);
+}
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
- *a |= mask;
- local_irq_restore(flags);
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr + (nr >> 5);
+ __raw_bit_clear_asm(a, nr & 0x1f);
}
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
{
- int *a = (int *)addr;
- int mask;
+ volatile unsigned long *a = addr + (nr >> 5);
+ __raw_bit_toggle_asm(a, nr & 0x1f);
+}
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- *a |= mask;
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+ volatile const unsigned long *a = addr + (nr >> 5);
+ return __raw_bit_test_asm(a, nr & 0x1f) != 0;
}
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr + (nr >> 5);
+ return __raw_bit_test_set_asm(a, nr & 0x1f);
+}
+
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr + (nr >> 5);
+ return __raw_bit_test_clear_asm(a, nr & 0x1f);
+}
+
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr + (nr >> 5);
+ return __raw_bit_test_toggle_asm(a, nr & 0x1f);
+}
+
+#else /* !CONFIG_SMP */
+
+#include <asm/system.h> /* save_flags */
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(int nr, volatile unsigned long *addr)
{
int *a = (int *)addr;
int mask;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
- *a &= ~mask;
- local_irq_restore(flags);
+ local_irq_save_hw(flags);
+ *a |= mask;
+ local_irq_restore_hw(flags);
}
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
{
int *a = (int *)addr;
int mask;
-
+ unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
+ local_irq_save_hw(flags);
*a &= ~mask;
+ local_irq_restore_hw(flags);
}
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
{
int mask, flags;
unsigned long *ADDR = (unsigned long *)addr;
ADDR += nr >> 5;
mask = 1 << (nr & 31);
- local_irq_save(flags);
- *ADDR ^= mask;
- local_irq_restore(flags);
-}
-
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
-{
- int mask;
- unsigned long *ADDR = (unsigned long *)addr;
-
- ADDR += nr >> 5;
- mask = 1 << (nr & 31);
+ local_irq_save_hw(flags);
*ADDR ^= mask;
+ local_irq_restore_hw(flags);
}
-static __inline__ int test_and_set_bit(int nr, void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -101,27 +127,31 @@ static __inline__ int test_and_set_bit(int nr, void *addr)
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
retval = (mask & *a) != 0;
*a |= mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return retval;
}
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *)addr;
+ unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
+ local_irq_save_hw(flags);
retval = (mask & *a) != 0;
- *a |= mask;
+ *a &= ~mask;
+ local_irq_restore_hw(flags);
+
return retval;
}
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -129,15 +159,52 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
retval = (mask & *a) != 0;
+ *a ^= mask;
+ local_irq_restore_hw(flags);
+ return retval;
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
*a &= ~mask;
- local_irq_restore(flags);
+}
- return retval;
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+ int mask;
+ unsigned long *ADDR = (unsigned long *)addr;
+
+ ADDR += nr >> 5;
+ mask = 1 << (nr & 31);
+ *ADDR ^= mask;
}
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -145,26 +212,23 @@ static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
a += nr >> 5;
mask = 1 << (nr & 0x1f);
retval = (mask & *a) != 0;
- *a &= ~mask;
+ *a |= mask;
return retval;
}
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *)addr;
- unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
retval = (mask & *a) != 0;
- *a ^= mask;
- local_irq_restore(flags);
+ *a &= ~mask;
return retval;
}
-static __inline__ int __test_and_change_bit(int nr,
+static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
int mask, retval;
@@ -177,16 +241,7 @@ static __inline__ int __test_and_change_bit(int nr,
return retval;
}
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const void *addr)
-{
- return ((1UL << (nr & 31)) &
- (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const void *addr)
+static inline int __test_bit(int nr, const void *addr)
{
int *a = (int *)addr;
int mask;
@@ -196,10 +251,16 @@ static __inline__ int __test_bit(int nr, const void *addr)
return ((mask & *a) != 0);
}
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
+#ifndef CONFIG_SMP
+/*
+ * This routine doesn't need irq save and restore ops in UP
+ * context.
+ */
+static inline int test_bit(int nr, const void *addr)
+{
+ return __test_bit(nr, addr);
+}
+#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
@@ -213,6 +274,7 @@ static __inline__ int __test_bit(int nr, const void *addr)
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* _BLACKFIN_BITOPS_H */
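
With this split, SMP kernels route the atomic bit operations to the __raw_bit_*_asm helpers, while UP kernels keep the read-modify-write sequence but bracket it with local_irq_save_hw()/local_irq_restore_hw() so an I-pipe domain cannot preempt it. Callers are unchanged; a minimal sketch (the bitmap name and slot index are illustrative, not from this patch):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    /* claim a slot in a driver-private bitmap using test_and_set_bit() */
    static DECLARE_BITMAP(my_slots, 32);

    static int claim_slot(int slot)
    {
            if (test_and_set_bit(slot, my_slots))
                    return -EBUSY;  /* already taken */
            return 0;
    }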
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index 8749b0e321ab..8bb2cb139756 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -6,11 +6,6 @@
#ifndef _BLACKFIN_H_
#define _BLACKFIN_H_
-#define LO(con32) ((con32) & 0xFFFF)
-#define lo(con32) ((con32) & 0xFFFF)
-#define HI(con32) (((con32) >> 16) & 0xFFFF)
-#define hi(con32) (((con32) >> 16) & 0xFFFF)
-
#include <mach/anomaly.h>
#ifndef __ASSEMBLY__
@@ -65,6 +60,11 @@ static inline void CSYNC(void)
#else /* __ASSEMBLY__ */
+#define LO(con32) ((con32) & 0xFFFF)
+#define lo(con32) ((con32) & 0xFFFF)
+#define HI(con32) (((con32) >> 16) & 0xFFFF)
+#define hi(con32) (((con32) >> 16) & 0xFFFF)
+
/* SSYNC & CSYNC implementations for assembly files */
#define ssync(x) SSYNC(x)
diff --git a/arch/blackfin/include/asm/byteorder.h b/arch/blackfin/include/asm/byteorder.h
index 6a673d42da18..3e69106a4d37 100644
--- a/arch/blackfin/include/asm/byteorder.h
+++ b/arch/blackfin/include/asm/byteorder.h
@@ -1,48 +1,6 @@
#ifndef _BLACKFIN_BYTEORDER_H
#define _BLACKFIN_BYTEORDER_H
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-static __inline__ __attribute_const__ __u32 ___arch__swahb32(__u32 xx)
-{
- __u32 tmp;
- __asm__("%1 = %0 >> 8 (V);\n\t"
- "%0 = %0 << 8 (V);\n\t"
- "%0 = %0 | %1;\n\t"
- : "+d"(xx), "=&d"(tmp));
- return xx;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swahw32(__u32 xx)
-{
- __u32 rv;
- __asm__("%0 = PACK(%1.L, %1.H);\n\t": "=d"(rv): "d"(xx));
- return rv;
-}
-
-#define __arch__swahb32(x) ___arch__swahb32(x)
-#define __arch__swahw32(x) ___arch__swahw32(x)
-#define __arch__swab32(x) ___arch__swahb32(___arch__swahw32(x))
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 xx)
-{
- __u32 xw = xx;
- __asm__("%0 <<= 8;\n %0.L = %0.L + %0.H (NS);\n": "+d"(xw));
- return (__u16)xw;
-}
-
-#define __arch__swab16(x) ___arch__swab16(x)
-
-#endif
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-# define __BYTEORDER_HAS_U64__
-# define __SWAB_64_THRU_32__
-#endif
-
#include <linux/byteorder/little_endian.h>
#endif /* _BLACKFIN_BYTEORDER_H */
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 023d72133b5a..86637814cf25 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -12,6 +12,11 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#ifdef CONFIG_SMP
+#define __cacheline_aligned
+#else
+#define ____cacheline_aligned
+
/*
* Put cacheline_aliged data to L1 data memory
*/
@@ -21,9 +26,33 @@
__section__(".data_l1.cacheline_aligned")))
#endif
+#endif
+
/*
* largest L1 which this arch supports
*/
#define L1_CACHE_SHIFT_MAX 5
+#if defined(CONFIG_SMP) && \
+ !defined(CONFIG_BFIN_CACHE_COHERENT) && \
+ defined(CONFIG_BFIN_DCACHE)
+#define __ARCH_SYNC_CORE_DCACHE
+#ifndef __ASSEMBLY__
+asmlinkage void __raw_smp_mark_barrier_asm(void);
+asmlinkage void __raw_smp_check_barrier_asm(void);
+
+static inline void smp_mark_barrier(void)
+{
+ __raw_smp_mark_barrier_asm();
+}
+static inline void smp_check_barrier(void)
+{
+ __raw_smp_check_barrier_asm();
+}
+
+void resync_core_dcache(void);
+#endif
+#endif
+
+
#endif
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 4403415583fa..1b040f5b4feb 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -35,6 +35,7 @@ extern void blackfin_icache_flush_range(unsigned long start_address, unsigned lo
extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dflush_page(void *page);
+extern void blackfin_invalidate_entire_dcache(void);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -44,12 +45,20 @@ extern void blackfin_dflush_page(void *page);
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
+#ifdef CONFIG_SMP
+#define flush_icache_range_others(start, end) \
+ smp_icache_flush_range_others((start), (end))
+#else
+#define flush_icache_range_others(start, end) do { } while (0)
+#endif
+
static inline void flush_icache_range(unsigned start, unsigned end)
{
#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE)
# if defined(CONFIG_BFIN_WT)
blackfin_icache_flush_range((start), (end));
+ flush_icache_range_others(start, end);
# else
blackfin_icache_dcache_flush_range((start), (end));
# endif
@@ -58,6 +67,7 @@ static inline void flush_icache_range(unsigned start, unsigned end)
# if defined(CONFIG_BFIN_ICACHE)
blackfin_icache_flush_range((start), (end));
+ flush_icache_range_others(start, end);
# endif
# if defined(CONFIG_BFIN_DCACHE)
blackfin_dcache_flush_range((start), (end));
@@ -66,10 +76,12 @@ static inline void flush_icache_range(unsigned start, unsigned end)
#endif
}
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_range ((unsigned) (dst), (unsigned) (dst) + (len)); \
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { memcpy(dst, src, len); \
+ flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+ flush_icache_range_others((unsigned long) (dst), (unsigned long) (dst) + (len));\
} while (0)
+
#define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len)
#if defined(CONFIG_BFIN_DCACHE)
@@ -82,7 +94,7 @@ do { memcpy(dst, src, len); \
# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
#else
# define flush_dcache_range(start,end) do { } while (0)
-# define flush_dcache_page(page) do { } while (0)
+# define flush_dcache_page(page) do { } while (0)
#endif
extern unsigned long reserved_mem_dcache_on;
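
flush_icache_range() now also calls flush_icache_range_others(), so under SMP code written by one core is flushed from the other core's instruction cache as well. The calling pattern is unchanged; a hedged sketch (the helper and buffer names are illustrative, not from this patch):

    #include <linux/string.h>
    #include <asm/cacheflush.h>

    /* after writing instructions into memory, make them visible to the icache */
    static void install_code(void *dst, const void *code, size_t len)
    {
            memcpy(dst, code, len);
            flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
    }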
diff --git a/arch/blackfin/include/asm/checksum.h b/arch/blackfin/include/asm/checksum.h
index 6f6af2b8e9e0..793581fc9556 100644
--- a/arch/blackfin/include/asm/checksum.h
+++ b/arch/blackfin/include/asm/checksum.h
@@ -63,22 +63,23 @@ static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
-
- __asm__ ("%0 = %0 + %1;\n\t"
- "CC = AC0;\n\t"
- "if !CC jump 4;\n\t"
- "%0 = %0 + %4;\n\t"
- "%0 = %0 + %2;\n\t"
- "CC = AC0;\n\t"
- "if !CC jump 4;\n\t"
- "%0 = %0 + %4;\n\t"
- "%0 = %0 + %3;\n\t"
- "CC = AC0;\n\t"
- "if !CC jump 4;\n\t"
- "%0 = %0 + %4;\n\t"
- "NOP;\n\t"
- : "=d" (sum)
- : "d" (daddr), "d" (saddr), "d" ((ntohs(len)<<16)+proto*256), "d" (1), "0"(sum));
+ unsigned int carry;
+
+ __asm__ ("%0 = %0 + %2;\n\t"
+ "CC = AC0;\n\t"
+ "%1 = CC;\n\t"
+ "%0 = %0 + %1;\n\t"
+ "%0 = %0 + %3;\n\t"
+ "CC = AC0;\n\t"
+ "%1 = CC;\n\t"
+ "%0 = %0 + %1;\n\t"
+ "%0 = %0 + %4;\n\t"
+ "CC = AC0;\n\t"
+ "%1 = CC;\n\t"
+ "%0 = %0 + %1;\n\t"
+ : "=d" (sum), "=&d" (carry)
+ : "d" (daddr), "d" (saddr), "d" ((len + proto) << 8), "0"(sum)
+ : "CC");
return (sum);
}
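
The rewritten asm no longer branches on the carry flag: after each 32-bit add it copies AC0 into a scratch register and adds it back into the sum (end-around carry). A plain-C rendering of that step, as a hedged sketch rather than code taken from the patch:

    /* fold the carry of a 32-bit add back into the sum, as the asm above does */
    static inline __u32 add_with_carry(__u32 sum, __u32 val)
    {
            __u64 tmp = (__u64)sum + val;
            return (__u32)tmp + (__u32)(tmp >> 32);
    }

    /* conceptually: sum = add_with_carry(sum, daddr);
     *               sum = add_with_carry(sum, saddr);
     *               sum = add_with_carry(sum, (len + proto) << 8); */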
diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S
index c0e630edfb9a..16561ab18b38 100644
--- a/arch/blackfin/include/asm/context.S
+++ b/arch/blackfin/include/asm/context.S
@@ -303,9 +303,14 @@
RETI = [sp++];
RETS = [sp++];
- p0.h = _irq_flags;
- p0.l = _irq_flags;
+#ifdef CONFIG_SMP
+ GET_PDA(p0, r0);
+ r0 = [p0 + PDA_IRQFLAGS];
+#else
+ p0.h = _bfin_irq_flags;
+ p0.l = _bfin_irq_flags;
r0 = [p0];
+#endif
sti r0;
sp += 4; /* Skip Reserved */
@@ -353,3 +358,41 @@
csync;
.endm
+.macro save_context_cplb
+ [--sp] = (R7:0, P5:0);
+ [--sp] = fp;
+
+ [--sp] = a0.x;
+ [--sp] = a0.w;
+ [--sp] = a1.x;
+ [--sp] = a1.w;
+
+ [--sp] = LC0;
+ [--sp] = LC1;
+ [--sp] = LT0;
+ [--sp] = LT1;
+ [--sp] = LB0;
+ [--sp] = LB1;
+
+ [--sp] = RETS;
+.endm
+
+.macro restore_context_cplb
+ RETS = [sp++];
+
+ LB1 = [sp++];
+ LB0 = [sp++];
+ LT1 = [sp++];
+ LT0 = [sp++];
+ LC1 = [sp++];
+ LC0 = [sp++];
+
+ a1.w = [sp++];
+ a1.x = [sp++];
+ a0.w = [sp++];
+ a0.x = [sp++];
+
+ fp = [sp++];
+
+ (R7:0, P5:0) = [SP++];
+.endm
diff --git a/arch/blackfin/include/asm/cplb-mpu.h b/arch/blackfin/include/asm/cplb-mpu.h
deleted file mode 100644
index 75c67b99d607..000000000000
--- a/arch/blackfin/include/asm/cplb-mpu.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * File: include/asm-blackfin/cplbinit.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __ASM_BFIN_CPLB_MPU_H
-#define __ASM_BFIN_CPLB_MPU_H
-
-struct cplb_entry {
- unsigned long data, addr;
-};
-
-struct mem_region {
- unsigned long start, end;
- unsigned long dcplb_data;
- unsigned long icplb_data;
-};
-
-extern struct cplb_entry dcplb_tbl[MAX_CPLBS];
-extern struct cplb_entry icplb_tbl[MAX_CPLBS];
-extern int first_switched_icplb;
-extern int first_mask_dcplb;
-extern int first_switched_dcplb;
-
-extern int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot;
-extern int nr_cplb_flush;
-
-extern int page_mask_order;
-extern int page_mask_nelts;
-
-extern unsigned long *current_rwx_mask;
-
-extern void flush_switched_cplbs(void);
-extern void set_mask_dcplbs(unsigned long *);
-
-extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
-
-#endif /* __ASM_BFIN_CPLB_MPU_H */
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index 9e8b4035fcec..ad566ff9ad16 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -30,7 +30,6 @@
#ifndef _CPLB_H
#define _CPLB_H
-#include <asm/blackfin.h>
#include <mach/anomaly.h>
#define SDRAM_IGENERIC (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO)
@@ -55,13 +54,24 @@
#endif
#define L1_DMEMORY (CPLB_LOCK | CPLB_COMMON)
+
+#ifdef CONFIG_SMP
+#define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB)
+#define L2_IMEMORY (CPLB_COMMON | CPLB_LOCK)
+#define L2_DMEMORY (CPLB_COMMON | CPLB_LOCK)
+
+#else
#ifdef CONFIG_BFIN_L2_CACHEABLE
#define L2_IMEMORY (SDRAM_IGENERIC)
#define L2_DMEMORY (SDRAM_DGENERIC)
#else
#define L2_IMEMORY (CPLB_COMMON)
#define L2_DMEMORY (CPLB_COMMON)
-#endif
+#endif /* CONFIG_BFIN_L2_CACHEABLE */
+
+#define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB)
+#endif /* CONFIG_SMP */
+
#define SDRAM_DNON_CHBL (CPLB_COMMON)
#define SDRAM_EBIU (CPLB_COMMON)
#define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY)
@@ -71,14 +81,7 @@
#define SIZE_1M 0x00100000 /* 1M */
#define SIZE_4M 0x00400000 /* 4M */
-#ifdef CONFIG_MPU
#define MAX_CPLBS 16
-#else
-#define MAX_CPLBS (16 * 2)
-#endif
-
-#define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
- ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M)
#define CPLB_ENABLE_ICACHE_P 0
#define CPLB_ENABLE_DCACHE_P 1
@@ -113,4 +116,8 @@
#define CPLB_INOCACHE CPLB_USER_RD | CPLB_VALID
#define CPLB_IDOCACHE CPLB_INOCACHE | CPLB_L1_CHBL
+#define FAULT_RW (1 << 16)
+#define FAULT_USERSUPV (1 << 17)
+#define FAULT_CPLBBITS 0x0000ffff
+
#endif /* _CPLB_H */
diff --git a/arch/blackfin/include/asm/cplbinit.h b/arch/blackfin/include/asm/cplbinit.h
index f845b41147ba..05b14a631d0c 100644
--- a/arch/blackfin/include/asm/cplbinit.h
+++ b/arch/blackfin/include/asm/cplbinit.h
@@ -32,61 +32,56 @@
#include <asm/blackfin.h>
#include <asm/cplb.h>
+#include <linux/threads.h>
-#ifdef CONFIG_MPU
-
-#include <asm/cplb-mpu.h>
-
+#ifdef CONFIG_CPLB_SWITCH_TAB_L1
+# define PDT_ATTR __attribute__((l1_data))
#else
+# define PDT_ATTR
+#endif
-#define INITIAL_T 0x1
-#define SWITCH_T 0x2
-#define I_CPLB 0x4
-#define D_CPLB 0x8
-
-#define IN_KERNEL 1
-
-enum
-{ZERO_P, L1I_MEM, L1D_MEM, SDRAM_KERN , SDRAM_RAM_MTD, SDRAM_DMAZ, RES_MEM, ASYNC_MEM, L2_MEM};
-
-struct cplb_desc {
- u32 start; /* start address */
- u32 end; /* end address */
- u32 psize; /* prefered size if any otherwise 1MB or 4MB*/
- u16 attr;/* attributes */
- u16 i_conf;/* I-CPLB DATA */
- u16 d_conf;/* D-CPLB DATA */
- u16 valid;/* valid */
- const s8 name[30];/* name */
+struct cplb_entry {
+ unsigned long data, addr;
};
-struct cplb_tab {
- u_long *tab;
- u16 pos;
- u16 size;
+struct cplb_boundary {
+ unsigned long eaddr; /* End of this region. */
+ unsigned long data; /* CPLB data value. */
};
-extern u_long icplb_table[];
-extern u_long dcplb_table[];
+extern struct cplb_boundary dcplb_bounds[];
+extern struct cplb_boundary icplb_bounds[];
+extern int dcplb_nr_bounds, icplb_nr_bounds;
-/* Till here we are discussing about the static memory management model.
- * However, the operating envoronments commonly define more CPLB
- * descriptors to cover the entire addressable memory than will fit into
- * the available on-chip 16 CPLB MMRs. When this happens, the below table
- * will be used which will hold all the potentially required CPLB descriptors
- *
- * This is how Page descriptor Table is implemented in uClinux/Blackfin.
- */
+extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
+extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
+extern int first_switched_icplb;
+extern int first_switched_dcplb;
-extern u_long ipdt_table[];
-extern u_long dpdt_table[];
-#ifdef CONFIG_CPLB_INFO
-extern u_long ipdt_swapcount_table[];
-extern u_long dpdt_swapcount_table[];
-#endif
+extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[];
+extern int nr_dcplb_prot[], nr_cplb_flush[];
+
+#ifdef CONFIG_MPU
+
+extern int first_mask_dcplb;
+
+extern int page_mask_order;
+extern int page_mask_nelts;
+
+extern unsigned long *current_rwx_mask[NR_CPUS];
+
+extern void flush_switched_cplbs(unsigned int);
+extern void set_mask_dcplbs(unsigned long *, unsigned int);
+
+extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
#endif /* CONFIG_MPU */
-extern void generate_cplb_tables(void);
+extern void bfin_icache_init(struct cplb_entry *icplb_tbl);
+extern void bfin_dcache_init(struct cplb_entry *icplb_tbl);
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+extern void generate_cplb_tables_all(void);
+extern void generate_cplb_tables_cpu(unsigned int cpu);
+#endif
#endif
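
The boundary tables replace the old per-descriptor page tables: each cplb_boundary entry gives the end address of a region and the CPLB data value to use for it, and the arrays are ordered by eaddr. A hedged sketch of the lookup the miss handlers conceptually perform (illustrative only, not code from this patch):

    /* the first entry whose end address exceeds addr supplies the CPLB data */
    static unsigned long lookup_cplb_data(const struct cplb_boundary *tbl,
                                          int nr, unsigned long addr)
    {
            int i;

            for (i = 0; i < nr; i++)
                    if (addr < tbl[i].eaddr)
                            return tbl[i].data;
            return 0;       /* address not covered by any region */
    }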
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
new file mode 100644
index 000000000000..c2594ef877f6
--- /dev/null
+++ b/arch/blackfin/include/asm/cpu.h
@@ -0,0 +1,41 @@
+/*
+ * File: arch/blackfin/include/asm/cpu.h.
+ * Author: Philippe Gerum <rpm@xenomai.org>
+ *
+ * Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ASM_BLACKFIN_CPU_H
+#define __ASM_BLACKFIN_CPU_H
+
+#include <linux/percpu.h>
+
+struct task_struct;
+
+struct blackfin_cpudata {
+ struct cpu cpu;
+ struct task_struct *idle;
+ unsigned int imemctl;
+ unsigned int dmemctl;
+ unsigned long loops_per_jiffy;
+ unsigned long dcache_invld_count;
+};
+
+DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
+
+#endif
diff --git a/arch/blackfin/include/asm/delay.h b/arch/blackfin/include/asm/delay.h
index 0889c3abb593..c31f91cc1d5d 100644
--- a/arch/blackfin/include/asm/delay.h
+++ b/arch/blackfin/include/asm/delay.h
@@ -13,29 +13,7 @@
static inline void __delay(unsigned long loops)
{
- if (ANOMALY_05000312) {
- /* Interrupted loads to loop registers -> bad */
- unsigned long tmp;
- __asm__ __volatile__(
- "[--SP] = LC0;"
- "[--SP] = LT0;"
- "[--SP] = LB0;"
- "LSETUP (1f,1f) LC0 = %1;"
- "1: NOP;"
- /* We take advantage of the fact that LC0 is 0 at
- * the end of the loop. Otherwise we'd need some
- * NOPs after the CLI here.
- */
- "CLI %0;"
- "LB0 = [SP++];"
- "LT0 = [SP++];"
- "LC0 = [SP++];"
- "STI %0;"
- : "=d" (tmp)
- : "a" (loops)
- );
- } else
- __asm__ __volatile__ (
+__asm__ __volatile__ (
"LSETUP(1f, 1f) LC0 = %0;"
"1: NOP;"
:
@@ -47,16 +25,15 @@ static inline void __delay(unsigned long loops)
#include <linux/param.h> /* needed for HZ */
/*
- * Use only for very small delays ( < 1 msec). Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays. This is a "reasonable" implementation, though (and the
- * first constant multiplications gets optimized away if the delay is
- * a constant)
+ * close approximation borrowed from m68knommu to avoid 64-bit math
*/
+
+#define HZSCALE (268435456 / (1000000/HZ))
+
static inline void udelay(unsigned long usecs)
{
extern unsigned long loops_per_jiffy;
- __delay(usecs * loops_per_jiffy / (1000000 / HZ));
+ __delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
}
#endif
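
For reference, the new udelay() arithmetic distributes a single divide by 2^28 across three shifts so every intermediate product stays within 32 bits (a sketch of the reasoning, not part of the patch):

    /* with HZSCALE = 2^28 / (1000000 / HZ):
     *
     *   (((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6
     *     ~= usecs * HZSCALE * loops_per_jiffy / 2^28      (11 + 11 + 6 = 28)
     *     ~= usecs * loops_per_jiffy / (1000000 / HZ)
     *
     * which is the usual usecs-to-loops conversion, done without 64-bit math. */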
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index 6509733bb0f6..e4f7b8043f02 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -1,44 +1,17 @@
/*
- * File: include/asm-blackfin/simple_bf533_dma.h
- * Based on: none - original work
- * Author: LG Soft India
- * Copyright (C) 2004-2005 Analog Devices Inc.
- * Created: Tue Sep 21 2004
- * Description: This file contains the major Data structures and constants
- * used for DMA Implementation in BF533
- * Modified:
+ * dma.h - Blackfin DMA defines/structures/etc...
*
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
#ifndef _BLACKFIN_DMA_H_
#define _BLACKFIN_DMA_H_
-#include <asm/io.h>
-#include <linux/slab.h>
-#include <asm/irq.h>
-#include <asm/signal.h>
-
-#include <linux/kernel.h>
-#include <mach/dma.h>
-#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <mach/dma.h>
#include <asm/blackfin.h>
+#include <asm/page.h>
#define MAX_DMA_ADDRESS PAGE_OFFSET
@@ -79,7 +52,7 @@ enum dma_chan_status {
#define DMA_SYNC_RESTART 1
struct dmasg {
- unsigned long next_desc_addr;
+ void *next_desc_addr;
unsigned long start_addr;
unsigned short cfg;
unsigned short x_count;
@@ -89,7 +62,7 @@ struct dmasg {
} __attribute__((packed));
struct dma_register {
- unsigned long next_desc_ptr; /* DMA Next Descriptor Pointer register */
+ void *next_desc_ptr; /* DMA Next Descriptor Pointer register */
unsigned long start_addr; /* DMA Start address register */
unsigned short cfg; /* DMA Configuration register */
@@ -109,7 +82,7 @@ struct dma_register {
short y_modify; /* DMA y_modify register */
unsigned short dummy5;
- unsigned long curr_desc_ptr; /* DMA Current Descriptor Pointer
+ void *curr_desc_ptr; /* DMA Current Descriptor Pointer
register */
unsigned long curr_addr_ptr; /* DMA Current Address Pointer
register */
@@ -131,19 +104,15 @@ struct dma_register {
};
-typedef irqreturn_t(*dma_interrupt_t) (int irq, void *dev_id);
-
+struct mutex;
struct dma_channel {
struct mutex dmalock;
- char *device_id;
+ const char *device_id;
enum dma_chan_status chan_status;
- struct dma_register *regs;
+ volatile struct dma_register *regs;
struct dmasg *sg; /* large mode descriptor */
- unsigned int ctrl_num; /* controller number */
- dma_interrupt_t irq_callback;
+ unsigned int irq;
void *data;
- unsigned int dma_enable_flag;
- unsigned int loopback_flag;
#ifdef CONFIG_PM
unsigned short saved_peripheral_map;
#endif
@@ -157,49 +126,132 @@ void blackfin_dma_resume(void);
/*******************************************************************************
* DMA API's
*******************************************************************************/
-/* functions to set register mode */
-void set_dma_start_addr(unsigned int channel, unsigned long addr);
-void set_dma_next_desc_addr(unsigned int channel, unsigned long addr);
-void set_dma_curr_desc_addr(unsigned int channel, unsigned long addr);
-void set_dma_x_count(unsigned int channel, unsigned short x_count);
-void set_dma_x_modify(unsigned int channel, short x_modify);
-void set_dma_y_count(unsigned int channel, unsigned short y_count);
-void set_dma_y_modify(unsigned int channel, short y_modify);
-void set_dma_config(unsigned int channel, unsigned short config);
-unsigned short set_bfin_dma_config(char direction, char flow_mode,
- char intr_mode, char dma_mode, char width,
- char syncmode);
-void set_dma_curr_addr(unsigned int channel, unsigned long addr);
-
-/* get curr status for polling */
-unsigned short get_dma_curr_irqstat(unsigned int channel);
-unsigned short get_dma_curr_xcount(unsigned int channel);
-unsigned short get_dma_curr_ycount(unsigned int channel);
-unsigned long get_dma_next_desc_ptr(unsigned int channel);
-unsigned long get_dma_curr_desc_ptr(unsigned int channel);
-unsigned long get_dma_curr_addr(unsigned int channel);
-
-/* set large DMA mode descriptor */
-void set_dma_sg(unsigned int channel, struct dmasg *sg, int nr_sg);
-
-/* check if current channel is in use */
-int dma_channel_active(unsigned int channel);
-
-/* common functions must be called in any mode */
+extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
+extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
+extern int channel2irq(unsigned int channel);
+
+static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
+{
+ dma_ch[channel].regs->start_addr = addr;
+}
+static inline void set_dma_next_desc_addr(unsigned int channel, void *addr)
+{
+ dma_ch[channel].regs->next_desc_ptr = addr;
+}
+static inline void set_dma_curr_desc_addr(unsigned int channel, void *addr)
+{
+ dma_ch[channel].regs->curr_desc_ptr = addr;
+}
+static inline void set_dma_x_count(unsigned int channel, unsigned short x_count)
+{
+ dma_ch[channel].regs->x_count = x_count;
+}
+static inline void set_dma_y_count(unsigned int channel, unsigned short y_count)
+{
+ dma_ch[channel].regs->y_count = y_count;
+}
+static inline void set_dma_x_modify(unsigned int channel, short x_modify)
+{
+ dma_ch[channel].regs->x_modify = x_modify;
+}
+static inline void set_dma_y_modify(unsigned int channel, short y_modify)
+{
+ dma_ch[channel].regs->y_modify = y_modify;
+}
+static inline void set_dma_config(unsigned int channel, unsigned short config)
+{
+ dma_ch[channel].regs->cfg = config;
+}
+static inline void set_dma_curr_addr(unsigned int channel, unsigned long addr)
+{
+ dma_ch[channel].regs->curr_addr_ptr = addr;
+}
+
+static inline unsigned short
+set_bfin_dma_config(char direction, char flow_mode,
+ char intr_mode, char dma_mode, char width, char syncmode)
+{
+ return (direction << 1) | (width << 2) | (dma_mode << 4) |
+ (intr_mode << 6) | (flow_mode << 12) | (syncmode << 5);
+}
+
+static inline unsigned short get_dma_curr_irqstat(unsigned int channel)
+{
+ return dma_ch[channel].regs->irq_status;
+}
+static inline unsigned short get_dma_curr_xcount(unsigned int channel)
+{
+ return dma_ch[channel].regs->curr_x_count;
+}
+static inline unsigned short get_dma_curr_ycount(unsigned int channel)
+{
+ return dma_ch[channel].regs->curr_y_count;
+}
+static inline void *get_dma_next_desc_ptr(unsigned int channel)
+{
+ return dma_ch[channel].regs->next_desc_ptr;
+}
+static inline void *get_dma_curr_desc_ptr(unsigned int channel)
+{
+ return dma_ch[channel].regs->curr_desc_ptr;
+}
+static inline unsigned short get_dma_config(unsigned int channel)
+{
+ return dma_ch[channel].regs->cfg;
+}
+static inline unsigned long get_dma_curr_addr(unsigned int channel)
+{
+ return dma_ch[channel].regs->curr_addr_ptr;
+}
+
+static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize)
+{
+ dma_ch[channel].regs->cfg =
+ (dma_ch[channel].regs->cfg & ~(0xf << 8)) |
+ ((ndsize & 0xf) << 8);
+ dma_ch[channel].regs->next_desc_ptr = sg;
+}
+
+static inline int dma_channel_active(unsigned int channel)
+{
+ if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE)
+ return 0;
+ else
+ return 1;
+}
+
+static inline void disable_dma(unsigned int channel)
+{
+ dma_ch[channel].regs->cfg &= ~DMAEN;
+ SSYNC();
+ dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
+}
+static inline void enable_dma(unsigned int channel)
+{
+ dma_ch[channel].regs->curr_x_count = 0;
+ dma_ch[channel].regs->curr_y_count = 0;
+ dma_ch[channel].regs->cfg |= DMAEN;
+ dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED;
+}
void free_dma(unsigned int channel);
-int dma_channel_active(unsigned int channel); /* check if a channel is in use */
-void disable_dma(unsigned int channel);
-void enable_dma(unsigned int channel);
-int request_dma(unsigned int channel, char *device_id);
-int set_dma_callback(unsigned int channel, dma_interrupt_t callback,
- void *data);
-void dma_disable_irq(unsigned int channel);
-void dma_enable_irq(unsigned int channel);
-void clear_dma_irqstat(unsigned int channel);
+int request_dma(unsigned int channel, const char *device_id);
+int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);
+
+static inline void dma_disable_irq(unsigned int channel)
+{
+ disable_irq(dma_ch[channel].irq);
+}
+static inline void dma_enable_irq(unsigned int channel)
+{
+ enable_irq(dma_ch[channel].irq);
+}
+static inline void clear_dma_irqstat(unsigned int channel)
+{
+ dma_ch[channel].regs->irq_status = DMA_DONE | DMA_ERR;
+}
+
void *dma_memcpy(void *dest, const void *src, size_t count);
void *safe_dma_memcpy(void *dest, const void *src, size_t count);
-
-extern int channel2irq(unsigned int channel);
-extern struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL];
+void blackfin_dma_early_init(void);
#endif
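
Most of the DMA API is now implemented as inline accessors on dma_ch[channel].regs, with enable_dma()/disable_dma() and the IRQ helpers built the same way. A hedged sketch of a typical caller; the channel macro, the config flag names (DIR_WRITE, FLOW_STOP, INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_16) and the assumption that request_dma() returns 0 on success come from common Blackfin driver usage, not from this patch:

    #include <linux/interrupt.h>
    #include <asm/dma.h>

    static irqreturn_t my_dma_done(int irq, void *dev_id)
    {
            clear_dma_irqstat(CH_SPORT0_RX);        /* assumed channel macro */
            return IRQ_HANDLED;
    }

    static int my_start_rx(void *buf, size_t len)
    {
            int ret = request_dma(CH_SPORT0_RX, "my driver");
            if (ret)
                    return ret;
            set_dma_callback(CH_SPORT0_RX, my_dma_done, NULL);
            set_dma_start_addr(CH_SPORT0_RX, (unsigned long)buf);
            set_dma_x_count(CH_SPORT0_RX, len / 2);  /* 16-bit transfers */
            set_dma_x_modify(CH_SPORT0_RX, 2);
            set_dma_config(CH_SPORT0_RX,
                           set_bfin_dma_config(DIR_WRITE, FLOW_STOP, INTR_ON_BUF,
                                               DIMENSION_LINEAR, DATA_SIZE_16,
                                               DMA_SYNC_RESTART));
            enable_dma(CH_SPORT0_RX);
            return 0;
    }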
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h
index c4f721e0d00d..b30a2968e274 100644
--- a/arch/blackfin/include/asm/entry.h
+++ b/arch/blackfin/include/asm/entry.h
@@ -27,6 +27,14 @@
#define SAVE_ALL_SYS save_context_no_interrupts
/* This is used for all normal interrupts. It saves a minimum of registers
to the stack, loads the IRQ number, and jumps to common code. */
+#ifdef CONFIG_IPIPE
+# define LOAD_IPIPE_IPEND \
+ P0.l = lo(IPEND); \
+ P0.h = hi(IPEND); \
+ R1 = [P0];
+#else
+# define LOAD_IPIPE_IPEND
+#endif
#define INTERRUPT_ENTRY(N) \
[--sp] = SYSCFG; \
\
@@ -34,6 +42,7 @@
[--sp] = R0; /*orig_r0*/ \
[--sp] = (R7:0,P5:0); \
R0 = (N); \
+ LOAD_IPIPE_IPEND \
jump __common_int_entry;
/* For timer interrupts, we need to save IPEND, since the user_mode
@@ -53,9 +62,11 @@
/* This one pushes RETI without using CLI. Interrupts are enabled. */
#define SAVE_CONTEXT_SYSCALL save_context_syscall
#define SAVE_CONTEXT save_context_with_interrupts
+#define SAVE_CONTEXT_CPLB save_context_cplb
#define RESTORE_ALL_SYS restore_context_no_interrupts
#define RESTORE_CONTEXT restore_context_with_interrupts
+#define RESTORE_CONTEXT_CPLB restore_context_cplb
#endif /* __ASSEMBLY__ */
#endif /* __BFIN_ENTRY_H */
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index ad33ac271fd9..d4a082ef75b4 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -27,68 +27,17 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/*
-* Number BF537/6/4 BF561 BF533/2/1
-* BF527/5/2
-*
-* GPIO_0 PF0 PF0 PF0
-* GPIO_1 PF1 PF1 PF1
-* GPIO_2 PF2 PF2 PF2
-* GPIO_3 PF3 PF3 PF3
-* GPIO_4 PF4 PF4 PF4
-* GPIO_5 PF5 PF5 PF5
-* GPIO_6 PF6 PF6 PF6
-* GPIO_7 PF7 PF7 PF7
-* GPIO_8 PF8 PF8 PF8
-* GPIO_9 PF9 PF9 PF9
-* GPIO_10 PF10 PF10 PF10
-* GPIO_11 PF11 PF11 PF11
-* GPIO_12 PF12 PF12 PF12
-* GPIO_13 PF13 PF13 PF13
-* GPIO_14 PF14 PF14 PF14
-* GPIO_15 PF15 PF15 PF15
-* GPIO_16 PG0 PF16
-* GPIO_17 PG1 PF17
-* GPIO_18 PG2 PF18
-* GPIO_19 PG3 PF19
-* GPIO_20 PG4 PF20
-* GPIO_21 PG5 PF21
-* GPIO_22 PG6 PF22
-* GPIO_23 PG7 PF23
-* GPIO_24 PG8 PF24
-* GPIO_25 PG9 PF25
-* GPIO_26 PG10 PF26
-* GPIO_27 PG11 PF27
-* GPIO_28 PG12 PF28
-* GPIO_29 PG13 PF29
-* GPIO_30 PG14 PF30
-* GPIO_31 PG15 PF31
-* GPIO_32 PH0 PF32
-* GPIO_33 PH1 PF33
-* GPIO_34 PH2 PF34
-* GPIO_35 PH3 PF35
-* GPIO_36 PH4 PF36
-* GPIO_37 PH5 PF37
-* GPIO_38 PH6 PF38
-* GPIO_39 PH7 PF39
-* GPIO_40 PH8 PF40
-* GPIO_41 PH9 PF41
-* GPIO_42 PH10 PF42
-* GPIO_43 PH11 PF43
-* GPIO_44 PH12 PF44
-* GPIO_45 PH13 PF45
-* GPIO_46 PH14 PF46
-* GPIO_47 PH15 PF47
-*/
-
#ifndef __ARCH_BLACKFIN_GPIO_H__
#define __ARCH_BLACKFIN_GPIO_H__
-#define gpio_bank(x) ((x) >> 4)
-#define gpio_bit(x) (1<<((x) & 0xF))
-#define gpio_sub_n(x) ((x) & 0xF)
+#define gpio_bank(x) ((x) >> 4)
+#define gpio_bit(x) (1<<((x) & 0xF))
+#define gpio_sub_n(x) ((x) & 0xF)
-#define GPIO_BANKSIZE 16
+#define GPIO_BANKSIZE 16
+#define GPIO_BANK_NUM DIV_ROUND_UP(MAX_BLACKFIN_GPIOS, GPIO_BANKSIZE)
+
+#include <mach/gpio.h>
#define GPIO_0 0
#define GPIO_1 1
@@ -139,151 +88,9 @@
#define GPIO_46 46
#define GPIO_47 47
-
#define PERIPHERAL_USAGE 1
#define GPIO_USAGE 0
-#ifdef BF533_FAMILY
-#define MAX_BLACKFIN_GPIOS 16
-
-#define GPIO_PF0 0
-#define GPIO_PF1 1
-#define GPIO_PF2 2
-#define GPIO_PF3 3
-#define GPIO_PF4 4
-#define GPIO_PF5 5
-#define GPIO_PF6 6
-#define GPIO_PF7 7
-#define GPIO_PF8 8
-#define GPIO_PF9 9
-#define GPIO_PF10 10
-#define GPIO_PF11 11
-#define GPIO_PF12 12
-#define GPIO_PF13 13
-#define GPIO_PF14 14
-#define GPIO_PF15 15
-
-#endif
-
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
-#define MAX_BLACKFIN_GPIOS 48
-
-#define GPIO_PF0 0
-#define GPIO_PF1 1
-#define GPIO_PF2 2
-#define GPIO_PF3 3
-#define GPIO_PF4 4
-#define GPIO_PF5 5
-#define GPIO_PF6 6
-#define GPIO_PF7 7
-#define GPIO_PF8 8
-#define GPIO_PF9 9
-#define GPIO_PF10 10
-#define GPIO_PF11 11
-#define GPIO_PF12 12
-#define GPIO_PF13 13
-#define GPIO_PF14 14
-#define GPIO_PF15 15
-#define GPIO_PG0 16
-#define GPIO_PG1 17
-#define GPIO_PG2 18
-#define GPIO_PG3 19
-#define GPIO_PG4 20
-#define GPIO_PG5 21
-#define GPIO_PG6 22
-#define GPIO_PG7 23
-#define GPIO_PG8 24
-#define GPIO_PG9 25
-#define GPIO_PG10 26
-#define GPIO_PG11 27
-#define GPIO_PG12 28
-#define GPIO_PG13 29
-#define GPIO_PG14 30
-#define GPIO_PG15 31
-#define GPIO_PH0 32
-#define GPIO_PH1 33
-#define GPIO_PH2 34
-#define GPIO_PH3 35
-#define GPIO_PH4 36
-#define GPIO_PH5 37
-#define GPIO_PH6 38
-#define GPIO_PH7 39
-#define GPIO_PH8 40
-#define GPIO_PH9 41
-#define GPIO_PH10 42
-#define GPIO_PH11 43
-#define GPIO_PH12 44
-#define GPIO_PH13 45
-#define GPIO_PH14 46
-#define GPIO_PH15 47
-
-#define PORT_F GPIO_PF0
-#define PORT_G GPIO_PG0
-#define PORT_H GPIO_PH0
-
-#endif
-
-#ifdef BF548_FAMILY
-#include <mach/gpio.h>
-#endif
-
-#ifdef BF561_FAMILY
-#define MAX_BLACKFIN_GPIOS 48
-
-#define GPIO_PF0 0
-#define GPIO_PF1 1
-#define GPIO_PF2 2
-#define GPIO_PF3 3
-#define GPIO_PF4 4
-#define GPIO_PF5 5
-#define GPIO_PF6 6
-#define GPIO_PF7 7
-#define GPIO_PF8 8
-#define GPIO_PF9 9
-#define GPIO_PF10 10
-#define GPIO_PF11 11
-#define GPIO_PF12 12
-#define GPIO_PF13 13
-#define GPIO_PF14 14
-#define GPIO_PF15 15
-#define GPIO_PF16 16
-#define GPIO_PF17 17
-#define GPIO_PF18 18
-#define GPIO_PF19 19
-#define GPIO_PF20 20
-#define GPIO_PF21 21
-#define GPIO_PF22 22
-#define GPIO_PF23 23
-#define GPIO_PF24 24
-#define GPIO_PF25 25
-#define GPIO_PF26 26
-#define GPIO_PF27 27
-#define GPIO_PF28 28
-#define GPIO_PF29 29
-#define GPIO_PF30 30
-#define GPIO_PF31 31
-#define GPIO_PF32 32
-#define GPIO_PF33 33
-#define GPIO_PF34 34
-#define GPIO_PF35 35
-#define GPIO_PF36 36
-#define GPIO_PF37 37
-#define GPIO_PF38 38
-#define GPIO_PF39 39
-#define GPIO_PF40 40
-#define GPIO_PF41 41
-#define GPIO_PF42 42
-#define GPIO_PF43 43
-#define GPIO_PF44 44
-#define GPIO_PF45 45
-#define GPIO_PF46 46
-#define GPIO_PF47 47
-
-#define PORT_FIO0 GPIO_0
-#define PORT_FIO1 GPIO_16
-#define PORT_FIO2 GPIO_32
-#endif
-
#ifndef __ASSEMBLY__
/***********************************************************
@@ -425,20 +232,73 @@ struct gpio_port_s {
* MODIFICATION HISTORY :
**************************************************************/
-int gpio_request(unsigned, const char *);
-void gpio_free(unsigned);
+int bfin_gpio_request(unsigned gpio, const char *label);
+void bfin_gpio_free(unsigned gpio);
+int bfin_gpio_irq_request(unsigned gpio, const char *label);
+void bfin_gpio_irq_free(unsigned gpio);
+int bfin_gpio_direction_input(unsigned gpio);
+int bfin_gpio_direction_output(unsigned gpio, int value);
+int bfin_gpio_get_value(unsigned gpio);
+void bfin_gpio_set_value(unsigned gpio, int value);
-void gpio_set_value(unsigned gpio, int arg);
-int gpio_get_value(unsigned gpio);
+#ifdef CONFIG_GPIOLIB
+#include <asm-generic/gpio.h> /* cansleep wrappers */
-#ifndef BF548_FAMILY
-#define gpio_set_value(gpio, value) set_gpio_data(gpio, value)
-#endif
+static inline int gpio_get_value(unsigned int gpio)
+{
+ if (gpio < MAX_BLACKFIN_GPIOS)
+ return bfin_gpio_get_value(gpio);
+ else
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ if (gpio < MAX_BLACKFIN_GPIOS)
+ bfin_gpio_set_value(gpio, value);
+ else
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+#else /* !CONFIG_GPIOLIB */
+
+static inline int gpio_request(unsigned gpio, const char *label)
+{
+ return bfin_gpio_request(gpio, label);
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+ return bfin_gpio_free(gpio);
+}
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
+static inline int gpio_direction_input(unsigned gpio)
+{
+ return bfin_gpio_direction_input(gpio);
+}
+
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+ return bfin_gpio_direction_output(gpio, value);
+}
+
+static inline int gpio_get_value(unsigned gpio)
+{
+ return bfin_gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ return bfin_gpio_set_value(gpio, value);
+}
#include <asm-generic/gpio.h> /* cansleep wrappers */
+#endif /* !CONFIG_GPIOLIB */
#include <asm/irq.h>
static inline int gpio_to_irq(unsigned gpio)
diff --git a/arch/blackfin/include/asm/hardirq.h b/arch/blackfin/include/asm/hardirq.h
index b6b19f1b9dab..717181a1749b 100644
--- a/arch/blackfin/include/asm/hardirq.h
+++ b/arch/blackfin/include/asm/hardirq.h
@@ -42,4 +42,6 @@ typedef struct {
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+extern void ack_bad_irq(unsigned int irq);
+
#endif
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index 7dc77a21fdf3..63b2d8c78570 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -94,12 +94,12 @@ static inline unsigned int readl(const volatile void __iomem *addr)
#define outw_p(x,addr) outw(x,addr)
#define outl_p(x,addr) outl(x,addr)
-#define ioread8_rep(a,d,c) insb(a,d,c)
-#define ioread16_rep(a,d,c) insw(a,d,c)
-#define ioread32_rep(a,d,c) insl(a,d,c)
-#define iowrite8_rep(a,s,c) outsb(a,s,c)
-#define iowrite16_rep(a,s,c) outsw(a,s,c)
-#define iowrite32_rep(a,s,c) outsl(a,s,c)
+#define ioread8_rep(a,d,c) readsb(a,d,c)
+#define ioread16_rep(a,d,c) readsw(a,d,c)
+#define ioread32_rep(a,d,c) readsl(a,d,c)
+#define iowrite8_rep(a,s,c) writesb(a,s,c)
+#define iowrite16_rep(a,s,c) writesw(a,s,c)
+#define iowrite32_rep(a,s,c) writesl(a,s,c)
#define ioread8(X) readb(X)
#define ioread16(X) readw(X)
@@ -108,6 +108,8 @@ static inline unsigned int readl(const volatile void __iomem *addr)
#define iowrite16(val,X) writew(val,X)
#define iowrite32(val,X) writel(val,X)
+#define mmiowb() wmb()
+
#define IO_SPACE_LIMIT 0xffffffff
/* Values for nocacheflag and cmode */
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
new file mode 100644
index 000000000000..76f53d8b9a0d
--- /dev/null
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -0,0 +1,278 @@
+/* -*- linux-c -*-
+ * include/asm-blackfin/ipipe.h
+ *
+ * Copyright (C) 2002-2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_BLACKFIN_IPIPE_H
+#define __ASM_BLACKFIN_IPIPE_H
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+#include <linux/ipipe_percpu.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/atomic.h>
+#include <asm/traps.h>
+
+#define IPIPE_ARCH_STRING "1.8-00"
+#define IPIPE_MAJOR_NUMBER 1
+#define IPIPE_MINOR_NUMBER 8
+#define IPIPE_PATCH_NUMBER 0
+
+#ifdef CONFIG_SMP
+#error "I-pipe/blackfin: SMP not implemented"
+#else /* !CONFIG_SMP */
+#define ipipe_processor_id() 0
+#endif /* CONFIG_SMP */
+
+#define prepare_arch_switch(next) \
+do { \
+ ipipe_schedule_notify(current, next); \
+ local_irq_disable_hw(); \
+} while (0)
+
+#define task_hijacked(p) \
+ ({ \
+ int __x__ = ipipe_current_domain != ipipe_root_domain; \
+ /* We would need to clear the SYNC flag for the root domain */ \
+ /* over the current processor in SMP mode. */ \
+ local_irq_enable_hw(); __x__; \
+ })
+
+struct ipipe_domain;
+
+struct ipipe_sysinfo {
+
+ int ncpus; /* Number of CPUs on board */
+ u64 cpufreq; /* CPU frequency (in Hz) */
+
+ /* Arch-dependent block */
+
+ struct {
+ unsigned tmirq; /* Timer tick IRQ */
+ u64 tmfreq; /* Timer frequency */
+ } archdep;
+};
+
+#define ipipe_read_tsc(t) \
+ ({ \
+ unsigned long __cy2; \
+ __asm__ __volatile__ ("1: %0 = CYCLES2\n" \
+ "%1 = CYCLES\n" \
+ "%2 = CYCLES2\n" \
+ "CC = %2 == %0\n" \
+ "if ! CC jump 1b\n" \
+ : "=r" (((unsigned long *)&t)[1]), \
+ "=r" (((unsigned long *)&t)[0]), \
+ "=r" (__cy2) \
+ : /*no input*/ : "CC"); \
+ t; \
+ })
+
+#define ipipe_cpu_freq() __ipipe_core_clock
+#define ipipe_tsc2ns(_t) (((unsigned long)(_t)) * __ipipe_freq_scale)
+#define ipipe_tsc2us(_t) (ipipe_tsc2ns(_t) / 1000 + 1)
+
+/* Private interface -- Internal use only */
+
+#define __ipipe_check_platform() do { } while (0)
+
+#define __ipipe_init_platform() do { } while (0)
+
+extern atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+extern unsigned long __ipipe_irq_lvmask;
+
+extern struct ipipe_domain ipipe_root;
+
+/* enable/disable_irqdesc _must_ be used in pairs. */
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
+ unsigned irq);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
+ unsigned irq);
+
+#define __ipipe_enable_irq(irq) (irq_desc[irq].chip->unmask(irq))
+
+#define __ipipe_disable_irq(irq) (irq_desc[irq].chip->mask(irq))
+
+#define __ipipe_lock_root() \
+ set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+
+#define __ipipe_unlock_root() \
+ clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+
+void __ipipe_enable_pipeline(void);
+
+#define __ipipe_hook_critical_ipi(ipd) do { } while (0)
+
+#define __ipipe_sync_pipeline(syncmask) \
+ do { \
+ struct ipipe_domain *ipd = ipipe_current_domain; \
+ if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \
+ __ipipe_sync_stage(syncmask); \
+ } while (0)
+
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
+
+int __ipipe_get_irq_priority(unsigned irq);
+
+int __ipipe_get_irqthread_priority(unsigned irq);
+
+void __ipipe_stall_root_raw(void);
+
+void __ipipe_unstall_root_raw(void);
+
+void __ipipe_serial_debug(const char *fmt, ...);
+
+DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+extern unsigned long __ipipe_core_clock;
+
+extern unsigned long __ipipe_freq_scale;
+
+extern unsigned long __ipipe_irq_tail_hook;
+
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+ return ffs(ul) - 1;
+}
+
+#define __ipipe_run_irqtail() /* Must be a macro */ \
+ do { \
+ asmlinkage void __ipipe_call_irqtail(void); \
+ unsigned long __pending; \
+ CSYNC(); \
+ __pending = bfin_read_IPEND(); \
+ if (__pending & 0x8000) { \
+ __pending &= ~0x8010; \
+ if (__pending && (__pending & (__pending - 1)) == 0) \
+ __ipipe_call_irqtail(); \
+ } \
+ } while (0)
+
+#define __ipipe_run_isr(ipd, irq) \
+ do { \
+ if (ipd == ipipe_root_domain) { \
+ /* \
+ * Note: the I-pipe implements a threaded interrupt model on \
+ * this arch for Linux external IRQs. The interrupt handler we \
+ * call here only wakes up the associated IRQ thread. \
+ */ \
+ if (ipipe_virtual_irq_p(irq)) { \
+ /* No irqtail here; virtual interrupts have no effect \
+ on IPEND so there is no need for processing \
+ deferral. */ \
+ local_irq_enable_nohead(ipd); \
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
+ local_irq_disable_nohead(ipd); \
+ } else \
+ /* \
+ * No need to run the irqtail here either; \
+ * we can't be preempted by hw IRQs, so \
+ * non-Linux IRQs cannot stack over the short \
+ * thread wakeup code. Which in turn means \
+ * that no irqtail condition could be pending \
+ * for domains above Linux in the pipeline. \
+ */ \
+ ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
+ } else { \
+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+ local_irq_enable_nohead(ipd); \
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
+ /* Attempt to exit the outer interrupt level before \
+ * starting the deferred IRQ processing. */ \
+ local_irq_disable_nohead(ipd); \
+ __ipipe_run_irqtail(); \
+ __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+ } \
+ } while (0)
+
+#define __ipipe_syscall_watched_p(p, sc) \
+ (((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
+
+void ipipe_init_irq_threads(void);
+
+int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
+
+#define IS_SYSIRQ(irq) ((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS)
+#define IS_GPIOIRQ(irq) ((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS)
+
+#define IRQ_SYSTMR IRQ_TIMER0
+#define IRQ_PRIOTMR CONFIG_IRQ_TIMER0
+
+#if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533)
+#define PRIO_GPIODEMUX(irq) CONFIG_PFA
+#elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
+#define PRIO_GPIODEMUX(irq) CONFIG_IRQ_PROG_INTA
+#elif defined(CONFIG_BF52x)
+#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \
+ (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \
+ (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \
+ -1)
+#elif defined(CONFIG_BF561)
+#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \
+ (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \
+ (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \
+ -1)
+#define bfin_write_TIMER_DISABLE(val) bfin_write_TMRS8_DISABLE(val)
+#define bfin_write_TIMER_ENABLE(val) bfin_write_TMRS8_ENABLE(val)
+#define bfin_write_TIMER_STATUS(val) bfin_write_TMRS8_STATUS(val)
+#define bfin_read_TIMER_STATUS() bfin_read_TMRS8_STATUS()
+#elif defined(CONFIG_BF54x)
+#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \
+ (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \
+ (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \
+ (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \
+ -1)
+#define bfin_write_TIMER_DISABLE(val) bfin_write_TIMER_DISABLE0(val)
+#define bfin_write_TIMER_ENABLE(val) bfin_write_TIMER_ENABLE0(val)
+#define bfin_write_TIMER_STATUS(val) bfin_write_TIMER_STATUS0(val)
+#define bfin_read_TIMER_STATUS(val) bfin_read_TIMER_STATUS0(val)
+#else
+# error "no PRIO_GPIODEMUX() for this part"
+#endif
+
+#define __ipipe_root_tick_p(regs) ((regs->ipend & 0x10) != 0)
+
+#else /* !CONFIG_IPIPE */
+
+#define task_hijacked(p) 0
+#define ipipe_trap_notify(t, r) 0
+
+#define __ipipe_stall_root_raw() do { } while (0)
+#define __ipipe_unstall_root_raw() do { } while (0)
+
+#define ipipe_init_irq_threads() do { } while (0)
+#define ipipe_start_irq_thread(irq, desc) 0
+
+#define IRQ_SYSTMR IRQ_CORETMR
+#define IRQ_PRIOTMR IRQ_CORETMR
+
+#define __ipipe_root_tick_p(regs) 1
+
+#endif /* !CONFIG_IPIPE */
+
+#endif /* !__ASM_BLACKFIN_IPIPE_H */
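
A host-side model (not kernel code) of the retry loop inside ipipe_read_tsc(): the high word is sampled before and after the low word, and the read is repeated if a carry from CYCLES into CYCLES2 slipped in between. read_cycles_lo()/read_cycles_hi() are stand-ins for the hardware CYCLES/CYCLES2 registers, introduced only for this sketch.

#include <stdint.h>

static volatile uint64_t fake_cycles;		/* stands in for the free-running counter */

static uint32_t read_cycles_lo(void) { return (uint32_t)fake_cycles; }
static uint32_t read_cycles_hi(void) { return (uint32_t)(fake_cycles >> 32); }

static uint64_t read_tsc64(void)
{
	uint32_t hi, lo, hi2;

	do {
		hi  = read_cycles_hi();		/* CYCLES2 */
		lo  = read_cycles_lo();		/* CYCLES  */
		hi2 = read_cycles_hi();		/* CYCLES2 again */
	} while (hi != hi2);			/* low word wrapped in between: retry */

	return ((uint64_t)hi << 32) | lo;
}
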
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
new file mode 100644
index 000000000000..cb1025aeabcf
--- /dev/null
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -0,0 +1,80 @@
+/* -*- linux-c -*-
+ * include/asm-blackfin/ipipe_base.h
+ *
+ * Copyright (C) 2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_BLACKFIN_IPIPE_BASE_H
+#define __ASM_BLACKFIN_IPIPE_BASE_H
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_NR_XIRQS NR_IRQS
+#define IPIPE_IRQ_ISHIFT 5 /* 2^5 for 32bits arch. */
+
+/* Blackfin-specific, global domain flags */
+#define IPIPE_ROOTLOCK_FLAG 1 /* Lock pipeline for root */
+
+ /* Blackfin traps -- i.e. exception vector numbers */
+#define IPIPE_NR_FAULTS 52 /* We leave a gap after VEC_ILL_RES. */
+/* Pseudo-vectors used for kernel events */
+#define IPIPE_FIRST_EVENT IPIPE_NR_FAULTS
+#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT)
+#define IPIPE_EVENT_SCHEDULE (IPIPE_FIRST_EVENT + 1)
+#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 2)
+#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 3)
+#define IPIPE_EVENT_INIT (IPIPE_FIRST_EVENT + 4)
+#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 5)
+#define IPIPE_EVENT_CLEANUP (IPIPE_FIRST_EVENT + 6)
+#define IPIPE_LAST_EVENT IPIPE_EVENT_CLEANUP
+#define IPIPE_NR_EVENTS (IPIPE_LAST_EVENT + 1)
+
+#define IPIPE_TIMER_IRQ IRQ_CORETMR
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bitops.h>
+
+extern int test_bit(int nr, const void *addr);
+
+
+extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
+
+static inline void __ipipe_stall_root(void)
+{
+ volatile unsigned long *p = &__ipipe_root_status;
+ set_bit(0, p);
+}
+
+static inline unsigned long __ipipe_test_and_stall_root(void)
+{
+ volatile unsigned long *p = &__ipipe_root_status;
+ return test_and_set_bit(0, p);
+}
+
+static inline unsigned long __ipipe_test_root(void)
+{
+ const unsigned long *p = &__ipipe_root_status;
+ return test_bit(0, p);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */
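
A plain user-space model (assumptions: single thread, no atomicity) of the root-domain stall bit manipulated above: bit 0 of a status word acts as a software interrupt-disable flag, so "stalling" the root domain is a test-and-set of that bit and testing it answers irqs_disabled() for the pipeline.

#include <stdio.h>

static unsigned long root_status;		/* models __ipipe_root_status */

static void stall_root(void)
{
	root_status |= 1UL;			/* bit 0 set = root stalled */
}

static int test_and_stall_root(void)
{
	int old = (int)(root_status & 1UL);
	root_status |= 1UL;
	return old;
}

static int test_root(void)
{
	return (int)(root_status & 1UL);
}

int main(void)
{
	printf("%d\n", test_root());		/* 0: root accepts interrupts */
	printf("%d\n", test_and_stall_root());	/* 0, and the root is now stalled */
	printf("%d\n", test_root());		/* 1 */
	stall_root();				/* idempotent */
	return 0;
}
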
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 89f59e18af93..3d977909ce7d 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -17,56 +17,272 @@
#ifndef _BFIN_IRQ_H_
#define _BFIN_IRQ_H_
+/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/
#include <mach/irq.h>
-#include <asm/ptrace.h>
-
-/*******************************************************************************
- ***** INTRODUCTION ***********
- * On the Blackfin, the interrupt structure allows remmapping of the hardware
- * levels.
- * - I'm going to assume that the H/W level is going to stay at the default
- * settings. If someone wants to go through and abstart this out, feel free
- * to mod the interrupt numbering scheme.
- * - I'm abstracting the interrupts so that uClinux does not know anything
- * about the H/W levels. If you want to change the H/W AND keep the abstracted
- * levels that uClinux sees, you should be able to do most of it here.
- * - I've left the "abstract" numbering sparce in case someone wants to pull the
- * interrupts apart (just the TX/RX for the various devices)
- *******************************************************************************/
+#include <asm/pda.h>
+#include <asm/processor.h>
-/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/
+#ifdef CONFIG_SMP
+/* Forward declaration needed due to interdependencies between these definitions */
+static inline uint32_t __pure bfin_dspid(void);
+# define blackfin_core_id() (bfin_dspid() & 0xff)
+# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
+#else
+extern unsigned long bfin_irq_flags;
+#endif
-/*
- * Machine specific interrupt sources.
- *
- * Adding an interrupt service routine for a source with this bit
- * set indicates a special machine specific interrupt source.
- * The machine specific files define these sources.
- *
- * The IRQ_MACHSPEC bit is now gone - the only thing it did was to
- * introduce unnecessary overhead.
- *
- * All interrupt handling is actually machine specific so it is better
- * to use function pointers, as used by the Sparc port, and select the
- * interrupt handling functions when initializing the kernel. This way
- * we save some unnecessary overhead at run-time.
- * 01/11/97 - Jes
- */
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_trace.h>
+
+void __ipipe_unstall_root(void);
+
+void __ipipe_restore_root(unsigned long flags);
+
+#ifdef CONFIG_DEBUG_HWERR
+# define __all_masked_irq_flags 0x3f
+# define __save_and_cli_hw(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ "sti %1;" \
+ : "=&d"(x) \
+ : "d" (0x3F) \
+ )
+#else
+# define __all_masked_irq_flags 0x1f
+# define __save_and_cli_hw(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ : "=&d"(x) \
+ )
+#endif
+
+#define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags)
+#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
+#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x)
-extern void ack_bad_irq(unsigned int irq);
+#define local_save_flags(x) \
+ do { \
+ (x) = __ipipe_test_root() ? \
+ __all_masked_irq_flags : bfin_irq_flags; \
+ } while (0)
-static __inline__ int irq_canonicalize(int irq)
+#define local_irq_save(x) \
+ do { \
+ (x) = __ipipe_test_and_stall_root(); \
+ } while (0)
+
+#define local_irq_restore(x) __ipipe_restore_root(x)
+#define local_irq_disable() __ipipe_stall_root()
+#define local_irq_enable() __ipipe_unstall_root()
+#define irqs_disabled() __ipipe_test_root()
+
+#define local_save_flags_hw(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ "sti %0;" \
+ : "=d"(x) \
+ )
+
+#define irqs_disabled_hw() \
+ ({ \
+ unsigned long flags; \
+ local_save_flags_hw(flags); \
+ !irqs_enabled_from_flags_hw(flags); \
+ })
+
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
{
- return irq;
+ /* Merge virtual and real interrupt mask bits into a single
+ 32bit word. */
+ return (real & ~(1 << 31)) | ((virt != 0) << 31);
+}
+
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+ int virt = (*x & (1 << 31)) != 0;
+ *x &= ~(1L << 31);
+ return virt;
}
-/* count of spurious interrupts */
-/* extern volatile unsigned int num_spurious; */
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+
+#define local_irq_disable_hw() \
+ do { \
+ int _tmp_dummy; \
+ if (!irqs_disabled_hw()) \
+ ipipe_trace_begin(0x80000000); \
+ __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
+ } while (0)
+
+#define local_irq_enable_hw() \
+ do { \
+ if (irqs_disabled_hw()) \
+ ipipe_trace_end(0x80000000); \
+ __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \
+ } while (0)
+
+#define local_irq_save_hw(x) \
+ do { \
+ __save_and_cli_hw(x); \
+ if (local_test_iflag_hw(x)) \
+ ipipe_trace_begin(0x80000001); \
+ } while (0)
+
+#define local_irq_restore_hw(x) \
+ do { \
+ if (local_test_iflag_hw(x)) { \
+ ipipe_trace_end(0x80000001); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+
+#define local_irq_disable_hw_notrace() \
+ do { \
+ int _tmp_dummy; \
+ __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
+ } while (0)
+
+#define local_irq_enable_hw_notrace() \
+ __asm__ __volatile__( \
+ "sti %0;" \
+ : \
+ : "d"(bfin_irq_flags) \
+ )
-#ifndef NO_IRQ
-#define NO_IRQ ((unsigned int)(-1))
+#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x)
+
+#define local_irq_restore_hw_notrace(x) \
+ do { \
+ if (local_test_iflag_hw(x)) \
+ local_irq_enable_hw_notrace(); \
+ } while (0)
+
+#else /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define local_irq_enable_hw() \
+ __asm__ __volatile__( \
+ "sti %0;" \
+ : \
+ : "d"(bfin_irq_flags) \
+ )
+
+#define local_irq_disable_hw() \
+ do { \
+ int _tmp_dummy; \
+ __asm__ __volatile__ ( \
+ "cli %0;" \
+ : "=d" (_tmp_dummy)); \
+ } while (0)
+
+#define local_irq_restore_hw(x) \
+ do { \
+ if (irqs_enabled_from_flags_hw(x)) \
+ local_irq_enable_hw(); \
+ } while (0)
+
+#define local_irq_save_hw(x) __save_and_cli_hw(x)
+
+#define local_irq_disable_hw_notrace() local_irq_disable_hw()
+#define local_irq_enable_hw_notrace() local_irq_enable_hw()
+#define local_irq_save_hw_notrace(x) local_irq_save_hw(x)
+#define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x)
+
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#else /* !CONFIG_IPIPE */
+
+/*
+ * Interrupt configuring macros.
+ */
+#define local_irq_disable() \
+ do { \
+ int __tmp_dummy; \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ : "=d" (__tmp_dummy) \
+ ); \
+ } while (0)
+
+#define local_irq_enable() \
+ __asm__ __volatile__( \
+ "sti %0;" \
+ : \
+ : "d" (bfin_irq_flags) \
+ )
+
+#ifdef CONFIG_DEBUG_HWERR
+# define __save_and_cli(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ "sti %1;" \
+ : "=&d" (x) \
+ : "d" (0x3F) \
+ )
+#else
+# define __save_and_cli(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ : "=&d" (x) \
+ )
#endif
-#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
+#define local_save_flags(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ "sti %0;" \
+ : "=d" (x) \
+ )
+
+#ifdef CONFIG_DEBUG_HWERR
+#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
+#else
+#define irqs_enabled_from_flags(x) ((x) != 0x1f)
+#endif
+
+#define local_irq_restore(x) \
+ do { \
+ if (irqs_enabled_from_flags(x)) \
+ local_irq_enable(); \
+ } while (0)
+
+/* For spinlocks etc */
+#define local_irq_save(x) __save_and_cli(x)
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ !irqs_enabled_from_flags(flags); \
+})
+
+#define local_irq_save_hw(x) local_irq_save(x)
+#define local_irq_restore_hw(x) local_irq_restore(x)
+#define local_irq_enable_hw() local_irq_enable()
+#define local_irq_disable_hw() local_irq_disable()
+#define irqs_disabled_hw() irqs_disabled()
+
+#endif /* !CONFIG_IPIPE */
+
+#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
+# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
+#else
+# define NOP_PAD_ANOMALY_05000244
+#endif
+
+#define idle_with_irq_disabled() \
+ __asm__ __volatile__( \
+ NOP_PAD_ANOMALY_05000244 \
+ ".align 8;" \
+ "sti %0;" \
+ "idle;" \
+ : \
+ : "d" (bfin_irq_flags) \
+ )
+
+static inline int irq_canonicalize(int irq)
+{
+ return irq;
+}
#endif /* _BFIN_IRQ_H_ */
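
A small stand-alone model (plain C, host-side) of raw_mangle_irq_bits()/raw_demangle_irq_bits() from the hunk above: the virtual (pipeline) disable state is folded into bit 31 of the saved hardware flags so a single word can carry both, and it is split back out on restore.

#include <assert.h>

static unsigned long mangle_irq_bits(int virt, unsigned long real)
{
	/* keep the hw flags, park the virtual state in bit 31 */
	return (real & ~(1UL << 31)) | ((unsigned long)(virt != 0) << 31);
}

static int demangle_irq_bits(unsigned long *x)
{
	int virt = (*x & (1UL << 31)) != 0;

	*x &= ~(1UL << 31);			/* hand back the pure hw flags */
	return virt;
}

int main(void)
{
	unsigned long flags = mangle_irq_bits(1, 0x1f);	/* virtually stalled, hw mask 0x1f */

	assert(demangle_irq_bits(&flags) == 1);
	assert(flags == 0x1f);
	return 0;
}
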
diff --git a/arch/blackfin/include/asm/kgdb.h b/arch/blackfin/include/asm/kgdb.h
index 26ebac6646d8..c8b256d2ea30 100644
--- a/arch/blackfin/include/asm/kgdb.h
+++ b/arch/blackfin/include/asm/kgdb.h
@@ -1,32 +1,8 @@
-/*
- * File: include/asm-blackfin/kgdb.h
- * Based on:
- * Author: Sonic Zhang
- *
- * Created:
- * Description:
- *
- * Rev: $Id: kgdb_bfin_linux-2.6.x.patch 4934 2007-02-13 09:32:11Z sonicz $
- *
- * Modified:
- * Copyright 2005-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+/* Blackfin KGDB header
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright 2005-2009 Analog Devices Inc.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
#ifndef __ASM_BLACKFIN_KGDB_H__
@@ -37,17 +13,18 @@
/* gdb locks */
#define KGDB_MAX_NO_CPUS 8
-/************************************************************************/
-/* BUFMAX defines the maximum number of characters in inbound/outbound buffers*/
-/* at least NUMREGBYTES*2 are needed for register packets */
-/* Longer buffer is needed to list all threads */
+/*
+ * BUFMAX defines the maximum number of characters in inbound/outbound buffers.
+ * At least NUMREGBYTES*2 are needed for register packets.
+ * Longer buffer is needed to list all threads.
+ */
#define BUFMAX 2048
/*
- * Note that this register image is different from
- * the register image that Linux produces at interrupt time.
- *
- * Linux's register image is defined by struct pt_regs in ptrace.h.
+ * Note that this register image is different from
+ * the register image that Linux produces at interrupt time.
+ *
+ * Linux's register image is defined by struct pt_regs in ptrace.h.
*/
enum regnames {
/* Core Registers */
@@ -104,14 +81,14 @@ enum regnames {
BFIN_RETX,
BFIN_RETN,
BFIN_RETE,
-
+
/* Pseudo Registers */
BFIN_PC,
BFIN_CC,
BFIN_EXTRA1, /* Address of .text section. */
BFIN_EXTRA2, /* Address of .data section. */
BFIN_EXTRA3, /* Address of .bss section. */
- BFIN_FDPIC_EXEC,
+ BFIN_FDPIC_EXEC,
BFIN_FDPIC_INTERP,
/* MMRs */
@@ -126,7 +103,7 @@ enum regnames {
static inline void arch_kgdb_breakpoint(void)
{
- asm(" EXCPT 2;");
+ asm("EXCPT 2;");
}
#define BREAK_INSTR_SIZE 2
#define CACHE_FLUSH_IS_SAFE 1
diff --git a/arch/blackfin/include/asm/l1layout.h b/arch/blackfin/include/asm/l1layout.h
index c13ded777828..79dbefaa5bef 100644
--- a/arch/blackfin/include/asm/l1layout.h
+++ b/arch/blackfin/include/asm/l1layout.h
@@ -8,6 +8,7 @@
#include <asm/blackfin.h>
+#ifndef CONFIG_SMP
#ifndef __ASSEMBLY__
/* Data that is "mapped" into the process VM at the start of the L1 scratch
@@ -24,8 +25,10 @@ struct l1_scratch_task_info
};
/* A pointer to the structure in memory. */
-#define L1_SCRATCH_TASK_INFO ((struct l1_scratch_task_info *)L1_SCRATCH_START)
+#define L1_SCRATCH_TASK_INFO ((struct l1_scratch_task_info *)\
+ get_l1_scratch_start())
#endif
+#endif
#endif
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
new file mode 100644
index 000000000000..61f7487fbf12
--- /dev/null
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -0,0 +1,364 @@
+/*
+ * arch/blackfin/include/asm/mem_init.h - reprogram clocks / memory
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#if defined(EBIU_SDGCTL)
+#if defined(CONFIG_MEM_MT48LC16M16A2TG_75) || \
+ defined(CONFIG_MEM_MT48LC64M4A2FB_7E) || \
+ defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \
+ defined(CONFIG_MEM_GENERIC_BOARD) || \
+ defined(CONFIG_MEM_MT48LC32M8A2_75) || \
+ defined(CONFIG_MEM_MT48LC8M32B2B5_7) || \
+ defined(CONFIG_MEM_MT48LC32M16A2TG_75) || \
+ defined(CONFIG_MEM_MT48LC32M8A2_75)
+#if (CONFIG_SCLK_HZ > 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_7
+#define SDRAM_tRAS_num 7
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_6
+#define SDRAM_tRAS_num 6
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_5
+#define SDRAM_tRAS_num 5
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 4
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_2
+#define SDRAM_tRAS_num 2
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ <= 29850746)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_1
+#define SDRAM_tRAS_num 1
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#endif
+
+#if defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \
+ defined(CONFIG_MEM_MT48LC8M32B2B5_7)
+ /*SDRAM INFORMATION: */
+#define SDRAM_Tref 64 /* Refresh period in milliseconds */
+#define SDRAM_NRA 4096 /* Number of row addresses in SDRAM */
+#define SDRAM_CL CL_3
+#endif
+
+#if defined(CONFIG_MEM_MT48LC32M8A2_75) || \
+ defined(CONFIG_MEM_MT48LC64M4A2FB_7E) || \
+ defined(CONFIG_MEM_GENERIC_BOARD) || \
+ defined(CONFIG_MEM_MT48LC32M16A2TG_75) || \
+ defined(CONFIG_MEM_MT48LC16M16A2TG_75) || \
+ defined(CONFIG_MEM_MT48LC32M8A2_75)
+ /*SDRAM INFORMATION: */
+#define SDRAM_Tref 64 /* Refresh period in milliseconds */
+#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */
+#define SDRAM_CL CL_3
+#endif
+
+
+#ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC
+/* Equation from section 17 (p17-46) of BF533 HRM */
+#define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
+
+/* Enable SCLK Out */
+#define mem_SDGCTL (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS)
+#else
+#define mem_SDRRC CONFIG_MEM_SDRRC
+#define mem_SDGCTL CONFIG_MEM_SDGCTL
+#endif
+#endif
+
+
+#if defined(EBIU_DDRCTL0)
+#define MIN_DDR_SCLK(x) (x*(CONFIG_SCLK_HZ/1000/1000)/1000 + 1)
+#define MAX_DDR_SCLK(x) (x*(CONFIG_SCLK_HZ/1000/1000)/1000)
+#define DDR_CLK_HZ(x) (1000*1000*1000/x)
+
+#if defined(CONFIG_MEM_MT46V32M16_6T)
+#define DDR_SIZE DEVSZ_512
+#define DDR_WIDTH DEVWD_16
+#define DDR_MAX_tCK 13
+
+#define DDR_tRC DDR_TRC(MIN_DDR_SCLK(60))
+#define DDR_tRAS DDR_TRAS(MIN_DDR_SCLK(42))
+#define DDR_tRP DDR_TRP(MIN_DDR_SCLK(15))
+#define DDR_tRFC DDR_TRFC(MIN_DDR_SCLK(72))
+#define DDR_tREFI DDR_TREFI(MAX_DDR_SCLK(7800))
+
+#define DDR_tRCD DDR_TRCD(MIN_DDR_SCLK(15))
+#define DDR_tWTR DDR_TWTR(1)
+#define DDR_tMRD DDR_TMRD(MIN_DDR_SCLK(12))
+#define DDR_tWR DDR_TWR(MIN_DDR_SCLK(15))
+#endif
+
+#if defined(CONFIG_MEM_MT46V32M16_5B)
+#define DDR_SIZE DEVSZ_512
+#define DDR_WIDTH DEVWD_16
+#define DDR_MAX_tCK 13
+
+#define DDR_tRC DDR_TRC(MIN_DDR_SCLK(55))
+#define DDR_tRAS DDR_TRAS(MIN_DDR_SCLK(40))
+#define DDR_tRP DDR_TRP(MIN_DDR_SCLK(15))
+#define DDR_tRFC DDR_TRFC(MIN_DDR_SCLK(70))
+#define DDR_tREFI DDR_TREFI(MAX_DDR_SCLK(7800))
+
+#define DDR_tRCD DDR_TRCD(MIN_DDR_SCLK(15))
+#define DDR_tWTR DDR_TWTR(2)
+#define DDR_tMRD DDR_TMRD(MIN_DDR_SCLK(10))
+#define DDR_tWR DDR_TWR(MIN_DDR_SCLK(15))
+#endif
+
+#if defined(CONFIG_MEM_GENERIC_BOARD)
+#define DDR_SIZE DEVSZ_512
+#define DDR_WIDTH DEVWD_16
+#define DDR_MAX_tCK 13
+
+#define DDR_tRCD DDR_TRCD(3)
+#define DDR_tWTR DDR_TWTR(2)
+#define DDR_tWR DDR_TWR(2)
+#define DDR_tMRD DDR_TMRD(2)
+#define DDR_tRP DDR_TRP(3)
+#define DDR_tRAS DDR_TRAS(7)
+#define DDR_tRC DDR_TRC(10)
+#define DDR_tRFC DDR_TRFC(12)
+#define DDR_tREFI DDR_TREFI(1288)
+#endif
+
+#if (CONFIG_SCLK_HZ < DDR_CLK_HZ(DDR_MAX_tCK))
+# error "CONFIG_SCLK_HZ is too small (<DDR_CLK_HZ(DDR_MAX_tCK) Hz)."
+#elif (CONFIG_SCLK_HZ <= 133333333)
+# define DDR_CL CL_2
+#else
+# error "CONFIG_SCLK_HZ is too large (>133333333 Hz)."
+#endif
+
+#ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC
+#define mem_DDRCTL0 (DDR_tRP | DDR_tRAS | DDR_tRC | DDR_tRFC | DDR_tREFI)
+#define mem_DDRCTL1 (DDR_DATWIDTH | EXTBANK_1 | DDR_SIZE | DDR_WIDTH | DDR_tWTR \
+ | DDR_tMRD | DDR_tWR | DDR_tRCD)
+#define mem_DDRCTL2 DDR_CL
+#else
+#define mem_DDRCTL0 CONFIG_MEM_DDRCTL0
+#define mem_DDRCTL1 CONFIG_MEM_DDRCTL1
+#define mem_DDRCTL2 CONFIG_MEM_DDRCTL2
+#endif
+#endif
+
+#if defined CONFIG_CLKIN_HALF
+#define CLKIN_HALF 1
+#else
+#define CLKIN_HALF 0
+#endif
+
+#if defined CONFIG_PLL_BYPASS
+#define PLL_BYPASS 1
+#else
+#define PLL_BYPASS 0
+#endif
+
+/*************************** Currently Not Being Used ***************************/
+
+#if defined(CONFIG_FLASH_SPEED_BWAT) && \
+defined(CONFIG_FLASH_SPEED_BRAT) && \
+defined(CONFIG_FLASH_SPEED_BHT) && \
+defined(CONFIG_FLASH_SPEED_BST) && \
+defined(CONFIG_FLASH_SPEED_BTT)
+
+#define flash_EBIU_AMBCTL_WAT ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+#define flash_EBIU_AMBCTL_RAT ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+#define flash_EBIU_AMBCTL_HT ((CONFIG_FLASH_SPEED_BHT * 4) / (4000000000 / CONFIG_SCLK_HZ))
+#define flash_EBIU_AMBCTL_ST ((CONFIG_FLASH_SPEED_BST * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+#define flash_EBIU_AMBCTL_TT ((CONFIG_FLASH_SPEED_BTT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+
+#if (flash_EBIU_AMBCTL_TT > 3)
+#define flash_EBIU_AMBCTL0_TT B0TT_4
+#endif
+#if (flash_EBIU_AMBCTL_TT == 3)
+#define flash_EBIU_AMBCTL0_TT B0TT_3
+#endif
+#if (flash_EBIU_AMBCTL_TT == 2)
+#define flash_EBIU_AMBCTL0_TT B0TT_2
+#endif
+#if (flash_EBIU_AMBCTL_TT < 2)
+#define flash_EBIU_AMBCTL0_TT B0TT_1
+#endif
+
+#if (flash_EBIU_AMBCTL_ST > 3)
+#define flash_EBIU_AMBCTL0_ST B0ST_4
+#endif
+#if (flash_EBIU_AMBCTL_ST == 3)
+#define flash_EBIU_AMBCTL0_ST B0ST_3
+#endif
+#if (flash_EBIU_AMBCTL_ST == 2)
+#define flash_EBIU_AMBCTL0_ST B0ST_2
+#endif
+#if (flash_EBIU_AMBCTL_ST < 2)
+#define flash_EBIU_AMBCTL0_ST B0ST_1
+#endif
+
+#if (flash_EBIU_AMBCTL_HT > 2)
+#define flash_EBIU_AMBCTL0_HT B0HT_3
+#endif
+#if (flash_EBIU_AMBCTL_HT == 2)
+#define flash_EBIU_AMBCTL0_HT B0HT_2
+#endif
+#if (flash_EBIU_AMBCTL_HT == 1)
+#define flash_EBIU_AMBCTL0_HT B0HT_1
+#endif
+#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
+#define flash_EBIU_AMBCTL0_HT B0HT_0
+#endif
+#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
+#define flash_EBIU_AMBCTL0_HT B0HT_1
+#endif
+
+#if (flash_EBIU_AMBCTL_WAT > 14)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_15
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 14)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_14
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 13)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_13
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 12)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_12
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 11)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_11
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 10)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_10
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 9)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_9
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 8)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_8
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 7)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_7
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 6)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_6
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 5)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_5
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 4)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_4
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 3)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_3
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 2)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_2
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 1)
+#define flash_EBIU_AMBCTL0_WAT B0WAT_1
+#endif
+
+#if (flash_EBIU_AMBCTL_RAT > 14)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_15
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 14)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_14
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 13)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_13
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 12)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_12
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 11)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_11
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 10)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_10
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 9)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_9
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 8)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_8
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 7)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_7
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 6)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_6
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 5)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_5
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 4)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_4
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 3)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_3
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 2)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_2
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 1)
+#define flash_EBIU_AMBCTL0_RAT B0RAT_1
+#endif
+
+#define flash_EBIU_AMBCTL0 \
+ (flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
+ flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
+#endif
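
A worked instance (illustrative numbers, not taken from any board file) of the mem_SDRRC formula above, for SCLK = 133 MHz, a 64 ms refresh period, 8192 row addresses, tRAS = 7 and tRP = 2:

#include <stdio.h>

int main(void)
{
	unsigned long sclk_hz = 133000000;	/* assumed CONFIG_SCLK_HZ */
	unsigned long tref_ms = 64;		/* SDRAM_Tref */
	unsigned long nra     = 8192;		/* SDRAM_NRA */
	unsigned long tras    = 7;		/* SDRAM_tRAS_num */
	unsigned long trp     = 2;		/* SDRAM_tRP_num */

	/* ((SCLK/1000) * Tref) / NRA - (tRAS + tRP), all integer math */
	unsigned long sdrrc = ((sclk_hz / 1000) * tref_ms) / nra - (tras + trp);

	printf("mem_SDRRC = %lu (0x%lx)\n", sdrrc, sdrrc);	/* 1030 = 0x406 */
	return 0;
}
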
diff --git a/arch/blackfin/include/asm/mem_map.h b/arch/blackfin/include/asm/mem_map.h
index 88d04a707708..e92b31051bb7 100644
--- a/arch/blackfin/include/asm/mem_map.h
+++ b/arch/blackfin/include/asm/mem_map.h
@@ -9,4 +9,79 @@
#include <mach/mem_map.h>
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+static inline ulong get_l1_scratch_start_cpu(int cpu)
+{
+ return (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
+}
+static inline ulong get_l1_code_start_cpu(int cpu)
+{
+ return (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START;
+}
+static inline ulong get_l1_data_a_start_cpu(int cpu)
+{
+ return (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
+}
+static inline ulong get_l1_data_b_start_cpu(int cpu)
+{
+ return (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
+}
+
+static inline ulong get_l1_scratch_start(void)
+{
+ return get_l1_scratch_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_code_start(void)
+{
+ return get_l1_code_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_data_a_start(void)
+{
+ return get_l1_data_a_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_data_b_start(void)
+{
+ return get_l1_data_b_start_cpu(blackfin_core_id());
+}
+
+#else /* !CONFIG_SMP */
+
+static inline ulong get_l1_scratch_start_cpu(int cpu)
+{
+ return L1_SCRATCH_START;
+}
+static inline ulong get_l1_code_start_cpu(int cpu)
+{
+ return L1_CODE_START;
+}
+static inline ulong get_l1_data_a_start_cpu(int cpu)
+{
+ return L1_DATA_A_START;
+}
+static inline ulong get_l1_data_b_start_cpu(int cpu)
+{
+ return L1_DATA_B_START;
+}
+static inline ulong get_l1_scratch_start(void)
+{
+ return get_l1_scratch_start_cpu(0);
+}
+static inline ulong get_l1_code_start(void)
+{
+ return get_l1_code_start_cpu(0);
+}
+static inline ulong get_l1_data_a_start(void)
+{
+ return get_l1_data_a_start_cpu(0);
+}
+static inline ulong get_l1_data_b_start(void)
+{
+ return get_l1_data_b_start_cpu(0);
+}
+
+#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
+
#endif /* _MEM_MAP_H_ */
diff --git a/arch/blackfin/include/asm/mmu.h b/arch/blackfin/include/asm/mmu.h
index 757e43906ed4..dbfd686360e6 100644
--- a/arch/blackfin/include/asm/mmu.h
+++ b/arch/blackfin/include/asm/mmu.h
@@ -10,7 +10,6 @@ struct sram_list_struct {
};
typedef struct {
- struct vm_list_struct *vmlist;
unsigned long end_brk;
unsigned long stack_start;
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index 35593dda2a4d..944e29faae48 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -37,6 +37,10 @@
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
+/* Note: L1 stacks are CPU-private things, so we bluntly disable this
+ feature in SMP mode, and use the per-CPU scratch SRAM bank only to
+ store the PDA instead. */
+
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
@@ -88,12 +92,15 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
struct task_struct *tsk)
{
+#ifdef CONFIG_MPU
+ unsigned int cpu = smp_processor_id();
+#endif
if (prev_mm == next_mm)
return;
#ifdef CONFIG_MPU
- if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
- flush_switched_cplbs();
- set_mask_dcplbs(next_mm->context.page_rwx_mask);
+ if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+ flush_switched_cplbs(cpu);
+ set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
}
#endif
@@ -138,9 +145,10 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr,
static inline void update_protections(struct mm_struct *mm)
{
- if (mm->context.page_rwx_mask == current_rwx_mask) {
- flush_switched_cplbs();
- set_mask_dcplbs(mm->context.page_rwx_mask);
+ unsigned int cpu = smp_processor_id();
+ if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+ flush_switched_cplbs(cpu);
+ set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
}
}
#endif
@@ -165,6 +173,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline void destroy_context(struct mm_struct *mm)
{
struct sram_list_struct *tmp;
+#ifdef CONFIG_MPU
+ unsigned int cpu = smp_processor_id();
+#endif
#ifdef CONFIG_APP_STACK_L1
if (current_l1_stack_save == mm->context.l1_stack_save)
@@ -179,8 +190,8 @@ static inline void destroy_context(struct mm_struct *mm)
kfree(tmp);
}
#ifdef CONFIG_MPU
- if (current_rwx_mask == mm->context.page_rwx_mask)
- current_rwx_mask = NULL;
+ if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
+ current_rwx_mask[cpu] = NULL;
free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h
new file mode 100644
index 000000000000..0134151656af
--- /dev/null
+++ b/arch/blackfin/include/asm/mutex-dec.h
@@ -0,0 +1,112 @@
+/*
+ * include/asm-generic/mutex-dec.h
+ *
+ * Generic implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ */
+#ifndef _ASM_GENERIC_MUTEX_DEC_H
+#define _ASM_GENERIC_MUTEX_DEC_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ fail_fn(count);
+ else
+ smp_mb();
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else {
+ smp_mb();
+ return 0;
+ }
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ smp_mb();
+ if (unlikely(atomic_inc_return(count) <= 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ /*
+ * We have two variants here. The cmpxchg-based one is the best one
+ * because it never induces a false contention state. It is included
+ * here because architectures using the inc/dec algorithms over the
+ * xchg ones are much more likely to support cmpxchg natively.
+ *
+ * If not, we fall back to the spinlock-based variant - that is
+ * just as efficient (and simpler) as a 'destructive' probing of
+ * the mutex state would be.
+ */
+#ifdef __HAVE_ARCH_CMPXCHG
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
+ smp_mb();
+ return 1;
+ }
+ return 0;
+#else
+ return fail_fn(count);
+#endif
+}
+
+#endif
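
A host-side sketch (C11 atomics standing in for the kernel's atomic_t; the slow-path stubs are placeholders) of the decrement-based fastpath documented above: count 1 means unlocked, 0 locked, negative locked with waiters, so a single atomic decrement or increment decides whether the slow path has to run at all.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;			/* 1 == unlocked */

static void slowpath_lock(void)   { puts("contended: would block"); }
static void slowpath_unlock(void) { puts("contended: would wake a waiter"); }

static void mutex_lock(void)
{
	/* atomic_dec_return(count) < 0  <=>  the old value was not 1 */
	if (atomic_fetch_sub(&count, 1) - 1 < 0)
		slowpath_lock();
}

static void mutex_unlock(void)
{
	/* atomic_inc_return(count) <= 0  <=>  someone is still waiting */
	if (atomic_fetch_add(&count, 1) + 1 <= 0)
		slowpath_unlock();
}

int main(void)
{
	mutex_lock();		/* fastpath: 1 -> 0 */
	mutex_lock();		/* 0 -> -1, takes the slow path */
	mutex_unlock();		/* -1 -> 0, wakes the waiter */
	mutex_unlock();		/* 0 -> 1, fastpath */
	return 0;
}
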
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
index 458c1f7fbc18..5d399256bf06 100644
--- a/arch/blackfin/include/asm/mutex.h
+++ b/arch/blackfin/include/asm/mutex.h
@@ -6,4 +6,67 @@
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#ifndef CONFIG_SMP
#include <asm-generic/mutex-dec.h>
+#else
+
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ fail_fn(count);
+ else
+ smp_mb();
+}
+
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else {
+ smp_mb();
+ return 0;
+ }
+}
+
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ smp_mb();
+ if (unlikely(atomic_inc_return(count) <= 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ /*
+ * We have two variants here. The cmpxchg-based one is the best one
+ * because it never induces a false contention state. It is included
+ * here because architectures using the inc/dec algorithms over the
+ * xchg ones are much more likely to support cmpxchg natively.
+ *
+ * If not, we fall back to the spinlock-based variant - that is
+ * just as efficient (and simpler) as a 'destructive' probing of
+ * the mutex state would be.
+ */
+#ifdef __HAVE_ARCH_CMPXCHG
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
+ smp_mb();
+ return 1;
+ }
+ return 0;
+#else
+ return fail_fn(count);
+#endif
+}
+
+#endif
+
+#endif
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
new file mode 100644
index 000000000000..a67142740df0
--- /dev/null
+++ b/arch/blackfin/include/asm/pda.h
@@ -0,0 +1,71 @@
+/*
+ * File: arch/blackfin/include/asm/pda.h
+ * Author: Philippe Gerum <rpm@xenomai.org>
+ *
+ * Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ASM_BLACKFIN_PDA_H
+#define _ASM_BLACKFIN_PDA_H
+
+#include <mach/anomaly.h>
+
+#ifndef __ASSEMBLY__
+
+struct blackfin_pda { /* Per-processor Data Area */
+ struct blackfin_pda *next;
+
+ unsigned long syscfg;
+#ifdef CONFIG_SMP
+ unsigned long imask; /* Current IMASK value */
+#endif
+
+ unsigned long *ipdt; /* Start of switchable I-CPLB table */
+ unsigned long *ipdt_swapcount; /* Number of swaps in ipdt */
+ unsigned long *dpdt; /* Start of switchable D-CPLB table */
+ unsigned long *dpdt_swapcount; /* Number of swaps in dpdt */
+
+ /*
+ * Single instructions can have multiple faults, which
+ * need to be handled by traps.c, in irq5. We store
+ * the exception cause to ensure we don't miss a
+ * double fault condition
+ */
+ unsigned long ex_iptr;
+ unsigned long ex_optr;
+ unsigned long ex_buf[4];
+ unsigned long ex_imask; /* Saved imask from exception */
+ unsigned long *ex_stack; /* Exception stack space */
+
+#ifdef ANOMALY_05000261
+ unsigned long last_cplb_fault_retx;
+#endif
+ unsigned long dcplb_fault_addr;
+ unsigned long icplb_fault_addr;
+ unsigned long retx;
+ unsigned long seqstat;
+ unsigned int __nmi_count; /* number of times NMI asserted on this CPU */
+};
+
+extern struct blackfin_pda cpu_pda[];
+
+void reserve_pda(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_BLACKFIN_PDA_H */
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h
index 78dd61f6b39f..797c0c165069 100644
--- a/arch/blackfin/include/asm/percpu.h
+++ b/arch/blackfin/include/asm/percpu.h
@@ -3,4 +3,14 @@
#include <asm-generic/percpu.h>
-#endif /* __ARCH_BLACKFIN_PERCPU__ */
+#ifdef CONFIG_MODULES
+#define PERCPU_MODULE_RESERVE 8192
+#else
+#define PERCPU_MODULE_RESERVE 0
+#endif
+
+#define PERCPU_ENOUGH_ROOM \
+ (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+ PERCPU_MODULE_RESERVE)
+
+#endif /* __ARCH_BLACKFIN_PERCPU__ */
diff --git a/arch/blackfin/include/asm/pgtable.h b/arch/blackfin/include/asm/pgtable.h
index f11684e4ade7..783c8f7f8f8c 100644
--- a/arch/blackfin/include/asm/pgtable.h
+++ b/arch/blackfin/include/asm/pgtable.h
@@ -29,6 +29,7 @@ typedef pte_t *pte_addr_t;
#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
+#define pgprot_noncached(prot) (prot)
extern void paging_init(void);
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index e3e9b41fa8db..0eece23b41c7 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -24,6 +24,14 @@ static inline void wrusp(unsigned long usp)
__asm__ __volatile__("usp = %0;\n\t"::"da"(usp));
}
+static inline unsigned long __get_SP(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("%0 = sp;\n\t" : "=da"(sp));
+ return sp;
+}
+
/*
* User space process size: 1st byte beyond user address space.
* Fairly meaningless on nommu. Parts of user programs can be scattered
@@ -57,6 +65,7 @@ struct thread_struct {
* pass the data segment into user programs if it exists,
* it can't hurt anything as far as I can tell
*/
+#ifndef CONFIG_SMP
#define start_thread(_regs, _pc, _usp) \
do { \
set_fs(USER_DS); \
@@ -70,6 +79,16 @@ do { \
sizeof(*L1_SCRATCH_TASK_INFO)); \
wrusp(_usp); \
} while(0)
+#else
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); \
+ (_regs)->pc = (_pc); \
+ if (current->mm) \
+ (_regs)->p5 = current->mm->start_data; \
+ wrusp(_usp); \
+} while (0)
+#endif
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -106,7 +125,8 @@ unsigned long get_wchan(struct task_struct *p);
eip; })
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-#define cpu_relax() barrier()
+#define cpu_relax() smp_mb()
+
/* Get the Silicon Revision of the chip */
static inline uint32_t __pure bfin_revid(void)
@@ -137,7 +157,11 @@ static inline uint32_t __pure bfin_revid(void)
static inline uint16_t __pure bfin_cpuid(void)
{
return (bfin_read_CHIPID() & CHIPID_FAMILY) >> 12;
+}
+static inline uint32_t __pure bfin_dspid(void)
+{
+ return bfin_read_DSPID();
}
static inline uint32_t __pure bfin_compiled_revid(void)
@@ -154,6 +178,8 @@ static inline uint32_t __pure bfin_compiled_revid(void)
return 4;
#elif defined(CONFIG_BF_REV_0_5)
return 5;
+#elif defined(CONFIG_BF_REV_0_6)
+ return 6;
#elif defined(CONFIG_BF_REV_ANY)
return 0xffff;
#else
diff --git a/arch/blackfin/include/asm/reboot.h b/arch/blackfin/include/asm/reboot.h
index 6d448b5f5985..ae1e36329bec 100644
--- a/arch/blackfin/include/asm/reboot.h
+++ b/arch/blackfin/include/asm/reboot.h
@@ -1,7 +1,7 @@
/*
- * include/asm-blackfin/reboot.h - shutdown/reboot header
+ * reboot.h - shutdown/reboot header
*
- * Copyright 2004-2007 Analog Devices Inc.
+ * Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -15,6 +15,6 @@ extern void native_machine_halt(void);
extern void native_machine_power_off(void);
/* common reboot workarounds */
-extern void bfin_gpio_reset_spi0_ssel1(void);
+extern void bfin_reset_boot_spi_cs(unsigned short pin);
#endif
diff --git a/arch/blackfin/include/asm/rwlock.h b/arch/blackfin/include/asm/rwlock.h
new file mode 100644
index 000000000000..4a724b378971
--- /dev/null
+++ b/arch/blackfin/include/asm/rwlock.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_BLACKFIN_RWLOCK_H
+#define _ASM_BLACKFIN_RWLOCK_H
+
+#define RW_LOCK_BIAS 0x01000000
+
+#endif
diff --git a/arch/blackfin/include/asm/serial.h b/arch/blackfin/include/asm/serial.h
index 994dd869558c..3a47606c858b 100644
--- a/arch/blackfin/include/asm/serial.h
+++ b/arch/blackfin/include/asm/serial.h
@@ -3,3 +3,4 @@
*/
#define SERIAL_EXTRA_IRQ_FLAGS IRQF_TRIGGER_HIGH
+#define BASE_BAUD (1843200 / 16)
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
new file mode 100644
index 000000000000..118deeeae7c0
--- /dev/null
+++ b/arch/blackfin/include/asm/smp.h
@@ -0,0 +1,44 @@
+/*
+ * File: arch/blackfin/include/asm/smp.h
+ * Author: Philippe Gerum <rpm@xenomai.org>
+ *
+ * Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ASM_BLACKFIN_SMP_H
+#define __ASM_BLACKFIN_SMP_H
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/cache.h>
+#include <asm/blackfin.h>
+#include <mach/smp.h>
+
+#define raw_smp_processor_id() blackfin_core_id()
+
+extern char coreb_trampoline_start, coreb_trampoline_end;
+
+struct corelock_slot {
+ int lock;
+};
+
+void smp_icache_flush_range_others(unsigned long start,
+ unsigned long end);
+
+#endif /* !__ASM_BLACKFIN_SMP_H */
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 64e908a50646..0249ac319476 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -1,6 +1,89 @@
#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H
-#error blackfin architecture does not support SMP spin lock yet
+#include <asm/atomic.h>
-#endif
+asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
+asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
+asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_read_lock_asm(volatile int *ptr);
+asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_write_lock_asm(volatile int *ptr);
+asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+ return __raw_spin_is_locked_asm(&lock->lock);
+}
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ __raw_spin_lock_asm(&lock->lock);
+}
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return __raw_spin_trylock_asm(&lock->lock);
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ __raw_spin_unlock_asm(&lock->lock);
+}
+
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+ while (__raw_spin_is_locked(lock))
+ cpu_relax();
+}
+
+static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+{
+ return __raw_uncached_fetch_asm(&rw->lock) > 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+{
+ return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+ __raw_read_lock_asm(&rw->lock);
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+ return __raw_read_trylock_asm(&rw->lock);
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+ __raw_read_unlock_asm(&rw->lock);
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+ __raw_write_lock_asm(&rw->lock);
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+ return __raw_write_trylock_asm(&rw->lock);
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+ __raw_write_unlock_asm(&rw->lock);
+}
+
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* !__BFIN_SPINLOCK_H */
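
The read/write lock helpers above lean on the RW_LOCK_BIAS convention; below is a single-threaded model of just the counting scheme (the real acquisition is done by the __raw_*_asm routines, which this does not reproduce): the counter starts at the bias, each reader subtracts one, and a writer subtracts the whole bias.

#include <assert.h>

#define RW_LOCK_BIAS 0x01000000

static int lock = RW_LOCK_BIAS;

static int read_can_lock(void)  { return lock > 0; }		 /* some room left */
static int write_can_lock(void) { return lock == RW_LOCK_BIAS; } /* nobody holds it */

int main(void)
{
	assert(read_can_lock() && write_can_lock());

	lock -= 1;				/* a reader takes the lock */
	assert(read_can_lock() && !write_can_lock());

	lock += 1;				/* reader releases */
	lock -= RW_LOCK_BIAS;			/* a writer takes the lock */
	assert(!read_can_lock() && !write_can_lock());

	lock += RW_LOCK_BIAS;			/* writer releases */
	assert(write_can_lock());
	return 0;
}
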
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..b1e3c4c7b382
--- /dev/null
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#include <asm/rwlock.h>
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif
diff --git a/arch/blackfin/include/asm/swab.h b/arch/blackfin/include/asm/swab.h
new file mode 100644
index 000000000000..69a051b612bd
--- /dev/null
+++ b/arch/blackfin/include/asm/swab.h
@@ -0,0 +1,48 @@
+#ifndef _BLACKFIN_SWAB_H
+#define _BLACKFIN_SWAB_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __GNUC__
+
+static __inline__ __attribute_const__ __u32 __arch_swahb32(__u32 xx)
+{
+ __u32 tmp;
+ __asm__("%1 = %0 >> 8 (V);\n\t"
+ "%0 = %0 << 8 (V);\n\t"
+ "%0 = %0 | %1;\n\t"
+ : "+d"(xx), "=&d"(tmp));
+ return xx;
+}
+#define __arch_swahb32 __arch_swahb32
+
+static __inline__ __attribute_const__ __u32 __arch_swahw32(__u32 xx)
+{
+ __u32 rv;
+ __asm__("%0 = PACK(%1.L, %1.H);\n\t": "=d"(rv): "d"(xx));
+ return rv;
+}
+#define __arch_swahw32 __arch_swahw32
+
+static __inline__ __attribute_const__ __u32 __arch_swab32(__u32 xx)
+{
+ return __arch_swahb32(__arch_swahw32(xx));
+}
+#define __arch_swab32 __arch_swab32
+
+static __inline__ __attribute_const__ __u16 __arch_swab16(__u16 xx)
+{
+ __u32 xw = xx;
+ __asm__("%0 <<= 8;\n %0.L = %0.L + %0.H (NS);\n": "+d"(xw));
+ return (__u16)xw;
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* __GNUC__ */
+
+#endif /* _BLACKFIN_SWAB_H */
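
For reference, the three 32-bit helpers above compose as follows: __arch_swahb32 swaps the two bytes inside each 16-bit halfword (vector shift/OR), __arch_swahw32 swaps the two halfwords (PACK), and __arch_swab32 is simply the composition of the two. The plain-C sketch below reproduces the same transforms with ordinary shift/mask arithmetic; it is not part of the patch, only a readable equivalent of what the Blackfin asm computes.

#include <stdint.h>

/* swap bytes within each 16-bit halfword: AABBCCDD -> BBAADDCC */
static inline uint32_t c_swahb32(uint32_t x)
{
	return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
}

/* swap the two 16-bit halfwords: AABBCCDD -> CCDDAABB */
static inline uint32_t c_swahw32(uint32_t x)
{
	return (x << 16) | (x >> 16);
}

/* full byte reversal, as in __arch_swab32: AABBCCDD -> DDCCBBAA */
static inline uint32_t c_swab32(uint32_t x)
{
	return c_swahb32(c_swahw32(x));
}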
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 8f1627d8bf09..a4c8254bec55 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -37,114 +37,98 @@
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <mach/anomaly.h>
+#include <asm/pda.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
/*
- * Interrupt configuring macros.
+ * Force strict CPU ordering.
*/
+#define nop() __asm__ __volatile__ ("nop;\n\t" : : )
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() __asm__ __volatile__ ("" : : : "memory")
+#define wmb() __asm__ __volatile__ ("" : : : "memory")
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define read_barrier_depends() do { } while(0)
-extern unsigned long irq_flags;
-
-#define local_irq_enable() \
- __asm__ __volatile__( \
- "sti %0;" \
- : \
- : "d" (irq_flags) \
- )
-
-#define local_irq_disable() \
- do { \
- int __tmp_dummy; \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=d" (__tmp_dummy) \
- ); \
- } while (0)
-
-#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
-# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
-#else
-# define NOP_PAD_ANOMALY_05000244
-#endif
-
-#define idle_with_irq_disabled() \
- __asm__ __volatile__( \
- NOP_PAD_ANOMALY_05000244 \
- ".align 8;" \
- "sti %0;" \
- "idle;" \
- : \
- : "d" (irq_flags) \
- )
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __save_and_cli(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %1;" \
- : "=&d" (x) \
- : "d" (0x3F) \
- )
-#else
-# define __save_and_cli(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=&d" (x) \
- )
-#endif
-
-#define local_save_flags(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %0;" \
- : "=d" (x) \
- )
+#ifdef CONFIG_SMP
+asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr,
+ unsigned long new, unsigned long old);
+asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr,
+ unsigned long new, unsigned long old);
+asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
+ unsigned long new, unsigned long old);
+
+#ifdef __ARCH_SYNC_CORE_DCACHE
+# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
+#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-#ifdef CONFIG_DEBUG_HWERR
-#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
#else
-#define irqs_enabled_from_flags(x) ((x) != 0x1f)
+# define smp_mb() barrier()
+# define smp_rmb() barrier()
+# define smp_wmb() barrier()
+#define smp_read_barrier_depends() barrier()
#endif
-#define local_irq_restore(x) \
- do { \
- if (irqs_enabled_from_flags(x)) \
- local_irq_enable(); \
- } while (0)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
+{
+ unsigned long tmp;
-/* For spinlocks etc */
-#define local_irq_save(x) __save_and_cli(x)
+ switch (size) {
+ case 1:
+ tmp = __raw_xchg_1_asm(ptr, x);
+ break;
+ case 2:
+ tmp = __raw_xchg_2_asm(ptr, x);
+ break;
+ case 4:
+ tmp = __raw_xchg_4_asm(ptr, x);
+ break;
+ }
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- !irqs_enabled_from_flags(flags); \
-})
+ return tmp;
+}
/*
- * Force strict CPU ordering.
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
*/
-#define nop() asm volatile ("nop;\n\t"::)
-#define mb() asm volatile ("" : : :"memory")
-#define rmb() asm volatile ("" : : :"memory")
-#define wmb() asm volatile ("" : : :"memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long tmp;
-#define read_barrier_depends() do { } while(0)
+ switch (size) {
+ case 1:
+ tmp = __raw_cmpxchg_1_asm(ptr, new, old);
+ break;
+ case 2:
+ tmp = __raw_cmpxchg_2_asm(ptr, new, old);
+ break;
+ case 4:
+ tmp = __raw_cmpxchg_4_asm(ptr, new, old);
+ break;
+ }
+
+ return tmp;
+}
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+
+#else /* !CONFIG_SMP */
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
-#endif
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
struct __xchg_dummy {
unsigned long a[100];
@@ -157,7 +141,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
unsigned long tmp = 0;
unsigned long flags = 0;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
switch (size) {
case 1:
@@ -179,7 +163,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
}
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return tmp;
}
@@ -194,9 +178,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
-#endif
+
+#endif /* !CONFIG_SMP */
+
+#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define tas(ptr) ((void)xchg((ptr), 1))
#define prepare_to_switch() do { } while(0)
@@ -205,10 +192,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* ptr isn't the current task, in which case it does nothing.
*/
-#include <asm/blackfin.h>
+#include <asm/l1layout.h>
+#include <asm/mem_map.h>
asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);
+#ifndef CONFIG_SMP
#define switch_to(prev,next,last) \
do { \
memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
@@ -217,5 +206,11 @@ do { \
sizeof *L1_SCRATCH_TASK_INFO); \
(last) = resume (prev, next); \
} while (0)
+#else
+#define switch_to(prev, next, last) \
+do { \
+ (last) = resume(prev, next); \
+} while (0)
+#endif
-#endif /* _BLACKFIN_SYSTEM_H */
+#endif /* _BLACKFIN_SYSTEM_H */
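
The cmpxchg()/xchg() pair defined in this header is normally consumed through a retry loop: read the current value, compute the new one, try to install it, and repeat if another core raced in between. A minimal sketch of that pattern is below; cmpxchg() is the macro from this file, while example_add_unless() is an invented illustrative helper, not something added by the patch.

/* add "a" to *v unless *v already equals "u"; returns non-zero if the
 * add was performed (atomic_add_unless-style retry loop built on the
 * cmpxchg() defined above)
 */
static inline int example_add_unless(volatile int *v, int a, int u)
{
	int old, prev;

	do {
		old = *v;
		if (old == u)
			return 0;                  /* hit the excluded value */
		prev = cmpxchg(v, old, old + a);   /* try to install old + a */
	} while (prev != old);                     /* raced: reload and retry */

	return 1;
}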
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 642769329d12..e721ce55956c 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -44,6 +44,7 @@
*/
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE 8192 /* 2 pages */
+#define STACK_WARN (THREAD_SIZE/8)
#ifndef __ASSEMBLY__
@@ -62,7 +63,9 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* address limit */
struct restart_block restart_block;
+#ifndef CONFIG_SMP
struct l1_scratch_task_info l1_task_info;
+#endif
};
/*
@@ -90,7 +93,7 @@ __attribute_const__
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- __asm__("%0 = sp;": "=&d"(ti):
+ __asm__("%0 = sp;" : "=da"(ti) :
);
return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1));
}
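
The rewritten current_thread_info() relies on struct thread_info sitting at the base of the THREAD_SIZE-aligned kernel stack, so masking the stack pointer with ~(THREAD_SIZE - 1) lands on it. A small, self-contained illustration of that mask with the 8 KiB stacks defined above (the stack-pointer value is made up):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192	/* 2 pages, as in thread_info.h */

int main(void)
{
	/* hypothetical stack pointer somewhere inside a task's stack */
	uintptr_t sp = 0x00543e7c;

	/* same mask the inline-asm result is fed through */
	uintptr_t ti = sp & ~((uintptr_t)THREAD_SIZE - 1);

	/* prints: sp=0x543e7c -> thread_info=0x542000 */
	printf("sp=%#lx -> thread_info=%#lx\n",
	       (unsigned long)sp, (unsigned long)ti);
	return 0;
}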
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index d928b8099056..3248033531e6 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -149,54 +149,42 @@ static inline int bad_user_access_length(void)
: /* no outputs */ \
:"d" (x),"a" (__ptr(p)) : "memory")
-#define get_user(x,p) \
- ({ \
- int _err = 0; \
- typeof(*(p)) *_p = (p); \
- if (!access_ok(VERIFY_READ, _p, sizeof(*(_p)))) { \
- _err = -EFAULT; \
- } \
- else { \
- switch (sizeof(*(_p))) { \
- case 1: \
- __get_user_asm(x, _p, B,(Z)); \
- break; \
- case 2: \
- __get_user_asm(x, _p, W,(Z)); \
- break; \
- case 4: \
- __get_user_asm(x, _p, , ); \
- break; \
- case 8: { \
- unsigned long _xl, _xh; \
- __get_user_asm(_xl, ((unsigned long *)_p)+0, , ); \
- __get_user_asm(_xh, ((unsigned long *)_p)+1, , ); \
- ((unsigned long *)&x)[0] = _xl; \
- ((unsigned long *)&x)[1] = _xh; \
- } break; \
- default: \
- x = 0; \
- printk(KERN_INFO "get_user_bad: %s:%d %s\n", \
- __FILE__, __LINE__, __func__); \
- _err = __get_user_bad(); \
- break; \
- } \
- } \
- _err; \
- })
+#define get_user(x, ptr) \
+({ \
+ int _err = 0; \
+ unsigned long _val = 0; \
+ const typeof(*(ptr)) __user *_p = (ptr); \
+ const size_t ptr_size = sizeof(*(_p)); \
+ if (likely(access_ok(VERIFY_READ, _p, ptr_size))) { \
+ BUILD_BUG_ON(ptr_size >= 8); \
+ switch (ptr_size) { \
+ case 1: \
+ __get_user_asm(_val, _p, B,(Z)); \
+ break; \
+ case 2: \
+ __get_user_asm(_val, _p, W,(Z)); \
+ break; \
+ case 4: \
+ __get_user_asm(_val, _p, , ); \
+ break; \
+ } \
+ } else \
+ _err = -EFAULT; \
+ x = (typeof(*(ptr)))_val; \
+ _err; \
+})
#define __get_user(x,p) get_user(x,p)
#define __get_user_bad() (bad_user_access_length(), (-EFAULT))
-#define __get_user_asm(x,p,bhw,option) \
- { \
- unsigned long _tmp; \
- __asm__ ("%0 =" #bhw "[%1]"#option";\n\t" \
- : "=d" (_tmp) \
- : "a" (__ptr(p))); \
- (x) = (__typeof__(*(p))) _tmp; \
- }
+#define __get_user_asm(x, ptr, bhw, option) \
+({ \
+ __asm__ __volatile__ ( \
+ "%0 =" #bhw "[%1]" #option ";" \
+ : "=d" (x) \
+ : "a" (__ptr(ptr))); \
+})
#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
@@ -209,8 +197,8 @@ static inline int bad_user_access_length(void)
#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\
return retval; })
-static inline long copy_from_user(void *to,
- const void __user * from, unsigned long n)
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
memcpy(to, from, n);
@@ -219,8 +207,8 @@ static inline long copy_from_user(void *to,
return 0;
}
-static inline long copy_to_user(void *to,
- const void __user * from, unsigned long n)
+static inline unsigned long __must_check
+copy_to_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
memcpy(to, from, n);
@@ -233,8 +221,8 @@ static inline long copy_to_user(void *to,
* Copy a null terminated string from userspace.
*/
-static inline long strncpy_from_user(char *dst,
- const char *src, long count)
+static inline long __must_check
+strncpy_from_user(char *dst, const char *src, long count)
{
char *tmp;
if (!access_ok(VERIFY_READ, src, 1))
@@ -260,7 +248,8 @@ static inline long strnlen_user(const char *src, long n)
* Zero Userspace
*/
-static inline unsigned long __clear_user(void *to, unsigned long n)
+static inline unsigned long __must_check
+__clear_user(void *to, unsigned long n)
{
memset(to, 0, n);
return 0;
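
With the __must_check annotations added above, callers are expected to test the return values: get_user() yields 0 on success or -EFAULT, and copy_to_user()/copy_from_user() return the number of bytes that could not be copied. The sketch below shows that calling convention; the two example_* functions are invented for illustration, only the uaccess calls come from this header.

/* illustrative only: how the annotated helpers are meant to be called */
static long example_get_value(unsigned int __user *uptr, unsigned int *out)
{
	unsigned int tmp;

	if (get_user(tmp, uptr))		/* 0 on success, -EFAULT on a bad pointer */
		return -EFAULT;
	*out = tmp;
	return 0;
}

static long example_put_value(unsigned int __user *uptr, unsigned int val)
{
	if (copy_to_user(uptr, &val, sizeof(val)))	/* non-zero means a short copy */
		return -EFAULT;
	return 0;
}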
diff --git a/arch/blackfin/include/asm/xor.h b/arch/blackfin/include/asm/xor.h
new file mode 100644
index 000000000000..c82eb12a5b18
--- /dev/null
+++ b/arch/blackfin/include/asm/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>