Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--  arch/blackfin/mach-common/Makefile         |  3
-rw-r--r--  arch/blackfin/mach-common/arch_checks.c    |  4
-rw-r--r--  arch/blackfin/mach-common/cache-c.c        | 14
-rw-r--r--  arch/blackfin/mach-common/cpufreq.c        |  2
-rw-r--r--  arch/blackfin/mach-common/entry.S          | 25
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c  | 51
-rw-r--r--  arch/blackfin/mach-common/irqpanic.c       | 11
-rw-r--r--  arch/blackfin/mach-common/pm.c             |  4
-rw-r--r--  arch/blackfin/mach-common/smp.c            | 34
9 files changed, 89 insertions(+), 59 deletions(-)
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index 1f3228ed713f..dd8b2dc97f56 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -4,7 +4,7 @@
obj-y := \
cache.o cache-c.o entry.o head.o \
- interrupt.o irqpanic.o arch_checks.o ints-priority.o
+ interrupt.o arch_checks.o ints-priority.o
obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o
obj-$(CONFIG_PM) += pm.o dpmc_modes.o
@@ -12,3 +12,4 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
+obj-$(CONFIG_DEBUG_ICACHE_CHECK) += irqpanic.o
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index da93d9207165..5998d8632a73 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -74,7 +74,7 @@
/* if 220 exists, can not set External Memory WB and L2 not_cached, either External Memory not_cached and L2 WB */
#if ANOMALY_05000220 && \
- ((defined(CONFIG_BFIN_WB) && defined(CONFIG_BFIN_L2_NOT_CACHED)) || \
- (!defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_L2_WB)))
+ ((defined(CONFIG_BFIN_EXTMEM_WRITEBACK) && !defined(CONFIG_BFIN_L2_DCACHEABLE)) || \
+ (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
#endif
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index e6ab1f815123..b59ce3cb3807 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -16,9 +16,21 @@
void blackfin_invalidate_entire_dcache(void)
{
u32 dmem = bfin_read_DMEM_CONTROL();
- SSYNC();
bfin_write_DMEM_CONTROL(dmem & ~0xc);
SSYNC();
bfin_write_DMEM_CONTROL(dmem);
SSYNC();
}
+
+/* Invalidate the Entire Instruction cache by
+ * clearing IMC bit
+ */
+void blackfin_invalidate_entire_icache(void)
+{
+ u32 imem = bfin_read_IMEM_CONTROL();
+ bfin_write_IMEM_CONTROL(imem & ~0x4);
+ SSYNC();
+ bfin_write_IMEM_CONTROL(imem);
+ SSYNC();
+}
+
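Note on the cache-c.c change: the new icache routine mirrors the existing dcache one. Each briefly clears the cache-enable bits in its *MEM_CONTROL MMR (0xc for DMC, 0x4 for IMC), SSYNCs, and writes the saved value back, which invalidates the entire cache. Purely as an illustrative sketch of that shared shape (this helper is not part of the patch, and it assumes the MMR accessors can be passed as functions, whereas in the real tree they are macros):

	/*
	 * Illustrative sketch only -- not part of the patch above.
	 * Both blackfin_invalidate_entire_dcache() and the new
	 * blackfin_invalidate_entire_icache() follow this pattern:
	 * momentarily clear the cache-enable bits in the relevant
	 * *MEM_CONTROL MMR, then restore the saved value, with an
	 * SSYNC() after each write.
	 */
	static inline void invalidate_by_toggle(u32 (*read_ctrl)(void),
						void (*write_ctrl)(u32),
						u32 enable_mask)
	{
		u32 ctrl = read_ctrl();

		write_ctrl(ctrl & ~enable_mask);	/* drop the enable bits */
		SSYNC();
		write_ctrl(ctrl);			/* restore previous state */
		SSYNC();
	}
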
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 70e3411f558c..85c658083279 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -141,7 +141,7 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
sclk = get_sclk() / 1000;
#if ANOMALY_05000273 || ANOMALY_05000274 || \
- (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
+ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index da0558ad1b1a..fb1795d5be2a 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -42,6 +42,7 @@
#include <asm/thread_info.h> /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
+#include <asm/traps.h>
#include <asm/context.S>
@@ -84,13 +85,15 @@ ENTRY(_ex_workaround_261)
if !cc jump _bfin_return_from_exception;
/* fall through */
R7 = P4;
- R6 = 0x26; /* Data CPLB Miss */
+ R6 = VEC_CPLB_M; /* Data CPLB Miss */
cc = R6 == R7;
if cc jump _ex_dcplb_miss (BP);
- R6 = 0x23; /* Data CPLB Miss */
+#ifdef CONFIG_MPU
+ R6 = VEC_CPLB_VL; /* Data CPLB Violation */
cc = R6 == R7;
if cc jump _ex_dcplb_viol (BP);
- /* Handle 0x23 Data CPLB Protection Violation
+#endif
+ /* Handle Data CPLB Protection Violation
* and Data CPLB Multiple Hits - Linux Trap Zero
*/
jump _ex_trap_c;
@@ -215,7 +218,7 @@ ENTRY(_ex_single_step)
/* Single stepping only a single instruction, so clear the trace
* bit here. */
r7 = syscfg;
- bitclr (r7, 0);
+ bitclr (r7, SYSCFG_SSSTEP_P);
syscfg = R7;
jump _ex_trap_c;
@@ -248,7 +251,7 @@ ENTRY(_ex_single_step)
if !cc jump _bfin_return_from_exception;
r7 = syscfg;
- bitclr (r7, 0);
+ bitclr (r7, SYSCFG_SSSTEP_P); /* Turn off single step */
syscfg = R7;
/* Fall through to _bfin_return_from_exception. */
@@ -270,7 +273,7 @@ ENTRY(_bfin_return_from_exception)
r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6;
- r6 = 0x25;
+ r6 = VEC_UNCOV;
CC = R7 == R6;
if CC JUMP _double_fault;
#endif
@@ -339,9 +342,11 @@ ENTRY(_ex_trap_c)
r6 = retx;
[p5 + PDA_RETX] = r6;
#endif
+ /* Save the state of single stepping */
r6 = SYSCFG;
[p5 + PDA_SYSCFG] = r6;
- BITCLR(r6, 0);
+ /* Clear it while we handle the exception in IRQ5 mode */
+ BITCLR(r6, SYSCFG_SSSTEP_P);
SYSCFG = r6;
/* Disable all interrupts, but make sure level 5 is enabled so
@@ -364,7 +369,7 @@ ENDPROC(_ex_trap_c)
* exception. This is a unrecoverable event, so crash.
* Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
*/
-_double_fault:
+ENTRY(_double_fault)
/* Turn caches & protection off, to ensure we don't get any more
* double exceptions
*/
@@ -869,7 +874,7 @@ ENTRY(_ret_from_exception)
raise 15; /* raise evt15 to do signal or reschedule */
4:
r0 = syscfg;
- bitclr(r0, 0);
+ bitclr(r0, SYSCFG_SSSTEP_P); /* Turn off single step */
syscfg = r0;
5:
rts;
@@ -1605,6 +1610,8 @@ ENTRY(_sys_call_table)
.long _sys_inotify_init1 /* 365 */
.long _sys_preadv
.long _sys_pwritev
+ .long _sys_rt_tgsigqueueinfo
+ .long _sys_perf_counter_open
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 351afd0e36d8..b42150190d0e 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -472,7 +472,7 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
if (type == IRQ_TYPE_PROBE) {
/* only probe unenabled GPIO interrupt lines */
- if (__test_bit(gpionr, gpio_enabled))
+ if (test_bit(gpionr, gpio_enabled))
return 0;
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
@@ -782,7 +782,7 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
if (type == IRQ_TYPE_PROBE) {
/* only probe unenabled GPIO interrupt lines */
- if (__test_bit(gpionr, gpio_enabled))
+ if (test_bit(gpionr, gpio_enabled))
return 0;
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
@@ -1052,35 +1052,34 @@ int __init init_arch_irq(void)
set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
- case IRQ_TIMER0:
- set_irq_handler(irq, handle_percpu_irq);
- break;
-#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
set_irq_handler(irq, handle_percpu_irq);
break;
#endif
- default:
#ifdef CONFIG_IPIPE
- /*
- * We want internal interrupt sources to be
- * masked, because ISRs may trigger interrupts
- * recursively (e.g. DMA), but interrupts are
- * _not_ masked at CPU level. So let's handle
- * most of them as level interrupts, except
- * the timer interrupt which is special.
- */
- if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
- set_irq_handler(irq, handle_simple_irq);
- else
- set_irq_handler(irq, handle_level_irq);
+#ifndef CONFIG_TICKSOURCE_CORETMR
+ case IRQ_TIMER0:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+#endif /* !CONFIG_TICKSOURCE_CORETMR */
+ case IRQ_CORETMR:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+ default:
+ set_irq_handler(irq, handle_level_irq);
+ break;
#else /* !CONFIG_IPIPE */
+#ifdef CONFIG_TICKSOURCE_GPTMR0
+ case IRQ_TIMER0:
+ set_irq_handler(irq, handle_percpu_irq);
+ break;
+#endif /* CONFIG_TICKSOURCE_GPTMR0 */
+ default:
set_irq_handler(irq, handle_simple_irq);
-#endif /* !CONFIG_IPIPE */
break;
+#endif /* !CONFIG_IPIPE */
}
}
@@ -1224,15 +1223,14 @@ __attribute__((l1_text))
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
- struct ipipe_domain *this_domain = ipipe_current_domain;
+ struct ipipe_domain *this_domain = __ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
int irq, s;
- if (likely(vec == EVT_IVTMR_P)) {
+ if (likely(vec == EVT_IVTMR_P))
irq = IRQ_CORETMR;
-
- } else {
+ else {
#if defined(SIC_ISR0) || defined(SICA_ISR0)
unsigned long sic_status[3];
@@ -1262,12 +1260,11 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
break;
}
#endif
-
irq = ivg->irqno;
}
if (irq == IRQ_SYSTMR) {
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
/* This is basically what we need from the register frame. */
diff --git a/arch/blackfin/mach-common/irqpanic.c b/arch/blackfin/mach-common/irqpanic.c
index 05004df0f78b..883e3241b17e 100644
--- a/arch/blackfin/mach-common/irqpanic.c
+++ b/arch/blackfin/mach-common/irqpanic.c
@@ -30,21 +30,17 @@
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
-#include <asm/traps.h>
#include <asm/blackfin.h>
-#ifdef CONFIG_DEBUG_ICACHE_CHECK
#define L1_ICACHE_START 0xffa10000
#define L1_ICACHE_END 0xffa13fff
-void irq_panic(int reason, struct pt_regs *regs) __attribute__ ((l1_text));
-#endif
/*
* irq_panic - calls panic with string setup
*/
+__attribute__ ((l1_text))
asmlinkage void irq_panic(int reason, struct pt_regs *regs)
{
-#ifdef CONFIG_DEBUG_ICACHE_CHECK
unsigned int cmd, tag, ca, cache_hi, cache_lo, *pa;
unsigned short i, j, die;
unsigned int bad[10][6];
@@ -126,9 +122,6 @@ asmlinkage void irq_panic(int reason, struct pt_regs *regs)
bad[j][3], bad[j][4], bad[j][5]);
}
panic("icache coherency error");
- } else {
+ } else
printk(KERN_EMERG "icache checked, and OK\n");
- }
-#endif
-
}
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index bce5a84be49f..9e7e27b7fc8d 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -132,7 +132,7 @@ int bf53x_resume_l1_mem(unsigned char *memptr)
return 0;
}
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
static void flushinv_all_dcache(void)
{
u32 way, bank, subbank, set;
@@ -175,7 +175,7 @@ static inline void dcache_disable(void)
#ifdef CONFIG_BFIN_DCACHE
unsigned long ctrl;
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
#endif
SSYNC();
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 3b8ebaee77f2..349ee3f5466a 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -144,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
- struct ipi_message *msg, *mg;
+ struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
@@ -154,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
msg_queue->count++;
spin_lock(&msg_queue->lock);
- list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+ while (!list_empty(&msg_queue->head)) {
+ msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list);
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
@@ -210,6 +211,8 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
return 0;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
INIT_LIST_HEAD(&msg->list);
msg->call_struct.func = func;
msg->call_struct.info = info;
@@ -221,7 +224,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
@@ -251,6 +254,8 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
cpu_set(cpu, callmap);
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
INIT_LIST_HEAD(&msg->list);
msg->call_struct.func = func;
msg->call_struct.info = info;
@@ -261,7 +266,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -286,13 +291,15 @@ void smp_send_reschedule(int cpu)
return;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
memset(msg, 0, sizeof(msg));
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_RESCHEDULE;
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -313,6 +320,8 @@ void smp_send_stop(void)
return;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
memset(msg, 0, sizeof(msg));
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_CPU_STOP;
@@ -320,7 +329,7 @@ void smp_send_stop(void)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
@@ -449,7 +458,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
unsigned int cpu;
for_each_online_cpu(cpu)
- bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+ bogosum += loops_per_jiffy;
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
@@ -468,6 +477,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
+#ifdef __ARCH_SYNC_CORE_ICACHE
+void resync_core_icache(void)
+{
+ unsigned int cpu = get_cpu();
+ blackfin_invalidate_entire_icache();
+ ++per_cpu(cpu_data, cpu).icache_invld_count;
+ put_cpu();
+}
+EXPORT_SYMBOL(resync_core_icache);
+#endif
+
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
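
Note on the smp.c hunks: they all follow one pattern -- check the GFP_ATOMIC allocation before touching it, and queue IPI messages with list_add_tail() so the receiving CPU's ipi_handler(), which now drains the queue with list_empty()/list_entry(), sees them in FIFO order. A minimal sketch of that enqueue pattern, reusing the ipi_message/ipi_msg_queue structures and platform_send_ipi_cpu() shown in the diff (the ipi_enqueue() helper itself is hypothetical, not part of the patch):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/percpu.h>

	/* Sketch only: enqueue one IPI message for 'cpu' in FIFO order. */
	static int ipi_enqueue(unsigned int cpu, unsigned int type)
	{
		struct ipi_message_queue *msg_queue;
		struct ipi_message *msg;
		unsigned long flags;

		msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
		if (!msg)
			return -ENOMEM;		/* never dereference a failed alloc */

		memset(msg, 0, sizeof(*msg));
		INIT_LIST_HEAD(&msg->list);
		msg->type = type;

		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);	/* FIFO, not LIFO */
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
		return 0;
	}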