Diffstat (limited to 'arch/powerpc/platforms')

 arch/powerpc/platforms/cell/axon_msi.c   | 39 ++++++++++++++++++++++++++++++++++-----
 arch/powerpc/platforms/cell/smp.c        |  9 +++++++--
 arch/powerpc/platforms/cell/spufs/file.c |  3 +++
 3 files changed, 44 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 896548ba1ca1..0ce45c2b42f8 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -95,6 +95,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	struct axon_msic *msic = get_irq_data(irq);
 	u32 write_offset, msi;
 	int idx;
+	int retry = 0;
 
 	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
 	pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
@@ -102,7 +103,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	/* write_offset doesn't wrap properly, so we have to mask it */
 	write_offset &= MSIC_FIFO_SIZE_MASK;
 
-	while (msic->read_offset != write_offset) {
+	while (msic->read_offset != write_offset && retry < 100) {
 		idx = msic->read_offset / sizeof(__le32);
 		msi = le32_to_cpu(msic->fifo_virt[idx]);
 		msi &= 0xFFFF;
@@ -110,13 +111,37 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_debug("axon_msi: woff %x roff %x msi %x\n",
 			 write_offset, msic->read_offset, msi);
 
+		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+			generic_handle_irq(msi);
+			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
+		} else {
+			/*
+			 * Reading the MSIC_WRITE_OFFSET_REG does not
+			 * reliably flush the outstanding DMA to the
+			 * FIFO buffer. Here we were reading stale
+			 * data, so we need to retry.
+			 */
+			udelay(1);
+			retry++;
+			pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+			continue;
+		}
+
+		if (retry) {
+			pr_debug("axon_msi: late irq 0x%x, retry %d\n",
+				 msi, retry);
+			retry = 0;
+		}
+
 		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
 		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+	}
 
-		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
-			generic_handle_irq(msi);
-		else
-			pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+	if (retry) {
+		printk(KERN_WARNING "axon_msi: irq timed out\n");
+
+		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
 	}
 
 	desc->chip->eoi(irq);
@@ -364,6 +389,7 @@ static int axon_msi_probe(struct of_device *device,
 			dn->full_name);
 		goto out_free_fifo;
 	}
+	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
 	msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
 					NR_IRQS, &msic_host_ops, 0);
@@ -387,6 +413,9 @@ static int axon_msi_probe(struct of_device *device,
 			MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
 			MSIC_CTRL_FIFO_SIZE);
 
+	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
+				& MSIC_FIFO_SIZE_MASK;
+
 	device->dev.platform_data = msic;
 
 	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
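
The axon_msi.c change works around the fact that reading MSIC_WRITE_OFFSET_REG does not reliably flush the MSIC's outstanding DMA into the FIFO, so the handler can observe a write offset that runs ahead of the data actually visible in memory. The patch makes stale slots detectable by filling the FIFO with a 0xffffffff sentinel at probe time and re-arming each slot after it is consumed, then retries a bounded number of times before giving up. Below is a minimal, self-contained sketch of that drain-with-bounded-retry pattern in plain C; the names (fifo, FIFO_ENTRIES, INVALID_ENTRY, handle_msi) are hypothetical stand-ins rather than the kernel's API.

	#include <stdint.h>
	#include <stdio.h>

	#define FIFO_ENTRIES	256
	#define INVALID_ENTRY	0xffffffffu	/* sentinel stored in idle slots */
	#define MAX_RETRIES	100

	static uint32_t fifo[FIFO_ENTRIES];	/* stands in for msic->fifo_virt */

	/* Hypothetical consumer; the kernel calls generic_handle_irq() here. */
	static void handle_msi(uint32_t msi)
	{
		printf("handling MSI %u\n", msi);
	}

	/*
	 * Drain entries between read_idx and write_idx.  A slot that still
	 * holds the sentinel means the producer's write has not landed yet:
	 * spin on that slot instead of advancing, but give up after
	 * MAX_RETRIES so a wedged FIFO cannot trap us here forever.
	 */
	static unsigned int drain_fifo(unsigned int read_idx, unsigned int write_idx)
	{
		int retry = 0;

		while (read_idx != write_idx && retry < MAX_RETRIES) {
			uint32_t entry = fifo[read_idx];

			if (entry == INVALID_ENTRY) {
				retry++;		/* stale read, try again */
				continue;
			}

			handle_msi(entry & 0xFFFF);
			fifo[read_idx] = INVALID_ENTRY;	/* re-arm the slot */
			retry = 0;

			read_idx = (read_idx + 1) % FIFO_ENTRIES;
		}

		if (retry)		/* timed out: skip the entry that never arrived */
			read_idx = (read_idx + 1) % FIFO_ENTRIES;

		return read_idx;
	}

The sentinel is also why the probe path gains the memset() to 0xff and why read_offset is now initialised from MSIC_WRITE_OFFSET_REG: the consumer must start in sync with the hardware, and must never mistake a never-written or already-consumed slot for a fresh MSI.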
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index c0d86e1f56ea..9046803c8276 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -129,10 +129,15 @@ static int __init smp_iic_probe(void)
 	return cpus_weight(cpu_possible_map);
 }
 
-static void __devinit smp_iic_setup_cpu(int cpu)
+static void __devinit smp_cell_setup_cpu(int cpu)
 {
 	if (cpu != boot_cpuid)
 		iic_setup_cpu();
+
+	/*
+	 * change default DABRX to allow user watchpoints
+	 */
+	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
 }
 
 static DEFINE_SPINLOCK(timebase_lock);
@@ -192,7 +197,7 @@ static struct smp_ops_t bpa_iic_smp_ops = {
 	.message_pass	= smp_iic_message_pass,
 	.probe		= smp_iic_probe,
 	.kick_cpu	= smp_cell_kick_cpu,
-	.setup_cpu	= smp_iic_setup_cpu,
+	.setup_cpu	= smp_cell_setup_cpu,
 	.cpu_bootable	= smp_cell_cpu_bootable,
 };
 
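
For background, DABRX (the Data Address Breakpoint eXtension register) qualifies the privilege states in which a watchpoint armed through DABR may match; with only DABRX_KERNEL set, hardware watchpoints never fire on user-mode accesses, which is why each CPU now enables both states as it comes up. A hedged sketch of the idea follows; in the kernel the constants come from <asm/reg.h>, and the SPR number and mtspr wrapper below are illustrative assumptions, not the kernel's definitions.

	#define SPRN_DABRX	0x3F7		/* assumed SPR number, for illustration */
	#define DABRX_USER	(1UL << 0)	/* match in user (problem) state */
	#define DABRX_KERNEL	(1UL << 1)	/* match in privileged state */

	/* mtspr encodes the SPR number as an immediate in the instruction */
	#define mtspr(rn, v)	asm volatile("mtspr %0,%1" : : "i" (rn), "r" (v))

	/* Runs once on every CPU at bring-up (boot CPU included), so all
	 * cores honour user-mode watchpoints consistently. */
	static void cell_enable_user_watchpoints(void)
	{
		mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
	}

Note that unlike the iic_setup_cpu() call it sits next to, the mtspr is not guarded by the boot_cpuid check, so the boot CPU is covered too; the rename from smp_iic_setup_cpu() to smp_cell_setup_cpu() reflects that the hook is no longer purely about IIC setup.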
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b73c369cc6f1..1b26071a86ca 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -390,6 +390,9 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 	if (offset >= ps_size)
 		return VM_FAULT_SIGBUS;
 
+	if (fatal_signal_pending(current))
+		return VM_FAULT_SIGBUS;
+
 	/*
 	 * Because we release the mmap_sem, the context may be destroyed while
 	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
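
The spufs change follows a standard guard pattern for fault handlers that may sleep for a long time: if a fatal signal is already pending, entering spu_wait() would only delay the task's death, so the fault is failed immediately instead, and the task never observes the SIGBUS because the fatal signal is delivered first. A minimal sketch of the pattern, with hypothetical stand-ins for the kernel's fault-path API (the VM_FAULT_* values mirror the kernel's but are shown here only for illustration):

	#include <stdbool.h>

	#define VM_FAULT_SIGBUS	0x0002
	#define VM_FAULT_NOPAGE	0x0100

	extern bool fatal_signal_pending_stub(void);	/* stands in for fatal_signal_pending(current) */
	extern int wait_for_hw(void);			/* stands in for spu_wait(); may sleep indefinitely */

	static int ps_fault_sketch(unsigned long offset, unsigned long ps_size)
	{
		if (offset >= ps_size)
			return VM_FAULT_SIGBUS;

		/*
		 * The task has been fatally signalled: do not start a
		 * potentially unbounded sleep on its behalf; fail the
		 * fault so the caller unwinds and the signal can kill
		 * the task promptly.
		 */
		if (fatal_signal_pending_stub())
			return VM_FAULT_SIGBUS;

		if (wait_for_hw())
			return VM_FAULT_SIGBUS;

		return VM_FAULT_NOPAGE;
	}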