From 606572634c3faa5b32a8fc430266e6e9d78d2179 Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Tue, 11 Nov 2008 10:22:22 +1100
Subject: powerpc/spufs: Fix spinning in spufs_ps_fault on signal

Currently, we can end up in an infinite loop if we get a signal
while the kernel has faulted in spufs_ps_fault. Eg:

  alarm(1);

  write(fd, some_spu_psmap_register_address, 4);

- the write's copy_from_user will fault on the ps mapping, and
signal_pending will be non-zero. Because returning from the fault
handler will never clear TIF_SIGPENDING, we'll just keep faulting,
resulting in an unkillable process using 100% of CPU.

This change returns VM_FAULT_SIGBUS if there's a fatal signal pending,
letting us escape the loop.

Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/file.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/powerpc/platforms/cell')

diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b73c369cc6f..1b26071a86c 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -390,6 +390,9 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 	if (offset >= ps_size)
 		return VM_FAULT_SIGBUS;
 
+	if (fatal_signal_pending(current))
+		return VM_FAULT_SIGBUS;
+
 	/*
 	 * Because we release the mmap_sem, the context may be destroyed while
 	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
--
cgit v1.2.3
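As an illustration of the failure mode this commit fixes, a minimal
userspace reproducer along the lines of the commit message might look
like the sketch below. This is a hypothetical sketch, not a tested case:
the spufs psmap path and the mapping size are illustrative assumptions,
and a real test would map the problem-state area of an SPU context
created through spufs.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical paths; a real test creates an SPU context in spufs */
	int out = open("/tmp/scratch", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	int ps  = open("/spu/ctx1/psmap", O_RDWR);
	/* 0x20000 is an assumed size for the problem-state mapping */
	char *regs = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
			  MAP_SHARED, ps, 0);

	alarm(1);	/* queue SIGALRM while the write below is faulting */

	/*
	 * copy_from_user() in the write path faults on the ps mapping;
	 * before the fix, a pending signal made spufs_ps_fault repeat
	 * forever instead of letting the signal be delivered.
	 */
	write(out, regs, 4);
	return 0;
}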
From d015fe9951641b2d869a7ae4a690be2a05a9dc7f Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 28 Nov 2008 09:51:22 +0000
Subject: powerpc/cell/axon-msi: Retry on missing interrupt

The MSI capture logic for the axon bridge can sometimes lose
interrupts in case of high DMA and interrupt load, when it signals
an MSI interrupt to the MPIC interrupt controller while we are
already handling another MSI.

Each MSI vector gets written into a FIFO buffer in main memory using
DMA, and that DMA access is normally flushed by the actual interrupt
packet on the IOIF. An MMIO register in the MSIC holds the position
of the last entry in the FIFO buffer that was written. However,
reading that position does not flush the DMA, so that we can observe
stale data in the buffer. In a stress test, we have observed the DMA
to arrive up to 14 microseconds after reading the register.

This patch works around this problem by retrying the access to the
FIFO buffer. We can reliably detect the condition by writing an
invalid MSI vector into the FIFO buffer after reading from it,
assuming that all MSIs we get are valid. After detecting an invalid
MSI vector, we udelay(1) in the interrupt cascade for up to 100 times
before giving up.

Signed-off-by: Arnd Bergmann
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/cell/axon_msi.c | 36 +++++++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 5 deletions(-)

(limited to 'arch/powerpc/platforms/cell')

diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 896548ba1ca..442cf36aa17 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -95,6 +95,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	struct axon_msic *msic = get_irq_data(irq);
 	u32 write_offset, msi;
 	int idx;
+	int retry = 0;
 
 	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
 	pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
@@ -102,7 +103,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	/* write_offset doesn't wrap properly, so we have to mask it */
 	write_offset &= MSIC_FIFO_SIZE_MASK;
 
-	while (msic->read_offset != write_offset) {
+	while (msic->read_offset != write_offset && retry < 100) {
 		idx  = msic->read_offset / sizeof(__le32);
 		msi  = le32_to_cpu(msic->fifo_virt[idx]);
 		msi &= 0xFFFF;
@@ -110,13 +111,37 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_debug("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);
 
+		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+			generic_handle_irq(msi);
+			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
+		} else {
+			/*
+			 * Reading the MSIC_WRITE_OFFSET_REG does not
+			 * reliably flush the outstanding DMA to the
+			 * FIFO buffer. Here we were reading stale
+			 * data, so we need to retry.
+			 */
+			udelay(1);
+			retry++;
+			pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+			continue;
+		}
+
+		if (retry) {
+			pr_debug("axon_msi: late irq 0x%x, retry %d\n",
+				 msi, retry);
+			retry = 0;
+		}
+
 		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
 		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+	}
 
-		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
-			generic_handle_irq(msi);
-		else
-			pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+	if (retry) {
+		printk(KERN_WARNING "axon_msi: irq timed out\n");
+
+		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
 	}
 
 	desc->chip->eoi(irq);
@@ -364,6 +389,7 @@ static int axon_msi_probe(struct of_device *device,
 			dn->full_name);
 		goto out_free_fifo;
 	}
+	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
 	msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP, NR_IRQS,
 					&msic_host_ops, 0);
--
cgit v1.2.3
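The essence of the workaround above is a poison-and-retry scheme:
every consumed FIFO slot is overwritten with a value no valid MSI
vector can have, so reading that value back later means the next DMA
write has not landed yet. The standalone sketch below distills that
pattern; the names, sizes, and the simplified validity test (checking
the poison value directly rather than validating the irq number) are
assumptions for illustration, not the kernel code.

#include <stdint.h>

#define FIFO_ENTRIES	512		/* assumed FIFO size */
#define POISON		0xffffffffu	/* no valid vector looks like this */
#define MAX_RETRIES	100

extern void short_delay(void);			/* stands in for udelay(1) */
extern void handle_vector(uint16_t vec);	/* stands in for generic_handle_irq() */

static volatile uint32_t fifo[FIFO_ENTRIES];	/* written by device DMA */
static unsigned int read_idx;

/*
 * As in the probe path above, the whole buffer must be poisoned once
 * at init time, e.g. memset((void *)fifo, 0xff, sizeof(fifo)).
 */
void drain_fifo(unsigned int write_idx)
{
	int retry = 0;

	while (read_idx != write_idx && retry < MAX_RETRIES) {
		uint32_t entry = fifo[read_idx];

		if (entry == POISON) {
			/* stale read: the DMA for this slot is still in flight */
			short_delay();
			retry++;
			continue;
		}

		handle_vector(entry & 0xffff);
		fifo[read_idx] = POISON;	/* mark the slot consumed */
		read_idx = (read_idx + 1) % FIFO_ENTRIES;
		retry = 0;			/* the late entry did arrive */
	}
}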
From 960cedb4e3eedec6394f224fc832c7a23f35a799 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 28 Nov 2008 09:51:24 +0000
Subject: powerpc/cell: Fix GDB watchpoints, again

An earlier patch from Jens Osterkamp attempted to fix GDB watchpoints
by enabling the DABRX register at boot time. Unfortunately, this did
not work on SMP setups, where secondary CPUs were still using the
power-on DABRX value. This introduces the same change for secondary
CPUs on cell as well.

Reported-by: Ulrich Weigand
Tested-by: Ulrich Weigand
Signed-off-by: Arnd Bergmann
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/cell/smp.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/platforms/cell')

diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index c0d86e1f56e..9046803c827 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -129,10 +129,15 @@ static int __init smp_iic_probe(void)
 	return cpus_weight(cpu_possible_map);
 }
 
-static void __devinit smp_iic_setup_cpu(int cpu)
+static void __devinit smp_cell_setup_cpu(int cpu)
 {
 	if (cpu != boot_cpuid)
 		iic_setup_cpu();
+
+	/*
+	 * change default DABRX to allow user watchpoints
+	 */
+	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
 }
 
 static DEFINE_SPINLOCK(timebase_lock);
@@ -192,7 +197,7 @@ static struct smp_ops_t bpa_iic_smp_ops = {
	.message_pass	= smp_iic_message_pass,
	.probe		= smp_iic_probe,
	.kick_cpu	= smp_cell_kick_cpu,
-	.setup_cpu	= smp_iic_setup_cpu,
+	.setup_cpu	= smp_cell_setup_cpu,
	.cpu_bootable	= smp_cell_cpu_bootable,
 };
--
cgit v1.2.3
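For context on why DABRX matters here: GDB arms a hardware watchpoint
on this platform by programming the DABR through ptrace, and a DABR
match only fires in the privilege states enabled by DABRX, so a
secondary CPU left at its power-on DABRX value silently ignores
user-mode watchpoints. The sketch below shows the debugger side under
stated assumptions: the DABR flag bits are assumed from the classic
DABR layout, and PTRACE_SET_DEBUGREG is the powerpc-specific request
(defined here in case the libc headers lack it).

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SET_DEBUGREG
#define PTRACE_SET_DEBUGREG 26		/* powerpc-specific request; assumed value */
#endif

/* assumed bit layout of the classic DABR */
#define DABR_TRANSLATION	(1UL << 2)	/* match translated (virtual) accesses */
#define DABR_DATA_WRITE		(1UL << 1)	/* trap on stores */

/* arm a write watchpoint on an 8-byte-aligned address in the tracee */
static long set_write_watchpoint(pid_t child, unsigned long addr)
{
	unsigned long dabr = (addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE;

	/*
	 * The kernel loads this value into the DABR whenever 'child' runs;
	 * whether the match actually triggers in user mode is gated by
	 * DABRX, which is what the patch above programs on every CPU.
	 */
	return ptrace(PTRACE_SET_DEBUGREG, child, 0, dabr);
}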