author    Arnd Bergmann <arnd@arndb.de>    2008-02-28 06:06:30 +0100
committer Jeremy Kerr <jk@ozlabs.org>      2008-02-29 15:19:52 +1100
commit    c92a1acb675058375cc508ad024c33358b42d766 (patch)
tree      2860b1e74c48d09b12ea8b103366e6db0658fdfb
parent    cc4b7c1814c9ad375e8167ea4a9ec4a0ec1ada04 (diff)
[POWERPC] spufs: serialize SLB invalidation against SLB loading
There is a potential race between flushes of the entire SLB in the MFC and the point where new entries are being established. The problem is that we might put an ESID entry into the MFC SLB when the VSID entry has just been cleared by the global flush.

This can be circumvented by holding the register_lock throughout both the flushing and the creation of SLB entries.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
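For illustration, here is a condensed sketch of the two paths that race and of how the lock now serializes them. It is pieced together from the hunks below; the helper name sketch_load_slb and the priv2 SLB register fields (slb_index_W, slb_vsid_RW, slb_esid_RW) are stand-ins for the real spu_load_slb() code, which this patch does not show:

/* Loader side (simplified stand-in for spu_load_slb): a new entry is
 * two MMIO writes, VSID then ESID.  If the global flush runs between
 * them, the ESID ends up referring to a VSID slot that was just
 * cleared -- exactly the race described above. */
static void sketch_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->slb_index_W, slbe);
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* <-- a concurrent spu_invalidate_slbs() here caused the bug */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}

/* Flush side: with this patch the global invalidate takes
 * spu->register_lock, and the callers of the loader above hold the
 * same lock, so the flush can no longer interleave with the two
 * writes. */
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}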
-rw-r--r--    arch/powerpc/platforms/cell/spu_base.c    12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index cfc28e93c82..712001f6b7d 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	unsigned long flags;
 
+	spin_lock_irqsave(&spu->register_lock, flags);
 	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
 		out_be64(&priv2->slb_invalidate_all_W, 0UL);
+	spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
@@ -294,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		nr_slbs++;
 	}
 
+	spin_lock_irq(&spu->register_lock);
 	/* Add the set of SLBs */
 	for (i = 0; i < nr_slbs; i++)
 		spu_load_slb(spu, i, &slbs[i]);
+	spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
@@ -341,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
-	spin_unlock(&spu->register_lock);
-	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-			dar, dsisr);
 
 	if (stat & CLASS1_SEGMENT_FAULT_INTR)
 		__spu_trap_data_seg(spu, dar);
 
+	spin_unlock(&spu->register_lock);
+	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+			dar, dsisr);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
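The last hunk is the subtle part of the patch: spu_irq_class_1() already holds register_lock when this code runs, and the unlock moves below the segment-fault handling because __spu_trap_data_seg() is the SLB-loading path that this patch serializes against the flush. A rough reconstruction of the resulting handler tail, based only on the hunk above (the earlier register reads taken under the lock are not shown):

	/* register_lock is already held at this point in spu_irq_class_1() */
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	/* Segment faults load a new SLB entry, so they now stay inside
	 * the locked region to serialize against spu_invalidate_slbs(). */
	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	/* The storage fault is still handled outside the lock, as before
	 * this patch. */
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);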