author:    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2006-08-28 11:17:37 +1000
committer: Paul Mackerras <paulus@samba.org>  2006-08-30 10:36:16 +1000
commit:    8ec8f2e85c6b88b4a1641eb3902275bcf2c6d60a
tree:      ff4d7eacba8c61f9950cbb36b0844ca53546316b
parent:    4b3afca9345f5beb9c607faeb2aef4f91dd91a13
[POWERPC] Fix performance regression in IRQ radix tree locking
When reworking the powerpc irq code, I found that we were using the radix tree in a racy way. As a temporary fix, I put a spinlock in there. However, that spinlock can have a significant impact on performance. This patch reworks the code to use a smarter technique, based on the observation that what we need is really a rwlock with extremely rare writers, and can therefore be optimized for the read path.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
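The pattern is easiest to see in isolation. Below is a minimal userspace sketch of the same brlock-style scheme, under stated assumptions: C11 atomics and a pthread mutex stand in for the kernel's per-CPU variables, memory barriers and irq_big_lock; each reader owns a distinct slot index; and the local-IRQ disabling done by the kernel's read path is omitted. All names here (big_lock, reader_active, writer_active, MAX_READERS) are illustrative, not the kernel's.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define MAX_READERS 8

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int reader_active[MAX_READERS];	/* one flag per reader */
static atomic_int writer_active;

/* Read path: mark ourselves active, then check for a writer. */
static void read_lock(int self)
{
	atomic_store(&reader_active[self], 1);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&writer_active) == 0)
		return;			/* fast path: no writer around */
	/* Slow path: back off and queue behind the writer. */
	atomic_store(&reader_active[self], 0);
	pthread_mutex_lock(&big_lock);
	atomic_store(&reader_active[self], 1);
	pthread_mutex_unlock(&big_lock);
}

static void read_unlock(int self)
{
	atomic_store(&reader_active[self], 0);
}

/* Write path: lock out other writers, then wait for readers to drain. */
static void write_lock(void)
{
	pthread_mutex_lock(&big_lock);
	atomic_store(&writer_active, 1);
	atomic_thread_fence(memory_order_seq_cst);
	for (int i = 0; i < MAX_READERS; i++)
		while (atomic_load(&reader_active[i]))
			sched_yield();	/* spin until this reader is gone */
}

static void write_unlock(void)
{
	atomic_store(&writer_active, 0);
	pthread_mutex_unlock(&big_lock);
}

The asymmetry is the whole point: a reader touches one private flag and one shared word, while the writer pays for scanning every possible reader. That trade is a win here because revmap entries are inserted and deleted rarely compared with how often interrupts need to be resolved.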
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/irq.c  | 84
1 file changed, 66 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7ee68543331..12c5971d656 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -322,7 +322,8 @@ EXPORT_SYMBOL(do_softirq);
 
 static LIST_HEAD(irq_hosts);
 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
-
+static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
+static unsigned int irq_radix_writer;
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
@@ -455,6 +456,58 @@ void irq_set_virq_count(unsigned int count)
 	irq_virq_count = count;
 }
 
+/* The radix tree is not lockless-safe! We use a brlock-type mechanism
+ * for now, until we can use a lockless radix tree.
+ */
+static void irq_radix_wrlock(unsigned long *flags)
+{
+	unsigned int cpu, ok;
+
+	spin_lock_irqsave(&irq_big_lock, *flags);
+	irq_radix_writer = 1;
+	smp_mb();
+	do {
+		barrier();
+		ok = 1;
+		for_each_possible_cpu(cpu) {
+			if (per_cpu(irq_radix_reader, cpu)) {
+				ok = 0;
+				break;
+			}
+		}
+		if (!ok)
+			cpu_relax();
+	} while (!ok);
+}
+
+static void irq_radix_wrunlock(unsigned long flags)
+{
+	smp_wmb();
+	irq_radix_writer = 0;
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+static void irq_radix_rdlock(unsigned long *flags)
+{
+	local_irq_save(*flags);
+	__get_cpu_var(irq_radix_reader) = 1;
+	smp_mb();
+	if (likely(irq_radix_writer == 0))
+		return;
+	__get_cpu_var(irq_radix_reader) = 0;
+	smp_wmb();
+	spin_lock(&irq_big_lock);
+	__get_cpu_var(irq_radix_reader) = 1;
+	spin_unlock(&irq_big_lock);
+}
+
+static void irq_radix_rdunlock(unsigned long flags)
+{
+	__get_cpu_var(irq_radix_reader) = 0;
+	local_irq_restore(flags);
+}
+
+
 unsigned int irq_create_mapping(struct irq_host *host,
 				irq_hw_number_t hwirq)
 {
@@ -604,13 +657,9 @@ void irq_dispose_mapping(unsigned int virq)
 		/* Check if radix tree allocated yet */
 		if (host->revmap_data.tree.gfp_mask == 0)
 			break;
-		/* XXX radix tree not safe ! remove lock when it becomes safe
-		 * and use some RCU sync to make sure everything is ok before we
-		 * can re-use that map entry
-		 */
-		spin_lock_irqsave(&irq_big_lock, flags);
+		irq_radix_wrlock(&flags);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
 		break;
 	}
 
@@ -677,25 +726,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
 	if (tree->gfp_mask == 0)
 		return irq_find_mapping(host, hwirq);
 
-	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
-	 * when that is fixed (when Nick's patch gets in)
-	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
-
 	/* Now try to resolve */
+	irq_radix_rdlock(&flags);
 	ptr = radix_tree_lookup(tree, hwirq);
+	irq_radix_rdunlock(flags);
+
 	/* Found it, return */
 	if (ptr) {
 		virq = ptr - irq_map;
-		goto bail;
+		return virq;
 	}
 
 	/* If not there, try to insert it */
 	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ)
+	if (virq != NO_IRQ) {
+		irq_radix_wrlock(&flags);
 		radix_tree_insert(tree, hwirq, &irq_map[virq]);
- bail:
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
+	}
 	return virq;
 }
 
@@ -806,12 +854,12 @@ static int irq_late_init(void)
 	struct irq_host *h;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	irq_radix_wrlock(&flags);
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	irq_radix_wrunlock(flags);
 
 	return 0;
 }
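For completeness, usage under the sketch given after the commit message mirrors the call sites in this patch: lookups take the read lock, insertions and deletions take the write lock. Again illustrative, with hypothetical thread functions standing in for the tree operations:

/* Extends the earlier sketch; each reader thread is launched with a
 * distinct slot index passed as its argument.
 */
void *lookup_thread(void *arg)
{
	int self = (int)(long)arg;	/* this thread's reader slot */

	read_lock(self);
	/* ... the radix_tree_lookup() equivalent goes here ... */
	read_unlock(self);
	return NULL;
}

void *update_thread(void *arg)
{
	(void)arg;
	write_lock();
	/* ... the radix_tree_insert()/radix_tree_delete() equivalent ... */
	write_unlock();
	return NULL;
}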