Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r-- | arch/powerpc/kernel/irq.c | 84 |
1 files changed, 66 insertions, 18 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index fd4ddb858db..b4432332341 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -323,7 +323,8 @@ EXPORT_SYMBOL(do_softirq);
 
 static LIST_HEAD(irq_hosts);
 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
-
+static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
+static unsigned int irq_radix_writer;
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
@@ -456,6 +457,58 @@ void irq_set_virq_count(unsigned int count)
 		irq_virq_count = count;
 }
 
+/* radix tree not lockless safe ! we use a brlock-type mecanism
+ * for now, until we can use a lockless radix tree
+ */
+static void irq_radix_wrlock(unsigned long *flags)
+{
+	unsigned int cpu, ok;
+
+	spin_lock_irqsave(&irq_big_lock, *flags);
+	irq_radix_writer = 1;
+	smp_mb();
+	do {
+		barrier();
+		ok = 1;
+		for_each_possible_cpu(cpu) {
+			if (per_cpu(irq_radix_reader, cpu)) {
+				ok = 0;
+				break;
+			}
+		}
+		if (!ok)
+			cpu_relax();
+	} while(!ok);
+}
+
+static void irq_radix_wrunlock(unsigned long flags)
+{
+	smp_wmb();
+	irq_radix_writer = 0;
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+static void irq_radix_rdlock(unsigned long *flags)
+{
+	local_irq_save(*flags);
+	__get_cpu_var(irq_radix_reader) = 1;
+	smp_mb();
+	if (likely(irq_radix_writer == 0))
+		return;
+	__get_cpu_var(irq_radix_reader) = 0;
+	smp_wmb();
+	spin_lock(&irq_big_lock);
+	__get_cpu_var(irq_radix_reader) = 1;
+	spin_unlock(&irq_big_lock);
+}
+
+static void irq_radix_rdunlock(unsigned long flags)
+{
+	__get_cpu_var(irq_radix_reader) = 0;
+	local_irq_restore(flags);
+}
+
+
 unsigned int irq_create_mapping(struct irq_host *host,
 			       irq_hw_number_t hwirq)
 {
@@ -605,13 +658,9 @@ void irq_dispose_mapping(unsigned int virq)
 		/* Check if radix tree allocated yet */
 		if (host->revmap_data.tree.gfp_mask == 0)
 			break;
-		/* XXX radix tree not safe ! remove lock whem it becomes safe
-		 * and use some RCU sync to make sure everything is ok before we
-		 * can re-use that map entry
-		 */
-		spin_lock_irqsave(&irq_big_lock, flags);
+		irq_radix_wrlock(&flags);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
 		break;
 	}
 
@@ -678,25 +727,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
 	if (tree->gfp_mask == 0)
 		return irq_find_mapping(host, hwirq);
 
-	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
-	 * when that is fixed (when Nick's patch gets in
-	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
-
 	/* Now try to resolve */
+	irq_radix_rdlock(&flags);
 	ptr = radix_tree_lookup(tree, hwirq);
+	irq_radix_rdunlock(flags);
+
 	/* Found it, return */
 	if (ptr) {
 		virq = ptr - irq_map;
-		goto bail;
+		return virq;
 	}
 
 	/* If not there, try to insert it */
 	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ)
+	if (virq != NO_IRQ) {
+		irq_radix_wrlock(&flags);
 		radix_tree_insert(tree, hwirq, &irq_map[virq]);
- bail:
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
+	}
 	return virq;
 }
 
@@ -807,12 +855,12 @@ static int irq_late_init(void)
 	struct irq_host *h;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	irq_radix_wrlock(&flags);
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	irq_radix_wrunlock(flags);
 
 	return 0;
 }
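The locking scheme added above is essentially a brlock: the read side only touches a per-CPU flag, while the (rare) writer takes the global spinlock and spins until every CPU's reader flag has dropped to zero. The sketch below is a rough userspace analogue of that pattern, built on C11 atomics and a pthread mutex instead of the kernel's per-CPU variables, irq_big_lock and local_irq_save(); every name in it (reader_active, writer_active, big_lock, MAX_READERS) is illustrative and not part of the patch.

/* Userspace analogue of the brlock-style scheme in the patch: one flag per
 * reader slot plus a single writer flag.  Reader slots stand in for the
 * per-CPU irq_radix_reader, writer_active for irq_radix_writer, and
 * big_lock for irq_big_lock.  All names here are illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_READERS 4

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int reader_active[MAX_READERS];
static atomic_int writer_active;

static void rdlock(int self)
{
	atomic_store(&reader_active[self], 1);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&writer_active) == 0)
		return;		/* fast path: no writer, read side is held */

	/* Slow path: back off and wait for the writer by taking big_lock. */
	atomic_store(&reader_active[self], 0);
	pthread_mutex_lock(&big_lock);
	atomic_store(&reader_active[self], 1);
	pthread_mutex_unlock(&big_lock);
}

static void rdunlock(int self)
{
	atomic_store(&reader_active[self], 0);
}

static void wrlock(void)
{
	/* Serialize writers and block slow-path readers. */
	pthread_mutex_lock(&big_lock);
	atomic_store(&writer_active, 1);
	atomic_thread_fence(memory_order_seq_cst);

	/* Spin until every fast-path reader has drained. */
	for (;;) {
		bool busy = false;
		for (int i = 0; i < MAX_READERS; i++)
			if (atomic_load(&reader_active[i]))
				busy = true;
		if (!busy)
			break;
	}
}

static void wrunlock(void)
{
	atomic_store(&writer_active, 0);
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	int self = 0;	/* reader slot id; the kernel uses the current CPU */

	rdlock(self);
	/* ... lookups of the shared structure would go here ... */
	rdunlock(self);

	wrlock();
	/* ... updates of the shared structure would go here ... */
	wrunlock();

	puts("ok");
	return 0;
}

As in the patch, the common-case read side writes only its own flag and takes the shared lock only when a writer is active, which keeps the hot lookup path in irq_radix_revmap() almost free of shared-lock contention; writers pay the full cost of scanning every reader flag, which is acceptable because mapping insertions and deletions are rare.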