author    Rusty Russell <rusty@rustcorp.com.au>  2009-01-01 10:12:26 +1030
committer Rusty Russell <rusty@rustcorp.com.au>  2009-01-01 10:12:26 +1030
commit    d036e67b40f52bdd95392390108defbac7e53837
tree      4a00537671036c955c98891af9f4729332b35c50
parent    6b954823c24f04ed026a8517f6bab5abda279db8
cpumask: convert kernel/irq
Impact: Reduce stack usage, use new cpumask API.  ALPHA mod!

Main change is that irq_default_affinity becomes a cpumask_var_t, so
treat it as a pointer (this affects alpha).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  arch/alpha/kernel/irq.c      3
-rw-r--r--  include/linux/interrupt.h    2
-rw-r--r--  kernel/irq/manage.c         11
-rw-r--r--  kernel/irq/proc.c           32
4 files changed, 33 insertions, 15 deletions
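The conversion follows the usual cpumask_var_t pattern: the mask stops being a fixed-size value on the stack (or in .data) and becomes a pointer-like object that must be allocated and freed. A rough standalone sketch of that usage, not part of the patch and with made-up names:

/* Sketch only (not part of the patch; names are made up).  With
 * CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is heap-allocated, otherwise
 * it is a plain array and alloc_cpumask_var() cannot fail. */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static int cpumask_var_sketch(void)
{
	cpumask_var_t mask;		/* used like a 'struct cpumask *' */

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_setall(mask);					/* was: mask = CPU_MASK_ALL */
	pr_info("cpu0 set: %d\n", cpumask_test_cpu(0, mask));	/* was: cpu_isset(0, mask) */

	free_cpumask_var(mask);
	return 0;
}

The same renames recur throughout the diff below: CPU_MASK_ALL becomes cpumask_setall(), cpu_isset() becomes cpumask_test_cpu(), cpus_intersects()/cpu_online_map become cpumask_intersects()/cpu_online_mask, and plain assignment becomes cpumask_copy().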
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index d0f1620007f..703731accda 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -50,7 +50,8 @@ int irq_select_affinity(unsigned int irq)
 	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
 		return 1;
 
-	while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
+	while (!cpu_possible(cpu) ||
+	       !cpumask_test_cpu(cpu, irq_default_affinity))
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
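The alpha hunk is the "ALPHA mod" from the changelog: cpu_isset() took the mask by value, while cpumask_test_cpu() takes a const struct cpumask *, and irq_default_affinity is now already pointer-like, so it is passed as-is. Restated as a standalone helper (a sketch; pick_next_cpu is a made-up name), the selection loop reads:

/* Sketch of the loop above (pick_next_cpu is a made-up name): advance
 * 'cpu' round-robin until it names a possible CPU that is also set in
 * irq_default_affinity, which is now passed as a pointer. */
static unsigned int pick_next_cpu(unsigned int cpu)
{
	while (!cpu_possible(cpu) ||
	       !cpumask_test_cpu(cpu, irq_default_affinity))
		cpu = (cpu < (NR_CPUS - 1) ? cpu + 1 : 0);
	return cpu;
}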
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index dfaee6bd265..91f1ef8e581 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -109,7 +109,7 @@ extern void enable_irq(unsigned int irq);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 
-extern cpumask_t irq_default_affinity;
+extern cpumask_var_t irq_default_affinity;
 
 extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c4a9b6216..cd0cd8dcb34 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
 #include "internals.h"
 
 #ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
 
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+	cpumask_setall(irq_default_affinity);
+	return 0;
+}
+core_initcall(init_irq_default_affinity);
 
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -127,7 +134,7 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_AFFINITY_SET;
}
- cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity);
+ cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
desc->chip->set_affinity(irq, &desc->affinity);
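Since there is no static initializer equivalent to CPU_MASK_ALL for a cpumask_var_t, the default mask is now allocated and filled from a core_initcall(). A minimal standalone sketch of that pattern, with a hypothetical mask name and with the allocation result checked (which the hunk above does not do; the allocation can only fail when CONFIG_CPUMASK_OFFSTACK=y):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>

static cpumask_var_t my_default_mask;	/* hypothetical mask for illustration */

static int __init my_default_mask_init(void)
{
	/* Allocation can only fail when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!alloc_cpumask_var(&my_default_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_setall(my_default_mask);	/* stands in for CPU_MASK_ALL */
	return 0;
}
core_initcall(my_default_mask_init);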
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c..2abd3a7716e 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	cpumask_t *mask = &desc->affinity;
+	const struct cpumask *mask = &desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
@@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
 
 static int default_affinity_show(struct seq_file *m, void *v)
 {
-	seq_cpumask(m, &irq_default_affinity);
+	seq_cpumask(m, irq_default_affinity);
 	seq_putc(m, '\n');
 	return 0;
 }
@@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
 static ssize_t default_affinity_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
-	err = cpumask_parse_user(buffer, count, &new_value);
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto out;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
-		return -EINVAL;
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
+		err = -EINVAL;
+		goto out;
+	}
 
-	irq_default_affinity = new_value;
+	cpumask_copy(irq_default_affinity, new_value);
+	err = count;
 
-	return count;
+out:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int default_affinity_open(struct inode *inode, struct file *file)
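The rewritten default_affinity_write() is the standard temporary-mask shape for cpumask_var_t: allocate, parse the user buffer, validate, copy into the long-lived mask, and free the temporary on every exit path. A condensed sketch of that flow, with a hypothetical helper name and destination mask, and with the is_affinity_mask_valid() check omitted:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>

/* Hypothetical helper mirroring the flow of default_affinity_write(). */
static ssize_t write_cpumask_from_user(const char __user *buffer, size_t count,
				       struct cpumask *dest)
{
	cpumask_var_t new_value;
	ssize_t err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/* Never accept a mask with no online CPU in it. */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(dest, new_value);
	err = count;
out:
	free_cpumask_var(new_value);
	return err;
}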