author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-03-04 16:39:23 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-03-04 16:39:23 -0800
commit     71ca44dac4cbf89ce88e460a293cc25c5b18fa50 (patch)
tree       199c54ee6ecfbdda0bf035df0257bbd278b97531 /arch/ia64
parent     2c6f2db13a2428aa16f54f50232a589ddd5d7d01 (diff)
parent     9dad6f5785a9f113dbbd58951d2f5ef9abd06dcc (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] fix ia64 kprobes compilation
  [IA64] move gcc_intrin.h from header-y to unifdef-y
  [IA64] workaround tiger ia64_sal_get_physical_id_info hang
  [IA64] move defconfig to arch/ia64/configs/
  [IA64] Fix irq migration in multiple vector domain
  [IA64] signal(ia64_ia32): add a signal stack overflow check
  [IA64] signal(ia64): add a signal stack overflow check
  [IA64] CONFIG_SGI_SN2 - auto select NUMA and ACPI_NUMA
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                                                        2
-rw-r--r--  arch/ia64/Makefile                                                       2
-rw-r--r--  arch/ia64/configs/generic_defconfig (renamed from arch/ia64/defconfig)   0
-rw-r--r--  arch/ia64/ia32/ia32_signal.c                                            13
-rw-r--r--  arch/ia64/kernel/iosapic.c                                               4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                                            134
-rw-r--r--  arch/ia64/kernel/kprobes.c                                               5
-rw-r--r--  arch/ia64/kernel/msi_ia64.c                                              3
-rw-r--r--  arch/ia64/kernel/sal.c                                                   7
-rw-r--r--  arch/ia64/kernel/signal.c                                               36
10 files changed, 164 insertions, 42 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 56762d3c2a6..8fa3faf5ef1 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -156,6 +156,8 @@ config IA64_HP_ZX1_SWIOTLB
config IA64_SGI_SN2
bool "SGI-SN2"
+ select NUMA
+ select ACPI_NUMA
help
Selecting this option will optimize the kernel for use on sn2 based
systems, but the resulting kernel binary will not run on other
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index b916ccfdef8..f1645c4f703 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -11,6 +11,8 @@
# Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
#
+KBUILD_DEFCONFIG := generic_defconfig
+
NM := $(CROSS_COMPILE)nm -B
READELF := $(CROSS_COMPILE)readelf
diff --git a/arch/ia64/defconfig b/arch/ia64/configs/generic_defconfig
index 0210545e7f6..0210545e7f6 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/configs/generic_defconfig
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 85e82f32e48..256a7faeda0 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -766,8 +766,19 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(esp))
+ int onstack = sas_ss_flags(esp);
+
+ if (onstack == 0)
esp = current->sas_ss_sp + current->sas_ss_size;
+ else if (onstack == SS_ONSTACK) {
+ /*
+ * If we are on the alternate signal stack and would
+ * overflow it, don't. Return an always-bogus address
+ * instead so we will die with SIGSEGV.
+ */
+ if (!likely(on_sig_stack(esp - frame_size)))
+ return (void __user *) -1L;
+ }
}
/* Legacy stack switching not supported */
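
The check above (and its native counterpart in arch/ia64/kernel/signal.c further down) refuses to build a signal frame that would overrun the alternate stack. For context, a minimal user-space sketch of the sigaltstack()/SA_ONSTACK setup this guards, using only standard POSIX calls (illustrative only, not part of the patch):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static char altstack[64 * 1024];	/* fixed size; MINSIGSTKSZ is the floor */

static void on_segv(int sig)
{
	/* Runs on altstack[] because SA_ONSTACK is set below. */
	_exit(42);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	memset(&ss, 0, sizeof(ss));
	ss.ss_sp = altstack;
	ss.ss_size = sizeof(altstack);
	if (sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return 1;
	}

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGSEGV, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	/*
	 * If the handler ever needed more frame space than altstack[]
	 * provides, the kernel check above would hand back a bogus frame
	 * address and the task would die with SIGSEGV instead of silently
	 * scribbling past the end of the stack.
	 */
	raise(SIGSEGV);
	return 0;
}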
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 398e2fd1cd2..7b3292282de 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
if (cpus_empty(mask))
return;
- if (reassign_irq_vector(irq, first_cpu(mask)))
+ if (irq_prepare_move(irq, first_cpu(mask)))
return;
dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
struct iosapic_rte_info *rte;
int do_unmask_irq = 0;
+ irq_complete_move(irq);
if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
do_unmask_irq = 1;
mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
{
irq_desc_t *idesc = irq_desc + irq;
+ irq_complete_move(irq);
move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 0b52f19ed04..2b8cf6e85af 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
+
static enum vector_domain_type {
VECTOR_DOMAIN_NONE,
VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
return CPU_MASK_ALL;
}
+static int __irq_prepare_move(int irq, int cpu)
+{
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ int vector;
+ cpumask_t domain;
+
+ if (cfg->move_in_progress || cfg->move_cleanup_count)
+ return -EBUSY;
+ if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+ return -EINVAL;
+ if (cpu_isset(cpu, cfg->domain))
+ return 0;
+ domain = vector_allocation_domain(cpu);
+ vector = find_unassigned_vector(domain);
+ if (vector < 0)
+ return -ENOSPC;
+ cfg->move_in_progress = 1;
+ cfg->old_domain = cfg->domain;
+ cfg->vector = IRQ_VECTOR_UNASSIGNED;
+ cfg->domain = CPU_MASK_NONE;
+ BUG_ON(__bind_irq_vector(irq, vector, domain));
+ return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ ret = __irq_prepare_move(irq, cpu);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ cpumask_t cleanup_mask;
+ int i;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+ return;
+
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ for_each_cpu_mask(i, cleanup_mask)
+ platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+ cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+ int me = smp_processor_id();
+ ia64_vector vector;
+ unsigned long flags;
+
+ for (vector = IA64_FIRST_DEVICE_VECTOR;
+ vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+ int irq;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ irq = __get_cpu_var(vector_irq)[vector];
+ if (irq < 0)
+ continue;
+
+ desc = irq_desc + irq;
+ cfg = irq_cfg + irq;
+ spin_lock(&desc->lock);
+ if (!cfg->move_cleanup_count)
+ goto unlock;
+
+ if (!cpu_isset(me, cfg->old_domain))
+ goto unlock;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ __get_cpu_var(vector_irq)[vector] = -1;
+ cpu_clear(me, vector_table[vector]);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ cfg->move_cleanup_count--;
+ unlock:
+ spin_unlock(&desc->lock);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+ .handler = smp_irq_move_cleanup_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "irq_move"
+};
+
static int __init parse_vector_domain(char *arg)
{
if (!arg)
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
spin_unlock_irqrestore(&vector_lock, flags);
}
-static int __reassign_irq_vector(int irq, int cpu)
-{
- struct irq_cfg *cfg = &irq_cfg[irq];
- int vector;
- cpumask_t domain;
-
- if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
- return -EINVAL;
- if (cpu_isset(cpu, cfg->domain))
- return 0;
- domain = vector_allocation_domain(cpu);
- vector = find_unassigned_vector(domain);
- if (vector < 0)
- return -ENOSPC;
- __clear_irq_vector(irq);
- BUG_ON(__bind_irq_vector(irq, vector, domain));
- return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&vector_lock, flags);
- ret = __reassign_irq_vector(irq, cpu);
- spin_unlock_irqrestore(&vector_lock, flags);
- return ret;
-}
-
/*
* Dynamic irq allocate and deallocation for MSI
*/
@@ -578,6 +645,13 @@ init_IRQ (void)
register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+ if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+ BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+ IA64_FIRST_DEVICE_VECTOR++;
+ register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+ }
+#endif
#endif
#ifdef CONFIG_PERFMON
pfm_init_percpu();
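
The change above splits the old one-shot reassign_irq_vector() into two phases: irq_prepare_move() binds the new vector while recording the old domain, and irq_complete_move(), called from the interrupt ack/end paths, IPIs the CPUs in the old domain so they can release their per-cpu vector entries. A simplified, single-threaded C model of that hand-off (toy types and bitmask "domains", not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* Toy model: a "domain" is just a CPU bitmask in an unsigned int. */
struct toy_irq_cfg {
	int          vector;
	unsigned int domain;            /* CPUs currently serving the irq */
	unsigned int old_domain;        /* CPUs being migrated away from  */
	bool         move_in_progress;
	int          move_cleanup_count;
};

/* Phase 1: bind a new vector/domain but keep the old one recorded. */
static int toy_prepare_move(struct toy_irq_cfg *cfg, int new_vector,
			    unsigned int new_domain)
{
	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -1;                      /* previous move not finished */
	cfg->old_domain       = cfg->domain;
	cfg->vector           = new_vector;
	cfg->domain           = new_domain;
	cfg->move_in_progress = true;
	return 0;
}

/* Phase 2: on the first interrupt on the new CPU, ask old CPUs to clean up. */
static void toy_complete_move(struct toy_irq_cfg *cfg, int this_cpu)
{
	if (!cfg->move_in_progress)
		return;
	if (cfg->old_domain & (1u << this_cpu))
		return;                         /* still running on an old CPU */
	cfg->move_cleanup_count = __builtin_popcount(cfg->old_domain);
	printf("IPI cleanup to old domain 0x%x\n", cfg->old_domain);
	cfg->move_in_progress = false;
}

/* Cleanup "interrupt": each old CPU releases its per-cpu vector entry. */
static void toy_cleanup_on_cpu(struct toy_irq_cfg *cfg, int cpu)
{
	if (cfg->move_cleanup_count && (cfg->old_domain & (1u << cpu)))
		cfg->move_cleanup_count--;
}

int main(void)
{
	struct toy_irq_cfg cfg = { .vector = 48, .domain = 1u << 0 };

	toy_prepare_move(&cfg, 49, 1u << 2);    /* migrate CPU0 -> CPU2 */
	toy_complete_move(&cfg, 2);             /* first irq lands on CPU2 */
	toy_cleanup_on_cpu(&cfg, 0);            /* CPU0 handles cleanup IPI */
	printf("cleanup pending: %d\n", cfg.move_cleanup_count);
	return 0;
}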
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index b618487cdc8..615c3d2b634 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
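
The new jprobe_return() is an empty stub because ia64 needs no trap at this point; other architectures use it to bounce back into the kprobes core. A hedged sketch of how a jprobe user would call it in a 2.6.25-era module, loosely adapted from the jprobes example in Documentation/kprobes.txt of that period; the do_fork() prototype shown is assumed from that era:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>

/* Assumed 2.6.25-era prototype of the probed do_fork(). */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork: clone_flags=%#lx\n", clone_flags);
	/*
	 * Every jprobe handler ends with jprobe_return().  The ia64 stub
	 * above is empty; other arches trap back to the kprobes core here.
	 */
	jprobe_return();
	return 0;	/* never reached */
}

static struct jprobe my_jprobe = {
	.entry		= (void *)jdo_fork,
	.kp.symbol_name	= "do_fork",
};

static int __init jprobe_example_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit jprobe_example_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jprobe_example_init);
module_exit(jprobe_example_exit);
MODULE_LICENSE("GPL");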
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index e86d0295979..60c6ef67ebb 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
if (!cpu_online(cpu))
return;
- if (reassign_irq_vector(irq, cpu))
+ if (irq_prepare_move(irq, cpu))
return;
read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
static void ia64_ack_msi_irq(unsigned int irq)
{
+ irq_complete_move(irq);
move_native_irq(irq);
ia64_eoi();
}
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index f44fe841216..a3022dc48ef 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab)
sal_revision = SAL_VERSION_CODE(2, 8);
sal_version = SAL_VERSION_CODE(0, 0);
}
+
+ if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
+ /*
+ * SGI Altix has hard-coded version 2.9 in their prom
+ * but they actually implement 3.2, so let's fix it here.
+ */
+ sal_revision = SAL_VERSION_CODE(3, 2);
}
static void __init
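
SAL revisions are packed into a single integer so they can be compared directly, which is what makes the one-line sn2 fixup above work. A stand-alone sketch of that comparison; the macro body here is an assumption (major number in the high byte) and the kernel's real SAL_VERSION_CODE in the ia64 SAL headers may differ in detail:

#include <stdio.h>

/* Assumed packing: major in the high byte, minor in the low byte. */
#define SAL_VERSION_CODE(major, minor)	(((major) << 8) | (minor))

int main(void)
{
	unsigned int sal_revision = SAL_VERSION_CODE(2, 9);	/* what the Altix PROM reports */

	/* The PROM actually implements SAL 3.2, so the kernel overrides it. */
	if (sal_revision == SAL_VERSION_CODE(2, 9))
		sal_revision = SAL_VERSION_CODE(3, 2);

	printf("SAL revision %u.%u\n", sal_revision >> 8, sal_revision & 0xff);
	return 0;
}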
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 309da3567bc..5740296c35a 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
new_sp = scr->pt.r12;
tramp_addr = (unsigned long) __kernel_sigtramp;
- if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
- new_sp = current->sas_ss_sp + current->sas_ss_size;
- /*
- * We need to check for the register stack being on the signal stack
- * separately, because it's switched separately (memory stack is switched
- * in the kernel, register stack is switched in the signal trampoline).
- */
- if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
- new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ int onstack = sas_ss_flags(new_sp);
+
+ if (onstack == 0) {
+ new_sp = current->sas_ss_sp + current->sas_ss_size;
+ /*
+ * We need to check for the register stack being on the
+ * signal stack separately, because it's switched
+ * separately (memory stack is switched in the kernel,
+ * register stack is switched in the signal trampoline).
+ */
+ if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
+ new_rbs = ALIGN(current->sas_ss_sp,
+ sizeof(long));
+ } else if (onstack == SS_ONSTACK) {
+ unsigned long check_sp;
+
+ /*
+ * If we are on the alternate signal stack and would
+ * overflow it, don't. Return an always-bogus address
+ * instead so we will die with SIGSEGV.
+ */
+ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+ if (!likely(on_sig_stack(check_sp)))
+ return force_sigsegv_info(sig, (void __user *)
+ check_sp);
+ }
}
frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
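
check_sp mirrors the frame calculation on the last line: subtract the frame size, then round down with & -STACK_ALIGN, which works because STACK_ALIGN is a power of two and its negation is an all-ones mask with the low bits clear. A small stand-alone illustration of that arithmetic; the STACK_ALIGN value of 16 is assumed here:

#include <stdio.h>

#define STACK_ALIGN	16	/* assumed; ia64 defines its own value */

int main(void)
{
	unsigned long new_sp = 0x600000000000f0a7UL;	/* arbitrary example sp */
	unsigned long frame_size = 0x3a0;		/* arbitrary frame size */

	/*
	 * Round down to a STACK_ALIGN boundary: -16 is ...fffffff0, so the
	 * bitwise AND clears the low four bits of the address.
	 */
	unsigned long frame = (new_sp - frame_size) & -STACK_ALIGN;

	printf("sp=%#lx frame=%#lx aligned=%s\n",
	       new_sp, frame, (frame % STACK_ALIGN) ? "no" : "yes");
	return 0;
}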