author     David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-18 08:36:46 +0100
committer  David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-18 08:36:46 +0100
commit     0107b3cf3225aed6ddde4fa8dbcd4ed643b34f4d (patch)
tree       9b9337ae627fc56a0eda43c60860765f25efaa0b /arch/ia64
parent     1c3f45ab2f7f879ea482501c83899505c31f7539 (diff)
parent     9ee1c939d1cb936b1f98e8d81aeffab57bae46ab (diff)
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/fsys.S        4
-rw-r--r--  arch/ia64/kernel/module.c     10
-rw-r--r--  arch/ia64/kernel/ptrace.c      6
-rw-r--r--  arch/ia64/kernel/setup.c       3
-rw-r--r--  arch/ia64/kernel/traps.c      29
-rw-r--r--  arch/ia64/mm/init.c           19
-rw-r--r--  arch/ia64/sn/kernel/setup.c    4
7 files changed, 63 insertions, 12 deletions
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 4f3cdef7579..962b6c4e32b 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -460,9 +460,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
;;
st8 [r2]=r14 // update current->blocked with new mask
- cmpxchg4.acq r14=[r9],r18,ar.ccv // current->thread_info->flags <- r18
+ cmpxchg4.acq r8=[r9],r18,ar.ccv // current->thread_info->flags <- r18
;;
- cmp.ne p6,p0=r17,r14 // update failed?
+ cmp.ne p6,p0=r17,r8 // update failed?
(p6) br.cond.spnt.few 1b // yes -> retry
#ifdef CONFIG_SMP
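Note on the fsys.S hunk: the fix only changes which register receives the cmpxchg result, so that r14 (which still holds *set, the value just stored into current->blocked) is not clobbered before it is needed. For readers not fluent in IA-64 assembly, the sequence is an ordinary compare-and-swap retry loop; a minimal user-space C sketch of that loop follows. The function and variable names are illustrative only, not kernel identifiers.

/*
 * User-space sketch (not kernel code) of the compare-and-swap retry loop
 * that the fsys.S sequence implements: atomically update a 32-bit flags
 * word, retrying if another CPU changed it between the load and the
 * cmpxchg.  Names are illustrative only.
 */
#include <stdint.h>

static void update_flags(volatile uint32_t *flags, uint32_t clear_mask)
{
	uint32_t old, desired;

	do {
		old = *flags;			/* snapshot of the old value (r17)   */
		desired = old & ~clear_mask;	/* value we want to install (r18)    */
		/* cmpxchg4.acq: store `desired` only if *flags still equals `old`;
		 * on failure, `old` is refreshed and we go around again (1b). */
	} while (!__atomic_compare_exchange_n(flags, &old, desired, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}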
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index febc091c2f0..f1aca7cffd1 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -825,14 +825,16 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
* XXX Should have an arch-hook for running this after final section
* addresses have been selected...
*/
- /* See if gp can cover the entire core module: */
- uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
- if (mod->core_size >= MAX_LTOFF)
+ uint64_t gp;
+ if (mod->core_size > MAX_LTOFF)
/*
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated
* at the end of the module.
*/
- gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
+ gp = mod->core_size - MAX_LTOFF / 2;
+ else
+ gp = mod->core_size / 2;
+ gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
}
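The new gp placement in module.c boils down to a small piece of address arithmetic: pick an offset into the module's core area (its midpoint, or MAX_LTOFF/2 before its end when the core is larger than MAX_LTOFF), round that offset up to an 8-byte boundary, and rebase it on module_core. A stand-alone sketch of that calculation follows; the MAX_LTOFF value and the addresses in main() are placeholders, not the kernel's definitions.

/*
 * Stand-alone sketch of the gp placement arithmetic above.  MAX_LTOFF is
 * a placeholder here, and the example addresses are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_LTOFF (4UL * 1024 * 1024)	/* placeholder reach of gp-relative addressing */

static uint64_t place_gp(uint64_t module_core, uint64_t core_size)
{
	uint64_t gp;

	if (core_size > MAX_LTOFF)
		/* small-data sections sit at the end of the module, so
		 * anchor gp within MAX_LTOFF/2 of that end */
		gp = core_size - MAX_LTOFF / 2;
	else
		/* the whole core is within reach: aim for the middle */
		gp = core_size / 2;

	/* round the offset up to an 8-byte boundary, then rebase it */
	return module_core + ((gp + 7) & -8);
}

int main(void)
{
	printf("gp = 0x%llx\n",
	       (unsigned long long)place_gp(0x2000000000000000ULL, 6UL << 20));
	return 0;
}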
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 08c8a5eb25a..575a8f657b3 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -635,11 +635,17 @@ ia64_flush_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+ /*
+ * Prevent migrating this task while
+ * we're fiddling with the FPU state
+ */
+ preempt_disable();
if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
task->thread.flags |= IA64_THREAD_FPH_VALID;
ia64_save_fpu(&task->thread.fph[0]);
}
+ preempt_enable();
}
/*
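The ptrace.c change (and the matching one in traps.c below) follows a standard pattern: per-CPU FPU-ownership state is only meaningful while the task cannot migrate, so the ownership test and the save are bracketed by preempt_disable()/preempt_enable(). Below is a compilable, heavily stubbed sketch of that shape; apart from the preemption idea itself, every name in it is a placeholder.

/*
 * Minimal sketch of the critical-section shape introduced above.  The
 * preemption primitives are stubbed so this compiles in user space; in
 * the kernel they keep the task on the current CPU, so the "am I this
 * CPU's FPU owner?" test cannot be invalidated by a migration before the
 * save completes.  All other names are placeholders.
 */
#include <stdbool.h>

#define preempt_disable()	do { } while (0)	/* stub */
#define preempt_enable()	do { } while (0)	/* stub */

struct fph_state { bool modified; bool valid; };

static bool local_fpu_owner;			/* placeholder for the per-CPU owner check */

static void save_fph(struct fph_state *f)	/* placeholder for the FPU save */
{
	f->modified = false;
	f->valid = true;
}

static void flush_fph(struct fph_state *f)
{
	preempt_disable();	/* no migration between the test and the save */
	if (local_fpu_owner && f->modified)
		save_fph(f);
	preempt_enable();
}

int main(void)
{
	struct fph_state f = { .modified = true, .valid = false };

	local_fpu_owner = true;
	flush_fph(&f);
	return f.valid ? 0 : 1;
}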
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index b7e6b4cb374..d14692e0920 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -720,7 +720,8 @@ cpu_init (void)
ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
/*
- * Initialize default control register to defer all speculative faults. The
+ * Initialize default control register to defer speculative faults except
+ * for those arising from TLB misses, which are not deferred. The
* kernel MUST NOT depend on a particular setting of these bits (in other words,
* the kernel must have recovery code for all speculative accesses). Turn on
* dcr.lc as per recommendation by the architecture team. Most IA-32 apps
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e82ad78081b..1861173bd4f 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -111,6 +111,24 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo_t siginfo;
int sig, code;
+ /* break.b always sets cr.iim to 0, which causes problems for
+ * debuggers. Get the real break number from the original instruction,
+ * but only for kernel code. User space break.b is left alone, to
+ * preserve the existing behaviour. All break codings have the same
+ * format, so there is no need to check the slot type.
+ */
+ if (break_num == 0 && !user_mode(regs)) {
+ struct ia64_psr *ipsr = ia64_psr(regs);
+ unsigned long *bundle = (unsigned long *)regs->cr_iip;
+ unsigned long slot;
+ switch (ipsr->ri) {
+ case 0: slot = (bundle[0] >> 5); break;
+ case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
+ default: slot = (bundle[1] >> 23); break;
+ }
+ break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
+ }
+
/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = break_num;
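For context, the slot extraction above follows from the IA-64 bundle layout: a 128-bit bundle holds a 5-bit template plus three 41-bit instruction slots, so slot 1 straddles the two 64-bit words, and the break immediate is reassembled from instruction bits 6..25 (imm20) and bit 36 (the i bit). A stand-alone restatement of that decoding, with the bundle passed in as two 64-bit words, might look like the sketch below; the function name is illustrative.

/*
 * Stand-alone restatement of the decoding done in ia64_bad_break() above.
 * bundle[0]/bundle[1] are the low and high 64-bit words of the 128-bit
 * bundle; ri is the slot number (psr.ri).
 */
#include <stdint.h>

static unsigned long break_num_from_bundle(const uint64_t bundle[2], unsigned int ri)
{
	uint64_t slot;

	switch (ri) {
	case 0:		/* slot 0: bundle bits  5..45 */
		slot = bundle[0] >> 5;
		break;
	case 1:		/* slot 1: bundle bits 46..86, split across both words */
		slot = (bundle[0] >> 46) | (bundle[1] << 18);
		break;
	default:	/* slot 2: bundle bits 87..127 */
		slot = bundle[1] >> 23;
		break;
	}

	/* imm21 = i bit (instruction bit 36) : imm20 (instruction bits 6..25) */
	return ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}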
@@ -202,13 +220,21 @@ disabled_fph_fault (struct pt_regs *regs)
/* first, grant user-level access to fph partition: */
psr->dfh = 0;
+
+ /*
+ * Make sure that no other task gets in on this processor
+ * while we're claiming the FPU
+ */
+ preempt_disable();
#ifndef CONFIG_SMP
{
struct task_struct *fpu_owner
= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
- if (ia64_is_local_fpu_owner(current))
+ if (ia64_is_local_fpu_owner(current)) {
+ preempt_enable_no_resched();
return;
+ }
if (fpu_owner)
ia64_flush_fph(fpu_owner);
@@ -226,6 +252,7 @@ disabled_fph_fault (struct pt_regs *regs)
*/
psr->mfh = 1;
}
+ preempt_enable_no_resched();
}
static inline int
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 547785e3cba..4eb2f52b87a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -305,8 +305,9 @@ setup_gate (void)
struct page *page;
/*
- * Map the gate page twice: once read-only to export the ELF headers etc. and once
- * execute-only page to enable privilege-promotion via "epc":
+ * Map the gate page twice: once read-only to export the ELF
+ * headers etc. and once execute-only page to enable
+ * privilege-promotion via "epc":
*/
page = virt_to_page(ia64_imva(__start_gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
@@ -315,6 +316,20 @@ setup_gate (void)
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
+ /* Fill in the holes (if any) with read-only zero pages: */
+ {
+ unsigned long addr;
+
+ for (addr = GATE_ADDR + PAGE_SIZE;
+ addr < GATE_ADDR + PERCPU_PAGE_SIZE;
+ addr += PAGE_SIZE)
+ {
+ put_kernel_page(ZERO_PAGE(0), addr,
+ PAGE_READONLY);
+ put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
+ PAGE_READONLY);
+ }
+ }
#endif
ia64_patch_gate();
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e64cb8175f7..44bfc7f318c 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -222,7 +222,7 @@ void __init early_sn_setup(void)
extern int platform_intr_list[];
extern nasid_t master_nasid;
-static int shub_1_1_found __initdata;
+static int __initdata shub_1_1_found = 0;
/*
* sn_check_for_wars
@@ -251,7 +251,7 @@ static void __init sn_check_for_wars(void)
} else {
for_each_online_node(cnode) {
if (is_shub_1_1(cnodeid_to_nasid(cnode)))
- sn_hub_info->shub_1_1_found = 1;
+ shub_1_1_found = 1;
}
}
}