From 0e2d94f6a09d0a2d39c3b7d9529ac5c378098245 Mon Sep 17 00:00:00 2001
From: Otavio Salvador
Date: Tue, 29 Nov 2005 08:02:24 +1100
Subject: [PATCH] ppc: Export symbol needed by MOL

Export the symbol needed to allow MOL to run. flush_icache_range was
changed to an inline in the past, but this export was never updated to
match.

Signed-off-by: Paul Mackerras
---
 arch/ppc/kernel/ppc_ksyms.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 66073f77519..bb6a5c6a64b 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(kernel_thread);
 
 EXPORT_SYMBOL(flush_instruction_cache);
 EXPORT_SYMBOL(giveup_fpu);
-EXPORT_SYMBOL(flush_icache_range);
+EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 EXPORT_SYMBOL(flush_icache_user_range);
 EXPORT_SYMBOL(flush_dcache_page);
--
cgit v1.2.3

From 666acb94d155106e494c6dfdd8b2fae44e0fad61 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Tue, 29 Nov 2005 15:50:58 +1100
Subject: powerpc: Export __flush_icache_range for 32-bit

Both 32-bit and 64-bit use the same inline flush_icache_range definition
now, so both need to export __flush_icache_range, not just 64-bit.

Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/ppc_ksyms.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index af4d1bc9a2e..94db2570845 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -163,15 +163,13 @@ EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(giveup_spe);
 #endif /* CONFIG_SPE */
 
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(__flush_icache_range);
-#else
+#ifndef CONFIG_PPC64
 EXPORT_SYMBOL(flush_instruction_cache);
-EXPORT_SYMBOL(flush_icache_range);
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(_tlbie);
 #endif
+EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 
 #ifdef CONFIG_SMP
--
cgit v1.2.3

From b77dae5293efba42ea1ff04d410ee68e66d5b0cf Mon Sep 17 00:00:00 2001
From: Dean Roe
Date: Wed, 9 Nov 2005 14:25:06 -0600
Subject: [IA64] - Make pfn_valid more precise for SGI Altix systems

A single SGI Altix system can be divided into multiple partitions, each
running its own instance of the Linux kernel. pfn_valid() is currently
not optimal for any but the first partition, since it does not compare
the pfn with min_low_pfn before calling the more costly
ia64_pfn_valid().

Signed-off-by: Dean Roe
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/ia64_ksyms.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 01572814abe..5db9d3bcbbc 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -42,6 +42,7 @@ EXPORT_SYMBOL(clear_page);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 #include <linux/bootmem.h>
+EXPORT_SYMBOL(min_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 #endif
 
--
cgit v1.2.3
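The hunk above only exports min_low_pfn; the matching pfn_valid() change lives
under include/ and so falls outside this arch-limited listing. A minimal sketch
of the check ordering the changelog describes is shown below — the macro shape
and the max_low_pfn/ia64_pfn_valid names are assumptions drawn from the
surrounding context, not a quote of the real header.

/*
 * Illustrative sketch only, not the actual include/asm-ia64 definition.
 * The point is the ordering: the cheap min_low_pfn comparison rejects
 * pfns below this partition's first page before paying for the more
 * costly ia64_pfn_valid() lookup.
 */
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define pfn_valid(pfn) \
	(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
#endif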
From 5a94bcfd2a18edcf368b3128c7df07b58e529932 Mon Sep 17 00:00:00 2001
From: Keshavamurthy Anil S
Date: Tue, 22 Nov 2005 14:15:49 -0800
Subject: [IA64] Remove getting break_num by decoding instruction

break.b always sets cr.iim to 0, and the current code tries to get the
break_num by decoding the instruction. However, there is a race
condition when reading regs->cr_iip: on another CPU, the break.b at
regs->cr_iip might have been replaced with the original instruction as
a result of unregister_kprobe(), so decoding the instruction to obtain
break_num can yield the wrong value. Also includes changes to
kprobes.c, which now has to handle break number zero.

Signed-off-by: Anil S Keshavamurthy
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/kprobes.c |  2 +-
 arch/ia64/kernel/traps.c   | 18 ------------------
 2 files changed, 1 insertion(+), 19 deletions(-)

(limited to 'arch')

diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 801eeaeaf3d..2895d6e6062 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -740,7 +740,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	switch(val) {
 	case DIE_BREAK:
 		/* err is break number from ia64_bad_break() */
-		if (args->err == 0x80200 || args->err == 0x80300)
+		if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0)
 			if (pre_kprobes_handler(args))
 				ret = NOTIFY_STOP;
 		break;
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index fba5fdd1f96..d3e0ecb56d6 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -132,24 +132,6 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
 	siginfo_t siginfo;
 	int sig, code;
 
-	/* break.b always sets cr.iim to 0, which causes problems for
-	 * debuggers. Get the real break number from the original instruction,
-	 * but only for kernel code. User space break.b is left alone, to
-	 * preserve the existing behaviour. All break codings have the same
-	 * format, so there is no need to check the slot type.
-	 */
-	if (break_num == 0 && !user_mode(regs)) {
-		struct ia64_psr *ipsr = ia64_psr(regs);
-		unsigned long *bundle = (unsigned long *)regs->cr_iip;
-		unsigned long slot;
-		switch (ipsr->ri) {
-		case 0: slot = (bundle[0] >> 5); break;
-		case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
-		default: slot = (bundle[1] >> 23); break;
-		}
-		break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
-	}
-
 	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
 	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
 	siginfo.si_imm = break_num;
--
cgit v1.2.3
From cab3f16febeaf1a60e38159ff578f609f9976544 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 29 Nov 2005 13:59:03 -0800
Subject: [SPARC64]: Fix >8K I/O mappings.

Increment the PFN field of the PTE so that the tests on vm_pfn in
mm/memory.c match up. The TLB ignores these lower bits for larger page
sizes, so it's OK to set things like this.

Signed-off-by: David S. Miller
---
 arch/sparc64/mm/generic.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index d9396c1721c..580b63da836 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -77,6 +77,7 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
 		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, address, pte, entry);
 		address += PAGE_SIZE;
+		pte_val(entry) += PAGE_SIZE;
 		pte++;
 	} while (address < curend);
 } while (address < end);
--
cgit v1.2.3

From 8117ce76c28ef0cab8545b518fa0543f6d1437e6 Mon Sep 17 00:00:00 2001
From: Paolo Galtieri
Date: Tue, 29 Nov 2005 14:26:47 -0800
Subject: [PATCH] ppc: fix floating point register corruption

I recently discovered a bug on PPC which causes the floating point
registers to get corrupted when CONFIG_PREEMPT=y. The problem occurred
while running a multi-threaded Java application that does floating
point. The problem could be reproduced in anywhere from 2 to 6 hours.
With the patch I have included below it ran for over a week without
failure.

Signed-off-by: Paolo Galtieri
Cc: Kumar Gala
Cc: Matt Porter
Cc: Tom Rini
Cc: Benjamin Herrenschmidt
Signed-off-by: Andrew Morton
Signed-off-by: Paul Mackerras
---
 arch/ppc/kernel/process.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch')

diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index cb1c7b92f8c..25cbdc8d294 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -417,6 +417,7 @@ void show_regs(struct pt_regs * regs)
 
 void exit_thread(void)
 {
+	preempt_disable();
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
 	if (last_task_used_altivec == current)
@@ -425,10 +426,12 @@ void exit_thread(void)
 	if (last_task_used_spe == current)
 		last_task_used_spe = NULL;
 #endif
+	preempt_enable();
 }
 
 void flush_thread(void)
 {
+	preempt_disable();
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
 	if (last_task_used_altivec == current)
@@ -437,6 +440,7 @@ void flush_thread(void)
 	if (last_task_used_spe == current)
 		last_task_used_spe = NULL;
 #endif
+	preempt_enable();
 }
 
 void
@@ -535,6 +539,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
 	regs->nip = nip;
 	regs->gpr[1] = sp;
 	regs->msr = MSR_USER;
+	preempt_disable();
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
 	if (last_task_used_altivec == current)
@@ -543,6 +548,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
 	if (last_task_used_spe == current)
 		last_task_used_spe = NULL;
 #endif
+	preempt_enable();
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
--
cgit v1.2.3
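The changelog above describes the symptom but not the mechanism. One plausible
interleaving on a preemptible UP kernel is sketched below as an illustration —
it is not taken from the changelog, and it assumes the usual ppc32 lazy-FP
scheme in which last_task_used_math names the task whose FP state is live in
the registers. The preempt_disable()/preempt_enable() pairs added by the patch
close exactly this window.

/*
 * Illustrative race sketch, not kernel source:
 *
 *   task B, in flush_thread():
 *       if (last_task_used_math == current)     // true: B owns the FPU
 *           ... B is preempted here ...
 *   task C runs and touches FP:
 *       the FP-unavailable handler saves B's registers and sets
 *       last_task_used_math = C
 *   task B resumes:
 *       last_task_used_math = NULL;             // stale decision: C's
 *                                               // ownership is forgotten
 *   next FP-unavailable trap:
 *       C's live registers are never saved, and C later reloads stale
 *       values from its thread_struct -> corrupted FP state in task C
 *
 * Disabling preemption makes the check-and-clear atomic with respect
 * to context switches on this CPU:
 */
preempt_disable();
if (last_task_used_math == current)
	last_task_used_math = NULL;
preempt_enable();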
From 48abec07cf8063184d397560a6a5f27eaf9caddf Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Wed, 30 Nov 2005 13:20:54 +1100
Subject: powerpc: Fix bug causing FP registers corruption on UP + preempt

This fixes a bug noticed by Paolo Galtieri and fixed for ARCH=ppc in the
previous commit (ppc: fix floating point register corruption). This
fixes the arch/powerpc code by adding preempt_disable/enable, and also
cleans it up a bit by pulling out the code that discards any
lazily-switched CPU register state into a new function, rather than
having that code repeated in three places.

Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/process.c | 62 +++++++++++++++++--------------------------
 1 file changed, 25 insertions(+), 37 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a5a7542a8ff..105d5609ff5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -201,6 +201,28 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 }
 #endif /* CONFIG_SPE */
 
+/*
+ * If we are doing lazy switching of CPU state (FP, altivec or SPE),
+ * and the current task has some state, discard it.
+ */
+static inline void discard_lazy_cpu_state(void)
+{
+#ifndef CONFIG_SMP
+	preempt_disable();
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+	preempt_enable();
+#endif /* CONFIG_SMP */
+}
+
 int set_dabr(unsigned long dabr)
 {
 	if (ppc_md.set_dabr)
@@ -434,19 +456,7 @@ void show_regs(struct pt_regs * regs)
 void exit_thread(void)
 {
 	kprobe_flush_task(current);
-
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 }
 
 void flush_thread(void)
@@ -458,18 +468,7 @@ void flush_thread(void)
 	t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
 #endif
 
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 
 #ifdef CONFIG_PPC64	/* for now */
 	if (current->thread.dabr) {
@@ -635,18 +634,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	}
 #endif
 
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
--
cgit v1.2.3
From ed1189b7e8cd8144f0b232c220aed4ee26d89463 Mon Sep 17 00:00:00 2001
From: Olaf Hering
Date: Tue, 29 Nov 2005 14:04:05 +0100
Subject: [PATCH] powerpc: prevent stack corruption in call_prom_ret

Use the correct pointer to clear the memory of the return values, to
prevent stack corruption in the caller's stack frame.

Signed-off-by: Olaf Hering
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/prom_init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 4ce0105c308..bcdc209dca8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -265,7 +265,7 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
 	va_end(list);
 
 	for (i = 0; i < nret; i++)
-		rets[nargs+i] = 0;
+		args.args[nargs+i] = 0;
 
 	if (enter_prom(&args, RELOC(prom_entry)) < 0)
 		return PROM_ERROR;
--
cgit v1.2.3

From d91b14c463306eb6527550ba48617e7f5500d3ae Mon Sep 17 00:00:00 2001
From: Thierry Vignaud
Date: Tue, 29 Nov 2005 19:34:35 -0800
Subject: [PATCH] fix rebooting on HP nc6120 laptop

Anne NICOLAS and Andres Kaaber reported that their HP laptop didn't
reboot smoothly.

Signed-off-by: Thierry Vignaud
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/i386/kernel/reboot.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'arch')

diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 350ea6680f6..2afe0f8d555 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -111,6 +111,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
 		},
 	},
+	{	/* Handle problems with rebooting on HP nc6120 */
+		.callback = set_bios_reboot,
+		.ident = "HP Compaq nc6120",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6120"),
+		},
+	},
 	{ }
 };
 
--
cgit v1.2.3
From 9f232a125bf86b0dae09f8ea4a0553535cf6b658 Mon Sep 17 00:00:00 2001
From: Paolo Galtieri
Date: Tue, 29 Nov 2005 19:34:38 -0800
Subject: [PATCH] ppc: fix floating point register corruption

I recently discovered a bug on PPC which causes the floating point
registers to get corrupted when CONFIG_PREEMPT=y. The problem occurred
while running a multi-threaded Java application that does floating
point. The problem could be reproduced in anywhere from 2 to 6 hours.
With the patch I have included below it ran for over a week without
failure.

Signed-off-by: Paolo Galtieri
Cc: Kumar Gala
Cc: Matt Porter
Cc: Tom Rini
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ppc/kernel/process.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch')

diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index cb1c7b92f8c..25cbdc8d294 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -417,6 +417,7 @@ void show_regs(struct pt_regs * regs)
 
 void exit_thread(void)
 {
+	preempt_disable();
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
 	if (last_task_used_altivec == current)
@@ -425,10 +426,12 @@ void exit_thread(void)
 	if (last_task_used_spe == current)
 		last_task_used_spe = NULL;
 #endif
+	preempt_enable();
 }
 
 void flush_thread(void)
 {
+	preempt_disable();
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
 	if (last_task_used_altivec == current)
@@ -437,6 +440,7 @@ void flush_thread(void)
 	if (last_task_used_spe == current)
 		last_task_used_spe = NULL;
 #endif
+	preempt_enable();
 }
 
 void
@@ -535,6 +539,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
 	regs->nip = nip;
 	regs->gpr[1] = sp;
 	regs->msr = MSR_USER;
+	preempt_disable();
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
 	if (last_task_used_altivec == current)
@@ -543,6 +548,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
 	if (last_task_used_spe == current)
 		last_task_used_spe = NULL;
 #endif
+	preempt_enable();
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
--
cgit v1.2.3

From fe655d3a06488c8a188461bca493e9f23fc8c448 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Tue, 29 Nov 2005 19:34:42 -0800
Subject: [PATCH] setting irq affinity is broken in ia32 with MSI enabled

Setting irq affinity stops working when MSI is enabled. With MSI,
move_irq is empty, so we can't change irq affinity. It appears to be a
typo in Ashok's original commit for this issue. X86_64 actually is
using move_native_irq.

Signed-off-by: Shaohua Li
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/i386/kernel/io_apic.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index cc5d7ac5b2e..22c8675c79f 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -2009,7 +2009,7 @@ static void ack_edge_ioapic_vector(unsigned int vector)
 {
 	int irq = vector_to_irq(vector);
 
-	move_irq(vector);
+	move_native_irq(vector);
 	ack_edge_ioapic_irq(irq);
 }
 
@@ -2024,7 +2024,7 @@ static void end_level_ioapic_vector (unsigned int vector)
 {
 	int irq = vector_to_irq(vector);
 
-	move_irq(vector);
+	move_native_irq(vector);
 	end_level_ioapic_irq(irq);
 }
 
--
cgit v1.2.3
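For context on why the one-line substitution helps (a sketch under assumptions,
not a quote of io_apic.c): with deferred IRQ migration, a new mask written to
/proc/irq/N/smp_affinity is only recorded as pending, and the IO-APIC entry is
actually reprogrammed on the next acknowledgement of that interrupt, when it is
safe to do so. move_native_irq() performs that deferred move unconditionally,
whereas — per the changelog — move_irq() is empty when MSI support is enabled,
so the pending affinity was never applied.

/*
 * Sketch of the intended ack-path behaviour.  The function and helper
 * names are taken from the diff above; the comments are assumptions.
 */
static void ack_edge_ioapic_vector(unsigned int vector)
{
	int irq = vector_to_irq(vector);	/* map the per-CPU vector back to an IRQ */

	move_native_irq(vector);		/* apply any pending affinity change now */
	ack_edge_ioapic_irq(irq);		/* then acknowledge the interrupt as before */
}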