Diffstat (limited to 'arch/ppc')
 arch/ppc/Kconfig                     |   4
 arch/ppc/Makefile                    |   1
 arch/ppc/boot/images/Makefile        |   3
 arch/ppc/kernel/Makefile             |   1
 arch/ppc/kernel/align.c              |  12
 arch/ppc/kernel/cpu_setup_6xx.S      |  42
 arch/ppc/kernel/entry.S              |  59
 arch/ppc/kernel/fpu.S                | 133
 arch/ppc/kernel/head.S               | 163
 arch/ppc/kernel/head_44x.S           |   6
 arch/ppc/kernel/head_booke.h         |   7
 arch/ppc/kernel/head_fsl_booke.S     |   8
 arch/ppc/kernel/misc.S               |  12
 arch/ppc/kernel/ptrace.c             |   5
 arch/ppc/kernel/traps.c              |   2
 arch/ppc/platforms/pmac_cache.S      |  54
 arch/ppc/platforms/pmac_feature.c    | 216
 arch/ppc/platforms/pmac_sleep.S      |   4
 arch/ppc/platforms/radstone_ppc7d.c  |  60
 arch/ppc/platforms/radstone_ppc7d.h  |   1
 arch/ppc/syslib/cpm2_pic.c           |   5
 21 files changed, 492 insertions(+), 306 deletions(-)
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index 74aa1e92a39..c3d941345e3 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig @@ -53,6 +53,7 @@ choice config 6xx bool "6xx/7xx/74xx/52xx/82xx/83xx" + select PPC_FPU help There are four types of PowerPC chips supported. The more common types (601, 603, 604, 740, 750, 7400), the Motorola embedded @@ -86,6 +87,9 @@ config E500 endchoice +config PPC_FPU + bool + config BOOKE bool depends on E500 diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile index 73cbdda5b59..0432a25b473 100644 --- a/arch/ppc/Makefile +++ b/arch/ppc/Makefile @@ -53,6 +53,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o +head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o core-y += arch/ppc/kernel/ arch/ppc/platforms/ \ arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/ diff --git a/arch/ppc/boot/images/Makefile b/arch/ppc/boot/images/Makefile index 774de8e2387..f850fb0fb51 100644 --- a/arch/ppc/boot/images/Makefile +++ b/arch/ppc/boot/images/Makefile @@ -20,8 +20,9 @@ quiet_cmd_uimage = UIMAGE $@ targets += uImage $(obj)/uImage: $(obj)/vmlinux.gz + $(Q)rm -f $@ $(call if_changed,uimage) - @echo ' Image $@ is ready' + @echo ' Image: $@' $(if $(wildcard $@),'is ready','not made') # Files generated that shall be removed upon make clean clean-files := sImage vmapus vmlinux* miboot* zImage* uImage diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile index 86bc878cb3e..b284451802c 100644 --- a/arch/ppc/kernel/Makefile +++ b/arch/ppc/kernel/Makefile @@ -9,6 +9,7 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o extra-$(CONFIG_8xx) := head_8xx.o extra-$(CONFIG_6xx) += idle_6xx.o extra-$(CONFIG_POWER4) += idle_power4.o +extra-$(CONFIG_PPC_FPU) += fpu.o extra-y += vmlinux.lds obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c index 79c92947503..ff81da9598d 100644 --- a/arch/ppc/kernel/align.c +++ b/arch/ppc/kernel/align.c @@ -290,6 +290,10 @@ fix_alignment(struct pt_regs *regs) /* lwm, stmw */ nb = (32 - reg) * 4; } + + if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) + return -EFAULT; /* bad address */ + rptr = (unsigned char *) ®s->gpr[reg]; if (flags & LD) { for (i = 0; i < nb; ++i) @@ -368,16 +372,24 @@ fix_alignment(struct pt_regs *regs) /* Single-precision FP load and store require conversions... 
*/ case LD+F+S: +#ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); cvt_fd(&data.f, &data.d, ¤t->thread.fpscr); preempt_enable(); +#else + return 0; +#endif break; case ST+F+S: +#ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); cvt_df(&data.d, &data.f, ¤t->thread.fpscr); preempt_enable(); +#else + return 0; +#endif break; } diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S index 74f781b486a..468721d9ebd 100644 --- a/arch/ppc/kernel/cpu_setup_6xx.S +++ b/arch/ppc/kernel/cpu_setup_6xx.S @@ -30,12 +30,14 @@ _GLOBAL(__setup_cpu_604) blr _GLOBAL(__setup_cpu_750) mflr r4 + bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 mtlr r4 blr _GLOBAL(__setup_cpu_750cx) mflr r4 + bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750cx @@ -43,6 +45,7 @@ _GLOBAL(__setup_cpu_750cx) blr _GLOBAL(__setup_cpu_750fx) mflr r4 + bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750fx @@ -50,6 +53,7 @@ _GLOBAL(__setup_cpu_750fx) blr _GLOBAL(__setup_cpu_7400) mflr r4 + bl __init_fpu_registers bl setup_7400_workarounds bl setup_common_caches bl setup_750_7400_hid0 @@ -57,6 +61,7 @@ _GLOBAL(__setup_cpu_7400) blr _GLOBAL(__setup_cpu_7410) mflr r4 + bl __init_fpu_registers bl setup_7410_workarounds bl setup_common_caches bl setup_750_7400_hid0 @@ -80,7 +85,7 @@ setup_common_caches: bne 1f /* don't invalidate the D-cache */ ori r8,r8,HID0_DCI /* unless it wasn't enabled */ 1: sync - mtspr SPRN_HID0,r8 /* enable and invalidate caches */ + mtspr SPRN_HID0,r8 /* enable and invalidate caches */ sync mtspr SPRN_HID0,r11 /* enable caches */ sync @@ -152,9 +157,13 @@ setup_7410_workarounds: setup_750_7400_hid0: mfspr r11,SPRN_HID0 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC + oris r11,r11,HID0_DPM@h BEGIN_FTR_SECTION - oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */ -END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) + xori r11,r11,HID0_BTIC +END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) +BEGIN_FTR_SECTION + xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ +END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) li r3,HID0_SPD andc r11,r11,r3 /* clear SPD: enable speculative */ li r3,0 @@ -218,13 +227,15 @@ setup_745x_specifics: /* All of the bits we have to set..... */ - ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK | HID0_BTIC + ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE + ori r11,r11,HID0_LRSTK | HID0_BTIC + oris r11,r11,HID0_DPM@h BEGIN_FTR_SECTION xori r11,r11,HID0_BTIC END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) BEGIN_FTR_SECTION - oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */ -END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) + xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ +END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) /* All of the bits we have to clear.... */ @@ -248,6 +259,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) isync blr +/* + * Initialize the FPU registers. 
This is needed to work around an errata + * in some 750 cpus where using a not yet initialized FPU register after + * power on reset may hang the CPU + */ +_GLOBAL(__init_fpu_registers) + mfmsr r10 + ori r11,r10,MSR_FP + mtmsr r11 + isync + addis r9,r3,empty_zero_page@ha + addi r9,r9,empty_zero_page@l + REST_32FPRS(0,r9) + sync + mtmsr r10 + isync + blr + + /* Definitions for the table use to save CPU states */ #define CS_HID0 0 #define CS_HID1 4 diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S index 035217d6c0f..5f075dbc4ee 100644 --- a/arch/ppc/kernel/entry.S +++ b/arch/ppc/kernel/entry.S @@ -563,6 +563,65 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) addi r1,r1,INT_FRAME_SIZE blr + .globl fast_exception_return +fast_exception_return: +#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) + andi. r10,r9,MSR_RI /* check for recoverable interrupt */ + beq 1f /* if not, we've got problems */ +#endif + +2: REST_4GPRS(3, r11) + lwz r10,_CCR(r11) + REST_GPR(1, r11) + mtcr r10 + lwz r10,_LINK(r11) + mtlr r10 + REST_GPR(10, r11) + mtspr SPRN_SRR1,r9 + mtspr SPRN_SRR0,r12 + REST_GPR(9, r11) + REST_GPR(12, r11) + lwz r11,GPR11(r11) + SYNC + RFI + +#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) +/* check if the exception happened in a restartable section */ +1: lis r3,exc_exit_restart_end@ha + addi r3,r3,exc_exit_restart_end@l + cmplw r12,r3 + bge 3f + lis r4,exc_exit_restart@ha + addi r4,r4,exc_exit_restart@l + cmplw r12,r4 + blt 3f + lis r3,fee_restarts@ha + tophys(r3,r3) + lwz r5,fee_restarts@l(r3) + addi r5,r5,1 + stw r5,fee_restarts@l(r3) + mr r12,r4 /* restart at exc_exit_restart */ + b 2b + + .comm fee_restarts,4 + +/* aargh, a nonrecoverable interrupt, panic */ +/* aargh, we don't know which trap this is */ +/* but the 601 doesn't implement the RI bit, so assume it's OK */ +3: +BEGIN_FTR_SECTION + b 2b +END_FTR_SECTION_IFSET(CPU_FTR_601) + li r10,-1 + stw r10,TRAP(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + lis r10,MSR_KERNEL@h + ori r10,r10,MSR_KERNEL@l + bl transfer_to_handler_full + .long nonrecoverable_exception + .long ret_from_except +#endif + .globl sigreturn_exit sigreturn_exit: subi r1,r3,STACK_FRAME_OVERHEAD diff --git a/arch/ppc/kernel/fpu.S b/arch/ppc/kernel/fpu.S new file mode 100644 index 00000000000..6189b26f640 --- /dev/null +++ b/arch/ppc/kernel/fpu.S @@ -0,0 +1,133 @@ +/* + * FPU support code, moved here from head.S so that it can be used + * by chips which use other head-whatever.S files. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/cputable.h> +#include <asm/cache.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/offsets.h> + +/* + * This task wants to use the FPU now. + * On UP, disable FP for the task which had the FPU previously, + * and save its floating-point registers in its thread_struct. + * Load up this task's FP registers from its thread_struct, + * enable the FPU for the current task and return to the task. 
+ */ + .globl load_up_fpu +load_up_fpu: + mfmsr r5 + ori r5,r5,MSR_FP +#ifdef CONFIG_PPC64BRIDGE + clrldi r5,r5,1 /* turn off 64-bit mode */ +#endif /* CONFIG_PPC64BRIDGE */ + SYNC + MTMSRD(r5) /* enable use of fpu now */ + isync +/* + * For SMP, we don't do lazy FPU switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_fpu in switch_to. + */ +#ifndef CONFIG_SMP + tophys(r6,0) /* get __pa constant */ + addis r3,r6,last_task_used_math@ha + lwz r4,last_task_used_math@l(r3) + cmpwi 0,r4,0 + beq 1f + add r4,r4,r6 + addi r4,r4,THREAD /* want last_task_used_math->thread */ + SAVE_32FPRS(0, r4) + mffs fr0 + stfd fr0,THREAD_FPSCR-4(r4) + lwz r5,PT_REGS(r4) + add r5,r5,r6 + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + li r10,MSR_FP|MSR_FE0|MSR_FE1 + andc r4,r4,r10 /* disable FP for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#endif /* CONFIG_SMP */ + /* enable use of FP after return */ + mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ + lwz r4,THREAD_FPEXC_MODE(r5) + ori r9,r9,MSR_FP /* enable FP for current */ + or r9,r9,r4 + lfd fr0,THREAD_FPSCR-4(r5) + mtfsf 0xff,fr0 + REST_32FPRS(0, r5) +#ifndef CONFIG_SMP + subi r4,r5,THREAD + sub r4,r4,r6 + stw r4,last_task_used_math@l(r3) +#endif /* CONFIG_SMP */ + /* restore registers and return */ + /* we haven't used ctr or xer or lr */ + b fast_exception_return + +/* + * FP unavailable trap from kernel - print a message, but let + * the task use FP in the kernel until it returns to user mode. + */ + .globl KernelFP +KernelFP: + lwz r3,_MSR(r1) + ori r3,r3,MSR_FP + stw r3,_MSR(r1) /* enable use of FP after return */ + lis r3,86f@h + ori r3,r3,86f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl printk + b ret_from_except +86: .string "floating point used in kernel (task=%p, pc=%x)\n" + .align 4,0 + +/* + * giveup_fpu(tsk) + * Disable FP for the task given as the argument, + * and save the floating-point registers in its thread_struct. + * Enables the FPU for use in the kernel on return. + */ + .globl giveup_fpu +giveup_fpu: + mfmsr r5 + ori r5,r5,MSR_FP + SYNC_601 + ISYNC_601 + MTMSRD(r5) /* enable use of fpu now */ + SYNC_601 + isync + cmpwi 0,r3,0 + beqlr- /* if no previous owner, done */ + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpwi 0,r5,0 + SAVE_32FPRS(0, r3) + mffs fr0 + stfd fr0,THREAD_FPSCR-4(r3) + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + li r3,MSR_FP|MSR_FE0|MSR_FE1 + andc r4,r4,r3 /* disable FP for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#ifndef CONFIG_SMP + li r5,0 + lis r4,last_task_used_math@ha + stw r5,last_task_used_math@l(r4) +#endif /* CONFIG_SMP */ + blr diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S index 1a89a71e0ac..a931d773715 100644 --- a/arch/ppc/kernel/head.S +++ b/arch/ppc/kernel/head.S @@ -775,133 +775,6 @@ InstructionSegment: EXC_XFER_STD(0x480, UnknownException) #endif /* CONFIG_PPC64BRIDGE */ -/* - * This task wants to use the FPU now. - * On UP, disable FP for the task which had the FPU previously, - * and save its floating-point registers in its thread_struct. - * Load up this task's FP registers from its thread_struct, - * enable the FPU for the current task and return to the task. 
- */ -load_up_fpu: - mfmsr r5 - ori r5,r5,MSR_FP -#ifdef CONFIG_PPC64BRIDGE - clrldi r5,r5,1 /* turn off 64-bit mode */ -#endif /* CONFIG_PPC64BRIDGE */ - SYNC - MTMSRD(r5) /* enable use of fpu now */ - isync -/* - * For SMP, we don't do lazy FPU switching because it just gets too - * horrendously complex, especially when a task switches from one CPU - * to another. Instead we call giveup_fpu in switch_to. - */ -#ifndef CONFIG_SMP - tophys(r6,0) /* get __pa constant */ - addis r3,r6,last_task_used_math@ha - lwz r4,last_task_used_math@l(r3) - cmpwi 0,r4,0 - beq 1f - add r4,r4,r6 - addi r4,r4,THREAD /* want last_task_used_math->thread */ - SAVE_32FPRS(0, r4) - mffs fr0 - stfd fr0,THREAD_FPSCR-4(r4) - lwz r5,PT_REGS(r4) - add r5,r5,r6 - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - li r10,MSR_FP|MSR_FE0|MSR_FE1 - andc r4,r4,r10 /* disable FP for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* CONFIG_SMP */ - /* enable use of FP after return */ - mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ - lwz r4,THREAD_FPEXC_MODE(r5) - ori r9,r9,MSR_FP /* enable FP for current */ - or r9,r9,r4 - lfd fr0,THREAD_FPSCR-4(r5) - mtfsf 0xff,fr0 - REST_32FPRS(0, r5) -#ifndef CONFIG_SMP - subi r4,r5,THREAD - sub r4,r4,r6 - stw r4,last_task_used_math@l(r3) -#endif /* CONFIG_SMP */ - /* restore registers and return */ - /* we haven't used ctr or xer or lr */ - /* fall through to fast_exception_return */ - - .globl fast_exception_return -fast_exception_return: - andi. r10,r9,MSR_RI /* check for recoverable interrupt */ - beq 1f /* if not, we've got problems */ -2: REST_4GPRS(3, r11) - lwz r10,_CCR(r11) - REST_GPR(1, r11) - mtcr r10 - lwz r10,_LINK(r11) - mtlr r10 - REST_GPR(10, r11) - mtspr SPRN_SRR1,r9 - mtspr SPRN_SRR0,r12 - REST_GPR(9, r11) - REST_GPR(12, r11) - lwz r11,GPR11(r11) - SYNC - RFI - -/* check if the exception happened in a restartable section */ -1: lis r3,exc_exit_restart_end@ha - addi r3,r3,exc_exit_restart_end@l - cmplw r12,r3 - bge 3f - lis r4,exc_exit_restart@ha - addi r4,r4,exc_exit_restart@l - cmplw r12,r4 - blt 3f - lis r3,fee_restarts@ha - tophys(r3,r3) - lwz r5,fee_restarts@l(r3) - addi r5,r5,1 - stw r5,fee_restarts@l(r3) - mr r12,r4 /* restart at exc_exit_restart */ - b 2b - - .comm fee_restarts,4 - -/* aargh, a nonrecoverable interrupt, panic */ -/* aargh, we don't know which trap this is */ -/* but the 601 doesn't implement the RI bit, so assume it's OK */ -3: -BEGIN_FTR_SECTION - b 2b -END_FTR_SECTION_IFSET(CPU_FTR_601) - li r10,-1 - stw r10,TRAP(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - li r10,MSR_KERNEL - bl transfer_to_handler_full - .long nonrecoverable_exception - .long ret_from_except - -/* - * FP unavailable trap from kernel - print a message, but let - * the task use FP in the kernel until it returns to user mode. - */ -KernelFP: - lwz r3,_MSR(r1) - ori r3,r3,MSR_FP - stw r3,_MSR(r1) /* enable use of FP after return */ - lis r3,86f@h - ori r3,r3,86f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl printk - b ret_from_except -86: .string "floating point used in kernel (task=%p, pc=%x)\n" - .align 4,0 - #ifdef CONFIG_ALTIVEC /* Note that the AltiVec support is closely modeled after the FP * support. Changes to one are likely to be applicable to the @@ -1016,42 +889,6 @@ giveup_altivec: #endif /* CONFIG_ALTIVEC */ /* - * giveup_fpu(tsk) - * Disable FP for the task given as the argument, - * and save the floating-point registers in its thread_struct. - * Enables the FPU for use in the kernel on return. 
- */ - .globl giveup_fpu -giveup_fpu: - mfmsr r5 - ori r5,r5,MSR_FP - SYNC_601 - ISYNC_601 - MTMSRD(r5) /* enable use of fpu now */ - SYNC_601 - isync - cmpwi 0,r3,0 - beqlr- /* if no previous owner, done */ - addi r3,r3,THREAD /* want THREAD of task */ - lwz r5,PT_REGS(r3) - cmpwi 0,r5,0 - SAVE_32FPRS(0, r3) - mffs fr0 - stfd fr0,THREAD_FPSCR-4(r3) - beq 1f - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - li r3,MSR_FP|MSR_FE0|MSR_FE1 - andc r4,r4,r3 /* disable FP for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#ifndef CONFIG_SMP - li r5,0 - lis r4,last_task_used_math@ha - stw r5,last_task_used_math@l(r4) -#endif /* CONFIG_SMP */ - blr - -/* * This code is jumped to from the startup code to copy * the kernel image to physical address 0. */ diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S index 9ed8165a3d6..9b6a8e51365 100644 --- a/arch/ppc/kernel/head_44x.S +++ b/arch/ppc/kernel/head_44x.S @@ -426,7 +426,11 @@ interrupt_base: PROGRAM_EXCEPTION /* Floating Point Unavailable Interrupt */ +#ifdef CONFIG_PPC_FPU + FP_UNAVAILABLE_EXCEPTION +#else EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) +#endif /* System Call Interrupt */ START_EXCEPTION(SystemCall) @@ -686,8 +690,10 @@ _GLOBAL(giveup_altivec) * * The 44x core does not have an FPU. */ +#ifndef CONFIG_PPC_FPU _GLOBAL(giveup_fpu) blr +#endif /* * extern void abort(void) diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h index 884dac916bc..f213d12eec0 100644 --- a/arch/ppc/kernel/head_booke.h +++ b/arch/ppc/kernel/head_booke.h @@ -337,4 +337,11 @@ label: addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_LITE(0x0900, timer_interrupt) +#define FP_UNAVAILABLE_EXCEPTION \ + START_EXCEPTION(FloatingPointUnavailable) \ + NORMAL_EXCEPTION_PROLOG; \ + bne load_up_fpu; /* if from user, just load it up */ \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_EE_LITE(0x800, KernelFP) + #endif /* __HEAD_BOOKE_H__ */ diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S index d64bf61d2b1..f22ddce3613 100644 --- a/arch/ppc/kernel/head_fsl_booke.S +++ b/arch/ppc/kernel/head_fsl_booke.S @@ -504,7 +504,11 @@ interrupt_base: PROGRAM_EXCEPTION /* Floating Point Unavailable Interrupt */ +#ifdef CONFIG_PPC_FPU + FP_UNAVAILABLE_EXCEPTION +#else EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) +#endif /* System Call Interrupt */ START_EXCEPTION(SystemCall) @@ -916,10 +920,12 @@ _GLOBAL(giveup_spe) /* * extern void giveup_fpu(struct task_struct *prev) * - * The e500 core does not have an FPU. + * Not all FSL Book-E cores have an FPU */ +#ifndef CONFIG_PPC_FPU _GLOBAL(giveup_fpu) blr +#endif /* * extern void abort(void) diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index 73f7c23b0dd..e4f1615ec13 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S @@ -1096,17 +1096,7 @@ _GLOBAL(_get_SP) * and exceptions as if the cpu had performed the load or store. 
*/ -#if defined(CONFIG_4xx) || defined(CONFIG_E500) -_GLOBAL(cvt_fd) - lfs 0,0(r3) - stfd 0,0(r4) - blr - -_GLOBAL(cvt_df) - lfd 0,0(r3) - stfs 0,0(r4) - blr -#else +#ifdef CONFIG_PPC_FPU _GLOBAL(cvt_fd) lfd 0,-4(r5) /* load up fpscr value */ mtfsf 0xff,0 diff --git a/arch/ppc/kernel/ptrace.c b/arch/ppc/kernel/ptrace.c index 426b6f7d9de..59d59a8dc24 100644 --- a/arch/ppc/kernel/ptrace.c +++ b/arch/ppc/kernel/ptrace.c @@ -26,6 +26,7 @@ #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> +#include <linux/signal.h> #include <asm/uaccess.h> #include <asm/page.h> @@ -356,7 +357,7 @@ int sys_ptrace(long request, long pid, long addr, long data) case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; if (request == PTRACE_SYSCALL) { set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); @@ -389,7 +390,7 @@ int sys_ptrace(long request, long pid, long addr, long data) case PTRACE_SINGLESTEP: { /* set the trap flag. */ ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); set_single_step(child); diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c index 361865c4bc8..f8e7e324a17 100644 --- a/arch/ppc/kernel/traps.c +++ b/arch/ppc/kernel/traps.c @@ -176,7 +176,7 @@ static inline int check_io_access(struct pt_regs *regs) #else #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) #endif -#define REASON_FP 0 +#define REASON_FP ESR_FP #define REASON_ILLEGAL ESR_PIL #define REASON_PRIVILEGED ESR_PPR #define REASON_TRAP ESR_PTR diff --git a/arch/ppc/platforms/pmac_cache.S b/arch/ppc/platforms/pmac_cache.S index da34a9bc929..fb977de6b70 100644 --- a/arch/ppc/platforms/pmac_cache.S +++ b/arch/ppc/platforms/pmac_cache.S @@ -64,27 +64,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mtspr SPRN_HID0,r4 /* Disable DPM */ sync - /* disp-flush L1 */ - li r4,0x4000 - mtctr r4 + /* Disp-flush L1. We have a weird problem here that I never + * totally figured out. On 750FX, using the ROM for the flush + * results in a non-working flush. We use that workaround for + * now until I finally understand what's going on. --BenH + */ + + /* ROM base by default */ lis r4,0xfff0 -1: lwzx r0,r0,r4 + mfpvr r3 + srwi r3,r3,16 + cmplwi cr0,r3,0x7000 + bne+ 1f + /* RAM base on 750FX */ + li r4,0 +1: li r4,0x4000 + mtctr r4 +1: lwz r0,0(r4) addi r4,r4,32 bdnz 1b sync isync - /* disable / invalidate / enable L1 data */ + /* Disable / invalidate / enable L1 data */ mfspr r3,SPRN_HID0 - rlwinm r0,r0,0,~HID0_DCE + rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE) mtspr SPRN_HID0,r3 sync isync - ori r3,r3,HID0_DCE|HID0_DCI + ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI) sync isync mtspr SPRN_HID0,r3 - xori r3,r3,HID0_DCI + xori r3,r3,(HID0_DCI|HID0_ICFI) mtspr SPRN_HID0,r3 sync @@ -110,11 +122,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) lis r4,2 mtctr r4 lis r4,0xfff0 -1: lwzx r0,r0,r4 +1: lwz r0,0(r4) + addi r4,r4,32 + bdnz 1b + sync + isync + lis r4,2 + mtctr r4 + lis r4,0xfff0 +1: dcbf 0,r4 addi r4,r4,32 bdnz 1b sync isync + /* now disable L2 */ rlwinm r5,r5,0,~L2CR_L2E b 2f @@ -135,6 +156,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mtspr SPRN_L2CR,r4 sync isync + + /* Wait for the invalidation to complete */ +1: mfspr r3,SPRN_L2CR + rlwinm. 
r0,r3,0,31,31 + bne 1b + + /* Clear L2I */ xoris r4,r4,L2CR_L2I@h sync mtspr SPRN_L2CR,r4 @@ -142,14 +170,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* now disable the L1 data cache */ mfspr r0,SPRN_HID0 - rlwinm r0,r0,0,~HID0_DCE + rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE) mtspr SPRN_HID0,r0 sync isync /* Restore HID0[DPM] to whatever it was before */ sync - mtspr SPRN_HID0,r8 + mfspr r0,SPRN_HID0 + rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */ + mtspr SPRN_HID0,r0 sync /* restore DR and EE */ @@ -201,7 +231,7 @@ flush_disable_745x: mtctr r4 li r4,0 1: - lwzx r0,r0,r4 + lwz r0,0(r4) addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b isync diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c index 46cbf36722d..867336ad5d3 100644 --- a/arch/ppc/platforms/pmac_feature.c +++ b/arch/ppc/platforms/pmac_feature.c @@ -1590,6 +1590,114 @@ intrepid_shutdown(struct macio_chip* macio, int sleep_mode) mdelay(10); } + +void __pmac pmac_tweak_clock_spreading(int enable) +{ + struct macio_chip* macio = &macio_chips[0]; + + /* Hack for doing clock spreading on some machines PowerBooks and + * iBooks. This implements the "platform-do-clockspreading" OF + * property as decoded manually on various models. For safety, we also + * check the product ID in the device-tree in cases we'll whack the i2c + * chip to make reasonably sure we won't set wrong values in there + * + * Of course, ultimately, we have to implement a real parser for + * the platform-do-* stuff... + */ + + if (macio->type == macio_intrepid) { + if (enable) + UN_OUT(UNI_N_CLOCK_SPREADING, 2); + else + UN_OUT(UNI_N_CLOCK_SPREADING, 0); + mdelay(40); + } + + while (machine_is_compatible("PowerBook5,2") || + machine_is_compatible("PowerBook5,3") || + machine_is_compatible("PowerBook6,2") || + machine_is_compatible("PowerBook6,3")) { + struct device_node *ui2c = of_find_node_by_type(NULL, "i2c"); + struct device_node *dt = of_find_node_by_name(NULL, "device-tree"); + u8 buffer[9]; + u32 *productID; + int i, rc, changed = 0; + + if (dt == NULL) + break; + productID = (u32 *)get_property(dt, "pid#", NULL); + if (productID == NULL) + break; + while(ui2c) { + struct device_node *p = of_get_parent(ui2c); + if (p && !strcmp(p->name, "uni-n")) + break; + ui2c = of_find_node_by_type(ui2c, "i2c"); + } + if (ui2c == NULL) + break; + DBG("Trying to bump clock speed for PID: %08x...\n", *productID); + rc = pmac_low_i2c_open(ui2c, 1); + if (rc != 0) + break; + pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); + rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); + DBG("read result: %d,", rc); + if (rc != 0) { + pmac_low_i2c_close(ui2c); + break; + } + for (i=0; i<9; i++) + DBG(" %02x", buffer[i]); + DBG("\n"); + + switch(*productID) { + case 0x1182: /* AlBook 12" rev 2 */ + case 0x1183: /* iBook G4 12" */ + buffer[0] = (buffer[0] & 0x8f) | 0x70; + buffer[2] = (buffer[2] & 0x7f) | 0x00; + buffer[5] = (buffer[5] & 0x80) | 0x31; + buffer[6] = (buffer[6] & 0x40) | 0xb0; + buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba); + buffer[8] = (buffer[8] & 0x00) | 0x30; + changed = 1; + break; + case 0x3142: /* AlBook 15" (ATI M10) */ + case 0x3143: /* AlBook 17" (ATI M10) */ + buffer[0] = (buffer[0] & 0xaf) | 0x50; + buffer[2] = (buffer[2] & 0x7f) | 0x00; + buffer[5] = (buffer[5] & 0x80) | 0x31; + buffer[6] = (buffer[6] & 0x40) | 0xb0; + buffer[7] = (buffer[7] & 0x00) | (enable ? 
0xd0 : 0xc0); + buffer[8] = (buffer[8] & 0x00) | 0x30; + changed = 1; + break; + default: + DBG("i2c-hwclock: Machine model not handled\n"); + break; + } + if (!changed) { + pmac_low_i2c_close(ui2c); + break; + } + pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub); + rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9); + DBG("write result: %d,", rc); + pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); + rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); + DBG("read result: %d,", rc); + if (rc != 0) { + pmac_low_i2c_close(ui2c); + break; + } + for (i=0; i<9; i++) + DBG(" %02x", buffer[i]); + pmac_low_i2c_close(ui2c); + break; + } +} + + static int __pmac core99_sleep(void) { @@ -1601,12 +1709,6 @@ core99_sleep(void) macio->type != macio_intrepid) return -ENODEV; - /* The device-tree contains that in the hwclock node */ - if (macio->type == macio_intrepid) { - UN_OUT(UNI_N_CLOCK_SPREADING, 0); - mdelay(40); - } - /* We power off the wireless slot in case it was not done * by the driver. We don't power it on automatically however */ @@ -1749,12 +1851,6 @@ core99_wake_up(void) UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl); udelay(100); - /* Restore clock spreading */ - if (macio->type == macio_intrepid) { - UN_OUT(UNI_N_CLOCK_SPREADING, 2); - mdelay(40); - } - return 0; } @@ -2149,7 +2245,7 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = { }, { "PowerBook1,1", "PowerBook 101 (Lombard)", PMAC_TYPE_101_PBOOK, paddington_features, - PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE + PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE }, { "PowerBook2,1", "iBook (first generation)", PMAC_TYPE_ORIG_IBOOK, core99_features, @@ -2718,97 +2814,11 @@ set_initial_features(void) MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); } - /* Hack for bumping clock speed on the new PowerBooks and the - * iBook G4. This implements the "platform-do-clockspreading" OF - * property. For safety, we also check the product ID in the - * device-tree to make reasonably sure we won't set wrong values - * in the clock chip. - * - * Of course, ultimately, we have to implement a real parser for - * the platform-do-* stuff... + /* Some machine models need the clock chip to be properly setup for + * clock spreading now. 
This should be a platform function but we + * don't do these at the moment */ - while (machine_is_compatible("PowerBook5,2") || - machine_is_compatible("PowerBook5,3") || - machine_is_compatible("PowerBook6,2") || - machine_is_compatible("PowerBook6,3")) { - struct device_node *ui2c = of_find_node_by_type(NULL, "i2c"); - struct device_node *dt = of_find_node_by_name(NULL, "device-tree"); - u8 buffer[9]; - u32 *productID; - int i, rc, changed = 0; - - if (dt == NULL) - break; - productID = (u32 *)get_property(dt, "pid#", NULL); - if (productID == NULL) - break; - while(ui2c) { - struct device_node *p = of_get_parent(ui2c); - if (p && !strcmp(p->name, "uni-n")) - break; - ui2c = of_find_node_by_type(ui2c, "i2c"); - } - if (ui2c == NULL) - break; - DBG("Trying to bump clock speed for PID: %08x...\n", *productID); - rc = pmac_low_i2c_open(ui2c, 1); - if (rc != 0) - break; - pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); - rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); - DBG("read result: %d,", rc); - if (rc != 0) { - pmac_low_i2c_close(ui2c); - break; - } - for (i=0; i<9; i++) - DBG(" %02x", buffer[i]); - DBG("\n"); - - switch(*productID) { - case 0x1182: /* AlBook 12" rev 2 */ - case 0x1183: /* iBook G4 12" */ - buffer[0] = (buffer[0] & 0x8f) | 0x70; - buffer[2] = (buffer[2] & 0x7f) | 0x00; - buffer[5] = (buffer[5] & 0x80) | 0x31; - buffer[6] = (buffer[6] & 0x40) | 0xb0; - buffer[7] = (buffer[7] & 0x00) | 0xc0; - buffer[8] = (buffer[8] & 0x00) | 0x30; - changed = 1; - break; - case 0x3142: /* AlBook 15" (ATI M10) */ - case 0x3143: /* AlBook 17" (ATI M10) */ - buffer[0] = (buffer[0] & 0xaf) | 0x50; - buffer[2] = (buffer[2] & 0x7f) | 0x00; - buffer[5] = (buffer[5] & 0x80) | 0x31; - buffer[6] = (buffer[6] & 0x40) | 0xb0; - buffer[7] = (buffer[7] & 0x00) | 0xd0; - buffer[8] = (buffer[8] & 0x00) | 0x30; - changed = 1; - break; - default: - DBG("i2c-hwclock: Machine model not handled\n"); - break; - } - if (!changed) { - pmac_low_i2c_close(ui2c); - break; - } - pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub); - rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9); - DBG("write result: %d,", rc); - pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); - rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); - DBG("read result: %d,", rc); - if (rc != 0) { - pmac_low_i2c_close(ui2c); - break; - } - for (i=0; i<9; i++) - DBG(" %02x", buffer[i]); - pmac_low_i2c_close(ui2c); - break; - } + pmac_tweak_clock_spreading(1); #endif /* CONFIG_POWER4 */ diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S index 3139b6766ad..f459ade1bd6 100644 --- a/arch/ppc/platforms/pmac_sleep.S +++ b/arch/ppc/platforms/pmac_sleep.S @@ -267,6 +267,10 @@ grackle_wake_up: /* Restore various CPU config stuffs */ bl __restore_cpu_setup + /* Make sure all FPRs have been initialized */ + bl reloc_offset + bl __init_fpu_registers + /* Invalidate & enable L1 cache, we don't care about * whatever the ROM may have tried to write to memory */ diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c index 2a99b43737a..c30607a972d 100644 --- a/arch/ppc/platforms/radstone_ppc7d.c +++ b/arch/ppc/platforms/radstone_ppc7d.c @@ -68,6 +68,7 @@ #define PPC7D_RST_PIN 17 /* GPP17 */ extern u32 mv64360_irq_base; +extern spinlock_t rtc_lock; static struct mv64x60_handle bh; static int ppc7d_has_alma; @@ -75,6 +76,11 @@ static int ppc7d_has_alma; extern void gen550_progress(char *, unsigned short); extern void 
gen550_init(int, struct uart_port *); +/* FIXME - move to h file */ +extern int ds1337_do_command(int id, int cmd, void *arg); +#define DS1337_GET_DATE 0 +#define DS1337_SET_DATE 1 + /* residual data */ unsigned char __res[sizeof(bd_t)]; @@ -253,6 +259,8 @@ static int ppc7d_show_cpuinfo(struct seq_file *m) u8 val1, val2; static int flash_sizes[4] = { 64, 32, 0, 16 }; static int flash_banks[4] = { 4, 3, 2, 1 }; + static int sdram_bank_sizes[4] = { 128, 256, 512, 1 }; + int sdram_num_banks = 2; static char *pci_modes[] = { "PCI33", "PCI66", "Unknown", "Unknown", "PCIX33", "PCIX66", @@ -279,13 +287,17 @@ static int ppc7d_show_cpuinfo(struct seq_file *m) (val1 == PPC7D_CPLD_MB_TYPE_PLL_100) ? 100 : (val1 == PPC7D_CPLD_MB_TYPE_PLL_64) ? 64 : 0); + val = inb(PPC7D_CPLD_MEM_CONFIG); + if (val & PPC7D_CPLD_SDRAM_BANK_NUM_MASK) sdram_num_banks--; + val = inb(PPC7D_CPLD_MEM_CONFIG_EXTEND); - val1 = val & PPC7D_CPLD_SDRAM_BANK_SIZE_MASK; - seq_printf(m, "SDRAM\t\t: %d%c", - (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_128M) ? 128 : - (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_256M) ? 256 : - (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_512M) ? 512 : 1, - (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_1G) ? 'G' : 'M'); + val1 = (val & PPC7D_CPLD_SDRAM_BANK_SIZE_MASK) >> 6; + seq_printf(m, "SDRAM\t\t: %d banks of %d%c, total %d%c", + sdram_num_banks, + sdram_bank_sizes[val1], + (sdram_bank_sizes[val1] < 128) ? 'G' : 'M', + sdram_num_banks * sdram_bank_sizes[val1], + (sdram_bank_sizes[val1] < 128) ? 'G' : 'M'); if (val2 & PPC7D_CPLD_MB_TYPE_ECC_FITTED_MASK) { seq_printf(m, " [ECC %sabled]", (val2 & PPC7D_CPLD_MB_TYPE_ECC_ENABLE_MASK) ? "en" : @@ -1236,6 +1248,38 @@ static void __init ppc7d_setup_arch(void) printk(KERN_INFO "Radstone Technology PPC7D\n"); if (ppc_md.progress) ppc_md.progress("ppc7d_setup_arch: exit", 0); + +} + +/* Real Time Clock support. + * PPC7D has a DS1337 accessed by I2C. + */ +static ulong ppc7d_get_rtc_time(void) +{ + struct rtc_time tm; + int result; + + spin_lock(&rtc_lock); + result = ds1337_do_command(0, DS1337_GET_DATE, &tm); + spin_unlock(&rtc_lock); + + if (result == 0) + result = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + + return result; +} + +static int ppc7d_set_rtc_time(unsigned long nowtime) +{ + struct rtc_time tm; + int result; + + spin_lock(&rtc_lock); + to_tm(nowtime, &tm); + result = ds1337_do_command(0, DS1337_SET_DATE, &tm); + spin_unlock(&rtc_lock); + + return result; } /* This kernel command line parameter can be used to have the target @@ -1293,6 +1337,10 @@ static void ppc7d_init2(void) data8 |= 0x07; outb(data8, PPC7D_CPLD_LEDS); + /* Hook up RTC. 
We couldn't do this earlier because we need the I2C subsystem */ + ppc_md.set_rtc_time = ppc7d_set_rtc_time; + ppc_md.get_rtc_time = ppc7d_get_rtc_time; + pr_debug("%s: exit\n", __FUNCTION__); } diff --git a/arch/ppc/platforms/radstone_ppc7d.h b/arch/ppc/platforms/radstone_ppc7d.h index 4546fff2b0c..938375510be 100644 --- a/arch/ppc/platforms/radstone_ppc7d.h +++ b/arch/ppc/platforms/radstone_ppc7d.h @@ -240,6 +240,7 @@ #define PPC7D_CPLD_FLASH_CNTL 0x086E /* MEMORY_CONFIG_EXTEND */ +#define PPC7D_CPLD_SDRAM_BANK_NUM_MASK 0x02 #define PPC7D_CPLD_SDRAM_BANK_SIZE_MASK 0xc0 #define PPC7D_CPLD_SDRAM_BANK_SIZE_128M 0 #define PPC7D_CPLD_SDRAM_BANK_SIZE_256M 0x40 diff --git a/arch/ppc/syslib/cpm2_pic.c b/arch/ppc/syslib/cpm2_pic.c index 954b07fc1df..c867be6981c 100644 --- a/arch/ppc/syslib/cpm2_pic.c +++ b/arch/ppc/syslib/cpm2_pic.c @@ -107,6 +107,11 @@ static void cpm2_end_irq(unsigned int irq_nr) simr = &(cpm2_immr->im_intctl.ic_simrh); ppc_cached_irq_mask[word] |= 1 << bit; simr[word] = ppc_cached_irq_mask[word]; + /* + * Work around large numbers of spurious IRQs on PowerPC 82xx + * systems. + */ + mb(); } } |