| author | Paul Mackerras <paulus@samba.org> | 2008-06-09 12:19:41 +1000 |
| committer | Paul Mackerras <paulus@samba.org> | 2008-06-09 12:19:41 +1000 |
| commit | 8a3e1c670e503ddd6f6c373b307f38b783ee3a50 (patch) | |
| tree | 03094e8425b750d2693a271ebc89b49312e5476a /arch/ia64/kernel | |
| parent | e026892c85571e12f11abffde5a90bcc704d663e (diff) | |
| parent | 60d5019be8acef268f4676d229c490186d338fbc (diff) | |
Merge branch 'merge'
Conflicts:
arch/powerpc/sysdev/fsl_soc.c
Diffstat (limited to 'arch/ia64/kernel')
| -rw-r--r-- | arch/ia64/kernel/ivt.S | 84 |
| -rw-r--r-- | arch/ia64/kernel/minstate.h | 46 |
| -rw-r--r-- | arch/ia64/kernel/patch.c | 23 |
| -rw-r--r-- | arch/ia64/kernel/setup.c | 11 |
| -rw-r--r-- | arch/ia64/kernel/vmlinux.lds.S | 7 |
5 files changed, 125 insertions, 46 deletions
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6678c49daba..80b44ea052d 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1076,48 +1076,6 @@ END(ia64_syscall_setup)
 	DBG_FAULT(15)
 	FAULT(15)
 
-	/*
-	 * Squatting in this space ...
-	 *
-	 * This special case dispatcher for illegal operation faults allows preserved
-	 * registers to be modified through a callback function (asm only) that is handed
-	 * back from the fault handler in r8. Up to three arguments can be passed to the
-	 * callback function by returning an aggregate with the callback as its first
-	 * element, followed by the arguments.
-	 */
-ENTRY(dispatch_illegal_op_fault)
-	.prologue
-	.body
-	SAVE_MIN_WITH_COVER
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i		// guarantee that interruption collection is on
-	;;
-(p15)	ssm psr.i	// restore psr.i
-	adds r3=8,r2	// set up second base pointer for SAVE_REST
-	;;
-	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
-	mov out0=ar.ec
-	;;
-	SAVE_REST
-	PT_REGS_UNWIND_INFO(0)
-	;;
-	br.call.sptk.many rp=ia64_illegal_op_fault
-.ret0:	;;
-	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
-	mov out0=r9
-	mov out1=r10
-	mov out2=r11
-	movl r15=ia64_leave_kernel
-	;;
-	mov rp=r15
-	mov b6=r8
-	;;
-	cmp.ne p6,p0=0,r8
-(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
-	br.sptk.many ia64_leave_kernel
-END(dispatch_illegal_op_fault)
-
 	.org ia64_ivt+0x4000
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -1715,6 +1673,48 @@ END(ia32_interrupt)
 	DBG_FAULT(67)
 	FAULT(67)
 
+	/*
+	 * Squatting in this space ...
+	 *
+	 * This special case dispatcher for illegal operation faults allows preserved
+	 * registers to be modified through a callback function (asm only) that is handed
+	 * back from the fault handler in r8. Up to three arguments can be passed to the
+	 * callback function by returning an aggregate with the callback as its first
+	 * element, followed by the arguments.
+	 */
+ENTRY(dispatch_illegal_op_fault)
+	.prologue
+	.body
+	SAVE_MIN_WITH_COVER
+	ssm psr.ic | PSR_DEFAULT_BITS
+	;;
+	srlz.i		// guarantee that interruption collection is on
+	;;
+(p15)	ssm psr.i	// restore psr.i
+	adds r3=8,r2	// set up second base pointer for SAVE_REST
+	;;
+	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
+	mov out0=ar.ec
+	;;
+	SAVE_REST
+	PT_REGS_UNWIND_INFO(0)
+	;;
+	br.call.sptk.many rp=ia64_illegal_op_fault
+.ret0:	;;
+	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
+	mov out0=r9
+	mov out1=r10
+	mov out2=r11
+	movl r15=ia64_leave_kernel
+	;;
+	mov rp=r15
+	mov b6=r8
+	;;
+	cmp.ne p6,p0=0,r8
+(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
+	br.sptk.many ia64_leave_kernel
+END(dispatch_illegal_op_fault)
+
 #ifdef CONFIG_IA32_SUPPORT
 
 	/*
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 7c548ac52bb..74b6d670aae 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -15,6 +15,9 @@
 #define ACCOUNT_SYS_ENTER
 #endif
 
+.section ".data.patch.rse", "a"
+.previous
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -40,7 +43,7 @@
  * Note that psr.ic is NOT turned on by this macro.  This is so that
  * we can pass interruption state as arguments to a handler.
  */
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)					\
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND)				\
	mov r16=IA64_KR(CURRENT);	/* M */						\
	mov r27=ar.rsc;			/* M */						\
	mov r20=r1;			/* A */						\
@@ -87,6 +90,7 @@
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;						\
	mov r29=b0									\
	;;										\
+	WORKAROUND;									\
	adds r16=PT(R8),r1;	/* initialize first base pointer */			\
	adds r17=PT(R9),r1;	/* initialize second base pointer */			\
(pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */				\
@@ -206,6 +210,40 @@
	st8 [r25]=r10;		/* ar.ssd */						\
	;;
 
-#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, )
+#define RSE_WORKAROUND				\
+(pUStk) extr.u	r17=r18,3,6;			\
+(pUStk)	sub	r16=r18,r22;			\
+[1:](pKStk)	br.cond.sptk.many 1f;		\
+	.xdata4 ".data.patch.rse",1b-.		\
+	;;					\
+	cmp.ge p6,p7 = 33,r17;			\
+	;;					\
+(p6)	mov r17=0x310;				\
+(p7)	mov r17=0x308;				\
+	;;					\
+	cmp.leu p1,p0=r16,r17;			\
+(p1)	br.cond.sptk.many 1f;			\
+	dep.z r17=r26,0,62;			\
+	movl r16=2f;				\
+	;;					\
+	mov ar.pfs=r17;				\
+	dep r27=r0,r27,16,14;			\
+	mov b0=r16;				\
+	;;					\
+	br.ret.sptk b0;				\
+	;;					\
+2:						\
+	mov ar.rsc=r0				\
+	;;					\
+	flushrs;				\
+	;;					\
+	mov ar.bspstore=r22			\
+	;;					\
+	mov r18=ar.bsp;				\
+	;;					\
+1:						\
+	.pred.rel "mutex", pKStk, pUStk
+
+#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
+#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, , )
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index e0dca8743db..b83b2c51600 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,6 +115,29 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
 	ia64_srlz_i();
 }
 
+/*
+ * Disable the RSE workaround by turning the conditional branch
+ * that we tagged in each place the workaround was used into an
+ * unconditional branch.
+ */
+void __init
+ia64_patch_rse (unsigned long start, unsigned long end)
+{
+	s32 *offp = (s32 *) start;
+	u64 ip, *b;
+
+	while (offp < (s32 *) end) {
+		ip = (u64) offp + *offp;
+
+		b = (u64 *)(ip & -16);
+		b[1] &= ~0xf800000L;
+		ia64_fc((void *) ip);
+		++offp;
+	}
+	ia64_sync_i();
+	ia64_srlz_i();
+}
+
 void __init
 ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
 {
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e9596cd0cda..f48a809c686 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -560,6 +560,17 @@ setup_arch (char **cmdline_p)
 	/* process SAL system table: */
 	ia64_sal_init(__va(efi.sal_systab));
 
+#ifdef CONFIG_ITANIUM
+	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+#else
+	{
+		u64 num_phys_stacked;
+
+		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
+			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+	}
+#endif
+
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
 #endif
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 80622acc95d..5929ab10a28 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -156,6 +156,13 @@ SECTIONS
 	  __end___vtop_patchlist = .;
 	}
 
+  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+	{
+	  __start___rse_patchlist = .;
+	  *(.data.patch.rse)
+	  __end___rse_patchlist = .;
+	}
+
   .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
 	{
 	  __start___mckinley_e9_bundles = .;
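The patching machinery in this diff hinges on a self-relative offset list: each expansion of RSE_WORKAROUND tags its `(pKStk) br.cond.sptk.many 1f` bundle with `.xdata4 ".data.patch.rse", 1b-.`, storing a 32-bit offset from the list entry back to the tagged branch, and `ia64_patch_rse()` adds that offset to the entry's own address to recover the bundle whose predicate field it clears, turning the branch unconditional so the workaround is skipped on CPUs that do not need it. The user-space C sketch below illustrates only this offset-list bookkeeping, under simplified assumptions: a plain byte flag stands in for the IA-64 bundle rewrite and the `ia64_fc`/`ia64_sync_i` cache maintenance, and all names in it are hypothetical, not kernel code.

```c
/*
 * Simplified sketch of the self-relative patch-list idiom used above
 * (.data.patch.rse + ia64_patch_rse).  Each list entry holds a 32-bit
 * offset from the entry's own address to the site that needs patching;
 * adding the offset back to the entry's address recovers the site.
 */
#include <stdint.h>
#include <stdio.h>

struct site { uint8_t flag; };          /* stand-in for a tagged instruction bundle */

static struct site site_a = { 1 };
static struct site site_b = { 1 };

/* Normally emitted by the assembler via .xdata4 "...", label - . ; built by hand here. */
static int32_t patchlist[2];

static void build_patchlist(void)
{
	/* entry value = (address of patch site) - (address of entry itself) */
	patchlist[0] = (int32_t)((intptr_t)&site_a - (intptr_t)&patchlist[0]);
	patchlist[1] = (int32_t)((intptr_t)&site_b - (intptr_t)&patchlist[1]);
}

/* Mirrors the loop shape in ia64_patch_rse(): walk the offsets, resolve, patch. */
static void patch_all(int32_t *offp, int32_t *end)
{
	while (offp < end) {
		struct site *s = (struct site *)((intptr_t)offp + *offp);
		s->flag = 0;            /* "disable the workaround" at this site */
		++offp;
	}
}

int main(void)
{
	build_patchlist();
	patch_all(patchlist, patchlist + 2);
	printf("site_a=%d site_b=%d\n", site_a.flag, site_b.flag);  /* prints 0 0 */
	return 0;
}
```

Storing `site - entry` rather than an absolute address keeps each list entry at four bytes and position-independent, which is presumably why setup.c can hand `__start___rse_patchlist`/`__end___rse_patchlist` straight to `ia64_patch_rse()` with no relocation step in between.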