From f794c8279d02ccd69429d816eb03fa12c130d06d Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Mon, 5 Mar 2007 23:35:38 +0100
Subject: [S390] kprobes breaks BUG_ON

The illegal operation handler calls the die notifier with DIE_BPT to let
kprobes pick up its breakpoint. If kprobes does not find its breakpoint,
it returns NOTIFY_STOP instead of NOTIFY_DONE. Since we use
stop_machine_run on s390 to arm/disarm the kprobes breakpoints, the race
that kprobe_handler tries to solve by checking for the kprobes
breakpoints does not exist. Removing the check makes BUG_ON work again.

Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/kprobes.c | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index a466bab6677..8af549e9573 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -337,21 +337,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
         }

         p = get_kprobe(addr);
-        if (!p) {
-                if (*addr != BREAKPOINT_INSTRUCTION) {
-                        /*
-                         * The breakpoint instruction was removed right
-                         * after we hit it. Another cpu has removed
-                         * either a probepoint or a debugger breakpoint
-                         * at this address. In either case, no further
-                         * handling of this interrupt is appropriate.
-                         *
-                         */
-                        ret = 1;
-                }
-                /* Not one of ours: let kernel handle it */
+        if (!p)
+                /*
+                 * No kprobe at this address. The fault has not been
+                 * caused by a kprobe breakpoint. The race of breakpoint
+                 * vs. kprobe remove does not exist because on s390 we
+                 * use stop_machine_run to arm/disarm the breakpoints.
+                 */
                 goto no_kprobe;
-        }

         kcb->kprobe_status = KPROBE_HIT_ACTIVE;
         set_current_kprobe(p, regs, kcb);
-- cgit v1.2.3


From 25864162c15e61b494aa619974a4d521270362f7 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 5 Mar 2007 23:35:41 +0100
Subject: [S390] smp: disable preemption in smp_call_function/smp_call_function_on

Avoid sprinkling a _lot_ of preempt_disable/preempt_enable pairs. This
would otherwise be necessary for e.g. the iucv driver. Also, this way we
are more consistent with other architectures, which disable preemption
at least for smp_call_function.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index ecaa432a99f..97764f710bb 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -94,10 +94,9 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
         int cpu, local = 0;

         /*
-         * Can deadlock when interrupts are disabled or if in wrong context,
-         * caller must disable preemption
+         * Can deadlock when interrupts are disabled or if in wrong context.
          */
-        WARN_ON(irqs_disabled() || in_irq() || preemptible());
+        WARN_ON(irqs_disabled() || in_irq());

         /*
          * Check for local function call. We have to have the same call order
@@ -152,17 +151,18 @@ out:
  * Run a function on all other CPUs.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
  */
 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                       int wait)
 {
         cpumask_t map;

+        preempt_disable();
         map = cpu_online_map;
         cpu_clear(smp_processor_id(), map);
         __smp_call_function_map(func, info, nonatomic, wait, map);
+        preempt_enable();
         return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
@@ -178,16 +178,17 @@ EXPORT_SYMBOL(smp_call_function);
  * Run a function on one processor.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
  */
 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
                          int wait, int cpu)
 {
         cpumask_t map = CPU_MASK_NONE;

+        preempt_disable();
         cpu_set(cpu, map);
         __smp_call_function_map(func, info, nonatomic, wait, map);
+        preempt_enable();
         return 0;
 }
 EXPORT_SYMBOL(smp_call_function_on);
-- cgit v1.2.3


From c5dd8586707800cd7bbdefcd675ad7d3c9afcd57 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 5 Mar 2007 23:35:43 +0100
Subject: [S390] reipl: move dump_prefix_page out of text section.

Reipl doesn't work on older machines where s390_reset_machine() gets
called. The reason is that the text section is read-only, but the
variable dump_prefix_page is located there. Since s390_reset_machine()
writes to it, we get a protection exception. Therefore move
dump_prefix_page to the bss section.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/ipl.c     |  4 ++--
 arch/s390/kernel/reipl.S   | 13 +++++++------
 arch/s390/kernel/reipl64.S | 13 +++++++++----
 3 files changed, 18 insertions(+), 12 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5a863a3bf10..d125a4ead08 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1066,7 +1066,7 @@ static void do_reset_calls(void)
                 reset->fn();
 }

-extern __u32 dump_prefix_page;
+u32 dump_prefix_page;

 void s390_reset_system(void)
 {
@@ -1078,7 +1078,7 @@ void s390_reset_system(void)
         lc->panic_stack = S390_lowcore.panic_stack;

         /* Save prefix page address for dump case */
-        dump_prefix_page = (unsigned long) lc;
+        dump_prefix_page = (u32)(unsigned long) lc;

         /* Disable prefixing */
         set_prefix(0);
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index c3f4d9b9508..2f481cc3d1c 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -8,6 +8,10 @@

 #include

+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
         .globl do_reipl_asm
 do_reipl_asm: basr %r13,0
 .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
@@ -16,12 +20,12 @@ do_reipl_asm: basr %r13,0
         stm %r0,%r15,__LC_GPREGS_SAVE_AREA
         stctl %c0,%c15,__LC_CREGS_SAVE_AREA
         stam %a0,%a15,__LC_AREGS_SAVE_AREA
-        mvc __LC_PREFIX_SAVE_AREA(4),dump_prefix_page-.Lpg0(%r13)
+        l %r10,.Ldump_pfx-.Lpg0(%r13)
+        mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
         stckc .Lclkcmp-.Lpg0(%r13)
         mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
         stpt __LC_CPU_TIMER_SAVE_AREA
         st %r13, __LC_PSW_SAVE_AREA+4
-
         lctl %c6,%c6,.Lall-.Lpg0(%r13)
         lr %r1,%r2
         mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
@@ -55,6 +59,7 @@ do_reipl_asm: basr %r13,0
         .align 8
 .Lclkcmp: .quad 0x0000000000000000
 .Lall: .long 0xff000000
+.Ldump_pfx: .long dump_prefix_page
         .align 8
 .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
 .Lpcnew: .long 0x00080000,0x80000000+.Lecs
@@ -79,7 +84,3 @@ do_reipl_asm: basr %r13,0
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
-        .globl dump_prefix_page
-dump_prefix_page:
-        .long 0x00000000
-
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index dbb3eed3886..c41930499a5 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -8,6 +8,12 @@
  */

 #include
+
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
+
         .globl do_reipl_asm
 do_reipl_asm: basr %r13,0
 .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
@@ -20,7 +26,8 @@ do_reipl_asm: basr %r13,0
         stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
         stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
         stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
-        mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),dump_prefix_page-.Lpg0(%r13)
+        lg %r10,.Ldump_pfx-.Lpg0(%r13)
+        mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
         stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
         stckc .Lclkcmp-.Lpg0(%r13)
         mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
@@ -64,6 +71,7 @@ do_reipl_asm: basr %r13,0
         .align 8
 .Lclkcmp: .quad 0x0000000000000000
 .Lall: .quad 0x00000000ff000000
+.Ldump_pfx: .quad dump_prefix_page
 .Lregsave: .quad 0x0000000000000000
         .align 16
 /*
@@ -103,6 +111,3 @@ do_reipl_asm: basr %r13,0
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
-        .globl dump_prefix_page
-dump_prefix_page:
-        .long 0x00000000
-- cgit v1.2.3


From 9c9c17613a78545a4a93b1370924f62eb282c903 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 5 Mar 2007 23:35:45 +0100
Subject: [S390] nss: disable kexec.

nss and kexec don't work together since kexec wants to write to the
read-only text section of the shared kernel image.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/Kconfig                | 2 ++
 arch/s390/kernel/machine_kexec.c | 5 +++++
 2 files changed, 7 insertions(+)

(limited to 'arch')

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d9425f59be9..0f293aa7b0f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -376,6 +376,8 @@ config SHARED_KERNEL
           Select this option, if you want to share the text segment of the
           Linux kernel between different VM guests. This reduces memory
           usage with lots of guests but greatly increases kernel size.
+          Also if a kernel was IPL'ed from a shared segment the kexec system
+          call will not work.
           You should only select this option if you know what you are
           doing and want to exploit this feature.

diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 52f57af252b..3c77dd36994 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include

 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

@@ -29,6 +30,10 @@ int machine_kexec_prepare(struct kimage *image)
 {
         void *reboot_code_buffer;

+        /* Can't replace kernel image since it is read-only. */
+        if (ipl_flags & IPL_NSS_VALID)
+                return -ENOSYS;
+
         /* We don't support anything but the default image type for now. */
         if (image->type != KEXEC_TYPE_DEFAULT)
                 return -EINVAL;
-- cgit v1.2.3


From 482b05dd533da162fa8d04c61712fae297bea3e0 Mon Sep 17 00:00:00 2001
From: Gerald Schaefer
Date: Mon, 5 Mar 2007 23:35:54 +0100
Subject: [S390] Fixed handling of access register mode faults.

Replaced check_user_space() + __check_access_register with the new
check_space(). The old functions made wrong assumptions about kernel and
user space when the kernel and user address spaces are switched (kernel
in home space, user in primary/secondary space).

Secondly, the user process can switch to access register mode if it is
running in primary or secondary mode.
In addition, it can load an arbitrary value into the access registers.
If any value other than 0 for primary space or 1 for secondary space is
loaded and memory is accessed using the base register related to the
access register, the program should be terminated with a SIGSEGV. To
achieve that, the DUALD pointer in the DUCT and the PSALD pointer in the
PASTE need to point to an array of 8 invalid access-list entries, so
that an ALEN-translation exception is raised if an invalid alet is used.

Signed-off-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/head31.S |  11 +++--
 arch/s390/kernel/head64.S |  11 +++--
 arch/s390/mm/fault.c      | 105 +++++++++++++++++++++-------------------------
 3 files changed, 63 insertions(+), 64 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index da7c8bb8098..dc364c1419a 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -121,7 +121,7 @@ startup_continue:
         .long .Lduct # cr2: dispatchable unit control table
         .long 0 # cr3: instruction authorization
         .long 0 # cr4: instruction authorization
-        .long 0xffffffff # cr5: primary-aste origin
+        .long .Lduct # cr5: primary-aste origin
         .long 0 # cr6: I/O interrupts
         .long 0 # cr7: secondary space segment table
         .long 0 # cr8: access registers translation
@@ -132,8 +132,6 @@ startup_continue:
         .long 0 # cr13: home space segment table
         .long 0xc0000000 # cr14: machine check handling off
         .long 0 # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
-        .long 0,0,0,0,0,0,0,0
 .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
 .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
 .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -147,6 +145,13 @@ startup_continue:
 .Linittu: .long init_thread_union
 .Lstartup_init:
         .long startup_init
+        .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+        .long 0,0,0,0,0,0,0,0
+        .align 128
+.Lduald:.rept 8
+        .long 0x80000000,0,0,0 # invalid access-list entries
+        .endr

         .org 0x12000
         .globl _ehead
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index af09e18cc5d..37010709fe6 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -134,7 +134,7 @@ startup_continue:
         .quad .Lduct # cr2: dispatchable unit control table
         .quad 0 # cr3: instruction authorization
         .quad 0 # cr4: instruction authorization
-        .quad 0xffffffffffffffff # cr5: primary-aste origin
+        .quad .Lduct # cr5: primary-aste origin
         .quad 0 # cr6: I/O interrupts
         .quad 0 # cr7: secondary space segment table
         .quad 0 # cr8: access registers translation
@@ -145,14 +145,19 @@ startup_continue:
         .quad 0 # cr13: home space segment table
         .quad 0xc0000000 # cr14: machine check handling off
         .quad 0 # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
-        .long 0,0,0,0,0,0,0,0
 .Lpcmsk:.quad 0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
 .Lnop: .long 0x07000700
 .Lparmaddr:
         .quad PARMAREA
+        .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+        .long 0,0,0,0,0,0,0,0
+        .align 128
+.Lduald:.rept 8
+        .long 0x80000000,0,0,0 # invalid access-list entries
+        .endr

         .org 0x12000
         .globl _ehead
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 641aef36ccc..7462aebd3eb 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -108,53 +108,40 @@ void bust_spinlocks(int yes)
 }

 /*
- * Check which address space is addressed by the access
- * register in S390_lowcore.exc_access_id.
- * Returns 1 for user space and 0 for kernel space.
+ * Returns the address space associated with the fault.
+ * Returns 0 for kernel space, 1 for user space and
+ * 2 for code execution in user space with noexec=on.
  */
-static int __check_access_register(struct pt_regs *regs, int error_code)
-{
-        int areg = S390_lowcore.exc_access_id;
-
-        if (areg == 0)
-                /* Access via access register 0 -> kernel address */
-                return 0;
-        save_access_regs(current->thread.acrs);
-        if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1)
-                /*
-                 * access register contains 0 -> kernel address,
-                 * access register contains 1 -> user space address
-                 */
-                return current->thread.acrs[areg];
-
-        /* Something unhealthy was done with the access registers... */
-        die("page fault via unknown access register", regs, error_code);
-        do_exit(SIGKILL);
-        return 0;
-}
-
-/*
- * Check which address space the address belongs to.
- * May return 1 or 2 for user space and 0 for kernel space.
- * Returns 2 for user space in primary addressing mode with
- * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
- */
-static inline int check_user_space(struct pt_regs *regs, int error_code)
+static inline int check_space(struct task_struct *tsk)
 {
         /*
-         * The lowest two bits of S390_lowcore.trans_exc_code indicate
-         * which paging table was used:
-         *   0: Primary Segment Table Descriptor
-         *   1: STD determined via access register
-         *   2: Secondary Segment Table Descriptor
-         *   3: Home Segment Table Descriptor
+         * The lowest two bits of S390_lowcore.trans_exc_code
+         * indicate which paging table was used.
          */
-        int descriptor = S390_lowcore.trans_exc_code & 3;
-        if (unlikely(descriptor == 1))
-                return __check_access_register(regs, error_code);
-        if (descriptor == 2)
-                return current->thread.mm_segment.ar4;
-        return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
+        int desc = S390_lowcore.trans_exc_code & 3;
+
+        if (desc == 3)  /* Home Segment Table Descriptor */
+                return switch_amode == 0;
+        if (desc == 2)  /* Secondary Segment Table Descriptor */
+                return tsk->thread.mm_segment.ar4;
+#ifdef CONFIG_S390_SWITCH_AMODE
+        if (unlikely(desc == 1)) {      /* STD determined via access register */
+                /* %a0 always indicates primary space. */
+                if (S390_lowcore.exc_access_id != 0) {
+                        save_access_regs(tsk->thread.acrs);
+                        /*
+                         * An alet of 0 indicates primary space.
+                         * An alet of 1 indicates secondary space.
+                         * Any other alet values generate an
+                         * alen-translation exception.
+                         */
+                        if (tsk->thread.acrs[S390_lowcore.exc_access_id])
+                                return tsk->thread.mm_segment.ar4;
+                }
+        }
+#endif
+        /* Primary Segment Table Descriptor */
+        return switch_amode << s390_noexec;
 }

 /*
@@ -265,16 +252,16 @@ out_fault:
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void __kprobes
+static inline void
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
         struct task_struct *tsk;
         struct mm_struct *mm;
         struct vm_area_struct * vma;
         unsigned long address;
-        int user_address;
         const struct exception_table_entry *fixup;
-        int si_code = SEGV_MAPERR;
+        int si_code;
+        int space;

         tsk = current;
         mm = tsk->mm;
@@ -294,7 +281,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
                  * NULL pointer write access in kernel mode.
                  */
                 if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
                         address = 0;
-                        user_address = 0;
+                        space = 0;
                         goto no_context;
                 }
@@ -309,15 +296,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
          * the address
          */
         address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-        user_address = check_user_space(regs, error_code);
+        space = check_space(tsk);

         /*
          * Verify that the fault happened in user space, that
          * we are not in an interrupt and that there is a
          * user context.
          */
-        if (user_address == 0 || in_atomic() || !mm)
-                goto no_context;
+        if (unlikely(space == 0 || in_atomic() || !mm))
+                goto no_context;

         /*
          * When we get here, the fault happened in the current
@@ -328,12 +315,13 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)

         down_read(&mm->mmap_sem);

-        vma = find_vma(mm, address);
-        if (!vma)
-                goto bad_area;
+        si_code = SEGV_MAPERR;
+        vma = find_vma(mm, address);
+        if (!vma)
+                goto bad_area;

 #ifdef CONFIG_S390_EXEC_PROTECT
-        if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+        if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
                 if (!signal_return(mm, regs, address, error_code))
                         /*
                          * signal_return() has done an up_read(&mm->mmap_sem)
@@ -389,7 +377,7 @@ survive:
          * The instruction that caused the program check will
          * be repeated. Don't signal single step via SIGTRAP.
          */
-        clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+        clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
         return;

 /*
@@ -419,7 +407,7 @@ no_context:
          * Oops. The kernel tried to access some bad page. We'll have to
          * terminate things with extreme prejudice.
          */
-        if (user_address == 0)
+        if (space == 0)
                 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                        " at virtual kernel address %p\n", (void *)address);
         else
@@ -462,13 +450,14 @@ do_sigbus:
         goto no_context;
 }

-void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs,
+                                       unsigned long error_code)
 {
         regs->psw.addr -= (error_code >> 16);
         do_exception(regs, 4, 1);
 }

-void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
 {
         do_exception(regs, error_code & 0xff, 0);
 }
-- cgit v1.2.3
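
As a usage illustration for the smp_call_function() change in the second
patch above, here is a minimal, hypothetical sketch. The caller and the
function names (flush_local_info, broadcast_flush) are invented for
illustration only, and the four-argument 2.6.21-era prototype from the
patched arch/s390/kernel/smp.c is assumed. The point is that, after the
patch, a driver may call smp_call_function() from preemptible context
without wrapping the call in its own preempt_disable()/preempt_enable()
pair, because the function now pins the calling CPU itself.

/* Hypothetical example -- not part of the patch series above. */
static void flush_local_info(void *info)
{
        /* Runs on every other online CPU via an IPI. */
}

static void broadcast_flush(void *info)
{
        /*
         * No explicit preempt_disable()/preempt_enable() pair is needed
         * around the call anymore; smp_call_function() disables
         * preemption internally while it excludes the current CPU from
         * the call map and waits for completion.
         */
        smp_call_function(flush_local_info, info, 0, 1);
}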