From 7be5c55af0cc58e54e42e1702d837527e15b8414 Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Wed, 18 Mar 2009 08:47:31 +0000
Subject: sh: simplify kexec vbr code

Set up the vbr register in machine_kexec() instead of passing values
to the assembly snippet.

Signed-off-by: Magnus Damm
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/machine_kexec.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

(limited to 'arch/sh/kernel/machine_kexec.c')

diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 94df56b0d1f..d3318f99256 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -23,8 +23,7 @@
 typedef NORET_TYPE void (*relocate_new_kernel_t)(
 				unsigned long indirection_page,
 				unsigned long reboot_code_buffer,
-				unsigned long start_address,
-				unsigned long vbr_reg) ATTRIB_NORET;
+				unsigned long start_address) ATTRIB_NORET;
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
@@ -76,14 +75,8 @@ void machine_kexec(struct kimage *image)
 	unsigned long page_list;
 	unsigned long reboot_code_buffer;
-	unsigned long vbr_reg;
 	relocate_new_kernel_t rnk;
 
-#if defined(CONFIG_SH_STANDARD_BIOS)
-	vbr_reg = ((unsigned long )gdb_vbr_vector) - 0x100;
-#else
-	vbr_reg = 0x80000000; // dummy
-#endif
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
@@ -100,9 +93,15 @@ void machine_kexec(struct kimage *image)
 	kexec_info(image);
 	flush_cache_all();
 
+	set_bl_bit();
+#if defined(CONFIG_SH_STANDARD_BIOS)
+	asm volatile("ldc %0, vbr" :
+		     : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
+		     : "memory");
+#endif
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
-	(*rnk)(page_list, reboot_code_buffer, P2SEGADDR(image->start), vbr_reg);
+	(*rnk)(page_list, reboot_code_buffer, P2SEGADDR(image->start));
 }
 
 void arch_crash_save_vmcoreinfo(void)
--
cgit v1.2.3


From e4e063d0c288bd65c56dd855337780a541ed928d Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Wed, 18 Mar 2009 08:49:45 +0000
Subject: sh: rework kexec segment code

Rework the kexec code to avoid using P2SEG. Instead, we walk the page
list in machine_kexec() and convert the addresses from physical to
virtual using C.

Signed-off-by: Magnus Damm
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/machine_kexec.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

(limited to 'arch/sh/kernel/machine_kexec.c')

diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index d3318f99256..25b4748fdc7 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -76,6 +76,21 @@ void machine_kexec(struct kimage *image)
 	unsigned long page_list;
 	unsigned long reboot_code_buffer;
 	relocate_new_kernel_t rnk;
+	unsigned long entry;
+	unsigned long *ptr;
+
+	/*
+	 * Nicked from the mips version of machine_kexec():
+	 * The generic kexec code builds a page list with physical
+	 * addresses. Use phys_to_virt() to convert them to virtual.
+	 */
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+	     ptr = (entry & IND_INDIRECTION) ?
+		   phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+		    *ptr & IND_DESTINATION)
+			*ptr = (unsigned long) phys_to_virt(*ptr);
+	}
 
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
@@ -101,7 +116,7 @@ void machine_kexec(struct kimage *image)
 #endif
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
-	(*rnk)(page_list, reboot_code_buffer, P2SEGADDR(image->start));
+	(*rnk)(page_list, reboot_code_buffer, image->start);
 }
 
 void arch_crash_save_vmcoreinfo(void)
--
cgit v1.2.3


From b7cf6ddc13186f9272438a97aa75972d496d0b0a Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Wed, 18 Mar 2009 08:51:29 +0000
Subject: sh: add kexec jump support

Add kexec jump support to the SuperH architecture. Similar to the x86
implementation, with the following exceptions:

- Instead of separating the assembly code flow into two parts for
  regular kexec and kexec jump, we use a single code path. In the
  assembly snippet, regular kexec is just a kexec jump that never
  comes back.

- Instead of using a swap page when moving data between pages, the
  page copy assembly routine has been modified to exchange the data
  between the pages using registers.

- We walk the page list twice in machine_kexec() to do and undo the
  physical to virtual address conversion.

Signed-off-by: Magnus Damm
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/machine_kexec.c | 32 +++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)

(limited to 'arch/sh/kernel/machine_kexec.c')

diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 25b4748fdc7..c44efb73ab1 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -14,20 +14,21 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 
-typedef NORET_TYPE void (*relocate_new_kernel_t)(
-				unsigned long indirection_page,
-				unsigned long reboot_code_buffer,
-				unsigned long start_address) ATTRIB_NORET;
+typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
+				      unsigned long reboot_code_buffer,
+				      unsigned long start_address);
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
 extern void *gdb_vbr_vector;
+extern void *vbr_base;
 
 void machine_shutdown(void)
 {
@@ -72,7 +73,6 @@ static void kexec_info(struct kimage *image)
  */
 void machine_kexec(struct kimage *image)
 {
-	unsigned long page_list;
 	unsigned long reboot_code_buffer;
 	relocate_new_kernel_t rnk;
@@ -92,6 +92,11 @@ void machine_kexec(struct kimage *image)
 			*ptr = (unsigned long) phys_to_virt(*ptr);
 	}
 
+#ifdef CONFIG_KEXEC_JUMP
+	if (image->preserve_context)
+		save_processor_state();
+#endif
+
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
@@ -117,6 +122,23 @@ void machine_kexec(struct kimage *image)
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
 	(*rnk)(page_list, reboot_code_buffer, image->start);
+
+#ifdef CONFIG_KEXEC_JUMP
+	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
+	local_irq_disable();
+	clear_bl_bit();
+	if (image->preserve_context)
+		restore_processor_state();
+
+	/* Convert page list back to physical addresses, what a mess. */
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+	     ptr = (*ptr & IND_INDIRECTION) ?
+		   phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
+		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+		    *ptr & IND_DESTINATION)
+			*ptr = virt_to_phys(*ptr);
+	}
+#endif
 }
 
 void arch_crash_save_vmcoreinfo(void)
--
cgit v1.2.3


From a6bab7b5c18501e4dd3201ae8ac1dc6da5f07acc Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 18 Mar 2009 19:06:15 +0900
Subject: sh: kexec: Drop SR.BL bit toggling.

For the time being, this creates far more problems than it solves, as
evidenced by the second local_irq_disable(). Kill all of this off and
rely on IRQ disabling to protect against the VBR reload.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/machine_kexec.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'arch/sh/kernel/machine_kexec.c')

diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index c44efb73ab1..69268c0d806 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -110,23 +110,22 @@ void machine_kexec(struct kimage *image)
 	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
 						relocate_new_kernel_size);
 
-	kexec_info(image);
+	kexec_info(image);
 	flush_cache_all();
 
-	set_bl_bit();
 #if defined(CONFIG_SH_STANDARD_BIOS)
 	asm volatile("ldc %0, vbr" :
 		     : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
 		     : "memory");
 #endif
+
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
 	(*rnk)(page_list, reboot_code_buffer, image->start);
 
 #ifdef CONFIG_KEXEC_JUMP
 	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
-	local_irq_disable();
-	clear_bl_bit();
+
 	if (image->preserve_context)
 		restore_processor_state();
--
cgit v1.2.3


From 7e6b6f2b949a52382f59a93ecbe86e32e4fcec7c Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 18 Mar 2009 19:07:16 +0900
Subject: sh: kexec jump: fix for ftrace.

Save and restore ftrace state when returning from a kexec jump in
machine_kexec(). This follows the x86 change.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/machine_kexec.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/sh/kernel/machine_kexec.c')

diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 69268c0d806..cc7c29b0dc8 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -78,6 +79,7 @@ void machine_kexec(struct kimage *image)
 	relocate_new_kernel_t rnk;
 	unsigned long entry;
 	unsigned long *ptr;
+	int save_ftrace_enabled;
 
 	/*
 	 * Nicked from the mips version of machine_kexec():
@@ -97,6 +99,8 @@ void machine_kexec(struct kimage *image)
 		save_processor_state();
 #endif
 
+	save_ftrace_enabled = __ftrace_enabled_save();
+
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
 
@@ -138,6 +142,8 @@ void machine_kexec(struct kimage *image)
 			*ptr = virt_to_phys(*ptr);
 	}
 #endif
+
+	__ftrace_enabled_restore(save_ftrace_enabled);
 }
 
 void arch_crash_save_vmcoreinfo(void)
--
cgit v1.2.3


From 615e73b3cd8876262f61ea28b4147c8de38a043a Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Thu, 19 Mar 2009 10:04:29 +0000
Subject: sh: disallow kexec virtual entry

Older versions of kexec-tools have a zImage loader that passes a
virtual address as the entry point. The ELF loader, on the other hand,
passes a physical address as the entry point, and pages are always
passed as physical addresses as well.

Only allow physical addresses from now on.

Signed-off-by: Magnus Damm
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/machine_kexec.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'arch/sh/kernel/machine_kexec.c')

diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index cc7c29b0dc8..7ea2704ea03 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -46,6 +46,12 @@ void machine_crash_shutdown(struct pt_regs *regs)
  */
 int machine_kexec_prepare(struct kimage *image)
 {
+	/* older versions of kexec-tools are passing
+	 * the zImage entry point as a virtual address.
+	 */
+	if (image->start != PHYSADDR(image->start))
+		return -EINVAL; /* upgrade your kexec-tools */
+
 	return 0;
 }
 
@@ -125,7 +131,8 @@ void machine_kexec(struct kimage *image)
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
-	(*rnk)(page_list, reboot_code_buffer, image->start);
+	(*rnk)(page_list, reboot_code_buffer,
+	       (unsigned long)phys_to_virt(image->start));
 
 #ifdef CONFIG_KEXEC_JUMP
 	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
--
cgit v1.2.3
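
The page-list walk introduced in "sh: rework kexec segment code" and mirrored on the
return path in "sh: add kexec jump support" is the core mechanism of this series: the
generic kexec code hands the architecture a list of physical addresses, and the SH code
rewrites the entries in place before jumping to the relocation stub, then rewrites them
back when a kexec jump returns. The sketch below pulls the two walks out into standalone
helpers purely to make the flag handling easier to follow. It is illustrative only and
not part of the patches above: the helper names are invented, and the real code
open-codes both loops directly inside machine_kexec().

/* Illustrative sketch only; the helper names are invented. */
#include <linux/kexec.h>
#include <linux/mm.h>
#include <asm/io.h>

/* Rewrite every address-carrying entry in the kexec page list to a
 * kernel virtual address, following indirection pages as we go. */
void kexec_list_to_virt(struct kimage *image)
{
	unsigned long entry;
	unsigned long *ptr;

	/* 'entry' is sampled before the body rewrites *ptr, so the
	 * indirection pointer followed here is still physical. */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		   phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long)phys_to_virt(*ptr);
	}
}

/* Inverse walk for the kexec-jump return path: virtual back to physical. */
void kexec_list_to_phys(struct kimage *image)
{
	unsigned long entry;
	unsigned long *ptr;

	/* Here the loop body has already turned *ptr back into a physical
	 * address, so it is mapped through phys_to_virt() to reach the next
	 * indirection page of the still virtually addressed remainder. */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (*ptr & IND_INDIRECTION) ?
		   phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long)virt_to_phys((void *)*ptr);
	}
}

Note that neither walk masks off the IND_* bits before translating an entry; on sh,
phys_to_virt() and virt_to_phys() are simple offset translations, so the flag bits in
the low bits of each entry survive the conversion, which is what lets the list be
rewritten in place and then restored exactly.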