From d158cbdf39ffaec9dd5299fdfdfdd2c7897a71dc Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Thu, 18 Oct 2007 03:04:53 -0700
Subject: Hibernation: Arbitrary boot kernel support on x86_64
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make it possible to restore a hibernation image on x86_64 with the help of a
kernel different from the one in the image.

The idea is to split the core restoration code into two separate parts and to
place each of them in a different page.  The first part belongs to the boot
kernel and is executed as the last step of the image kernel's memory
restoration procedure.  Before being executed, it is relocated to a safe page
that won't be overwritten while the image kernel's pages are being copied.

The final operation it performs is a jump to the second part of the core
restoration code, which belongs to the image kernel and has just been
restored.  This code makes the CPU switch to the image kernel's page tables
and restores the state of the general purpose registers (including the stack
pointer) from before the hibernation.

The main issue with this idea is that, in order to jump to the second part of
the core restoration code, the boot kernel needs to know its address.
However, this address may be passed to it in the image header.  Namely, the
part of the image header previously used for checking whether the version of
the image kernel is correct can be replaced with architecture-specific data
that allow the boot kernel to jump to the right address within the image
kernel.  These data should also be used for checking whether the image kernel
is compatible with the boot kernel (as far as the memory restoration procedure
is concerned).  This can be done, for example, with the help of a "magic"
value that has to be equal in both kernels for them to be regarded as
compatible.

Signed-off-by: Rafael J. Wysocki
Acked-by: Pavel Machek
Cc: Andi Kleen
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/kernel/suspend_asm_64.S | 41 +++++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)

(limited to 'arch/x86/kernel/suspend_asm_64.S')
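
Note on the header handshake described above: the architecture-specific header
data and the code that reads and writes it live outside this file (the view
below is limited to suspend_asm_64.S).  As a rough sketch only, that data
could be a small record carrying the jump address plus a magic value.  The
struct name, helper names and magic constant here are illustrative
assumptions; only restore_jump_address appears in the assembly change that
follows.

#include <linux/errno.h>

/* written by swsusp_arch_suspend() in the assembly below */
extern unsigned long restore_jump_address;

/* hypothetical layout of the arch-specific part of the image header */
struct restore_data_record {
	unsigned long jump_address;	/* where the boot kernel should jump */
	unsigned long magic;		/* must be identical in both kernels */
};

/* any value works, as long as the boot and image kernels agree on it */
#define RESTORE_MAGIC	0x0123456789ABCDEFUL

/* image kernel side: fill the header before the image is written out */
static int header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(*rdr))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/* boot kernel side: pick up the jump address, reject incompatible images */
static int header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	return rdr->magic == RESTORE_MAGIC ? 0 : -EINVAL;
}
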
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 16d183f67bc..40a209e0525 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -2,8 +2,8 @@
  *
  * Distribute under GPLv2.
  *
- * swsusp_arch_resume may not use any stack, nor any variable that is
- * not "NoSave" during copying pages:
+ * swsusp_arch_resume must not use any stack or any nonlocal variables while
+ * copying pages:
  *
  * Its rewriting one kernel image with another. What is stack in "old"
  * image could very well be data page in "new" image, and overwriting
@@ -36,6 +36,10 @@ ENTRY(swsusp_arch_suspend)
 	movq %r15, saved_context_r15(%rip)
 	pushfq ; popq saved_context_eflags(%rip)
 
+	/* save the address of restore_registers */
+	movq	$restore_registers, %rax
+	movq	%rax, restore_jump_address(%rip)
+
 	call swsusp_save
 	ret
 
@@ -54,7 +58,16 @@ ENTRY(restore_image)
 	movq	%rcx, %cr3;
 	movq	%rax, %cr4;  # turn PGE back on
 
+	/* prepare to jump to the image kernel */
+	movq	restore_jump_address(%rip), %rax
+
+	/* prepare to copy image data to their original locations */
 	movq	restore_pblist(%rip), %rdx
+	movq	relocated_restore_code(%rip), %rcx
+	jmpq	*%rcx
+
+	/* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
 loop:
 	testq	%rdx, %rdx
 	jz	done
@@ -62,7 +75,7 @@ loop:
 	/* get addresses from the pbe and copy the page */
 	movq	pbe_address(%rdx), %rsi
 	movq	pbe_orig_address(%rdx), %rdi
-	movq	$512, %rcx
+	movq	$(PAGE_SIZE >> 3), %rcx
 	rep
 	movsq
 
@@ -70,6 +83,20 @@ loop:
 	movq	pbe_next(%rdx), %rdx
 	jmp	loop
 done:
+	/* jump to the restore_registers address from the image header */
+	jmpq	*%rax
+	/*
+	 * NOTE: This assumes that the boot kernel's text mapping covers the
+	 * image kernel's page containing restore_registers and the address of
+	 * this page is the same as in the image kernel's text mapping (it
+	 * should always be true, because the text mapping is linear, starting
+	 * from 0, and is supposed to cover the entire kernel text for every
+	 * kernel).
+	 *
+	 * code below belongs to the image kernel
+	 */
+
+ENTRY(restore_registers)
 	/* go back to the original page tables */
 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
 	addq	phys_base(%rip), %rax
@@ -84,12 +111,9 @@ done:
 	movq	%rcx, %cr3
 	movq	%rax, %cr4;  # turn PGE back on
 
-	movl	$24, %eax
-	movl	%eax, %ds
-
 	movq saved_context_esp(%rip), %rsp
 	movq saved_context_ebp(%rip), %rbp
-	/* Don't restore %rax, it must be 0 anyway */
+	/* restore GPRs (we don't restore %rax, it must be 0 anyway) */
 	movq saved_context_ebx(%rip), %rbx
 	movq saved_context_ecx(%rip), %rcx
 	movq saved_context_edx(%rip), %rdx
@@ -107,4 +131,7 @@ done:
 
 	xorq	%rax, %rax
 
+	/* tell the hibernation core that we've just restored the memory */
+	movq	%rax, in_suspend(%rip)
+
 	ret
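
The C side that relocates core_restore_code to a safe page before jumping into
restore_image() is likewise outside this view.  A sketch of that step is
below: the declarations are assumptions, the temporary page-table setup that
the real swsusp_arch_resume() performs first is omitted, and only
core_restore_code, restore_registers, relocated_restore_code and
restore_image() are names taken from the assembly above.

#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/suspend.h>

/* entry points provided by suspend_asm_64.S (see the diff above) */
extern char core_restore_code[], restore_registers[];
extern int restore_image(void);

/* read by restore_image() through relocated_restore_code(%rip) */
void *relocated_restore_code;

int swsusp_arch_resume(void)
{
	/*
	 * Copy the boot kernel's part of the restore code to a page that is
	 * guaranteed not to be overwritten while the image pages are being
	 * copied back into place, then enter the copy-and-jump sequence.
	 */
	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, core_restore_code,
	       restore_registers - core_restore_code);

	restore_image();
	return 0;
}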