Diffstat (limited to 'arch/x86_64')
-rw-r--r--   arch/x86_64/kernel/suspend.c       127
-rw-r--r--   arch/x86_64/kernel/suspend_asm.S    17
2 files changed, 138 insertions(+), 6 deletions(-)
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ebb9abf3ce6..f066c6ab361 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -11,6 +11,8 @@
 #include <linux/smp.h>
 #include <linux/suspend.h>
 #include <asm/proto.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
 
 struct saved_context saved_context;
@@ -140,4 +142,129 @@ void fix_processor_context(void)
}
+#ifdef CONFIG_SOFTWARE_SUSPEND
+/* Defined in arch/x86_64/kernel/suspend_asm.S */
+extern int restore_image(void);
+pgd_t *temp_level4_pgt;
+
+static void **pages;
+
+static inline void *__add_page(void)
+{
+        void **c;
+
+        c = (void **)get_usable_page(GFP_ATOMIC);
+        if (c) {
+                *c = pages;
+                pages = c;
+        }
+        return c;
+}
+
+static inline void *__next_page(void)
+{
+        void **c;
+
+        c = pages;
+        if (c) {
+                pages = *c;
+                *c = NULL;
+        }
+        return c;
+}
+
+/*
+ * Try to allocate as many usable pages as needed and daisy chain them.
+ * If one allocation fails, free the pages allocated so far
+ */
+static int alloc_usable_pages(unsigned long n)
+{
+        void *p;
+
+        pages = NULL;
+        do
+                if (!__add_page())
+                        break;
+        while (--n);
+        if (n) {
+                p = __next_page();
+                while (p) {
+                        free_page((unsigned long)p);
+                        p = __next_page();
+                }
+                return -ENOMEM;
+        }
+        return 0;
+}
+
+static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+{
+        long i, j;
+
+        i = pud_index(address);
+        pud = pud + i;
+        for (; i < PTRS_PER_PUD; pud++, i++) {
+                unsigned long paddr;
+                pmd_t *pmd;
+
+                paddr = address + i*PUD_SIZE;
+                if (paddr >= end)
+                        break;
+
+                pmd = (pmd_t *)__next_page();
+                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+                for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
+                        unsigned long pe;
+
+                        if (paddr >= end)
+                                break;
+                        pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
+                        pe &= __supported_pte_mask;
+                        set_pmd(pmd, __pmd(pe));
+                }
+        }
+}
+
+static void set_up_temporary_mappings(void)
+{
+        unsigned long start, end, next;
+
+        temp_level4_pgt = (pgd_t *)__next_page();
+
+        /* It is safe to reuse the original kernel mapping */
+        set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+                init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+
+        /* Set up the direct mapping from scratch */
+        start = (unsigned long)pfn_to_kaddr(0);
+        end = (unsigned long)pfn_to_kaddr(end_pfn);
+
+        for (; start < end; start = next) {
+                pud_t *pud = (pud_t *)__next_page();
+                next = start + PGDIR_SIZE;
+                if (next > end)
+                        next = end;
+                res_phys_pud_init(pud, __pa(start), __pa(next));
+                set_pgd(temp_level4_pgt + pgd_index(start),
+                        mk_kernel_pgd(__pa(pud)));
+        }
+}
+
+int swsusp_arch_resume(void)
+{
+        unsigned long n;
+
+        n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
+        n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
+        pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
+        if (alloc_usable_pages(n)) {
+                free_eaten_memory();
+                return -ENOMEM;
+        }
+        /* We have got enough memory and from now on we cannot recover */
+        set_up_temporary_mappings();
+        restore_image();
+        return 0;
+}
+#endif /* CONFIG_SOFTWARE_SUSPEND */
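
The helpers above keep the spare pages on a daisy chain: each page obtained
from get_usable_page() stores a pointer to the previously allocated page in
its first word, so tracking the chain costs nothing beyond the pages
themselves, and alloc_usable_pages() either gets all n pages or frees
everything it grabbed. A rough userspace sketch of the same pattern follows
(an illustration, not part of the patch): malloc()/free() stand in for
get_usable_page()/free_page(), and the function names and PAGE_SIZE value
are assumptions made for the sketch.

#include <stdlib.h>

#define PAGE_SIZE 4096

static void **pages;                            /* head of the chain */

static void *add_page(void)
{
        void **c = malloc(PAGE_SIZE);

        if (c) {
                *c = pages;                     /* link to the previous head */
                pages = c;
        }
        return c;
}

static void *next_page(void)
{
        void **c = pages;

        if (c) {
                pages = *c;                     /* unlink the current head */
                *c = NULL;
        }
        return c;
}

/* Allocate n pages or none at all, mirroring alloc_usable_pages(). */
static int alloc_pages_chain(unsigned long n)
{
        while (n && add_page())
                n--;
        if (n) {                                /* partial failure: roll back */
                void *p;

                while ((p = next_page()))
                        free(p);
                return -1;
        }
        return 0;
}

int main(void)
{
        void *p;

        if (alloc_pages_chain(8))
                return 1;
        while ((p = next_page()))               /* consume the chain, as the resume path does */
                free(p);
        return 0;
}

As for the page budget computed in swsusp_arch_resume(): assuming the usual
x86_64 constants (PAGE_SHIFT = 12, PUD_SHIFT = 30, i.e. PUD_SIZE = 1 GiB,
and PTRS_PER_PUD = 512), a machine with 4 GiB of RAM (end_pfn = 0x100000)
needs 4 pages for the PMDs of the direct mapping, plus 1 PUD page and 1 page
for the temporary PGD, so n = 6.
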
diff --git a/arch/x86_64/kernel/suspend_asm.S b/arch/x86_64/kernel/suspend_asm.S
index 4d659e97df1..320b6fb00cc 100644
--- a/arch/x86_64/kernel/suspend_asm.S
+++ b/arch/x86_64/kernel/suspend_asm.S
@@ -39,12 +39,13 @@ ENTRY(swsusp_arch_suspend)
         call    swsusp_save
         ret
 
-ENTRY(swsusp_arch_resume)
-        /* set up cr3 */
-        leaq    init_level4_pgt(%rip),%rax
-        subq    $__START_KERNEL_map,%rax
-        movq    %rax,%cr3
-
+ENTRY(restore_image)
+        /* switch to temporary page tables */
+        movq    $__PAGE_OFFSET, %rdx
+        movq    temp_level4_pgt(%rip), %rax
+        subq    %rdx, %rax
+        movq    %rax, %cr3
+        /* Flush TLB */
         movq    mmu_cr4_features(%rip), %rax
         movq    %rax, %rdx
         andq    $~(1<<7), %rdx          # PGE
@@ -69,6 +70,10 @@ loop:
         movq    pbe_next(%rdx), %rdx
         jmp     loop
 done:
+        /* go back to the original page tables */
+        leaq    init_level4_pgt(%rip), %rax
+        subq    $__START_KERNEL_map, %rax
+        movq    %rax, %cr3
         /* Flush TLB, including "global" things (vmalloc) */
         movq    mmu_cr4_features(%rip), %rax
         movq    %rax, %rdx
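
In restore_image() above, the first %cr3 load uses the physical address of
temp_level4_pgt, an allocated page living in the direct mapping, so its
physical address is virtual minus __PAGE_OFFSET; once the image has been
copied back, the code returns to init_level4_pgt, a symbol linked into the
kernel image, where the physical address is virtual minus __START_KERNEL_map.
A small standalone sketch of that arithmetic (illustration only: the constants
are the 2.6-era x86_64 layout and the addresses are made up for the example):

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET             0xffff810000000000ULL  /* base of the direct mapping */
#define START_KERNEL_MAP        0xffffffff80000000ULL  /* base of the kernel image mapping */

/* virt -> phys for a page that came from the page allocator */
static uint64_t direct_map_pa(uint64_t virt)
{
        return virt - PAGE_OFFSET;
}

/* virt -> phys for a symbol linked into the kernel image */
static uint64_t kernel_map_pa(uint64_t virt)
{
        return virt - START_KERNEL_MAP;
}

int main(void)
{
        uint64_t temp_level4_pgt = PAGE_OFFSET + 0x1234000;      /* hypothetical allocated page */
        uint64_t init_level4_pgt = START_KERNEL_MAP + 0x101000;  /* hypothetical linked-in table */

        printf("%%cr3 while copying the image: %#llx\n",
               (unsigned long long)direct_map_pa(temp_level4_pgt));
        printf("%%cr3 after the copy is done:  %#llx\n",
               (unsigned long long)kernel_map_pa(init_level4_pgt));
        return 0;
}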