| author | Ingo Molnar <mingo@elte.hu> | 2009-02-26 13:22:13 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-02-26 13:22:13 +0100 |
| commit | 4434e5156409eb3ec98f5ad7f0a0c07ebafe970d (patch) | |
| tree | c7f148cdae7a6cf4013253c6c2dbd1c49e837e32 /arch/x86/mm/pageattr.c | |
| parent | 2b8f836fb196acede88b6cc772e9057e0a9c0223 (diff) | |
| parent | 694593e3374a67d95ece6a275a1f181644c2c4d8 (diff) | |
Merge branches 'sched/cleanups', 'sched/urgent' and 'linus' into sched/core
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r-- | arch/x86/mm/pageattr.c | 30 |
1 file changed, 19 insertions(+), 11 deletions(-)
```diff
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 84ba74820ad..7be47d1a97e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -508,18 +508,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 #endif
 
 	/*
-	 * Install the new, split up pagetable. Important details here:
+	 * Install the new, split up pagetable.
 	 *
-	 * On Intel the NX bit of all levels must be cleared to make a
-	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-	 * Architectures Software Developer's Manual).
-	 *
-	 * Mark the entry present. The current mapping might be
-	 * set to not present, which we preserved above.
+	 * We use the standard kernel pagetable protections for the new
+	 * pagetable protections, the actual ptes set above control the
+	 * primary protection behavior:
 	 */
-	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-	pgprot_val(ref_prot) |= _PAGE_PRESENT;
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
 	base = NULL;
 
 out_unlock:
@@ -575,7 +570,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
-
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
@@ -812,6 +806,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
+	/*
+	 * If we're called with lazy mmu updates enabled, the
+	 * in-memory pte state may be stale.  Flush pending updates to
+	 * bring them up to date.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -854,6 +855,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
+	/*
+	 * If we've been called with lazy mmu updates enabled, then
+	 * make sure that everything gets flushed out before we
+	 * return.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 out:
 	return ret;
 }
```
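The new comment in split_large_page() relies on how x86 combines access rights across paging levels: a page is writable only if every level allows writes, and it is executable only if no level sets NX, so a maximally permissive _KERNPG_TABLE entry at the PMD level leaves the leaf PTEs in full control of the effective protection. Below is a minimal userspace sketch of that intersection logic; `struct prot` and `effective()` are made-up names for a simplified two-level model, not kernel code:

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified view of one paging level's access bits. */
struct prot {
	bool present;	/* P:  translation exists          */
	bool rw;	/* RW: writes allowed              */
	bool nx;	/* NX: instruction fetch forbidden */
};

/*
 * Effective rights are the intersection across levels: writable only
 * if every level allows writes, executable only if no level sets NX
 * (see the Intel SDM Vol. 3 section on combining access rights).
 */
static struct prot effective(struct prot pmd, struct prot pte)
{
	return (struct prot){
		.present = pmd.present && pte.present,
		.rw      = pmd.rw && pte.rw,
		.nx      = pmd.nx || pte.nx,
	};
}

int main(void)
{
	/* Stand-in for _KERNPG_TABLE: present, writable, no NX. */
	struct prot kernpg_table = { .present = true, .rw = true, .nx = false };
	/* A read-only, no-exec leaf PTE, e.g. one covering rodata. */
	struct prot rodata_pte   = { .present = true, .rw = false, .nx = true };

	struct prot e = effective(kernpg_table, rodata_pte);
	printf("present=%d rw=%d nx=%d\n", e.present, e.rw, e.nx);
	/* Prints present=1 rw=0 nx=1: the leaf PTE alone decides. */
	return 0;
}
```

Because the restrictive bits can live entirely in the leaf PTEs (which the split path fills in before installing the PMD), the deleted ref_prot dance of clearing NX and forcing _PAGE_PRESENT on the intermediate entry is unnecessary.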
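The two added arch_flush_lazy_mmu_mode() calls exist because, under paravirtualized lazy MMU mode, PTE writes may be batched, so the in-memory PTE state that lookup_address() reads can lag behind what was logically written. A toy model of that batching, again purely illustrative: `lazy_set_pte()` and `flush_lazy_mmu()` are hypothetical stand-ins, not the kernel's paravirt implementation:

```c
#include <stdio.h>

#define NPTES 8
#define QLEN  16

/* "In-memory" pte array that a reader like lookup_address() consults. */
static unsigned long ptes[NPTES];

/* Updates queued while lazy mode is active. */
static struct { int idx; unsigned long val; } queue[QLEN];
static int pending;

/* In lazy mode, a pte write is queued rather than applied at once. */
static void lazy_set_pte(int idx, unsigned long val)
{
	if (pending < QLEN) {
		queue[pending].idx = idx;
		queue[pending].val = val;
		pending++;
	}
}

/*
 * Stands in for arch_flush_lazy_mmu_mode(): apply every queued update
 * so the in-memory ptes are current before anyone reads them.
 */
static void flush_lazy_mmu(void)
{
	for (int i = 0; i < pending; i++)
		ptes[queue[i].idx] = queue[i].val;
	pending = 0;
}

int main(void)
{
	lazy_set_pte(3, 0x1063);	/* queued, not yet visible */
	printf("before flush: pte[3] = %#lx\n", ptes[3]);	/* stale: 0   */
	flush_lazy_mmu();
	printf("after flush:  pte[3] = %#lx\n", ptes[3]);	/* now 0x1063 */
	return 0;
}
```

This is why the patch flushes once before change_page_attr_set_clr() starts reading PTEs and once more before returning, so callers never observe half-applied batched updates.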