author     Jeremy Fitzhardinge <jeremy@goop.org>  2008-03-17 16:37:00 -0700
committer  Ingo Molnar <mingo@elte.hu>  2008-04-24 23:57:31 +0200
commit     394158559d4c912cc58c311b6346cdea0ed2b1de
tree       c4cdc93d77d964577af8b42a5c9b37916735bf47 /arch
parent     5a5f8f42241cf09caec5530a7639cfa8dccc3a7b
x86: move all the pgd_list handling to one place
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
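The subject line summarizes the change: before this patch, arch/x86/mm/pgtable.c carried one pgd_list_add()/pgd_list_del() pair inside #ifdef CONFIG_X86_64 that took pgd_lock internally, and a second, lock-free pair further down in the 32-bit branch. After the patch a single lock-free pair serves both configurations, and the pgd_lock acquisition is hoisted into the callers. A minimal sketch of the shared helpers as the diff below leaves them (an illustration reconstructed from the hunks, not the literal file contents):

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	/* Caller must hold pgd_lock; the helper only edits the list. */
	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	/* Caller must hold pgd_lock. */
	list_del(&page->lru);
}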
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/mm/pgtable.c  |  28
1 file changed, 7 insertions(+), 21 deletions(-)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index c67966e10a9..0d2866b8f42 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -43,34 +43,31 @@ void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* PAGETABLE_LEVELS > 2 */
-#ifdef CONFIG_X86_64
static inline void pgd_list_add(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
- unsigned long flags;
- spin_lock_irqsave(&pgd_lock, flags);
list_add(&page->lru, &pgd_list);
- spin_unlock_irqrestore(&pgd_lock, flags);
}
static inline void pgd_list_del(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
- unsigned long flags;
- spin_lock_irqsave(&pgd_lock, flags);
list_del(&page->lru);
- spin_unlock_irqrestore(&pgd_lock, flags);
}
+#ifdef CONFIG_X86_64
pgd_t *pgd_alloc(struct mm_struct *mm)
{
unsigned boundary;
pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ unsigned long flags;
if (!pgd)
return NULL;
+ spin_lock_irqsave(&pgd_lock, flags);
pgd_list_add(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
/*
* Copy kernel pointers in from init.
* Could keep a freelist or slab cache of those because the kernel
@@ -86,8 +83,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
+ unsigned long flags;
BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+ spin_lock_irqsave(&pgd_lock, flags);
pgd_list_del(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
free_page((unsigned long)pgd);
}
#else
@@ -101,20 +101,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
* vmalloc faults work because attached pagetables are never freed.
* -- wli
*/
-static inline void pgd_list_add(pgd_t *pgd)
-{
- struct page *page = virt_to_page(pgd);
-
- list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
- struct page *page = virt_to_page(pgd);
-
- list_del(&page->lru);
-}
-
#define UNSHARED_PTRS_PER_PGD \
(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
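The locking itself now lives in the callers: on the x86-64 side, pgd_alloc() and pgd_free() wrap the helper in an irq-safe acquisition of pgd_lock, roughly as in this condensed, illustrative extract of the hunks above:

	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);	/* pgd_lock guards the global pgd_list */
	pgd_list_add(pgd);			/* or pgd_list_del(pgd) on the free path */
	spin_unlock_irqrestore(&pgd_lock, flags);

Hoisting the lock out of the helpers is what allows the consolidation: the removed 32-bit copies never took pgd_lock, consistent with their callers already holding it, so only the 64-bit callers needed the explicit lock/unlock added here, and both configurations end up sharing one implementation.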