From e3ed910db221768f8fd6192b13373e17d61bcdf0 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 30 Jan 2008 13:34:11 +0100
Subject: x86: use the same pgd_list for PAE and 64-bit

Use a standard list threaded through page->lru for maintaining the pgd
list on PAE.  This is the same as 64-bit, and seems saner than using a
non-standard list via page->index.

Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 arch/x86/mm/fault.c      | 10 +++-------
 arch/x86/mm/pageattr.c   |  2 +-
 arch/x86/mm/pgtable_32.c | 19 +++++--------------
 3 files changed, 9 insertions(+), 22 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 72547a7e32c..e28cc5277b1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -907,10 +907,8 @@ do_sigbus:
         force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
-#ifdef CONFIG_X86_64
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
-#endif
 
 void vmalloc_sync_all(void)
 {
@@ -935,13 +933,11 @@ void vmalloc_sync_all(void)
                         struct page *page;
 
                         spin_lock_irqsave(&pgd_lock, flags);
-                        for (page = pgd_list; page; page =
-                             (struct page *)page->index)
+                        list_for_each_entry(page, &pgd_list, lru) {
                                 if (!vmalloc_sync_one(page_address(page),
-                                                      address)) {
-                                        BUG_ON(page != pgd_list);
+                                                      address))
                                         break;
-                                }
+                        }
                         spin_unlock_irqrestore(&pgd_lock, flags);
                         if (!page)
                                 set_bit(pgd_index(address), insync);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ec07c1873d6..1cc6607eacb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -175,7 +175,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
         if (!SHARED_KERNEL_PMD) {
                 struct page *page;
 
-                for (page = pgd_list; page; page = (struct page *)page->index) {
+                list_for_each_entry(page, &pgd_list, lru) {
                         pgd_t *pgd;
                         pud_t *pud;
                         pmd_t *pmd;
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 5ca3552474a..2ae5999a795 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -205,27 +205,18 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * vmalloc faults work because attached pagetables are never freed.
  * -- wli
  */
-DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
-
 static inline void pgd_list_add(pgd_t *pgd)
 {
         struct page *page = virt_to_page(pgd);
-        page->index = (unsigned long)pgd_list;
-        if (pgd_list)
-                set_page_private(pgd_list, (unsigned long)&page->index);
-        pgd_list = page;
-        set_page_private(page, (unsigned long)&pgd_list);
+
+        list_add(&page->lru, &pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-        struct page *next, **pprev, *page = virt_to_page(pgd);
-        next = (struct page *)page->index;
-        pprev = (struct page **)page_private(page);
-        *pprev = next;
-        if (next)
-                set_page_private(next, (unsigned long)pprev);
+        struct page *page = virt_to_page(pgd);
+
+        list_del(&page->lru);
 }
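
Illustrative note (not part of the commit): the patch drops a hand-rolled
singly-linked list threaded through page->index and page_private() in favour
of the standard intrusive list threaded through page->lru, so PAE tracks pgd
pages exactly like 64-bit does.  The sketch below is a minimal userspace
stand-in for that <linux/list.h> pattern, using a hypothetical "struct
pgd_page" in place of struct page; it is not the kernel implementation, only
the idiom the patch adopts.

/*
 * Minimal userspace sketch of the intrusive-list idiom.  The names mirror
 * <linux/list.h> but are simplified re-implementations for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static inline void list_add(struct list_head *new, struct list_head *head)
{
        /* insert "new" right after the list head */
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static inline void list_del(struct list_head *entry)
{
        /* O(1) unlink; no pprev/page_private() bookkeeping needed */
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                          \
        for (pos = container_of((head)->next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = container_of(pos->member.next, typeof(*pos), member))

/*
 * Hypothetical stand-in for struct page: the list node ("lru") is embedded
 * in the tracked object, so no side storage like page->index is required.
 */
struct pgd_page {
        int id;
        struct list_head lru;
};

static LIST_HEAD(pgd_list);

int main(void)
{
        struct pgd_page a = { .id = 1 }, b = { .id = 2 }, *p;

        list_add(&a.lru, &pgd_list);            /* like pgd_list_add() after the patch */
        list_add(&b.lru, &pgd_list);

        list_for_each_entry(p, &pgd_list, lru)  /* like the walk in vmalloc_sync_all() */
                printf("pgd page %d\n", p->id);

        list_del(&a.lru);                       /* like pgd_list_del() after the patch */
        return 0;
}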