author		Christoph Lameter <clameter@sgi.com>	2007-05-02 19:27:10 +0200
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 19:27:10 +0200
commit		2bff73830c3df5f575d3bc21bf19df1a10bf7091 (patch)
tree		aac7c05edb493a31d3b709462a2091ef16c0cbb3
parent		05f36927eddd83e2840a981ef4d9af754dcb86e9 (diff)
[PATCH] x86-64: use lru instead of page->index and page->private for pgd list management.

x86_64 currently simulates a list using the index and private fields of struct page. The code appears to have been inherited from i386, but x86_64 does not use the slab to allocate pgds, pmds, etc., so the lru field is not used by the slab and is available. This patch uses standard list operations on page->lru to implement pgd tracking.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	arch/x86_64/mm/fault.c		|  5
-rw-r--r--	include/asm-x86_64/pgalloc.h	| 14
-rw-r--r--	include/asm-x86_64/pgtable.h	|  2
3 files changed, 6 insertions, 15 deletions
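The change itself is small: instead of threading a hand-rolled list through page->index and page->private, the pgd pages are chained through the otherwise-unused page->lru field with the generic struct list_head helpers (LIST_HEAD, list_add, list_del, list_for_each_entry), which the diffs below adopt. What follows is a minimal userspace sketch of that intrusive-list pattern, not the kernel's <linux/list.h>: struct fake_page is a hypothetical stand-in for struct page, and this list_for_each_entry takes an explicit type argument because the kernel version infers it with typeof().

/*
 * Userspace sketch of the list_head pattern the patch switches to.
 * Each tracked object embeds a list node; container_of() recovers the
 * object from the node, so no extra allocation or pointer casts are needed.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define LIST_HEAD(name)		struct list_head name = LIST_HEAD_INIT(name)

static void list_add(struct list_head *new, struct list_head *head)
{
	/* insert right after the head (same semantics as the kernel helper) */
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;	/* sketch only; the kernel poisons these */
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* explicit "type" argument; the kernel macro uses typeof() instead */
#define list_for_each_entry(pos, head, type, member)			\
	for (pos = container_of((head)->next, type, member);		\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, type, member))

/* hypothetical stand-in for struct page: the node rides inside the object */
struct fake_page {
	int id;
	struct list_head lru;
};

static LIST_HEAD(pgd_list);		/* mirrors LIST_HEAD(pgd_list) in fault.c */

int main(void)
{
	struct fake_page a = { .id = 1 }, b = { .id = 2 };
	struct fake_page *p;

	list_add(&a.lru, &pgd_list);	/* like the new pgd_list_add() */
	list_add(&b.lru, &pgd_list);

	list_for_each_entry(p, &pgd_list, struct fake_page, lru)
		printf("page %d\n", p->id);

	list_del(&a.lru);		/* like the new pgd_list_del() */
	return 0;
}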
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 6ada7231f3a..de99dba2c51 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -585,7 +585,7 @@ do_sigbus:
}
DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
+LIST_HEAD(pgd_list);
void vmalloc_sync_all(void)
{
@@ -605,8 +605,7 @@ void vmalloc_sync_all(void)
if (pgd_none(*pgd_ref))
continue;
spin_lock(&pgd_lock);
- for (page = pgd_list; page;
- page = (struct page *)page->index) {
+ list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
pgd = (pgd_t *)page_address(page) + pgd_index(address);
if (pgd_none(*pgd))
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 31d49717196..8bb56468786 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -44,24 +44,16 @@ static inline void pgd_list_add(pgd_t *pgd)
struct page *page = virt_to_page(pgd);
spin_lock(&pgd_lock);
- page->index = (pgoff_t)pgd_list;
- if (pgd_list)
- pgd_list->private = (unsigned long)&page->index;
- pgd_list = page;
- page->private = (unsigned long)&pgd_list;
+ list_add(&page->lru, &pgd_list);
spin_unlock(&pgd_lock);
}
static inline void pgd_list_del(pgd_t *pgd)
{
- struct page *next, **pprev, *page = virt_to_page(pgd);
+ struct page *page = virt_to_page(pgd);
spin_lock(&pgd_lock);
- next = (struct page *)page->index;
- pprev = (struct page **)page->private;
- *pprev = next;
- if (next)
- next->private = (unsigned long)pprev;
+ list_del(&page->lru);
spin_unlock(&pgd_lock);
}
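For contrast, the removed pgd_list_add()/pgd_list_del() bodies open-code a singly linked list with a back-pointer ("pprev"): page->index is reused as the next pointer, and page->private stores the address of whichever pointer currently points at this page (the list head or the previous page's next slot), so a page can be unlinked in O(1) without walking the list. Below is a userspace sketch of that scheme, using plain pointer fields instead of the pgoff_t/unsigned long casts the real code needs; struct fake_page is again a hypothetical stand-in for struct page.

/* Userspace sketch of the hand-rolled list being removed by this patch. */
#include <stdio.h>

struct fake_page {
	int id;
	struct fake_page *index;	/* reused as "next" */
	struct fake_page **private;	/* reused as "pprev" */
};

static struct fake_page *pgd_list;	/* old: struct page *pgd_list */

static void pgd_list_add(struct fake_page *page)
{
	page->index = pgd_list;
	if (pgd_list)
		pgd_list->private = &page->index;
	pgd_list = page;
	page->private = &pgd_list;
}

static void pgd_list_del(struct fake_page *page)
{
	struct fake_page *next = page->index;
	struct fake_page **pprev = page->private;

	*pprev = next;			/* O(1) unlink, no walk needed */
	if (next)
		next->private = pprev;
}

int main(void)
{
	struct fake_page a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct fake_page *p;

	pgd_list_add(&a);
	pgd_list_add(&b);
	pgd_list_add(&c);
	pgd_list_del(&b);

	/* traversal shape of the removed loop in vmalloc_sync_all() */
	for (p = pgd_list; p; p = p->index)
		printf("page %d\n", p->id);
	return 0;
}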
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index c1865e38c7b..599993f6ba8 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -410,7 +410,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
+extern struct list_head pgd_list;
void vmalloc_sync_all(void);
extern int kern_addr_valid(unsigned long addr);