author     David S. Miller <davem@davemloft.net>         2006-01-31 18:30:27 -0800
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 01:11:14 -0800
commit     3c936465249f863f322154ff1aaa628b84ee5750
tree       2bd7a229236f197d20a655133370e5d0c1bf886c
parent     05e28f9de65a38bb0c769080e91b6976e7e1e70c
[SPARC64]: Kill pgtable quicklists and use SLAB.
Taking a nod from the powerpc port.

With the per-cpu caching of both the page allocator and SLAB, the
pgtable quicklist scheme becomes relatively silly and primitive.

Signed-off-by: David S. Miller <davem@davemloft.net>
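For context: this header only declares pgtable_cache, so the commit's arch-side
change (outside this file's diffstat) has to create the cache and keep its
objects zeroed, since the old quicklist paths zeroed pages explicitly. Below is
a rough sketch of that setup, assuming the 2.6.16-era six-argument
kmem_cache_create() signature; the zero_ctor name and panic message are
illustrative, not quoted from the commit.

	/* Sketch only -- companion setup assumed to live in arch/sparc64/mm/,
	 * not shown in this diffstat.  The SLAB constructor runs when a slab
	 * page is first populated, so fresh objects come out zeroed just like
	 * the old memset()/__GFP_ZERO paths did.
	 */
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <asm/page.h>

	kmem_cache_t *pgtable_cache;

	static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
	{
		clear_page(addr);
	}

	void pgtable_cache_init(void)
	{
		pgtable_cache = kmem_cache_create("pgtable_cache",
						  PAGE_SIZE, PAGE_SIZE,
						  SLAB_HWCACHE_ALIGN,
						  zero_ctor, NULL);
		if (!pgtable_cache)
			panic("Could not create pgtable_cache");
	}

Recycled objects presumably stay zeroed because page-table pages are cleared
entry-by-entry before being freed back to the cache, so the constructor only
needs to run once per slab page.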
Diffstat (limited to 'include/asm-sparc64/pgalloc.h')
-rw-r--r--  include/asm-sparc64/pgalloc.h  158
1 file changed, 27 insertions(+), 131 deletions(-)
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index ecea1bbdc11..12e4a273bd4 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/slab.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
@@ -13,164 +14,59 @@
#include <asm/page.h>
/* Page table allocation/freeing. */
-#ifdef CONFIG_SMP
-/* Sliiiicck */
-#define pgt_quicklists local_cpu_data()
-#else
-extern struct pgtable_cache_struct {
- unsigned long *pgd_cache;
- unsigned long *pte_cache;
- unsigned int pgcache_size;
-} pgt_quicklists;
-#endif
-#define pgd_quicklist (pgt_quicklists.pgd_cache)
-#define pte_quicklist (pgt_quicklists.pte_cache)
-#define pgtable_cache_size (pgt_quicklists.pgcache_size)
+extern kmem_cache_t *pgtable_cache;
-static inline void free_pgd_fast(pgd_t *pgd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- preempt_disable();
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
- preempt_enable();
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
-static inline pgd_t *get_pgd_fast(void)
+static inline void pgd_free(pgd_t *pgd)
{
- unsigned long *ret;
-
- preempt_disable();
- if((ret = pgd_quicklist) != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- preempt_enable();
- } else {
- preempt_enable();
- ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
- if(ret)
- memset(ret, 0, PAGE_SIZE);
- }
- return (pgd_t *)ret;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
-{
- free_page((unsigned long)pgd);
+ kmem_cache_free(pgtable_cache, pgd);
}
#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
-static inline pmd_t *pmd_alloc_one_fast(void)
-{
- unsigned long *ret;
-
- preempt_disable();
- ret = (unsigned long *) pte_quicklist;
- if (likely(ret)) {
- pte_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- }
- preempt_enable();
-
- return (pmd_t *) ret;
-}
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- pmd_t *pmd;
-
- pmd = pmd_alloc_one_fast();
- if (unlikely(!pmd)) {
- pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
- if (pmd)
- memset(pmd, 0, PAGE_SIZE);
- }
- return pmd;
-}
-
-static inline void free_pmd_fast(pmd_t *pmd)
-{
- preempt_disable();
- *(unsigned long *)pmd = (unsigned long) pte_quicklist;
- pte_quicklist = (unsigned long *) pmd;
- pgtable_cache_size++;
- preempt_enable();
-}
-
-static inline void free_pmd_slow(pmd_t *pmd)
-{
- free_page((unsigned long)pmd);
-}
-
-#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
-#define pmd_populate(MM,PMD,PTE_PAGE) \
- pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-
-static inline pte_t *pte_alloc_one_fast(void)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- unsigned long *ret;
-
- preempt_disable();
- ret = (unsigned long *) pte_quicklist;
- if (likely(ret)) {
- pte_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- }
- preempt_enable();
-
- return (pte_t *) ret;
+ return kmem_cache_alloc(pgtable_cache,
+ GFP_KERNEL|__GFP_REPEAT);
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline void pmd_free(pmd_t *pmd)
{
- pte_t *ptep = pte_alloc_one_fast();
-
- if (likely(ptep))
- return ptep;
-
- return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ kmem_cache_free(pgtable_cache, pmd);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
{
- pte_t *pte = pte_alloc_one_fast();
-
- if (likely(pte))
- return virt_to_page(pte);
-
- return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ return kmem_cache_alloc(pgtable_cache,
+ GFP_KERNEL|__GFP_REPEAT);
}
-static inline void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
{
- preempt_disable();
- *(unsigned long *)pte = (unsigned long) pte_quicklist;
- pte_quicklist = (unsigned long *) pte;
- pgtable_cache_size++;
- preempt_enable();
+ return virt_to_page(pte_alloc_one_kernel(mm, address));
}
-
-static inline void free_pte_slow(pte_t *pte)
-{
- free_page((unsigned long) pte);
-}
-
+
static inline void pte_free_kernel(pte_t *pte)
{
- free_pte_fast(pte);
+ kmem_cache_free(pgtable_cache, pte);
}
static inline void pte_free(struct page *ptepage)
{
- free_pte_fast(page_address(ptepage));
+ pte_free_kernel(page_address(ptepage));
}
-#define pmd_free(pmd) free_pmd_fast(pmd)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc(mm) get_pgd_fast()
+
+#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
+#define pmd_populate(MM,PMD,PTE_PAGE) \
+ pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
+
+#define check_pgt_cache() do { } while (0)
#endif /* _SPARC64_PGALLOC_H */
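
After this change, all three levels draw PAGE_SIZE objects from the single
shared pgtable_cache, so allocation and free pair up through the same cache no
matter which level the page backs. A hypothetical caller, invented here purely
for illustration and not part of the commit:

	/* Hypothetical illustration: exercise the new SLAB-backed allocators.
	 * pgd, pmd and pte pages are interchangeable PAGE_SIZE objects.
	 */
	static void pgalloc_smoke_test(struct mm_struct *mm)
	{
		pgd_t *pgd = pgd_alloc(mm);
		pmd_t *pmd = pmd_alloc_one(mm, 0UL);
		pte_t *pte = pte_alloc_one_kernel(mm, 0UL);

		if (pte)
			pte_free_kernel(pte);
		if (pmd)
			pmd_free(pmd);
		if (pgd)
			pgd_free(pgd);
	}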