-rw-r--r--  arch/sparc64/kernel/tsb.S          11
-rw-r--r--  arch/sparc64/mm/tsb.c              28
-rw-r--r--  include/asm-sparc64/mmu_context.h  32
-rw-r--r--  include/asm-sparc64/pgtable.h       4
4 files changed, 45 insertions(+), 30 deletions(-)
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 50752c51877..76f2c0b01f3 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -55,6 +55,17 @@ tsb_reload:
brgez,a,pn %g5, tsb_do_fault
stx %g0, [%g1]
+ /* If it is larger than the base page size, don't
+ * bother putting it into the TSB.
+ */
+ srlx %g5, 32, %g2
+ sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g4
+ sethi %hi(_PAGE_SZBITS >> 32), %g7
+ and %g2, %g4, %g2
+ cmp %g2, %g7
+ bne,a,pn %xcc, tsb_tlb_reload
+ stx %g0, [%g1]
+
TSB_WRITE(%g1, %g5, %g6)
/* Finally, load TLB and return from trap. */
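The added assembly masks the TTE's size bits (they all live in the upper 32 bits, hence the srlx/sethi pair) and compares them against _PAGE_SZBITS, the base-page-size encoding; on a mismatch it stores zero back to the TSB entry in %g1 and branches to tsb_tlb_reload, skipping the TSB_WRITE. A hypothetical C rendering of that check, using the _PAGE_ALL_SZ_BITS mask introduced in the pgtable.h hunk below:

#include <asm/pgtable.h>	/* _PAGE_ALL_SZ_BITS, _PAGE_SZBITS */

/* Hypothetical helper, not part of the patch: non-zero when a TTE uses
 * the base page size and may therefore be cached in the TSB.  Larger
 * mappings take only the TLB reload path, as the branch above does.
 */
static inline int tte_fits_in_tsb(unsigned long tte)
{
	return (tte & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS;
}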
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 15e8af58b1d..2f84cef6c1b 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -8,6 +8,7 @@
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
+#include <asm/mmu_context.h>
#define TSB_ENTRY_ALIGNMENT 16
@@ -82,3 +83,30 @@ void flush_tsb_user(struct mmu_gather *mp)
}
}
}
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+ mm->context.sparc64_ctx_val = 0UL;
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ mm->context.sparc64_tsb = (unsigned long *) page;
+
+ return 0;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+ free_page((unsigned long) mm->context.sparc64_tsb);
+
+ spin_lock(&ctx_alloc_lock);
+
+ if (CTX_VALID(mm->context)) {
+ unsigned long nr = CTX_NRBITS(mm->context);
+ mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
+ }
+
+ spin_unlock(&ctx_alloc_lock);
+}
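destroy_context() gives the context ID back to the allocator by clearing its bit in mmu_context_bmap under ctx_alloc_lock: bits are packed 64 per word, so nr >> 6 selects the word and nr & 63 the bit within it. A standalone sketch of just that arithmetic (the array name and size here are illustrative):

#include <stdio.h>

/* Illustrative stand-in for mmu_context_bmap: one bit per context
 * number, 64 context numbers per unsigned long word.
 */
static unsigned long ctx_bmap[4];

static void ctx_set(unsigned long nr)
{
	ctx_bmap[nr >> 6] |= 1UL << (nr & 63);
}

/* The clear operation is exactly what destroy_context() performs. */
static void ctx_clear(unsigned long nr)
{
	ctx_bmap[nr >> 6] &= ~(1UL << (nr & 63));
}

int main(void)
{
	ctx_set(67);				/* word 1, bit 3 */
	printf("word 1 = %#lx\n", ctx_bmap[1]);	/* prints 0x8 */
	ctx_clear(67);
	printf("word 1 = %#lx\n", ctx_bmap[1]);	/* prints 0 */
	return 0;
}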
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 34640a370ab..0dffb4ce8a1 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,36 +19,8 @@ extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
extern void get_new_mmu_context(struct mm_struct *mm);
-
-/* Initialize a new mmu context. This is invoked when a new
- * address space instance (unique or shared) is instantiated.
- * This just needs to set mm->context to an invalid context.
- */
-#define init_new_context(__tsk, __mm) \
-({ unsigned long __pg = get_zeroed_page(GFP_KERNEL); \
- (__mm)->context.sparc64_ctx_val = 0UL; \
- (__mm)->context.sparc64_tsb = \
- (unsigned long *) __pg; \
- (__pg ? 0 : -ENOMEM); \
-})
-
-
-/* Destroy a dead context. This occurs when mmput drops the
- * mm_users count to zero, the mmaps have been released, and
- * all the page tables have been flushed. Our job is to destroy
- * any remaining processor-specific state, and in the sparc64
- * case this just means freeing up the mmu context ID held by
- * this task if valid.
- */
-#define destroy_context(__mm) \
-do { free_page((unsigned long)(__mm)->context.sparc64_tsb); \
- spin_lock(&ctx_alloc_lock); \
- if (CTX_VALID((__mm)->context)) { \
- unsigned long nr = CTX_NRBITS((__mm)->context); \
- mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
- } \
- spin_unlock(&ctx_alloc_lock); \
-} while(0)
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
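Converting the statement-expression macros into out-of-line functions keeps the contract the deleted comments describe: init_new_context() is invoked when a new address space instance is instantiated and leaves mm->context invalid (it now also allocates the zeroed TSB page), while destroy_context() runs once mmput has dropped mm_users to zero and only has to free the TSB and release the context ID. A sketch of that pairing; the caller below is illustrative, the real call sites are the generic fork/exit paths:

#include <asm/mmu_context.h>

/* Illustrative only: the allocate/tear-down pairing the generic mm
 * code maintains around these two hooks.
 */
static int example_mm_lifecycle(struct task_struct *tsk, struct mm_struct *mm)
{
	int err = init_new_context(tsk, mm);	/* may fail with -ENOMEM */

	if (err)
		return err;		/* no TSB page could be allocated */

	/* ... address space in use ... */

	destroy_context(mm);		/* free the TSB, release the ctx ID */
	return 0;
}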
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 77ba0b6cc1c..2b2ecd6104d 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -116,6 +116,10 @@
#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */
#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */
+#define _PAGE_ALL_SZ_BITS \
+ (_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \
+ _PAGE_SZ8K | _PAGE_SZ32MB | _PAGE_SZ256MB)
+
/* Here are the SpitFire software bits we use in the TTE's.
*
* WARNING: If you are going to try and start using some
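_PAGE_ALL_SZ_BITS ORs together all six TTE size encodings, so a single AND isolates the size field; the tsb.S hunk above relies on exactly that. A hypothetical helper built from the same macros (the function itself is not part of the patch):

#include <asm/pgtable.h>

/* Hypothetical: decode a TTE's size field into a page size in bytes
 * by masking with _PAGE_ALL_SZ_BITS and matching the encoding.
 */
static unsigned long tte_page_size(unsigned long tte)
{
	switch (tte & _PAGE_ALL_SZ_BITS) {
	case _PAGE_SZ8K:	return   8UL << 10;
	case _PAGE_SZ64K:	return  64UL << 10;
	case _PAGE_SZ512K:	return 512UL << 10;
	case _PAGE_SZ4MB:	return   4UL << 20;
	case _PAGE_SZ32MB:	return  32UL << 20;
	case _PAGE_SZ256MB:	return 256UL << 20;
	}
	return 0;	/* unknown/unused encoding */
}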