From 4ff83ce1114827f707b7f1f4f2e5f69de9df94ac Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sun, 6 May 2007 14:51:08 -0700
Subject: uml: create as-layout.h

This patch moves all the symbols defined in um_arch.c, which are mostly
boundaries between different parts of the UML kernel address space, to a
new header, as-layout.h.  There are also a few things here which aren't
really related to address space layout, but which don't really have a
better place to go.

Signed-off-by: Jeff Dike
Cc: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/process.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/um/kernel/skas')

diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index ae4fa71d3b8..9c2a7d8c56f 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -13,6 +13,7 @@
 #include "asm/uaccess.h"
 #include "asm/atomic.h"
 #include "kern_util.h"
+#include "as-layout.h"
 #include "skas.h"
 #include "os.h"
 #include "user_util.h"
-- cgit v1.2.3

From 9218b1714949095bff9d9739d80f431d58e561d6 Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sun, 6 May 2007 14:51:10 -0700
Subject: uml: remove user_util.h

user_util.h isn't needed any more, so delete it and remove all includes
of it.

Signed-off-by: Jeff Dike
Cc: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/process.c | 1 -
 arch/um/kernel/skas/tlb.c     | 1 -
 2 files changed, 2 deletions(-)

(limited to 'arch/um/kernel/skas')

diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 9c2a7d8c56f..ef36facd8fe 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -16,7 +16,6 @@
 #include "as-layout.h"
 #include "skas.h"
 #include "os.h"
-#include "user_util.h"
 #include "tlb.h"
 #include "kern.h"
 #include "mode.h"
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index 27eb29ce666..304a5b0695a 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -10,7 +10,6 @@
 #include "asm/page.h"
 #include "asm/pgtable.h"
 #include "asm/mmu.h"
-#include "user_util.h"
 #include "mem_user.h"
 #include "mem.h"
 #include "skas.h"
-- cgit v1.2.3

From a18ff1bde0c3da9ece3ba60e6eae2ef87f91a12e Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sun, 6 May 2007 14:51:19 -0700
Subject: uml: speed up exec

flush_thread doesn't need to do a full page table walk in order to clear
the address space.  It knows what the end result needs to be, so it can
call unmap directly.  This results in a 10-20% speedup in an exec from
bash.

Signed-off-by: Jeff Dike
Cc: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/exec.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

(limited to 'arch/um/kernel/skas')

diff --git a/arch/um/kernel/skas/exec.c b/arch/um/kernel/skas/exec.c
index 54b79595137..580eb646894 100644
--- a/arch/um/kernel/skas/exec.c
+++ b/arch/um/kernel/skas/exec.c
@@ -17,7 +17,17 @@
 void flush_thread_skas(void)
 {
-	force_flush_all();
+	void *data = NULL;
+	unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
+	int ret;
+
+	ret = unmap(&current->mm->context.skas.id, 0, end, 1, &data);
+	if(ret){
+		printk("flush_thread_skas - clearing address space failed, "
+		       "err = %d\n", ret);
+		force_sig(SIGKILL, current);
+	}
+
 	switch_mm_skas(&current->mm->context.skas.id);
 }
-- cgit v1.2.3
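[Aside: the change above boils down to telling the host the desired end
state directly instead of deriving it one page at a time from a page
table walk.  The snippet below is only a rough host-side analogue using
plain mmap()/munmap(); it does not use UML's unmap(), switch_mm_skas(),
or mm_id machinery, and the 4096-byte page size and 64-page region are
arbitrary choices for the example.]

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t page = 4096, npages = 64;
		char *base = mmap(NULL, page * npages, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (base == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Tearing the region down page by page costs one syscall per
		 * page, much like the old full page table walk:
		 *
		 *	for (size_t i = 0; i < npages; i++)
		 *		munmap(base + i * page, page);
		 *
		 * The end state is simply "nothing mapped in [base, base + len)",
		 * so a single call expresses it directly. */
		if (munmap(base, page * npages) != 0) {
			perror("munmap");
			return 1;
		}
		return 0;
	}
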
From 64f60841c096594b8073e408cd9b40d7d08dcfdd Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sun, 6 May 2007 14:51:45 -0700
Subject: uml: speed page fault path

Give the page fault code a specialized path.  There is only one page to
look at, so there's no point in going into the general page table walking
code.  There's only going to be one host operation, so there are no
opportunities for merging.  So, we go straight to the pte we want, figure
out what needs doing, and do it.

While I was in here, I fixed the wart where the address passed to unmap
was a void *, but an unsigned long to map and protect.

This gives me just under 10% on a kernel build.

Signed-off-by: Jeff Dike
Cc: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/tlb.c | 66 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 64 insertions(+), 2 deletions(-)

(limited to 'arch/um/kernel/skas')

diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index 304a5b0695a..c43901aa936 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -32,8 +32,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 			  op->u.mmap.offset, finished, flush);
 		break;
 	case MUNMAP:
-		ret = unmap(&mmu->skas.id,
-			    (void *) op->u.munmap.addr,
+		ret = unmap(&mmu->skas.id, op->u.munmap.addr,
 			    op->u.munmap.len, finished, flush);
 		break;
 	case MPROTECT:
@@ -94,3 +93,66 @@ void force_flush_all_skas(void)
 	unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
 	fix_range(current->mm, 0, end, 1);
 }
+
+void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	struct mm_struct *mm = vma->vm_mm;
+	void *flush = NULL;
+	int r, w, x, err = 0;
+	struct mm_id *mm_id;
+
+	pgd = pgd_offset(vma->vm_mm, address);
+	if(!pgd_present(*pgd))
+		goto kill;
+
+	pud = pud_offset(pgd, address);
+	if(!pud_present(*pud))
+		goto kill;
+
+	pmd = pmd_offset(pud, address);
+	if(!pmd_present(*pmd))
+		goto kill;
+
+	pte = pte_offset_kernel(pmd, address);
+
+	r = pte_read(*pte);
+	w = pte_write(*pte);
+	x = pte_exec(*pte);
+	if (!pte_young(*pte)) {
+		r = 0;
+		w = 0;
+	} else if (!pte_dirty(*pte)) {
+		w = 0;
+	}
+
+	mm_id = &mm->context.skas.id;
+	if(pte_newpage(*pte)){
+		if(pte_present(*pte)){
+			unsigned long long offset;
+			int fd;
+
+			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
+			err = map(mm_id, address, PAGE_SIZE, r, w, x, fd,
+				  offset, 1, &flush);
+		}
+		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
+	}
+	else if(pte_newprot(*pte))
+		err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush);
+
+	if(err)
+		goto kill;
+
+	*pte = pte_mkuptodate(*pte);
+
+	return;
+
+kill:
+	printk("Failed to flush page for address 0x%lx\n", address);
+	force_sig(SIGKILL, current);
+}
+
-- cgit v1.2.3

From 16dd07bc6404c8da0bdfeb7a5cde4e4a63991c00 Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sun, 6 May 2007 14:51:48 -0700
Subject: uml: more page fault path trimming

More trimming of the page fault path.  Permissions are passed around in a
single int rather than one bit per int.  The permission values are copied
from libc so that they can be passed to mmap and mprotect without any
further conversion.

The register sets used by do_syscall_stub and copy_context_skas0 are
initialized once, at boot time, rather than once per call.

wait_stub_done checks whether it is getting the signals it expects by
comparing the wait status to a mask containing bits for the signals of
interest rather than comparing individually to the signal numbers.  It
also has one check for a wait failure instead of two.  The caller is
expected to do the initial continue of the stub.  This gets rid of an
argument and some logic.  The fname argument is gone, as that can be had
from a stack trace.

user_signal() is collapsed into userspace() as it is basically one or two
lines of code afterwards.

The physical memory remapping stuff is gone, as it is unused.

flush_tlb_page is inlined.

Signed-off-by: Jeff Dike
Cc: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/tlb.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

(limited to 'arch/um/kernel/skas')

diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index c43901aa936..b3d722ddde3 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -27,9 +27,9 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 	switch(op->type){
 	case MMAP:
 		ret = map(&mmu->skas.id, op->u.mmap.addr,
-			  op->u.mmap.len, op->u.mmap.r, op->u.mmap.w,
-			  op->u.mmap.x, op->u.mmap.fd,
-			  op->u.mmap.offset, finished, flush);
+			  op->u.mmap.len, op->u.mmap.prot,
+			  op->u.mmap.fd, op->u.mmap.offset, finished,
+			  flush);
 		break;
 	case MUNMAP:
 		ret = unmap(&mmu->skas.id, op->u.munmap.addr,
@@ -37,8 +37,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 		break;
 	case MPROTECT:
 		ret = protect(&mmu->skas.id, op->u.mprotect.addr,
-			      op->u.mprotect.len, op->u.mprotect.r,
-			      op->u.mprotect.w, op->u.mprotect.x,
+			      op->u.mprotect.len, op->u.mprotect.prot,
 			      finished, flush);
 		break;
 	default:
@@ -102,10 +101,10 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
 	pte_t *pte;
 	struct mm_struct *mm = vma->vm_mm;
 	void *flush = NULL;
-	int r, w, x, err = 0;
+	int r, w, x, prot, err = 0;
 	struct mm_id *mm_id;
 
-	pgd = pgd_offset(vma->vm_mm, address);
+	pgd = pgd_offset(mm, address);
 	if(!pgd_present(*pgd))
 		goto kill;
 
@@ -130,19 +129,21 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
 	}
 
 	mm_id = &mm->context.skas.id;
+	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+		(x ? UM_PROT_EXEC : 0));
 	if(pte_newpage(*pte)){
 		if(pte_present(*pte)){
 			unsigned long long offset;
 			int fd;
 
 			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
-			err = map(mm_id, address, PAGE_SIZE, r, w, x, fd,
-				  offset, 1, &flush);
+			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
+				  1, &flush);
 		}
 		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
 	}
 	else if(pte_newprot(*pte))
-		err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush);
+		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
 
 	if(err)
 		goto kill;
-- cgit v1.2.3
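[Aside: a minimal sketch of the single-int permission scheme described in
the patch above.  The UM_PROT_* defines here are stand-ins written for
this example rather than copied from the UML tree; the point is only that
when the kernel-side values are the same as the host's PROT_* flags, the
prot word derived from the pte can be handed to mmap()/mprotect() with no
further conversion.  The 4096 stands in for the host page size.]

	#include <stdio.h>
	#include <sys/mman.h>

	/* Stand-ins for UML's UM_PROT_* constants; making them equal to the
	 * host's PROT_* values is what removes the conversion step. */
	#define UM_PROT_READ  PROT_READ
	#define UM_PROT_WRITE PROT_WRITE
	#define UM_PROT_EXEC  PROT_EXEC

	int main(void)
	{
		/* In the kernel, r/w/x come from pte_read()/pte_write()/
		 * pte_exec(), masked by the young/dirty checks in the patch. */
		int r = 1, w = 0, x = 0;
		int prot = (r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			   (x ? UM_PROT_EXEC : 0);

		char *p = mmap(NULL, 4096, prot, MAP_PRIVATE | MAP_ANONYMOUS,
			       -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* A later permission change reuses the same encoding as-is. */
		if (mprotect(p, 4096, prot | UM_PROT_WRITE) != 0) {
			perror("mprotect");
			return 1;
		}

		munmap(p, 4096);
		return 0;
	}
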
From 1e7371c1a11f041d641cc0ff113bf1daa1bd98b9 Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sun, 6 May 2007 14:51:49 -0700
Subject: uml: only flush areas covered by VMA

When doing a full address space flush, only look at areas covered by a
VMA.

Signed-off-by: Jeff Dike
Cc: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/tlb.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch/um/kernel/skas')

diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index b3d722ddde3..c0f0693743b 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -89,8 +89,13 @@ void flush_tlb_mm_skas(struct mm_struct *mm)
 
 void force_flush_all_skas(void)
 {
-	unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
-	fix_range(current->mm, 0, end, 1);
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = mm->mmap;
+
+	while(vma != NULL) {
+		fix_range(mm, vma->vm_start, vma->vm_end, 1);
+		vma = vma->vm_next;
+	}
 }
 
 void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
-- cgit v1.2.3
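[Aside: the last patch replaces one flush over the whole range [0, end)
with a walk of the mm's vma list, so only ranges that are actually mapped
get passed to fix_range().  As a rough host-side analogue (not the kernel
code path), the same "visit only what is mapped" idea looks like iterating
/proc/self/maps instead of sweeping the entire address space:]

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/self/maps", "r");
		char line[512];
		unsigned long start, end;

		if (f == NULL) {
			perror("fopen");
			return 1;
		}

		/* Each line describes one mapped region, much like one vma;
		 * the gaps between the listed ranges are simply skipped. */
		while (fgets(line, sizeof(line), f) != NULL) {
			if (sscanf(line, "%lx-%lx", &start, &end) == 2)
				printf("would fix range 0x%lx-0x%lx\n",
				       start, end);
		}

		fclose(f);
		return 0;
	}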