author		Andi Kleen <ak@suse.de>			2006-01-11 22:44:09 +0100
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-11 19:04:53 -0800
commit		66c581569e2cb41231b3fcd91a6c9f853d4d4e25 (patch)
tree		b7e4629b9eeee9850e2ba4be32c2ee3c0afb6959 /arch/x86_64
parent		bf2fcc6fdfe4f4e92bb74f062c0a1be189f3a561 (diff)
[PATCH] x86_64: Convert page fault error codes to symbolic constants.
Much better to deal with these than with the magic numbers.
And remove the comment describing the bits - kernel source
is no replacement for an architecture manual.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64')
-rw-r--r--	arch/x86_64/mm/fault.c	34
1 files changed, 17 insertions, 17 deletions
```diff
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 2a9c836133a..26eac194064 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -35,6 +35,13 @@
 #include <asm-generic/sections.h>
 #include <asm/kdebug.h>
 
+/* Page fault error code bits */
+#define PF_PROT	(1<<0)		/* or no page found */
+#define PF_WRITE	(1<<1)
+#define PF_USER	(1<<2)
+#define PF_RSVD	(1<<3)
+#define PF_INSTR	(1<<4)
+
 void bust_spinlocks(int yes)
 {
 	int loglevel_save = console_loglevel;
@@ -68,7 +75,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 	unsigned char *max_instr;
 
 	/* If it was a exec fault ignore */
-	if (error_code & (1<<4))
+	if (error_code & PF_INSTR)
 		return 0;
 
 	instr = (unsigned char *)convert_rip_to_linear(current, regs);
@@ -293,13 +300,6 @@ int exception_trace = 1;
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
- *
- * error_code:
- *	bit 0 == 0 means no page found, 1 means protection fault
- *	bit 1 == 0 means read, 1 means write
- *	bit 2 == 0 means kernel, 1 means user-mode
- *	bit 3 == 1 means use of reserved bit detected
- *	bit 4 == 1 means fault was an instruction fetch
  */
 asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 					unsigned long error_code)
@@ -350,7 +350,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * is always initialized because it's shared with the main
 	 * kernel text. Only vmalloc may need PML4 syncups.
 	 */
-	if (!(error_code & 0xd) &&
+	if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 	      ((address >= VMALLOC_START && address < VMALLOC_END))) {
 		if (vmalloc_fault(address) < 0)
 			goto bad_area_nosemaphore;
@@ -363,7 +363,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 			goto bad_area_nosemaphore;
 	}
 
-	if (unlikely(error_code & (1 << 3)))
+	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(address, regs, error_code);
 
 	/*
@@ -390,7 +390,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * thus avoiding the deadlock.
 	 */
 	if (!down_read_trylock(&mm->mmap_sem)) {
-		if ((error_code & 4) == 0 &&
+		if ((error_code & PF_USER) == 0 &&
 		    !search_exception_tables(regs->rip))
 			goto bad_area_nosemaphore;
 		down_read(&mm->mmap_sem);
@@ -417,17 +417,17 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 good_area:
 	info.si_code = SEGV_ACCERR;
 	write = 0;
-	switch (error_code & 3) {
+	switch (error_code & (PF_PROT|PF_WRITE)) {
 		default:	/* 3: write, present */
 			/* fall through */
-		case 2:		/* write, not present */
+		case PF_WRITE:		/* write, not present */
 			if (!(vma->vm_flags & VM_WRITE))
 				goto bad_area;
 			write++;
 			break;
-		case 1:		/* read, present */
+		case PF_PROT:		/* read, present */
 			goto bad_area;
-		case 0:	/* read, not present */
+		case 0:		/* read, not present */
 			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 				goto bad_area;
 	}
@@ -462,7 +462,7 @@ bad_area:
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
-	if (error_code & 4) {
+	if (error_code & PF_USER) {
 		if (is_prefetch(regs, address, error_code))
 			return;
 
@@ -558,7 +558,7 @@ do_sigbus:
 	up_read(&mm->mmap_sem);
 
 	/* Kernel mode? Handle exceptions or die */
-	if (!(error_code & 4))
+	if (!(error_code & PF_USER))
 		goto no_context;
 
 	tsk->thread.cr2 = address;
```
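For readers coming to this patch cold: the value being decoded is the hardware-defined error code the CPU pushes for a #PF exception, and the bit meanings are exactly those in the comment the patch removes (bit 0 protection vs. not-present, bit 1 write, bit 2 user mode, bit 3 reserved bit, bit 4 instruction fetch). The sketch below is a standalone user-space illustration of using the new PF_* masks, not kernel code; the decode_fault helper and the sample value are hypothetical:

```c
#include <stdio.h>

/* Same bit assignments the patch introduces in arch/x86_64/mm/fault.c */
#define PF_PROT  (1<<0)	/* set: protection fault; clear: page not found */
#define PF_WRITE (1<<1)	/* set: write access; clear: read */
#define PF_USER  (1<<2)	/* set: fault in user mode; clear: kernel mode */
#define PF_RSVD  (1<<3)	/* set: reserved page-table bit was set */
#define PF_INSTR (1<<4)	/* set: fault was an instruction fetch */

/* Hypothetical helper: print a human-readable view of an error code. */
static void decode_fault(unsigned long error_code)
{
	printf("%s %s in %s mode%s%s\n",
	       error_code & PF_PROT ? "protection fault" : "page not present",
	       error_code & PF_WRITE ? "on write" : "on read",
	       error_code & PF_USER ? "user" : "kernel",
	       error_code & PF_RSVD ? ", reserved bit set" : "",
	       error_code & PF_INSTR ? ", instruction fetch" : "");
}

int main(void)
{
	/* e.g. a user-mode write to a not-present page */
	decode_fault(PF_USER | PF_WRITE);
	return 0;
}
```

The same readability gain shows in the patch itself: `error_code & (PF_RSVD|PF_USER|PF_PROT)` states outright what the old magic `0xd` only implied.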