Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/kprobes_32.c  |  3
-rw-r--r--  arch/x86/kernel/kprobes_64.c  |  1
-rw-r--r--  arch/x86/mm/fault_32.c        | 43
-rw-r--r--  arch/x86/mm/fault_64.c        | 44
4 files changed, 39 insertions(+), 52 deletions(-)
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index e7d0d3c2ef6..06b86e5617f 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -584,7 +584,7 @@ out:
return 1;
}
-static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -666,7 +666,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_GPF:
- case DIE_PAGE_FAULT:
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() &&
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 62e28e52d78..7c16506d681 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -657,7 +657,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_GPF:
- case DIE_PAGE_FAULT:
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() &&
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index fcb38e7f354..be72c2a5b03 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -25,6 +25,7 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/desc.h>
@@ -32,33 +33,27 @@
extern void die(const char *,struct pt_regs *,long);
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
{
- vmalloc_sync_all();
- return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+ if (!user_mode_vm(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
+ ret = 1;
+ preempt_enable();
+ }
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+ return ret;
}
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
{
- struct die_args args = {
- .regs = regs,
- .str = "page fault",
- .err = err,
- .trapnr = 14,
- .signr = SIGSEGV
- };
- return atomic_notifier_call_chain(&notify_page_fault_chain,
- DIE_PAGE_FAULT, &args);
+ return 0;
}
+#endif
/*
* Return EIP plus the CS segment base. The segment limit is also
@@ -331,7 +326,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
if (unlikely(address >= TASK_SIZE)) {
if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
return;
- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ if (notify_page_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
@@ -340,7 +335,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ if (notify_page_fault(regs))
return;
/* It's safe to allow irq's after cr2 has been saved and the vmalloc
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 54816adb8e9..5e0e54906c4 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -25,6 +25,7 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
@@ -40,34 +41,27 @@
#define PF_RSVD (1<<3)
#define PF_INSTR (1<<4)
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
{
- vmalloc_sync_all();
- return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+ if (!user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
+ ret = 1;
+ preempt_enable();
+ }
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+ return ret;
}
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
{
- struct die_args args = {
- .regs = regs,
- .str = "page fault",
- .err = err,
- .trapnr = 14,
- .signr = SIGSEGV
- };
- return atomic_notifier_call_chain(&notify_page_fault_chain,
- DIE_PAGE_FAULT, &args);
+ return 0;
}
+#endif
/* Sometimes the CPU reports invalid exceptions on prefetch.
Check that here and ignore.
@@ -345,7 +339,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (vmalloc_fault(address) >= 0)
return;
}
- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ if (notify_page_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
@@ -354,7 +348,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ if (notify_page_fault(regs))
return;
if (likely(regs->eflags & X86_EFLAGS_IF))
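
For reference, the 32-bit helper that this patch introduces reads as follows once the hunks above are applied; it is assembled directly from the '+' lines in fault_32.c, not additional code. The 64-bit variant in fault_64.c is identical except that it checks user_mode(regs) instead of user_mode_vm(regs).

	#ifdef CONFIG_KPROBES
	static inline int notify_page_fault(struct pt_regs *regs)
	{
		int ret = 0;

		/* kprobe_running() needs smp_processor_id() */
		if (!user_mode_vm(regs)) {
			preempt_disable();
			/* 14 is the x86 page-fault vector (#PF) */
			if (kprobe_running() && kprobe_fault_handler(regs, 14))
				ret = 1;
			preempt_enable();
		}

		return ret;
	}
	#else
	static inline int notify_page_fault(struct pt_regs *regs)
	{
		return 0;
	}
	#endif

Both call sites in do_page_fault() now treat a nonzero return as "the fault was consumed by a kprobe" and return immediately, replacing the old atomic notifier chain and its NOTIFY_STOP check with a direct call that compiles away entirely when CONFIG_KPROBES is not set.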