Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/kprobes.c  25
1 file changed, 6 insertions, 19 deletions
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 6cb40d133b7..9bef2c8dc12 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -34,7 +34,6 @@
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
@@ -266,6 +265,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->rip = (unsigned long)p->ainsn.insn;
}

+/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -299,15 +299,12 @@ int __kprobes kprobe_handler(struct pt_regs *regs)

/* Check we're not actually recursing */
if (kprobe_running()) {
- /* We *are* holding lock here, so this is safe.
- Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
regs->eflags |= kcb->kprobe_saved_rflags;
- unlock_kprobes();
goto no_kprobe;
} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
/* TODO: Provide re-entrancy from
@@ -340,14 +337,11 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
goto ss_probe;
}
}
- /* If it's not ours, can't be delete race, (we hold lock). */
goto no_kprobe;
}

- lock_kprobes();
p = get_kprobe(addr);
if (!p) {
- unlock_kprobes();
if (*addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
@@ -406,9 +400,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
- unsigned long orig_ret_address = 0;
+ unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;

+ spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);

/*
@@ -448,7 +443,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->rip = orig_ret_address;

reset_current_kprobe();
- unlock_kprobes();
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
preempt_enable_no_resched();

/*
@@ -536,10 +531,6 @@ static void __kprobes resume_execution(struct kprobe *p,
}
}

-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
- */
int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
@@ -560,8 +551,6 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
- } else {
- unlock_kprobes();
}
reset_current_kprobe();
out:
@@ -578,7 +567,6 @@ out:
return 1;
}

-/* Interrupts disabled, kprobe_lock held. */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
@@ -592,7 +580,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
regs->eflags |= kcb->kprobe_old_rflags;

reset_current_kprobe();
- unlock_kprobes();
preempt_enable_no_resched();
}
return 0;
@@ -607,7 +594,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;

- preempt_disable();
+ rcu_read_lock();
switch (val) {
case DIE_INT3:
if (kprobe_handler(args->regs))
@@ -626,7 +613,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
default:
break;
}
- preempt_enable();
+ rcu_read_unlock();
return ret;
}
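
For context, here is a minimal kernel-style sketch of the locking pattern this diff switches to: kretprobe bookkeeping is serialized by the kretprobe_lock spinlock, taken with interrupts disabled as in trampoline_probe_handler above, while plain kprobe lookups no longer take a global lock and instead run inside an RCU read-side critical section. The example_* names below are illustrative assumptions, not code from this patch; the real kretprobe_lock lives in the generic kprobes code rather than in this arch file.

/*
 * Sketch only: illustrates the locking pattern this diff adopts,
 * not the actual arch/x86_64/kernel/kprobes.c implementation.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Stand-in for the kretprobe_lock introduced by this patch series. */
static DEFINE_SPINLOCK(example_kretprobe_lock);

/*
 * Return-probe bookkeeping: the per-task instance list is walked under
 * the spinlock, with interrupts disabled because the trampoline handler
 * runs in exception context.
 */
static void example_recycle_instances(struct hlist_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&example_kretprobe_lock, flags);
	/* ... walk and recycle kretprobe instances on 'head' ... */
	spin_unlock_irqrestore(&example_kretprobe_lock, flags);
}

/*
 * Probe lookup: with the global kprobe lock gone, readers of the kprobe
 * hash table only need an RCU read-side critical section.
 */
static int example_probe_is_registered(void *addr)
{
	struct kprobe *p;
	int registered;

	rcu_read_lock();
	p = get_kprobe(addr);		/* RCU-protected hash lookup */
	registered = (p != NULL);
	rcu_read_unlock();

	return registered;
}

The visible effect in this file is that kprobe_exceptions_notify now brackets handler dispatch with rcu_read_lock()/rcu_read_unlock() instead of preempt_disable()/preempt_enable(), and only the kretprobe instance table still requires a real lock.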