Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                |   6
-rw-r--r--  arch/ia64/Kconfig.debug          |  11
-rw-r--r--  arch/ia64/configs/sn2_defconfig  |   2
-rw-r--r--  arch/ia64/defconfig              |   2
-rw-r--r--  arch/ia64/ia32/ia32priv.h        |   2
-rw-r--r--  arch/ia64/kernel/Makefile        |   1
-rw-r--r--  arch/ia64/kernel/jprobes.S       |  61
-rw-r--r--  arch/ia64/kernel/kprobes.c       | 601
-rw-r--r--  arch/ia64/kernel/traps.c         |  33
-rw-r--r--  arch/ia64/mm/discontig.c         |   9
-rw-r--r--  arch/ia64/mm/fault.c             |   8
11 files changed, 727 insertions(+), 9 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ce4dfa8b834..01b78e7f992 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -161,6 +161,8 @@ config IA64_PAGE_SIZE_64KB
endchoice
+source kernel/Kconfig.hz
+
config IA64_BRL_EMU
bool
depends on ITANIUM
@@ -197,7 +199,7 @@ config HOLES_IN_ZONE
bool
default y if VIRTUAL_MEM_MAP
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool "Discontiguous memory support"
depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) && NUMA && VIRTUAL_MEM_MAP
default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
@@ -300,6 +302,8 @@ config PREEMPT
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
+source "mm/Kconfig"
+
config HAVE_DEC_LOCK
bool
depends on (SMP || PREEMPT)
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index de9d507ba0f..fda67ac993d 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -2,6 +2,17 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
+config KPROBES
+ bool "Kprobes"
+ depends on DEBUG_KERNEL
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+
+
choice
prompt "Physical memory granularity"
default IA64_GRANULE_64MB
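For context on the API named in the new help text: register_kprobe() takes a struct kprobe whose addr points at the instruction to trap and whose pre_handler is the callback. The following is a minimal sketch of a probe-registering module, illustrative only and not part of this patch; the probed symbol, the kallsyms_lookup_name() lookup and the message text are assumptions.

/*
 * Illustrative sketch only (not part of this patch): establishing a
 * probe point with register_kprobe(), as described in the help text.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;		/* continue with the probed instruction */
}

static struct kprobe example_kp = {
	.pre_handler = example_pre_handler,
};

static int __init kprobe_example_init(void)
{
	/* Symbol choice and lookup method are assumptions for the sketch. */
	example_kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name("do_fork");
	if (!example_kp.addr)
		return -EINVAL;
	return register_kprobe(&example_kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");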
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index a01bb02d074..487d2e36b0a 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -78,7 +78,7 @@ CONFIG_IA64_L1_CACHE_SHIFT=7
CONFIG_NUMA=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
# CONFIG_IA64_CYCLONE is not set
CONFIG_IOSAPIC=y
CONFIG_IA64_SGI_SN_SIM=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 7be8096e056..8444add7638 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -84,7 +84,7 @@ CONFIG_IA64_L1_CACHE_SHIFT=7
CONFIG_NUMA=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_IA64_CYCLONE=y
CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=18
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index b2de948bdae..e3e9290e3ff 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -241,7 +241,7 @@ typedef struct compat_siginfo {
/* POSIX.1b timers */
struct {
- timer_t _tid; /* timer id */
+ compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof(unsigned int) - sizeof(int)];
compat_sigval_t _sigval; /* same as below */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 4c73d8ba2e3..b2e2f6509eb 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
+obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
new file mode 100644
index 00000000000..b7fa3ccd2b0
--- /dev/null
+++ b/arch/ia64/kernel/jprobes.S
@@ -0,0 +1,61 @@
+/*
+ * Jprobe specific operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> initial implementation
+ *
+ * Jprobes (a.k.a. "jump probes", which are built on top of kprobes) allow a
+ * probe to be inserted into the beginning of a function call. The fundamental
+ * difference between a jprobe and a kprobe is that the jprobe handler is
+ * executed in the same context as the target function, while the kprobe
+ * handlers are executed in interrupt context.
+ *
+ * For jprobes we initially gain control by placing a breakpoint in the
+ * first instruction of the targeted function. When we catch that specific
+ * break, we:
+ * * set the return address to our jprobe_inst_return() function
+ * * jump to the jprobe handler function
+ *
+ * Since we fixed up the return address, the jprobe handler will return to our
+ * jprobe_inst_return() function, giving us control again. At this point we
+ * are back in the parent's frame marker, so we do yet another call to our
+ * jprobe_break() function to fix up the frame marker as it would normally
+ * exist in the target function.
+ *
+ * Our jprobe_return function then transfers control back to kprobes.c by
+ * executing a break instruction using one of our reserved numbers. When we
+ * catch that break in kprobes.c, we continue like we do for a normal kprobe
+ * by single stepping the emulated instruction, and then returning execution
+ * to the correct location.
+ */
+#include <asm/asmmacro.h>
+
+ /*
+ * void jprobe_break(void)
+ */
+ENTRY(jprobe_break)
+ break.m 0x80300
+END(jprobe_break)
+
+ /*
+ * void jprobe_inst_return(void)
+ */
+GLOBAL_ENTRY(jprobe_inst_return)
+ br.call.sptk.many b0=jprobe_break
+END(jprobe_inst_return)
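To make the control flow described in the jprobes.S header concrete, here is a rough sketch of how a jprobe is typically registered against this machinery. It is illustrative only and not part of the patch: the probed function, the kallsyms_lookup_name() lookup and the message text are assumptions; the handler ends with jprobe_return(), the generic kprobes call whose effect on ia64 corresponds to the reserved break issued above.

/* Illustrative jprobe registration sketch (not part of this patch). */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>

/* Handler with the same signature as the (assumed) probed function. */
static int example_open_handler(struct inode *inode, struct file *filp)
{
	printk(KERN_INFO "jprobe: open() entered\n");
	/* Every jprobe handler must end with jprobe_return(); it hands
	 * control back to the kprobes core via a reserved break. */
	jprobe_return();
	return 0;		/* never reached */
}

static struct jprobe example_jp = {
	.entry = (kprobe_opcode_t *)example_open_handler,
};

static int __init jprobe_example_init(void)
{
	/* Symbol choice and lookup method are assumptions for the sketch. */
	example_jp.kp.addr =
		(kprobe_opcode_t *)kallsyms_lookup_name("generic_file_open");
	if (!example_jp.kp.addr)
		return -EINVAL;
	return register_jprobe(&example_jp);
}

static void __exit jprobe_example_exit(void)
{
	unregister_jprobe(&example_jp);
}

module_init(jprobe_example_init);
module_exit(jprobe_example_exit);
MODULE_LICENSE("GPL");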
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
new file mode 100644
index 00000000000..5978823d5c6
--- /dev/null
+++ b/arch/ia64/kernel/kprobes.c
@@ -0,0 +1,601 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/ia64/kernel/kprobes.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> adapted from i386
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/moduleloader.h>
+
+#include <asm/pgtable.h>
+#include <asm/kdebug.h>
+
+extern void jprobe_inst_return(void);
+
+/* kprobe_status settings */
+#define KPROBE_HIT_ACTIVE 0x00000001
+#define KPROBE_HIT_SS 0x00000002
+
+static struct kprobe *current_kprobe, *kprobe_prev;
+static unsigned long kprobe_status, kprobe_status_prev;
+static struct pt_regs jprobe_saved_regs;
+
+enum instruction_type {A, I, M, F, B, L, X, u};
+static enum instruction_type bundle_encoding[32][3] = {
+ { M, I, I }, /* 00 */
+ { M, I, I }, /* 01 */
+ { M, I, I }, /* 02 */
+ { M, I, I }, /* 03 */
+ { M, L, X }, /* 04 */
+ { M, L, X }, /* 05 */
+ { u, u, u }, /* 06 */
+ { u, u, u }, /* 07 */
+ { M, M, I }, /* 08 */
+ { M, M, I }, /* 09 */
+ { M, M, I }, /* 0A */
+ { M, M, I }, /* 0B */
+ { M, F, I }, /* 0C */
+ { M, F, I }, /* 0D */
+ { M, M, F }, /* 0E */
+ { M, M, F }, /* 0F */
+ { M, I, B }, /* 10 */
+ { M, I, B }, /* 11 */
+ { M, B, B }, /* 12 */
+ { M, B, B }, /* 13 */
+ { u, u, u }, /* 14 */
+ { u, u, u }, /* 15 */
+ { B, B, B }, /* 16 */
+ { B, B, B }, /* 17 */
+ { M, M, B }, /* 18 */
+ { M, M, B }, /* 19 */
+ { u, u, u }, /* 1A */
+ { u, u, u }, /* 1B */
+ { M, F, B }, /* 1C */
+ { M, F, B }, /* 1D */
+ { u, u, u }, /* 1E */
+ { u, u, u }, /* 1F */
+};
+
+/*
+ * In this function we check to see if the instruction
+ * is IP relative instruction and update the kprobe
+ * inst flag accordingly
+ */
+static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ p->ainsn.inst_flag = 0;
+ p->ainsn.target_br_reg = 0;
+
+ if (bundle_encoding[template][slot] == B) {
+ switch (major_opcode) {
+ case INDIRECT_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ case IP_RELATIVE_PREDICT_OPCODE:
+ case IP_RELATIVE_BRANCH_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+ break;
+ case IP_RELATIVE_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ }
+ } else if (bundle_encoding[template][slot] == X) {
+ switch (major_opcode) {
+ case LONG_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ }
+ }
+ return;
+}
+
+/*
+ * In this function we check to see if the instruction
+ * on which we are inserting kprobe is supported.
+ * Returns 0 if supported
+ * Returns -EINVAL if unsupported
+ */
+static int unsupported_inst(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+
+ if (bundle_encoding[template][slot] == I) {
+ switch (major_opcode) {
+ case 0x0: //I_UNIT_MISC_OPCODE:
+ /*
+ * Check for Integer speculation instruction
+ * - Bit 33-35 to be equal to 0x1
+ */
+ if (((kprobe_inst >> 33) & 0x7) == 1) {
+ printk(KERN_WARNING
+ "Kprobes on speculation inst at <0x%lx> not supported\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /*
+ * IP relative mov instruction
+ * - Bit 27-35 to be equal to 0x30
+ */
+ if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
+ printk(KERN_WARNING
+ "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
+ addr);
+ return -EINVAL;
+
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * In this function we check to see if the instruction
+ * (qp) cmpx.crel.ctype p1,p2=r2,r3
+ * on which we are inserting kprobe is cmp instruction
+ * with ctype as unc.
+ */
+static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
+unsigned long kprobe_inst)
+{
+ cmp_inst_t cmp_inst;
+ uint ctype_unc = 0;
+
+ if (!((bundle_encoding[template][slot] == I) ||
+ (bundle_encoding[template][slot] == M)))
+ goto out;
+
+ if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
+ (major_opcode == 0xE)))
+ goto out;
+
+ cmp_inst.l = kprobe_inst;
+ if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
+ /* Integer compare - Register Register (A6 type) */
+ if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
+ &&(cmp_inst.f.c == 1))
+ ctype_unc = 1;
+ } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
+ /* Integer compare - Immediate Register (A8 type) */
+ if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
+ ctype_unc = 1;
+ }
+out:
+ return ctype_unc;
+}
+
+/*
+ * In this function we override the bundle with
+ * the break instruction at the given slot.
+ */
+static void prepare_break_inst(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ unsigned long break_inst = BREAK_INST;
+ bundle_t *bundle = &p->ainsn.insn.bundle;
+
+ /*
+ * Copy the original kprobe_inst qualifying predicate (qp)
+ * to the break instruction iff !is_cmp_ctype_unc_inst,
+ * because a cmp instruction with ctype equal to unc is a
+ * special instruction that always needs to be executed
+ * regardless of its qp.
+ */
+ if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
+ break_inst |= (0x3f & kprobe_inst);
+
+ switch (slot) {
+ case 0:
+ bundle->quad0.slot0 = break_inst;
+ break;
+ case 1:
+ bundle->quad0.slot1_p0 = break_inst;
+ bundle->quad1.slot1_p1 = break_inst >> (64-46);
+ break;
+ case 2:
+ bundle->quad1.slot2 = break_inst;
+ break;
+ }
+
+ /*
+ * Update the instruction flag, so that we can
+ * emulate the instruction properly after we
+ * single step on original instruction
+ */
+ update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
+}
+
+static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
+ unsigned long *kprobe_inst, uint *major_opcode)
+{
+ unsigned long kprobe_inst_p0, kprobe_inst_p1;
+ unsigned int template;
+
+ template = bundle->quad0.template;
+
+ switch (slot) {
+ case 0:
+ *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
+ *kprobe_inst = bundle->quad0.slot0;
+ break;
+ case 1:
+ *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
+ kprobe_inst_p0 = bundle->quad0.slot1_p0;
+ kprobe_inst_p1 = bundle->quad1.slot1_p1;
+ *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
+ break;
+ case 2:
+ *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
+ *kprobe_inst = bundle->quad1.slot2;
+ break;
+ }
+}
+
+static int valid_kprobe_addr(int template, int slot, unsigned long addr)
+{
+ if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
+ printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
+ addr);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void save_previous_kprobe(void)
+{
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p)
+{
+ current_kprobe = p;
+}
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long) p->addr;
+ unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
+ unsigned long kprobe_inst=0;
+ unsigned int slot = addr & 0xf, template, major_opcode = 0;
+ bundle_t *bundle = &p->ainsn.insn.bundle;
+
+ memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
+ memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
+
+ template = bundle->quad0.template;
+
+ if(valid_kprobe_addr(template, slot, addr))
+ return -EINVAL;
+
+ /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+ if (slot == 1 && bundle_encoding[template][1] == L)
+ slot++;
+
+ /* Get kprobe_inst and major_opcode from the bundle */
+ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+ if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
+ return -EINVAL;
+
+ prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
+
+ return 0;
+}
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+ unsigned long arm_addr = addr & ~0xFULL;
+
+ memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+ unsigned long arm_addr = addr & ~0xFULL;
+
+ /* p->opcode contains the original unaltered bundle */
+ memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+/*
+ * We are resuming execution after a single-step fault, so the pt_regs
+ * structure reflects the register state after we executed the instruction
+ * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
+ * the IP to point back to the original probed address, handling the cases
+ * where a relative IP address and/or a branch register must be fixed up.
+ */
+static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
+ unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
+ unsigned long template;
+ int slot = ((unsigned long)p->addr & 0xf);
+
+ template = p->opcode.bundle.quad0.template;
+
+ if (slot == 1 && bundle_encoding[template][1] == L)
+ slot = 2;
+
+ if (p->ainsn.inst_flag) {
+
+ if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
+ /* Fix relative IP address */
+ regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
+ }
+
+ if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
+ /*
+ * Fix the target branch register; software convention is
+ * to use b0, b6 or b7, so we check only those registers.
+ */
+ switch (p->ainsn.target_br_reg) {
+ case 0:
+ if ((regs->b0 == bundle_addr) ||
+ (regs->b0 == bundle_addr + 0x10)) {
+ regs->b0 = (regs->b0 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ case 6:
+ if ((regs->b6 == bundle_addr) ||
+ (regs->b6 == bundle_addr + 0x10)) {
+ regs->b6 = (regs->b6 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ case 7:
+ if ((regs->b7 == bundle_addr) ||
+ (regs->b7 == bundle_addr + 0x10)) {
+ regs->b7 = (regs->b7 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ } /* end switch */
+ }
+ goto turn_ss_off;
+ }
+
+ if (slot == 2) {
+ if (regs->cr_iip == bundle_addr + 0x10) {
+ regs->cr_iip = resume_addr + 0x10;
+ }
+ } else {
+ if (regs->cr_iip == bundle_addr) {
+ regs->cr_iip = resume_addr;
+ }
+ }
+
+turn_ss_off:
+ /* Turn off Single Step bit */
+ ia64_psr(regs)->ss = 0;
+}
+
+static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
+ unsigned long slot = (unsigned long)p->addr & 0xf;
+
+ /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
+ regs->cr_iip = bundle_addr & ~0xFULL;
+
+ if (slot > 2)
+ slot = 0;
+
+ ia64_psr(regs)->ri = slot;
+
+ /* turn on single stepping */
+ ia64_psr(regs)->ss = 1;
+}
+
+static int pre_kprobes_handler(struct die_args *args)
+{
+ struct kprobe *p;
+ int ret = 0;
+ struct pt_regs *regs = args->regs;
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
+
+ preempt_disable();
+
+ /* Handle recursion cases */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kprobe_status == KPROBE_HIT_SS) {
+ unlock_kprobes();
+ goto no_kprobe;
+ }
+ /* We have re-entered pre_kprobes_handler(), since
+ * another probe was hit while within the handler.
+ * Here we save the original kprobes variables and
+ * just single-step the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p);
+ p->nmissed++;
+ prepare_ss(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else if (args->err == __IA64_BREAK_JPROBE) {
+ /*
+ * jprobe instrumented function just completed
+ */
+ p = current_kprobe;
+ if (p->break_handler && p->break_handler(p, regs)) {
+ goto ss_probe;
+ }
+ } else {
+ /* Not our break */
+ goto no_kprobe;
+ }
+ }
+
+ lock_kprobes();
+ p = get_kprobe(addr);
+ if (!p) {
+ unlock_kprobes();
+ goto no_kprobe;
+ }
+
+ kprobe_status = KPROBE_HIT_ACTIVE;
+ set_current_kprobe(p);
+
+ if (p->pre_handler && p->pre_handler(p, regs))
+ /*
+ * Our pre-handler is specifically requesting that we just
+ * do a return. This is handling the case where the
+ * pre-handler is really our special jprobe pre-handler.
+ */
+ return 1;
+
+ss_probe:
+ prepare_ss(p, regs);
+ kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+static int post_kprobes_handler(struct pt_regs *regs)
+{
+ if (!kprobe_running())
+ return 0;
+
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
+ current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
+
+ resume_execution(current_kprobe, regs);
+
+ /* Restore the original saved kprobes variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
+
+ unlock_kprobes();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ if (!kprobe_running())
+ return 0;
+
+ if (current_kprobe->fault_handler &&
+ current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ return 1;
+
+ if (kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(current_kprobe, regs);
+ unlock_kprobes();
+ preempt_enable_no_resched();
+ }
+
+ return 0;
+}
+
+int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct die_args *args = (struct die_args *)data;
+ switch(val) {
+ case DIE_BREAK:
+ if (pre_kprobes_handler(args))
+ return NOTIFY_STOP;
+ break;
+ case DIE_SS:
+ if (post_kprobes_handler(args->regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_PAGE_FAULT:
+ if (kprobes_fault_handler(args->regs, args->trapnr))
+ return NOTIFY_STOP;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
+
+ /* save architectural state */
+ jprobe_saved_regs = *regs;
+
+ /* after rfi, execute the jprobe instrumented function */
+ regs->cr_iip = addr & ~0xFULL;
+ ia64_psr(regs)->ri = addr & 0xf;
+ regs->r1 = ((struct fnptr *)(jp->entry))->gp;
+
+ /*
+ * fix the return address to our jprobe_inst_return() function
+ * in the jprobes.S file
+ */
+ regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
+
+ return 1;
+}
+
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ *regs = jprobe_saved_regs;
+ return 1;
+}
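A recurring pattern in kprobes.c above is that the probe address encodes both the 16-byte bundle (addr & ~0xFULL) and the slot within it (addr & 0xf), and that a probe requested on slot 1 of an MLX bundle is moved to slot 2. A small standalone sketch of that decomposition follows; it is illustrative only, and the address value is made up.

/* Standalone illustration of the bundle/slot address convention used by
 * arch_prepare_kprobe() above; not part of the patch. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical probe address: bits 0-3 carry the slot number. */
	unsigned long addr = 0xa000000100061232UL;
	unsigned long bundle_addr = addr & ~0xFUL;	/* 16-byte bundle base */
	unsigned int slot = addr & 0xf;			/* slot 0, 1 or 2 */

	/* arch_prepare_kprobe() additionally bumps slot 1 to slot 2 when the
	 * bundle template is MLX, since the L and X halves form one
	 * instruction. */
	printf("bundle at 0x%lx, slot %u\n", bundle_addr, slot);
	return 0;
}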
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 1861173bd4f..e7e520d90f0 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -21,12 +21,26 @@
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/kdebug.h>
extern spinlock_t timerlist_lock;
fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
+struct notifier_block *ia64die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&ia64die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
+}
+
void __init
trap_init (void)
{
@@ -137,6 +151,10 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
switch (break_num) {
case 0: /* unknown error (used by GCC for __builtin_abort()) */
+ if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
+ == NOTIFY_STOP) {
+ return;
+ }
die_if_kernel("bugcheck!", regs, break_num);
sig = SIGILL; code = ILL_ILLOPC;
break;
@@ -189,6 +207,15 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
sig = SIGILL; code = __ILL_BNDMOD;
break;
+ case 0x80200:
+ case 0x80300:
+ if (notify_die(DIE_BREAK, "kprobe", regs, break_num, TRAP_BRKPT, SIGTRAP)
+ == NOTIFY_STOP) {
+ return;
+ }
+ sig = SIGTRAP; code = TRAP_BRKPT;
+ break;
+
default:
if (break_num < 0x40000 || break_num > 0x100000)
die_if_kernel("Bad break", regs, break_num);
@@ -548,7 +575,11 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
#endif
break;
case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
- case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
+ case 36:
+ if (notify_die(DIE_SS, "ss", &regs, vector,
+ vector, SIGTRAP) == NOTIFY_STOP)
+ return;
+ siginfo.si_code = TRAP_TRACE; ifa = 0; break;
}
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
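The new ia64die_chain gives subsystems a hook into break, single-step and page-fault events; kprobes attaches its kprobe_exceptions_notify() through register_die_notifier(). A rough sketch of another such registration is shown below; the handler body and message are illustrative assumptions, not part of the patch.

/* Illustrative die-notifier registration sketch (not part of this patch). */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Log DIE_BREAK events, never consume them. */
static int example_die_event(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = (struct die_args *)data;

	if (val == DIE_BREAK)
		printk(KERN_INFO "break fault, err 0x%lx\n",
		       (unsigned long)args->err);
	return NOTIFY_DONE;	/* let other notifiers and the trap handler run */
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_event,
};

/* Called once during initialization of the interested subsystem: */
/*	register_die_notifier(&example_die_nb); */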
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c0071092939..f3fd528ead3 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -560,14 +560,15 @@ void show_mem(void)
int shared = 0, cached = 0, reserved = 0;
printk("Node ID: %d\n", pgdat->node_id);
for(i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat_page_nr(pgdat, i);
if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
continue;
- if (PageReserved(pgdat->node_mem_map+i))
+ if (PageReserved(page))
reserved++;
- else if (PageSwapCache(pgdat->node_mem_map+i))
+ else if (PageSwapCache(page))
cached++;
- else if (page_count(pgdat->node_mem_map+i))
- shared += page_count(pgdat->node_mem_map+i)-1;
+ else if (page_count(page))
+ shared += page_count(page)-1;
}
total_present += present;
total_reserved += reserved;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 4174ec999dd..ff62551eb3a 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
+#include <asm/kdebug.h>
extern void die (char *, struct pt_regs *, long);
@@ -102,6 +103,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
goto bad_area_no_up;
#endif
+ /*
+ * This is to handle kprobes on user-space access instructions
+ */
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
down_read(&mm->mmap_sem);
vma = find_vma_prev(mm, address, &prev_vma);
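The DIE_PAGE_FAULT notification above is what lets kprobes_fault_handler() offer a fault raised during probe handling back to the active probe's fault_handler callback. A minimal sketch of a probe that supplies one follows; the handler body is an illustrative assumption, not part of the patch.

/* Illustrative fault_handler sketch (not part of this patch). */
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Report the trap number and decline to handle it, so normal fault
 * processing continues (return 0). */
static int example_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
	printk(KERN_INFO "kprobe at %p: fault, trap %d\n", p->addr, trapnr);
	return 0;
}

static struct kprobe example_fault_kp = {
	.fault_handler = example_fault,
	/* .addr and .pre_handler would be set up before register_kprobe(),
	 * as in the earlier registration sketch. */
};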