Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile     1
-rw-r--r--  arch/x86/kernel/entry_64.S  74
-rw-r--r--  arch/x86/kernel/ftrace.c    11
3 files changed, 85 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 64939a0c398..d274425fb07 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -17,6 +17,7 @@ endif
ifdef CONFIG_FUNCTION_GRAPH_TRACER
# Don't trace __switch_to() but let it for function tracer
CFLAGS_REMOVE_process_32.o = -pg
+CFLAGS_REMOVE_process_64.o = -pg
endif

#
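For background: -pg makes gcc emit a profiling call at every function entry, which is the hook ftrace builds on; __switch_to() in process_64.c must opt out because it runs in the middle of a context switch. A minimal illustration (assumption: gcc on x86-64; example.c and example() are invented names, not part of the patch):

/* Compile with: gcc -pg -S example.c
 * Under -pg, gcc prefixes every function with a call to mcount, roughly:
 *
 *   example:
 *           pushq  %rbp
 *           movq   %rsp, %rbp
 *           call   mcount      <-- the hook the stubs below implement
 *           ...
 *
 * CFLAGS_REMOVE_process_64.o strips the flag so __switch_to() never
 * re-enters the tracer mid-context-switch. */
int example(int x)
{
        return x + 1;
}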
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 08aa6b10933..2aa0526ac30 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -98,6 +98,12 @@ ftrace_call:
movq (%rsp), %rax
addq $0x38, %rsp

+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+ jmp ftrace_stub
+#endif
+
.globl ftrace_stub
ftrace_stub:
retq
@@ -110,6 +116,12 @@ ENTRY(mcount)

cmpq $ftrace_stub, ftrace_trace_function
jnz trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ cmpq $ftrace_stub, ftrace_graph_return
+ jnz ftrace_graph_caller
+#endif
+
.globl ftrace_stub
ftrace_stub:
retq
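In C terms, the non-dynamic mcount path above now falls through to a second pointer comparison before hitting the stub. A standalone sketch of that dispatch (assumption: mcount_model and ftrace_graph_hook are invented names for this demo; the real stub compares ftrace_graph_return against ftrace_stub in assembly):

#include <stdio.h>

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

/* the "do nothing" default both pointers start out as */
static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

static ftrace_func_t ftrace_trace_function = ftrace_stub;
static ftrace_func_t ftrace_graph_hook = ftrace_stub;

static void mcount_model(unsigned long ip, unsigned long parent_ip)
{
        if (ftrace_trace_function != ftrace_stub) {     /* "jnz trace" */
                ftrace_trace_function(ip, parent_ip);
                return;
        }
        if (ftrace_graph_hook != ftrace_stub)           /* "jnz ftrace_graph_caller" */
                ftrace_graph_hook(ip, parent_ip);
}

static void demo_graph(unsigned long ip, unsigned long parent_ip)
{
        printf("graph hook: ip=%#lx parent=%#lx\n", ip, parent_ip);
}

int main(void)
{
        mcount_model(0x100, 0x200);       /* both stubs: falls straight through */
        ftrace_graph_hook = demo_graph;
        mcount_model(0x100, 0x200);       /* second comparison now fires */
        return 0;
}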
@@ -145,6 +157,68 @@ END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ cmpl $0, function_trace_stop
+ jne ftrace_stub
+
+ subq $0x38, %rsp
+ movq %rax, (%rsp)
+ movq %rcx, 8(%rsp)
+ movq %rdx, 16(%rsp)
+ movq %rsi, 24(%rsp)
+ movq %rdi, 32(%rsp)
+ movq %r8, 40(%rsp)
+ movq %r9, 48(%rsp)
+
+ leaq 8(%rbp), %rdi
+ movq 0x38(%rsp), %rsi
+
+ call prepare_ftrace_return
+
+ movq 48(%rsp), %r9
+ movq 40(%rsp), %r8
+ movq 32(%rsp), %rdi
+ movq 24(%rsp), %rsi
+ movq 16(%rsp), %rdx
+ movq 8(%rsp), %rcx
+ movq (%rsp), %rax
+ addq $0x38, %rsp
+ retq
+END(ftrace_graph_caller)
+
+
+.globl return_to_handler
+return_to_handler:
+ subq $80, %rsp
+
+ movq %rax, (%rsp)
+ movq %rcx, 8(%rsp)
+ movq %rdx, 16(%rsp)
+ movq %rsi, 24(%rsp)
+ movq %rdi, 32(%rsp)
+ movq %r8, 40(%rsp)
+ movq %r9, 48(%rsp)
+ movq %r10, 56(%rsp)
+ movq %r11, 64(%rsp)
+
+ call ftrace_return_to_handler
+
+ movq %rax, 72(%rsp)
+ movq 64(%rsp), %r11
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r9
+ movq 40(%rsp), %r8
+ movq 32(%rsp), %rdi
+ movq 24(%rsp), %rsi
+ movq 16(%rsp), %rdx
+ movq 8(%rsp), %rcx
+ movq (%rsp), %rax
+ addq $72, %rsp
+ retq
+#endif
+
+
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
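The pair ftrace_graph_caller/return_to_handler above implements return-address hooking: the entry side swaps the traced function's saved return address for a trampoline, and the trampoline later recovers the real one. A userspace sketch of the same trick (assumptions: gcc on x86-64, built with -O0 -fno-omit-frame-pointer; every name here is invented for the demo, and the kernel version additionally keeps a per-task return stack and saves all return-value registers):

#include <stdio.h>

/* where the real return address is parked (kernel: per-task ret_stack) */
unsigned long saved_ret;

void hook_body(void)
{
        printf("hook: traced() just returned\n");
}

/* Tiny return trampoline. Like return_to_handler above, it must
 * preserve any register a return value could live in (%rax here),
 * then jump back to the address the entry hook saved. */
asm(
        ".globl return_hook\n"
        "return_hook:\n"
        "\tpushq %rax\n"                /* keep the return value alive */
        "\tpushq %rax\n"                /* second push re-aligns the stack */
        "\tcall hook_body\n"
        "\tpopq %rax\n"
        "\tpopq %rax\n"
        "\tjmp *saved_ret(%rip)\n"      /* resume at the real caller */
);
void return_hook(void);

__attribute__((noinline)) void traced(void)
{
        /* the saved return address sits one word above the saved %rbp,
         * which is what "leaq 8(%rbp), %rdi" computes in the stub above */
        unsigned long *parent =
                (unsigned long *)__builtin_frame_address(0) + 1;

        saved_ret = *parent;                    /* remember the real caller */
        *parent = (unsigned long)return_hook;   /* divert the return */
        printf("traced: body runs\n");
}

int main(void)
{
        traced();
        printf("main: resumed normally\n");
        return 0;
}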
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 7ef914e6a2f..58832478b94 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -467,8 +467,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
* ignore such a protection.
*/
asm volatile(
+#ifdef CONFIG_X86_64
+ "1: movq (%[parent_old]), %[old]\n"
+ "2: movq %[return_hooker], (%[parent_replaced])\n"
+#else
"1: movl (%[parent_old]), %[old]\n"
"2: movl %[return_hooker], (%[parent_replaced])\n"
+#endif
" movl $0, %[faulted]\n"
".section .fixup, \"ax\"\n"
@@ -476,8 +481,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
".previous\n"
".section __ex_table, \"a\"\n"
+#ifdef CONFIG_X86_64
+ " .quad 1b, 3b\n"
+ " .quad 2b, 3b\n"
+#else
" .long 1b, 3b\n"
" .long 2b, 3b\n"
+#endif
".previous\n"
: [parent_replaced] "=r" (parent), [old] "=r" (old),
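The open-coded asm in these two hunks is a fixup-protected load/store pair, and __ex_table entries are address pairs, so they must be pointer-sized: .quad on x86-64 where .long only covers half an address. For comparison, the same semantics could be written with the kernel's probe_kernel_read()/probe_kernel_write() helpers from mm/maccess.c (a sketch under that assumption; hook_parent is an invented name, and the patch deliberately open-codes the fixup instead):

#include <linux/uaccess.h>

/* Sketch only: read the old return address and install the hook,
 * treating either fault as "give up on tracing this call". */
static int hook_parent(unsigned long *parent, unsigned long return_hooker,
                       unsigned long *old)
{
        if (probe_kernel_read(old, parent, sizeof(*old)))
                return -EFAULT;         /* the asm sets faulted = 1 here */
        if (probe_kernel_write(parent, &return_hooker,
                               sizeof(return_hooker)))
                return -EFAULT;
        return 0;
}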
@@ -509,5 +519,4 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)

ftrace_graph_entry(&trace);
}
-
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
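Tying the two halves together: prepare_ftrace_return() pushes the displaced return address onto a per-task stack, and ftrace_return_to_handler() pops it back for the final retq in return_to_handler. A standalone model of that bookkeeping (assumption: an invented single-threaded demo with made-up addresses; the kernel keeps ret_stack inside task_struct and also records entry/exit timestamps):

#include <stdio.h>

#define RET_STACK_DEPTH 50

struct ret_entry {
        unsigned long ret;      /* the displaced return address */
        unsigned long func;     /* which function it belonged to */
};

static struct ret_entry ret_stack[RET_STACK_DEPTH];
static int ret_index = -1;

/* entry side: what prepare_ftrace_return() records before it lets the
 * hooked return address stand */
static int push_return_trace(unsigned long ret, unsigned long func)
{
        if (ret_index >= RET_STACK_DEPTH - 1)
                return -1;      /* stack full: caller must not hook */
        ret_stack[++ret_index] = (struct ret_entry){ ret, func };
        return 0;
}

/* return side: what ftrace_return_to_handler() hands back for the
 * retq in return_to_handler */
static unsigned long pop_return_trace(unsigned long *func)
{
        *func = ret_stack[ret_index].func;
        return ret_stack[ret_index--].ret;
}

int main(void)
{
        unsigned long func, ret;

        push_return_trace(0x400a10, 0x400b20);
        ret = pop_return_trace(&func);
        printf("retq goes to %#lx, leaving %#lx\n", ret, func);
        return 0;
}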