Diffstat (limited to 'arch')
 arch/x86/kernel/ftrace.c  | 69
 arch/x86/kernel/kprobes.c | 17
 2 files changed, 50 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1d0d7f42efe3..61df77532120 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -79,11 +79,11 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
*
* 1) Put the instruction pointer into the IP buffer
* and the new code into the "code" buffer.
- * 2) Set a flag that says we are modifying code
- * 3) Wait for any running NMIs to finish.
- * 4) Write the code
- * 5) clear the flag.
- * 6) Wait for any running NMIs to finish.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code; this is done in one atomic operation.
+ * 3) Write the code.
+ * 4) Clear the flag.
+ * 5) Wait for any running NMIs to finish.
*
* If an NMI is executed, the first thing it does is to call
* "ftrace_nmi_enter". This will check if the flag is set to write
@@ -95,9 +95,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
* are the same as what exists.
*/
+#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status; /* holds return value of text write */
-static int mod_code_write; /* set when NMI should do the write */
static void *mod_code_ip; /* holds the IP to write to */
static void *mod_code_newcode; /* holds the text to write to the IP */
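[Annotation] The trick this hunk introduces is folding the write flag into the same word as the NMI counter, so an NMI can register itself and observe the flag in one atomic operation. A rough userspace illustration of that encoding, using C11 <stdatomic.h> rather than the kernel's atomic_t API (all names below are made up for the sketch):

#include <stdatomic.h>
#include <stdio.h>

#define WRITE_FLAG (1u << 31)          /* mirrors MOD_CODE_WRITE_FLAG */

/* NMI count in the low bits, writer flag in bit 31 */
static atomic_uint nmi_running;

/* NMI entry: one atomic op both registers this NMI and samples the flag */
static int nmi_enter_sees_writer(void)
{
        unsigned int v = atomic_fetch_add(&nmi_running, 1) + 1;
        return (v & WRITE_FLAG) != 0;  /* writer pending? then do the write */
}

static void nmi_exit(void)
{
        atomic_fetch_sub(&nmi_running, 1);
}

int main(void)
{
        atomic_fetch_or(&nmi_running, WRITE_FLAG);      /* writer raises flag */
        printf("writer seen: %d\n", nmi_enter_sees_writer());  /* prints 1 */
        nmi_exit();
        return 0;
}

In the kernel, atomic_inc_return() plays the role of the fetch-add, and MOD_CODE_WRITE_FLAG claims bit 31, leaving the low 31 bits for the nesting count.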
@@ -114,6 +114,20 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
return r;
}
+static void clear_mod_flag(void)
+{
+ int old = atomic_read(&nmi_running);
+
+ for (;;) {
+ int new = old & ~MOD_CODE_WRITE_FLAG;
+
+ if (old == new)
+ break;
+
+ old = atomic_cmpxchg(&nmi_running, old, new);
+ }
+}
+
static void ftrace_mod_code(void)
{
/*
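[Annotation] clear_mod_flag() above open-codes an atomic AND as a cmpxchg loop, since atomic_t offered no such operation at the time; the early old == new exit skips the cmpxchg entirely when the flag is already clear. In C11 terms the whole loop collapses to one call (again an illustrative sketch, not kernel code):

#include <stdatomic.h>

#define WRITE_FLAG (1u << 31)

static atomic_uint nmi_running;

/* Drop bit 31 but leave the in-flight NMI count untouched */
static void clear_write_flag(void)
{
        atomic_fetch_and(&nmi_running, ~WRITE_FLAG);
}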
@@ -127,27 +141,39 @@ static void ftrace_mod_code(void)
/* if we fail, then kill any new writers */
if (mod_code_status)
- mod_code_write = 0;
+ clear_mod_flag();
}
void ftrace_nmi_enter(void)
{
- atomic_inc(&nmi_running);
- /* Must have nmi_running seen before reading write flag */
- smp_mb();
- if (mod_code_write) {
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
ftrace_mod_code();
atomic_inc(&nmi_update_count);
}
+ /* Must have previous changes seen before executions */
+ smp_mb();
}
void ftrace_nmi_exit(void)
{
/* Finish all executions before clearing nmi_running */
- smp_wmb();
+ smp_mb();
atomic_dec(&nmi_running);
}
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+ if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+ return;
+
+ do {
+ cpu_relax();
+ } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+ nmi_wait_count++;
+}
+
static void wait_for_nmi(void)
{
if (!atomic_read(&nmi_running))
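[Annotation] wait_for_nmi_and_set_mod_flag() is the writer-side half of the fix: the cmpxchg can only succeed when the count is zero, so "no NMI in flight" and "flag raised" become a single indivisible step, closing the window the old wait-then-set sequence left open. A hedged C11 rendering (illustrative names):

#include <stdatomic.h>

#define WRITE_FLAG (1u << 31)

static atomic_uint nmi_running;

/* Succeed only when no NMI is in flight, setting the flag in the same CAS */
static void wait_for_nmi_and_set_flag(void)
{
        unsigned int expected = 0;

        while (!atomic_compare_exchange_weak(&nmi_running, &expected,
                                             WRITE_FLAG)) {
                expected = 0;   /* CAS overwrote it with the current value */
                /* a cpu_relax()-style pause hint would go here */
        }
}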
@@ -167,14 +193,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
mod_code_newcode = new_code;
/* The buffers need to be visible before we let NMIs write them */
- smp_wmb();
-
- mod_code_write = 1;
-
- /* Make sure write bit is visible before we wait on NMIs */
smp_mb();
- wait_for_nmi();
+ wait_for_nmi_and_set_mod_flag();
/* Make sure all running NMIs have finished before we write the code */
smp_mb();
@@ -182,13 +203,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
ftrace_mod_code();
/* Make sure the write happens before clearing the bit */
- smp_wmb();
-
- mod_code_write = 0;
-
- /* make sure NMIs see the cleared bit */
smp_mb();
+ clear_mod_flag();
wait_for_nmi();
return mod_code_status;
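[Annotation] Putting the pieces together, the writer path now has this shape (a condensed, illustrative pseudo-equivalent of do_ftrace_mod_code(), not the kernel source):

#include <stdatomic.h>
#include <string.h>

#define WRITE_FLAG (1u << 31)

static atomic_uint nmi_running;
static unsigned char text[5];           /* stand-in for the patched site */
static unsigned char newcode[5];
static int status;

static void do_write(void) { memcpy(text, newcode, 5); status = 0; }

static int modify_code(const unsigned char *code)
{
        memcpy(newcode, code, 5);                      /* 1) fill buffers   */
        atomic_thread_fence(memory_order_seq_cst);     /*    publish them   */

        unsigned int expected = 0;                     /* 2) wait for NMIs  */
        while (!atomic_compare_exchange_weak(&nmi_running, &expected,
                                             WRITE_FLAG))
                expected = 0;                          /*    and set flag,  */
                                                       /*    atomically     */
        do_write();                                    /* 3) write the code */

        atomic_fetch_and(&nmi_running, ~WRITE_FLAG);   /* 4) clear the flag */

        while (atomic_load(&nmi_running))              /* 5) drain any NMI  */
                ;                                      /*    racing with 4) */
        return status;
}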
@@ -393,7 +410,6 @@ int ftrace_disable_ftrace_graph_caller(void)
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
- unsigned long long calltime;
int faulted;
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
@@ -436,10 +452,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}
- calltime = trace_clock_local();
-
- if (ftrace_push_return_trace(old, calltime,
- self_addr, &trace.depth) == -EBUSY) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
*parent = old;
return;
}
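[Annotation] The prepare_ftrace_return() hunks are mechanical: the arch code no longer reads the clock because the matching generic-side change (not shown in this diff) moves the timestamp into ftrace_push_return_trace() itself. A loose userspace analogue of a push helper that owns its own timestamping (everything below is invented for illustration):

#define _POSIX_C_SOURCE 199309L
#include <time.h>

struct ret_entry { unsigned long ret, func; long long calltime_ns; };

static struct ret_entry ret_stack[64];
static int ret_depth = -1;

static long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* The helper timestamps the entry itself, so callers no longer pass one in */
static int push_return_trace(unsigned long ret, unsigned long func, int *depth)
{
        if (ret_depth + 1 >= 64)
                return -1;              /* the kernel returns -EBUSY here */
        ret_stack[++ret_depth] = (struct ret_entry){ ret, func, now_ns() };
        *depth = ret_depth;
        return 0;
}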
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 4558dd3918cf..759095d53a06 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -638,13 +638,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
#else
" pushf\n"
/*
- * Skip cs, ip, orig_ax.
+ * Skip cs, ip, orig_ax and gs.
* trampoline_handler() will plug in these values
*/
- " subl $12, %esp\n"
+ " subl $16, %esp\n"
" pushl %fs\n"
- " pushl %ds\n"
" pushl %es\n"
+ " pushl %ds\n"
" pushl %eax\n"
" pushl %ebp\n"
" pushl %edi\n"
@@ -655,10 +655,10 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
" movl %esp, %eax\n"
" call trampoline_handler\n"
/* Move flags to cs */
- " movl 52(%esp), %edx\n"
- " movl %edx, 48(%esp)\n"
+ " movl 56(%esp), %edx\n"
+ " movl %edx, 52(%esp)\n"
/* Replace saved flags with true return address. */
- " movl %eax, 52(%esp)\n"
+ " movl %eax, 56(%esp)\n"
" popl %ebx\n"
" popl %ecx\n"
" popl %edx\n"
@@ -666,8 +666,8 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
" popl %edi\n"
" popl %ebp\n"
" popl %eax\n"
- /* Skip ip, orig_ax, es, ds, fs */
- " addl $20, %esp\n"
+ /* Skip ds, es, fs, gs, orig_ax and ip */
+ " addl $24, %esp\n"
" popf\n"
#endif
" ret\n");
@@ -691,6 +691,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
regs->cs = __KERNEL_CS;
#else
regs->cs = __KERNEL_CS | get_kernel_rpl();
+ regs->gs = 0;
#endif
regs->ip = trampoline_address;
regs->orig_ax = ~0UL;