From af8b3cd3934ec60f4c2a420d19a9d416554f140b Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Tue, 14 Feb 2017 00:11:02 -0800 Subject: x86/process: Optimize TIF checks in __switch_to_xtra() Help the compiler to avoid reevaluating the thread flags for each checked bit by reordering the bit checks and providing an explicit xor for evaluation. With default defconfigs for each arch, x86_64: arch/x86/kernel/process.o text data bss dec hex 3056 8577 16 11649 2d81 Before 3024 8577 16 11617 2d61 After i386: arch/x86/kernel/process.o text data bss dec hex 2957 8673 8 11638 2d76 Before 2925 8673 8 11606 2d56 After Originally-by: Thomas Gleixner Signed-off-by: Kyle Huey Cc: Peter Zijlstra Cc: Andy Lutomirski Link: http://lkml.kernel.org/r/20170214081104.9244-2-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process.c | 65 ++++++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index f67591561711..ea9ea2582dab 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -182,54 +182,61 @@ int set_tsc_mode(unsigned int val) return 0; } +static inline void switch_to_bitmap(struct tss_struct *tss, + struct thread_struct *prev, + struct thread_struct *next, + unsigned long tifp, unsigned long tifn) +{ + if (tifn & _TIF_IO_BITMAP) { + /* + * Copy the relevant range of the IO bitmap. + * Normally this is 128 bytes or less: + */ + memcpy(tss->io_bitmap, next->io_bitmap_ptr, + max(prev->io_bitmap_max, next->io_bitmap_max)); + /* + * Make sure that the TSS limit is correct for the CPU + * to notice the IO bitmap. + */ + refresh_tss_limit(); + } else if (tifp & _TIF_IO_BITMAP) { + /* + * Clear any possible leftover bits: + */ + memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); + } +} + void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, struct tss_struct *tss) { struct thread_struct *prev, *next; + unsigned long tifp, tifn; prev = &prev_p->thread; next = &next_p->thread; - if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^ - test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) { + tifn = READ_ONCE(task_thread_info(next_p)->flags); + tifp = READ_ONCE(task_thread_info(prev_p)->flags); + switch_to_bitmap(tss, prev, next, tifp, tifn); + + propagate_user_return_notify(prev_p, next_p); + + if ((tifp ^ tifn) & _TIF_BLOCKSTEP) { unsigned long debugctl = get_debugctlmsr(); debugctl &= ~DEBUGCTLMSR_BTF; - if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) + if (tifn & _TIF_BLOCKSTEP) debugctl |= DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); } - if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ - test_tsk_thread_flag(next_p, TIF_NOTSC)) { - /* prev and next are different */ - if (test_tsk_thread_flag(next_p, TIF_NOTSC)) + if ((tifp ^ tifn) & _TIF_NOTSC) { + if (tifn & _TIF_NOTSC) hard_disable_TSC(); else hard_enable_TSC(); } - - if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { - /* - * Copy the relevant range of the IO bitmap. - * Normally this is 128 bytes or less: - */ - memcpy(tss->io_bitmap, next->io_bitmap_ptr, - max(prev->io_bitmap_max, next->io_bitmap_max)); - - /* - * Make sure that the TSS limit is correct for the CPU - * to notice the IO bitmap. 
- */ refresh_tss_limit(); } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { - /* - * Clear any possible leftover bits: - */ - memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); - } - propagate_user_return_notify(prev_p, next_p); } /* -- cgit v1.2.1 From b9894a2f5bd18b1691cb6872c9afe32b148d0132 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Tue, 14 Feb 2017 00:11:03 -0800 Subject: x86/process: Correct and optimize TIF_BLOCKSTEP switch The debug control MSR is "highly magical" as the blockstep bit can be cleared by hardware under not well documented circumstances. So a task switch relying on the bit set by the previous task (according to the previous task's thread flags) can trip over this and not update the flag for the next task. To fix this it's required to handle DEBUGCTLMSR_BTF when either the previous or the next or both tasks have the TIF_BLOCKSTEP flag set. While at it avoid branching within the TIF_BLOCKSTEP case and evaluating boot_cpu_data twice in kernels without CONFIG_X86_DEBUGCTLMSR. x86_64: arch/x86/kernel/process.o text data bss dec hex 3024 8577 16 11617 2d61 Before 3008 8577 16 11601 2d51 After i386: No change [ tglx: Made the shift value explicit, used a local variable to make the code readable and massaged the changelog ] Originally-by: Thomas Gleixner Signed-off-by: Kyle Huey Cc: Peter Zijlstra Cc: Andy Lutomirski Link: http://lkml.kernel.org/r/20170214081104.9244-3-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/msr-index.h | 1 + arch/x86/kernel/process.c | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index d8b5f8ab8ef9..4c928f332f8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -127,6 +127,7 @@ /* DEBUGCTLMSR bits (others vary by model): */ #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ +#define DEBUGCTLMSR_BTF_SHIFT 1 #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ #define DEBUGCTLMSR_TR (1UL << 6) #define DEBUGCTLMSR_BTS (1UL << 7) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ea9ea2582dab..83fa3cb4f8f0 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -222,13 +222,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, propagate_user_return_notify(prev_p, next_p); - if ((tifp ^ tifn) & _TIF_BLOCKSTEP) { - unsigned long debugctl = get_debugctlmsr(); + if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) && + arch_has_block_step()) { + unsigned long debugctl, msk; + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); debugctl &= ~DEBUGCTLMSR_BTF; - if (tifn & _TIF_BLOCKSTEP) - debugctl |= DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); + msk = tifn & _TIF_BLOCKSTEP; + debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT; + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); } if ((tifp ^ tifn) & _TIF_NOTSC) { -- cgit v1.2.1 From 5a920155e388ec22a22e0532fb695b9215c9b34d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Feb 2017 00:11:04 -0800 Subject: x86/process: Optimize TIF_NOTSC switch Provide and use a toggle helper instead of doing it with a branch.
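The bit tricks these three patches rely on (XOR to expose the flag bits that differ between the two tasks, a shift pair to move a TIF bit into its MSR bit position, and XOR again to toggle a CR4 bit) can be sanity-checked in a standalone userspace program. A minimal sketch, with the kernel constants re-declared locally; the values are hypothetical except where the patches define them:

#include <assert.h>

#define TIF_BLOCKSTEP		8	/* hypothetical; the kernel's value differs */
#define _TIF_BLOCKSTEP		(1UL << TIF_BLOCKSTEP)
#define _TIF_NOTSC		(1UL << 16)
#define DEBUGCTLMSR_BTF_SHIFT	1	/* as defined in the patch above */
#define X86_CR4_TSD		(1UL << 2)

int main(void)
{
	unsigned long tifp = _TIF_BLOCKSTEP;	/* previous task's flags */
	unsigned long tifn = _TIF_NOTSC;	/* next task's flags */
	unsigned long debugctl = 1UL << DEBUGCTLMSR_BTF_SHIFT;
	unsigned long cr4 = 0;

	/* XOR yields exactly the bits that differ between prev and next. */
	assert((tifp ^ tifn) & _TIF_BLOCKSTEP);
	assert((tifp ^ tifn) & _TIF_NOTSC);

	/* Branchless transfer of next's TIF bit into the MSR bit position. */
	debugctl &= ~(1UL << DEBUGCTLMSR_BTF_SHIFT);
	debugctl |= ((tifn & _TIF_BLOCKSTEP) >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
	assert(debugctl == 0);		/* next task has BLOCKSTEP clear */

	/* Only tasks whose flags differ reach this point, so a toggle
	   replaces the set/clear branch. */
	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4 ^= X86_CR4_TSD;
	assert(cr4 == X86_CR4_TSD);	/* next task has NOTSC set */
	return 0;
}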
x86_64: arch/x86/kernel/process.o text data bss dec hex 3008 8577 16 11601 2d51 Before 2976 8577 16 11569 2d31 After i386: arch/x86/kernel/process.o text data bss dec hex 2925 8673 8 11606 2d56 Before 2893 8673 8 11574 2d36 After Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Andy Lutomirski Link: http://lkml.kernel.org/r/20170214081104.9244-4-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/tlbflush.h | 10 ++++++++++ arch/x86/kernel/process.c | 22 ++++------------------ 2 files changed, 14 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 6fa85944af83..ff4923a19f79 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -110,6 +110,16 @@ static inline void cr4_clear_bits(unsigned long mask) } } +static inline void cr4_toggle_bits(unsigned long mask) +{ + unsigned long cr4; + + cr4 = this_cpu_read(cpu_tlbstate.cr4); + cr4 ^= mask; + this_cpu_write(cpu_tlbstate.cr4, cr4); + __write_cr4(cr4); +} + /* Read the CR4 shadow. */ static inline unsigned long cr4_read_shadow(void) { diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 83fa3cb4f8f0..366db7782fc6 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -124,11 +124,6 @@ void flush_thread(void) fpu__clear(&tsk->thread.fpu); } -static void hard_disable_TSC(void) -{ - cr4_set_bits(X86_CR4_TSD); -} - void disable_TSC(void) { preempt_disable(); @@ -137,15 +132,10 @@ void disable_TSC(void) * Must flip the CPU state synchronously with * TIF_NOTSC in the current running context. */ - hard_disable_TSC(); + cr4_set_bits(X86_CR4_TSD); preempt_enable(); } -static void hard_enable_TSC(void) -{ - cr4_clear_bits(X86_CR4_TSD); -} - static void enable_TSC(void) { preempt_disable(); @@ -154,7 +144,7 @@ static void enable_TSC(void) * Must flip the CPU state synchronously with * TIF_NOTSC in the current running context. */ - hard_enable_TSC(); + cr4_clear_bits(X86_CR4_TSD); preempt_enable(); } @@ -233,12 +223,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); } - if ((tifp ^ tifn) & _TIF_NOTSC) { - if (tifn & _TIF_NOTSC) - hard_disable_TSC(); - else - hard_enable_TSC(); - } + if ((tifp ^ tifn) & _TIF_NOTSC) + cr4_toggle_bits(X86_CR4_TSD); } /* -- cgit v1.2.1 From ed827adb009490673c9c63e0b716e0fa36afbcc1 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 10 Feb 2017 02:23:58 -0500 Subject: perf/x86: Add Top Down events to Intel Goldmont Goldmont supports full Top Down level 1 metrics (Frontend Bound, Retiring, Backend Bound and Bad Speculation). It has a 3-wide pipeline.
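The events are exposed through sysfs so tooling can compute the level-1 Top Down metrics. As an illustration of what the raw encodings added by this patch mean, a minimal perf_event_open() sketch that counts the event behind topdown-fetch-bubbles (event=0x9c, per the encodings below; valid only on Goldmont, error handling kept minimal):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x9c;	/* UOPS_NOT_DELIVERED.ANY on Goldmont */

	/* Count on the current task, any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... run the workload to be measured here ... */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("fetch bubbles: %lld\n", count);
	close(fd);
	return 0;
}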
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/1486711438-80058-1-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'arch') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index eb1484c86bb4..4244bed77824 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -1553,6 +1553,27 @@ static __initconst const u64 slm_hw_cache_event_ids }, }; +EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c"); +EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3"); +/* UOPS_NOT_DELIVERED.ANY */ +EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c"); +/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */ +EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02"); +/* UOPS_RETIRED.ANY */ +EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2"); +/* UOPS_ISSUED.ANY */ +EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e"); + +static struct attribute *glm_events_attrs[] = { + EVENT_PTR(td_total_slots_glm), + EVENT_PTR(td_total_slots_scale_glm), + EVENT_PTR(td_fetch_bubbles_glm), + EVENT_PTR(td_recovery_bubbles_glm), + EVENT_PTR(td_slots_issued_glm), + EVENT_PTR(td_slots_retired_glm), + NULL +}; + static struct extra_reg intel_glm_extra_regs[] __read_mostly = { /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0), @@ -3750,6 +3771,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_prec_dist = true; x86_pmu.lbr_pt_coexist = true; x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.cpu_events = glm_events_attrs; pr_cont("Goldmont events, "); break; -- cgit v1.2.1 From f4c0b0aa58d9b7e30ab0a95e33da84d53b3d764a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 20 Feb 2017 15:33:50 +0200 Subject: perf/core: Keep AUX flags in the output handle In preparation for adding more flags to perf AUX records, introduce a separate API for setting the flags for a session, rather than appending more bool arguments to perf_aux_output_end. This allows setting each flag at the time the corresponding condition is detected, instead of tracking it in each driver's private state.
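The shape of the conversion is the same in every driver, as the hunks below show. A schematic fragment of the pattern (kernel context, not compilable on its own; the foo_* names are hypothetical, the perf_aux_* calls are the API from this patch):

/* Before: truncation tracked in driver state, passed as a bool at the end. */
	if (foo_buffer_full(foo))
		local_inc(&foo->buf.lost);
	/* ... */
	perf_aux_output_end(&foo->handle, size, !!local_xchg(&foo->buf.lost, 0));

/* After: raise the flag where the condition is detected... */
	if (foo_buffer_full(foo))
		perf_aux_output_flag(&foo->handle, PERF_AUX_FLAG_TRUNCATED);
	/* ... and ending the transaction takes only the size. */
	perf_aux_output_end(&foo->handle, size);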
Signed-off-by: Will Deacon Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Mathieu Poirier Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: vince@deater.net Link: http://lkml.kernel.org/r/20170220133352.17995-3-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/bts.c | 16 +++++++--------- arch/x86/events/intel/pt.c | 17 +++++++++-------- arch/x86/events/intel/pt.h | 1 - 3 files changed, 16 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 982c9e31daca..8ae8c5ce3a1f 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -63,7 +63,6 @@ struct bts_buffer { unsigned int cur_buf; bool snapshot; local_t data_size; - local_t lost; local_t head; unsigned long end; void **data_pages; @@ -199,7 +198,8 @@ static void bts_update(struct bts_ctx *bts) return; if (ds->bts_index >= ds->bts_absolute_maximum) - local_inc(&buf->lost); + perf_aux_output_flag(&bts->handle, + PERF_AUX_FLAG_TRUNCATED); /* * old and head are always in the same physical buffer, so we @@ -276,7 +276,7 @@ static void bts_event_start(struct perf_event *event, int flags) return; fail_end_stop: - perf_aux_output_end(&bts->handle, 0, false); + perf_aux_output_end(&bts->handle, 0); fail_stop: event->hw.state = PERF_HES_STOPPED; @@ -319,9 +319,8 @@ static void bts_event_stop(struct perf_event *event, int flags) bts->handle.head = local_xchg(&buf->data_size, buf->nr_pages << PAGE_SHIFT); - - perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0), - !!local_xchg(&buf->lost, 0)); + perf_aux_output_end(&bts->handle, + local_xchg(&buf->data_size, 0)); } cpuc->ds->bts_index = bts->ds_back.bts_buffer_base; @@ -484,8 +483,7 @@ int intel_bts_interrupt(void) if (old_head == local_read(&buf->head)) return handled; - perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0), - !!local_xchg(&buf->lost, 0)); + perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0)); buf = perf_aux_output_begin(&bts->handle, event); if (buf) @@ -500,7 +498,7 @@ int intel_bts_interrupt(void) * cleared handle::event */ barrier(); - perf_aux_output_end(&bts->handle, 0, false); + perf_aux_output_end(&bts->handle, 0); } } diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 5900471ee508..0218728be37a 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -753,7 +753,8 @@ static void pt_handle_status(struct pt *pt) */ if (!pt_cap_get(PT_CAP_topa_multiple_entries) || buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) { - local_inc(&buf->lost); + perf_aux_output_flag(&pt->handle, + PERF_AUX_FLAG_TRUNCATED); advance++; } } @@ -846,8 +847,10 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, /* can't stop in the middle of an output region */ if (buf->output_off + handle->size + 1 < - sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) + sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) { + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); return -EINVAL; + } /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */ @@ -1192,8 +1195,7 @@ void intel_pt_interrupt(void) pt_update_head(pt); - perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0), - local_xchg(&buf->lost, 0)); + perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0)); if (!event->hw.state) { int ret; @@ 
-1208,7 +1210,7 @@ void intel_pt_interrupt(void) /* snapshot counters don't use PMI, so it's safe */ ret = pt_buffer_reset_markers(buf, &pt->handle); if (ret) { - perf_aux_output_end(&pt->handle, 0, true); + perf_aux_output_end(&pt->handle, 0); return; } @@ -1280,7 +1282,7 @@ static void pt_event_start(struct perf_event *event, int mode) return; fail_end_stop: - perf_aux_output_end(&pt->handle, 0, true); + perf_aux_output_end(&pt->handle, 0); fail_stop: hwc->state = PERF_HES_STOPPED; } @@ -1321,8 +1323,7 @@ static void pt_event_stop(struct perf_event *event, int mode) pt->handle.head = local_xchg(&buf->data_size, buf->nr_pages << PAGE_SHIFT); - perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0), - local_xchg(&buf->lost, 0)); + perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0)); } } diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index 53473c21b554..b528e8f373e4 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -143,7 +143,6 @@ struct pt_buffer { size_t output_off; unsigned long nr_pages; local_t data_size; - local_t lost; local64_t head; bool snapshot; unsigned long stop_pos, intr_pos; -- cgit v1.2.1 From ee368428aac96d94a9804b9109a81355451c3cd9 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Mon, 20 Feb 2017 15:33:52 +0200 Subject: perf/x86/intel/pt: Handle VMX better Since commit: 1c5ac21a0e ("perf/x86/intel/pt: Don't die on VMXON") ... PT events depend on re-scheduling to get enabled after a VMX session has taken place. This is, in particular, a problem for CPU context events, which don't normally get re-scheduled, unless there is a reason. This patch changes the VMX handling so that PT event gets re-enabled when VMX root mode exits. Also, notify the user when there's a gap in PT data due to VMX root mode by flagging AUX records as partial. In combination with vmm_exclusive=0 parameter of the kvm_intel driver, this will result in trace gaps only for the duration of the guest's timeslices. 
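On the tooling side, the new flag arrives in the flags field of PERF_RECORD_AUX records; PERF_AUX_FLAG_PARTIAL itself is added by the core part of this series, which the diffs here (limited to 'arch') do not show. A sketch of how a ring-buffer consumer might react, with the record layout per the perf ABI (trailing sample_id fields omitted):

#include <linux/perf_event.h>
#include <stdio.h>

struct aux_record {			/* header.type == PERF_RECORD_AUX */
	struct perf_event_header header;
	__u64 aux_offset;
	__u64 aux_size;
	__u64 flags;
};

static void check_aux_flags(const struct aux_record *rec)
{
	if (rec->flags & PERF_AUX_FLAG_TRUNCATED)
		fprintf(stderr, "AUX buffer wrapped, trace truncated\n");
	if (rec->flags & PERF_AUX_FLAG_PARTIAL)
		fprintf(stderr, "gap in PT data (VMX root mode); consider "
			"kvm_intel vmm_exclusive=0\n");
}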
Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: vince@deater.net Link: http://lkml.kernel.org/r/20170220133352.17995-5-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/pt.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 0218728be37a..354e9ff2978c 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -411,6 +411,7 @@ static u64 pt_config_filters(struct perf_event *event) static void pt_config(struct perf_event *event) { + struct pt *pt = this_cpu_ptr(&pt_ctx); u64 reg; if (!event->hw.itrace_started) { @@ -429,11 +430,15 @@ static void pt_config(struct perf_event *event) reg |= (event->attr.config & PT_CONFIG_MASK); event->hw.config = reg; - wrmsrl(MSR_IA32_RTIT_CTL, reg); + if (READ_ONCE(pt->vmx_on)) + perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL); + else + wrmsrl(MSR_IA32_RTIT_CTL, reg); } static void pt_config_stop(struct perf_event *event) { + struct pt *pt = this_cpu_ptr(&pt_ctx); u64 ctl = READ_ONCE(event->hw.config); /* may be already stopped by a PMI */ @@ -441,7 +446,8 @@ static void pt_config_stop(struct perf_event *event) return; ctl &= ~RTIT_CTL_TRACEEN; - wrmsrl(MSR_IA32_RTIT_CTL, ctl); + if (!READ_ONCE(pt->vmx_on)) + wrmsrl(MSR_IA32_RTIT_CTL, ctl); WRITE_ONCE(event->hw.config, ctl); @@ -1174,12 +1180,6 @@ void intel_pt_interrupt(void) if (!READ_ONCE(pt->handle_nmi)) return; - /* - * If VMX is on and PT does not support it, don't touch anything. - */ - if (READ_ONCE(pt->vmx_on)) - return; - if (!event) return; @@ -1239,12 +1239,19 @@ void intel_pt_handle_vmx(int on) local_irq_save(flags); WRITE_ONCE(pt->vmx_on, on); - if (on) { - /* prevent pt_config_stop() from writing RTIT_CTL */ - event = pt->handle.event; - if (event) - event->hw.config = 0; - } + /* + * If an AUX transaction is in progress, it will contain + * gap(s), so flag it PARTIAL to inform the user. + */ + event = pt->handle.event; + if (event) + perf_aux_output_flag(&pt->handle, + PERF_AUX_FLAG_PARTIAL); + + /* Turn PTs back on */ + if (!on && event) + wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config); + local_irq_restore(flags); } EXPORT_SYMBOL_GPL(intel_pt_handle_vmx); @@ -1259,9 +1266,6 @@ static void pt_event_start(struct perf_event *event, int mode) struct pt *pt = this_cpu_ptr(&pt_ctx); struct pt_buffer *buf; - if (READ_ONCE(pt->vmx_on)) - return; - buf = perf_aux_output_begin(&pt->handle, event); if (!buf) goto fail_stop; -- cgit v1.2.1 From ab6d9468631a6e56e4c071c6ce6710956485fe08 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:19 -0700 Subject: x86/msr: Rename MISC_FEATURE_ENABLES to MISC_FEATURES_ENABLES MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This matches the only public Intel documentation of this MSR, in the "Virtualization Technology FlexMigration Application Note" (preserved at https://bugzilla.kernel.org/attachment.cgi?id=243991) Signed-off-by: Kyle Huey Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. 
Wysocki" Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-2-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/msr-index.h | 6 +++--- arch/x86/kernel/cpu/intel.c | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4c928f332f8f..f429b70ebaef 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -553,10 +553,10 @@ #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT) -/* MISC_FEATURE_ENABLES non-architectural features */ -#define MSR_MISC_FEATURE_ENABLES 0x00000140 +/* MISC_FEATURES_ENABLES non-architectural features */ +#define MSR_MISC_FEATURES_ENABLES 0x00000140 -#define MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT 1 +#define MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT 1 #define MSR_IA32_TSC_DEADLINE 0x000006E0 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 063197771b8d..e229318d7230 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -91,13 +91,13 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) } if (ring3mwait_disabled) { - msr_clear_bit(MSR_MISC_FEATURE_ENABLES, - MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT); + msr_clear_bit(MSR_MISC_FEATURES_ENABLES, + MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT); return; } - msr_set_bit(MSR_MISC_FEATURE_ENABLES, - MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT); + msr_set_bit(MSR_MISC_FEATURES_ENABLES, + MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT); set_cpu_cap(c, X86_FEATURE_RING3MWAIT); -- cgit v1.2.1 From dd93938a92dc067aba70c401bdf2e50ed58083db Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:20 -0700 Subject: x86/arch_prctl: Rename 'code' argument to 'option' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The x86 specific arch_prctl() arbitrarily changed prctl's 'option' to 'code'. Before adding new options, rename it. Signed-off-by: Kyle Huey Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. 
Wysocki" Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-3-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/um/include/shared/os.h | 4 ++-- arch/x86/include/asm/proto.h | 2 +- arch/x86/kernel/process_64.c | 8 ++++---- arch/x86/um/asm/ptrace.h | 2 +- arch/x86/um/os-Linux/prctl.c | 4 ++-- arch/x86/um/syscalls_64.c | 13 +++++++------ 6 files changed, 17 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index de5d572225f3..32e41c4ef6d3 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -302,8 +302,8 @@ extern int ignore_sigio_fd(int fd); extern void maybe_sigio_broken(int fd, int read); extern void sigio_broken(int fd, int read); -/* sys-x86_64/prctl.c */ -extern int os_arch_prctl(int pid, int code, unsigned long *addr); +/* prctl.c */ +extern int os_arch_prctl(int pid, int option, unsigned long *addr); /* tty.c */ extern int get_pty(void); diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 9b9b30b19441..91675a960391 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -30,6 +30,6 @@ void x86_report_nx(void); extern int reboot_force; -long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); +long do_arch_prctl(struct task_struct *task, int option, unsigned long addr); #endif /* _ASM_X86_PROTO_H */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index d6b784a5520d..4377cfe8e449 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -547,13 +547,13 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) } #endif -long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) +long do_arch_prctl(struct task_struct *task, int option, unsigned long addr) { int ret = 0; int doit = task == current; int cpu; - switch (code) { + switch (option) { case ARCH_SET_GS: if (addr >= TASK_SIZE_MAX) return -EPERM; @@ -621,9 +621,9 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) return ret; } -long sys_arch_prctl(int code, unsigned long addr) +long sys_arch_prctl(int option, unsigned long addr) { - return do_arch_prctl(current, code, addr); + return do_arch_prctl(current, option, addr); } unsigned long KSTK_ESP(struct task_struct *task) diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h index e59eef20647b..b291ca5cf66b 100644 --- a/arch/x86/um/asm/ptrace.h +++ b/arch/x86/um/asm/ptrace.h @@ -78,7 +78,7 @@ static inline int ptrace_set_thread_area(struct task_struct *child, int idx, return -ENOSYS; } -extern long arch_prctl(struct task_struct *task, int code, +extern long arch_prctl(struct task_struct *task, int option, unsigned long __user *addr); #endif diff --git a/arch/x86/um/os-Linux/prctl.c b/arch/x86/um/os-Linux/prctl.c index 96eb2bd28832..0a6e16a35b77 100644 --- a/arch/x86/um/os-Linux/prctl.c +++ b/arch/x86/um/os-Linux/prctl.c @@ -6,7 +6,7 @@ #include #include -int os_arch_prctl(int pid, int code, unsigned long *addr) +int os_arch_prctl(int pid, int option, unsigned long *addr) { - return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) addr, code); + return ptrace(PTRACE_ARCH_PRCTL, pid, 
(unsigned long) addr, option); } diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index 10d907098c26..3c2dd8768992 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -11,7 +11,8 @@ #include /* XXX This should get the constants from libc */ #include -long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) +long arch_prctl(struct task_struct *task, int option + unsigned long __user *addr) { unsigned long *ptr = addr, tmp; long ret; @@ -30,7 +31,7 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) * arch_prctl is run on the host, then the registers are read * back. */ - switch (code) { + switch (option) { case ARCH_SET_FS: case ARCH_SET_GS: ret = restore_registers(pid, ¤t->thread.regs.regs); @@ -50,11 +51,11 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) ptr = &tmp; } - ret = os_arch_prctl(pid, code, ptr); + ret = os_arch_prctl(pid, option, ptr); if (ret) return ret; - switch (code) { + switch (option) { case ARCH_SET_FS: current->thread.arch.fs = (unsigned long) ptr; ret = save_registers(pid, ¤t->thread.regs.regs); @@ -73,9 +74,9 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) return ret; } -long sys_arch_prctl(int code, unsigned long addr) +long sys_arch_prctl(int option, unsigned long addr) { - return arch_prctl(current, code, (unsigned long __user *) addr); + return arch_prctl(current, option, (unsigned long __user *) addr); } void arch_switch_to(struct task_struct *to) -- cgit v1.2.1 From ff3f097eef30151f5ee250859e0fe8a0ec02c160 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:21 -0700 Subject: x86/arch_prctl/64: Use SYSCALL_DEFINE2 to define sys_arch_prctl() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the SYSCALL_DEFINE2 macro instead of manually defining it. Signed-off-by: Kyle Huey Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. 
Wysocki" Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-4-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_64.c | 3 ++- arch/x86/um/syscalls_64.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 4377cfe8e449..bf9d7b6c0223 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -621,7 +622,7 @@ long do_arch_prctl(struct task_struct *task, int option, unsigned long addr) return ret; } -long sys_arch_prctl(int option, unsigned long addr) +SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, addr) { return do_arch_prctl(current, option, addr); } diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index 3c2dd8768992..42369fa5421f 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -7,6 +7,7 @@ #include #include +#include #include #include /* XXX This should get the constants from libc */ #include @@ -74,7 +75,7 @@ long arch_prctl(struct task_struct *task, int option return ret; } -long sys_arch_prctl(int option, unsigned long addr) +SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, addr) { return arch_prctl(current, option, (unsigned long __user *) addr); } -- cgit v1.2.1 From 17a6e1b8e8e8539f89156643f8c3073f09ec446a Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:22 -0700 Subject: x86/arch_prctl/64: Rename do_arch_prctl() to do_arch_prctl_64() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to introduce new arch_prctls that are not 64 bit only, rename the existing 64 bit implementation to do_arch_prctl_64(). Also rename the second argument of that function from 'addr' to 'arg2', because it will no longer always be an address. Signed-off-by: Kyle Huey Reviewed-by: Andy Lutomirski Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. 
Wysocki" Cc: Borislav Petkov Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-5-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/um/include/shared/os.h | 2 +- arch/x86/include/asm/proto.h | 3 +-- arch/x86/kernel/process_64.c | 32 +++++++++++++++++--------------- arch/x86/kernel/ptrace.c | 8 ++++---- arch/x86/um/os-Linux/prctl.c | 4 ++-- arch/x86/um/syscalls_64.c | 14 +++++++------- 6 files changed, 32 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 32e41c4ef6d3..cd1fa97776c3 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -303,7 +303,7 @@ extern void maybe_sigio_broken(int fd, int read); extern void sigio_broken(int fd, int read); /* prctl.c */ -extern int os_arch_prctl(int pid, int option, unsigned long *addr); +extern int os_arch_prctl(int pid, int option, unsigned long *arg2); /* tty.c */ extern int get_pty(void); diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 91675a960391..4e276f6cb1c1 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -9,6 +9,7 @@ void syscall_init(void); #ifdef CONFIG_X86_64 void entry_SYSCALL_64(void); +long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2); #endif #ifdef CONFIG_X86_32 @@ -30,6 +31,4 @@ void x86_report_nx(void); extern int reboot_force; -long do_arch_prctl(struct task_struct *task, int option, unsigned long addr); - #endif /* _ASM_X86_PROTO_H */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index bf9d7b6c0223..e37f764c11cc 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -205,7 +205,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, (struct user_desc __user *)tls, 0); else #endif - err = do_arch_prctl(p, ARCH_SET_FS, tls); + err = do_arch_prctl_64(p, ARCH_SET_FS, tls); if (err) goto out; } @@ -548,7 +548,7 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) } #endif -long do_arch_prctl(struct task_struct *task, int option, unsigned long addr) +long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) { int ret = 0; int doit = task == current; @@ -556,62 +556,64 @@ long do_arch_prctl(struct task_struct *task, int option, unsigned long addr) switch (option) { case ARCH_SET_GS: - if (addr >= TASK_SIZE_MAX) + if (arg2 >= TASK_SIZE_MAX) return -EPERM; cpu = get_cpu(); task->thread.gsindex = 0; - task->thread.gsbase = addr; + task->thread.gsbase = arg2; if (doit) { load_gs_index(0); - ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr); + ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2); } put_cpu(); break; case ARCH_SET_FS: /* Not strictly needed for fs, but do it for symmetry with gs */ - if (addr >= TASK_SIZE_MAX) + if (arg2 >= TASK_SIZE_MAX) return -EPERM; cpu = get_cpu(); task->thread.fsindex = 0; - task->thread.fsbase = addr; + task->thread.fsbase = arg2; if (doit) { /* set the selector to 0 to not confuse __switch_to */ loadsegment(fs, 0); - ret = wrmsrl_safe(MSR_FS_BASE, addr); + ret = wrmsrl_safe(MSR_FS_BASE, arg2); } put_cpu(); break; case ARCH_GET_FS: { unsigned long base; + if (doit) rdmsrl(MSR_FS_BASE, base); else base = task->thread.fsbase; - ret = 
put_user(base, (unsigned long __user *)addr); + ret = put_user(base, (unsigned long __user *)arg2); break; } case ARCH_GET_GS: { unsigned long base; + if (doit) rdmsrl(MSR_KERNEL_GS_BASE, base); else base = task->thread.gsbase; - ret = put_user(base, (unsigned long __user *)addr); + ret = put_user(base, (unsigned long __user *)arg2); break; } #ifdef CONFIG_CHECKPOINT_RESTORE # ifdef CONFIG_X86_X32_ABI case ARCH_MAP_VDSO_X32: - return prctl_map_vdso(&vdso_image_x32, addr); + return prctl_map_vdso(&vdso_image_x32, arg2); # endif # if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION case ARCH_MAP_VDSO_32: - return prctl_map_vdso(&vdso_image_32, addr); + return prctl_map_vdso(&vdso_image_32, arg2); # endif case ARCH_MAP_VDSO_64: - return prctl_map_vdso(&vdso_image_64, addr); + return prctl_map_vdso(&vdso_image_64, arg2); #endif default: @@ -622,9 +624,9 @@ long do_arch_prctl(struct task_struct *task, int option, unsigned long addr) return ret; } -SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, addr) +SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) { - return do_arch_prctl(current, option, addr); + return do_arch_prctl_64(current, option, arg2); } unsigned long KSTK_ESP(struct task_struct *task) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 2364b23ea3e5..f37d18124648 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -396,12 +396,12 @@ static int putreg(struct task_struct *child, if (value >= TASK_SIZE_MAX) return -EIO; /* - * When changing the segment base, use do_arch_prctl + * When changing the segment base, use do_arch_prctl_64 * to set either thread.fs or thread.fsindex and the * corresponding GDT slot. */ if (child->thread.fsbase != value) - return do_arch_prctl(child, ARCH_SET_FS, value); + return do_arch_prctl_64(child, ARCH_SET_FS, value); return 0; case offsetof(struct user_regs_struct,gs_base): /* @@ -410,7 +410,7 @@ static int putreg(struct task_struct *child, if (value >= TASK_SIZE_MAX) return -EIO; if (child->thread.gsbase != value) - return do_arch_prctl(child, ARCH_SET_GS, value); + return do_arch_prctl_64(child, ARCH_SET_GS, value); return 0; #endif } @@ -869,7 +869,7 @@ long arch_ptrace(struct task_struct *child, long request, Works just like arch_prctl, except that the arguments are reversed. 
*/ case PTRACE_ARCH_PRCTL: - ret = do_arch_prctl(child, data, addr); + ret = do_arch_prctl_64(child, data, addr); break; #endif diff --git a/arch/x86/um/os-Linux/prctl.c b/arch/x86/um/os-Linux/prctl.c index 0a6e16a35b77..8431e87ac333 100644 --- a/arch/x86/um/os-Linux/prctl.c +++ b/arch/x86/um/os-Linux/prctl.c @@ -6,7 +6,7 @@ #include #include -int os_arch_prctl(int pid, int option, unsigned long *addr) +int os_arch_prctl(int pid, int option, unsigned long *arg2) { - return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) addr, option); + return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) arg2, option); } diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index 42369fa5421f..81b9fe100f7c 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -12,10 +12,10 @@ #include /* XXX This should get the constants from libc */ #include -long arch_prctl(struct task_struct *task, int option - unsigned long __user *addr) +long arch_prctl(struct task_struct *task, int option) + unsigned long __user *arg2) { - unsigned long *ptr = addr, tmp; + unsigned long *ptr = arg2, tmp; long ret; int pid = task->mm->context.id.u.pid; @@ -65,19 +65,19 @@ long arch_prctl(struct task_struct *task, int option ret = save_registers(pid, ¤t->thread.regs.regs); break; case ARCH_GET_FS: - ret = put_user(tmp, addr); + ret = put_user(tmp, arg2); break; case ARCH_GET_GS: - ret = put_user(tmp, addr); + ret = put_user(tmp, arg2); break; } return ret; } -SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, addr) +SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) { - return arch_prctl(current, option, (unsigned long __user *) addr); + return arch_prctl(current, option, (unsigned long __user *) arg2); } void arch_switch_to(struct task_struct *to) -- cgit v1.2.1 From b0b9b014016d16ca7a192da986aa8ebae21bb995 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:23 -0700 Subject: x86/arch_prctl: Add do_arch_prctl_common() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add do_arch_prctl_common() to handle arch_prctls that are not specific to 64 bit mode. Call it from the syscall entry point, but not any of the other callsites in the kernel, which all want one of the existing 64 bit only arch_prctls. Signed-off-by: Kyle Huey Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. 
Wysocki" Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-6-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/proto.h | 3 +++ arch/x86/kernel/process.c | 6 ++++++ arch/x86/kernel/process_64.c | 8 +++++++- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 4e276f6cb1c1..8d3964fc5f91 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -31,4 +31,7 @@ void x86_report_nx(void); extern int reboot_force; +long do_arch_prctl_common(struct task_struct *task, int option, + unsigned long cpuid_enabled); + #endif /* _ASM_X86_PROTO_H */ diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 366db7782fc6..b12e95eceb83 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -545,3 +545,9 @@ out: put_task_stack(p); return ret; } + +long do_arch_prctl_common(struct task_struct *task, int option, + unsigned long cpuid_enabled) +{ + return -EINVAL; +} diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index e37f764c11cc..d81b0a60a45c 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -626,7 +626,13 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) { - return do_arch_prctl_64(current, option, arg2); + long ret; + + ret = do_arch_prctl_64(current, option, arg2); + if (ret == -EINVAL) + ret = do_arch_prctl_common(current, option, arg2); + + return ret; } unsigned long KSTK_ESP(struct task_struct *task) -- cgit v1.2.1 From 79170fda313ed5be2394f87aa2a00d597f8ed4a1 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:24 -0700 Subject: x86/syscalls/32: Wire up arch_prctl on x86-32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hook up arch_prctl to call do_arch_prctl() on x86-32, and in 32 bit compat mode on x86-64. This allows to have arch_prctls that are not specific to 64 bits. On UML, simply stub out this syscall. Signed-off-by: Kyle Huey Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. 
Wysocki" Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-7-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/entry/syscalls/syscall_32.tbl | 1 + arch/x86/kernel/process_32.c | 7 +++++++ arch/x86/kernel/process_64.c | 7 +++++++ arch/x86/um/Makefile | 2 +- arch/x86/um/syscalls_32.c | 7 +++++++ 5 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 arch/x86/um/syscalls_32.c (limited to 'arch') diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 9ba050fe47f3..0af59fa789ea 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -390,3 +390,4 @@ 381 i386 pkey_alloc sys_pkey_alloc 382 i386 pkey_free sys_pkey_free 383 i386 statx sys_statx +384 i386 arch_prctl sys_arch_prctl compat_sys_arch_prctl diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 4c818f8bc135..ff40e74c9181 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -56,6 +57,7 @@ #include #include #include +#include void __show_regs(struct pt_regs *regs, int all) { @@ -304,3 +306,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } + +SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) +{ + return do_arch_prctl_common(current, option, arg2); +} diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index d81b0a60a45c..ea1a6180bf39 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -635,6 +635,13 @@ SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) return ret; } +#ifdef CONFIG_IA32_EMULATION +COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) +{ + return do_arch_prctl_common(current, option, arg2); +} +#endif + unsigned long KSTK_ESP(struct task_struct *task) { return task_pt_regs(task)->sp; diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile index e7e7055a8658..69f0827d5f53 100644 --- a/arch/x86/um/Makefile +++ b/arch/x86/um/Makefile @@ -16,7 +16,7 @@ obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \ ifeq ($(CONFIG_X86_32),y) -obj-y += checksum_32.o +obj-y += checksum_32.o syscalls_32.o obj-$(CONFIG_ELF_CORE) += elfcore.o subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o diff --git a/arch/x86/um/syscalls_32.c b/arch/x86/um/syscalls_32.c new file mode 100644 index 000000000000..627d68836b16 --- /dev/null +++ b/arch/x86/um/syscalls_32.c @@ -0,0 +1,7 @@ +#include +#include + +SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) +{ + return -EINVAL; +} -- cgit v1.2.1 From 90218ac77d0582eaf2d0872d8d900cbd5bf1f205 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:25 -0700 Subject: x86/cpufeature: Detect CPUID faulting support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Intel supports faulting on the CPUID instruction beginning with Ivy Bridge. When enabled, the processor will fault on attempts to execute the CPUID instruction with CPL>0. This will allow a ptracer to emulate the CPUID instruction. Bit 31 of MSR_PLATFORM_INFO advertises support for this feature. 
It is documented in detail in Section 2.3.2 of https://bugzilla.kernel.org/attachment.cgi?id=243991 Detect support for this feature and expose it as X86_FEATURE_CPUID_FAULT. Signed-off-by: Kyle Huey Reviewed-by: Borislav Petkov Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. Wysocki" Cc: Andy Lutomirski Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-8-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/msr-index.h | 2 ++ arch/x86/kernel/cpu/intel.c | 24 +++++++++++++++++++++++- 3 files changed, 26 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index b04bb6dfed7f..0fe00446f9ca 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -187,6 +187,7 @@ * Reuse free bits when adding new feature flags! */ #define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ +#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index f429b70ebaef..b1f75daca34b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -45,6 +45,8 @@ #define MSR_IA32_PERFCTR1 0x000000c2 #define MSR_FSB_FREQ 0x000000cd #define MSR_PLATFORM_INFO 0x000000ce +#define MSR_PLATFORM_INFO_CPUID_FAULT_BIT 31 +#define MSR_PLATFORM_INFO_CPUID_FAULT BIT_ULL(MSR_PLATFORM_INFO_CPUID_FAULT_BIT) #define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 #define NHM_C3_AUTO_DEMOTE (1UL << 25) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index e229318d7230..a07f8295c9ed 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -488,6 +488,28 @@ static void intel_bsp_resume(struct cpuinfo_x86 *c) init_intel_energy_perf(c); } +static void init_cpuid_fault(struct cpuinfo_x86 *c) +{ + u64 msr; + + if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) { + if (msr & MSR_PLATFORM_INFO_CPUID_FAULT) + set_cpu_cap(c, X86_FEATURE_CPUID_FAULT); + } +} + +static void init_intel_misc_features(struct cpuinfo_x86 *c) +{ + u64 msr; + + if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr)) + return; + + /* Check features and update capabilities */ + init_cpuid_fault(c); + probe_xeon_phi_r3mwait(c); +} + static void init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; @@ -602,7 +624,7 @@ static void init_intel(struct cpuinfo_x86 *c) init_intel_energy_perf(c); - probe_xeon_phi_r3mwait(c); + init_intel_misc_features(c); } #ifdef CONFIG_X86_32 -- cgit v1.2.1 From e9ea1e7f53b852147cbd568b0568c7ad97ec21a3 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Mon, 20 Mar 2017 01:16:26 -0700 Subject: x86/arch_prctl: Add ARCH_[GET|SET]_CPUID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Intel supports faulting on the CPUID instruction beginning 
with Ivy Bridge. When enabled, the processor will fault on attempts to execute the CPUID instruction with CPL>0. Exposing this feature to userspace will allow a ptracer to trap and emulate the CPUID instruction. When supported, this feature is controlled by toggling bit 0 of MSR_MISC_FEATURES_ENABLES. It is documented in detail in Section 2.3.2 of https://bugzilla.kernel.org/attachment.cgi?id=243991 Implement a new pair of arch_prctls, available on both x86-32 and x86-64. ARCH_GET_CPUID: Returns the current CPUID state, either 0 if CPUID faulting is enabled (and thus the CPUID instruction is not available) or 1 if CPUID faulting is not enabled. ARCH_SET_CPUID: Set the CPUID state to the second argument. If cpuid_enabled is 0 CPUID faulting will be activated, otherwise it will be deactivated. Returns ENODEV if CPUID faulting is not supported on this system. The state of the CPUID faulting flag is propagated across forks, but reset upon exec. Signed-off-by: Kyle Huey Cc: Grzegorz Andrejczuk Cc: kvm@vger.kernel.org Cc: Radim Krčmář Cc: Peter Zijlstra Cc: Dave Hansen Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: Nadav Amit Cc: Robert O'Callahan Cc: Richard Weinberger Cc: "Rafael J. Wysocki" Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Len Brown Cc: Shuah Khan Cc: user-mode-linux-devel@lists.sourceforge.net Cc: Jeff Dike Cc: Alexander Viro Cc: user-mode-linux-user@lists.sourceforge.net Cc: David Matlack Cc: Boris Ostrovsky Cc: Dmitry Safonov Cc: linux-fsdevel@vger.kernel.org Cc: Paolo Bonzini Link: http://lkml.kernel.org/r/20170320081628.18952-9-khuey@kylehuey.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/msr-index.h | 2 + arch/x86/include/asm/processor.h | 2 + arch/x86/include/asm/thread_info.h | 6 ++- arch/x86/include/uapi/asm/prctl.h | 11 ++++-- arch/x86/kernel/cpu/intel.c | 18 +++++---- arch/x86/kernel/process.c | 78 ++++++++++++++++++++++++++++++++++++++ 6 files changed, 104 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index b1f75daca34b..673f9ac50f6d 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -558,6 +558,8 @@ /* MISC_FEATURES_ENABLES non-architectural features */ #define MSR_MISC_FEATURES_ENABLES 0x00000140 +#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT 0 +#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT BIT_ULL(MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT) #define MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT 1 #define MSR_IA32_TSC_DEADLINE 0x000006E0 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index f385eca5407a..a80c1b3997ed 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -884,6 +884,8 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, extern int get_tsc_mode(unsigned long adr); extern int set_tsc_mode(unsigned int val); +DECLARE_PER_CPU(u64, msr_misc_features_shadow); + /* Register/unregister a process' MPX related resource */ #define MPX_ENABLE_MANAGEMENT() mpx_enable_management() #define MPX_DISABLE_MANAGEMENT() mpx_disable_management() diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index ad6f5eb07a95..9fc44b95f7cb 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -87,6 +87,7 @@ struct thread_info { #define TIF_SECCOMP 8 /* secure computing */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or 
singlestepping */ +#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* IA32 compatibility process */ #define TIF_NOHZ 19 /* in adaptive nohz mode */ @@ -110,6 +111,7 @@ struct thread_info { #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) #define _TIF_NOHZ (1 << TIF_NOHZ) @@ -138,7 +140,7 @@ struct thread_info { /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP) #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) @@ -239,6 +241,8 @@ static inline int arch_within_stack_frames(const void * const stack, extern void arch_task_cache_init(void); extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); extern void arch_release_task_struct(struct task_struct *tsk); +extern void arch_setup_new_exec(void); +#define arch_setup_new_exec arch_setup_new_exec #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_THREAD_INFO_H */ diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h index 835aa51c7f6e..c45765517092 100644 --- a/arch/x86/include/uapi/asm/prctl.h +++ b/arch/x86/include/uapi/asm/prctl.h @@ -1,10 +1,13 @@ #ifndef _ASM_X86_PRCTL_H #define _ASM_X86_PRCTL_H -#define ARCH_SET_GS 0x1001 -#define ARCH_SET_FS 0x1002 -#define ARCH_GET_FS 0x1003 -#define ARCH_GET_GS 0x1004 +#define ARCH_SET_GS 0x1001 +#define ARCH_SET_FS 0x1002 +#define ARCH_GET_FS 0x1003 +#define ARCH_GET_GS 0x1004 + +#define ARCH_GET_CPUID 0x1011 +#define ARCH_SET_CPUID 0x1012 #define ARCH_MAP_VDSO_X32 0x2001 #define ARCH_MAP_VDSO_32 0x2002 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index a07f8295c9ed..dfa90a3a5145 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -90,16 +90,12 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) return; } - if (ring3mwait_disabled) { - msr_clear_bit(MSR_MISC_FEATURES_ENABLES, - MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT); + if (ring3mwait_disabled) return; - } - - msr_set_bit(MSR_MISC_FEATURES_ENABLES, - MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT); set_cpu_cap(c, X86_FEATURE_RING3MWAIT); + this_cpu_or(msr_misc_features_shadow, + 1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT); if (c == &boot_cpu_data) ELF_HWCAP2 |= HWCAP2_RING3MWAIT; @@ -505,9 +501,15 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c) if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr)) return; - /* Check features and update capabilities */ + /* Clear all MISC features */ + this_cpu_write(msr_misc_features_shadow, 0); + + /* Check features and update capabilities and shadow control bits */ init_cpuid_fault(c); probe_xeon_phi_r3mwait(c); + + msr = this_cpu_read(msr_misc_features_shadow); + wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); } static void init_intel(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index b12e95eceb83..0bb88428cbf2 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -37,6 +37,7 @@ #include #include #include +#include /* * per-CPU TSS segments. 
Threads are completely 'soft' on Linux, @@ -172,6 +173,73 @@ int set_tsc_mode(unsigned int val) return 0; } +DEFINE_PER_CPU(u64, msr_misc_features_shadow); + +static void set_cpuid_faulting(bool on) +{ + u64 msrval; + + msrval = this_cpu_read(msr_misc_features_shadow); + msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT; + msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT); + this_cpu_write(msr_misc_features_shadow, msrval); + wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval); +} + +static void disable_cpuid(void) +{ + preempt_disable(); + if (!test_and_set_thread_flag(TIF_NOCPUID)) { + /* + * Must flip the CPU state synchronously with + * TIF_NOCPUID in the current running context. + */ + set_cpuid_faulting(true); + } + preempt_enable(); +} + +static void enable_cpuid(void) +{ + preempt_disable(); + if (test_and_clear_thread_flag(TIF_NOCPUID)) { + /* + * Must flip the CPU state synchronously with + * TIF_NOCPUID in the current running context. + */ + set_cpuid_faulting(false); + } + preempt_enable(); +} + +static int get_cpuid_mode(void) +{ + return !test_thread_flag(TIF_NOCPUID); +} + +static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled) +{ + if (!static_cpu_has(X86_FEATURE_CPUID_FAULT)) + return -ENODEV; + + if (cpuid_enabled) + enable_cpuid(); + else + disable_cpuid(); + + return 0; +} + +/* + * Called immediately after a successful exec. + */ +void arch_setup_new_exec(void) +{ + /* If cpuid was previously disabled for this task, re-enable it. */ + if (test_thread_flag(TIF_NOCPUID)) + enable_cpuid(); +} + static inline void switch_to_bitmap(struct tss_struct *tss, struct thread_struct *prev, struct thread_struct *next, @@ -225,6 +293,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, if ((tifp ^ tifn) & _TIF_NOTSC) cr4_toggle_bits(X86_CR4_TSD); + + if ((tifp ^ tifn) & _TIF_NOCPUID) + set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); } /* @@ -549,5 +620,12 @@ out: long do_arch_prctl_common(struct task_struct *task, int option, unsigned long cpuid_enabled) { + switch (option) { + case ARCH_GET_CPUID: + return get_cpuid_mode(); + case ARCH_SET_CPUID: + return set_cpuid_mode(task, cpuid_enabled); + } + return -EINVAL; } -- cgit v1.2.1 From d35869ba348d3f1ff3e6d8214fe0f674bb0e404e Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Mon, 6 Feb 2017 16:41:40 +0200 Subject: perf/x86/intel/pt: Allow the disabling of branch tracing Now that Intel PT supports more types of trace content than just branch tracing, it may be useful to allow the user to disable branch tracing when it is not needed. The special case is BDW, where not setting BranchEn is not supported. This is slightly trickier than necessary, because up to this moment the driver has been setting BranchEn automatically and the userspace assumes as much. Instead of reversing the semantics of BranchEn, we introduce a 'passthrough' bit, which will forego the default and allow the user to set BranchEn to their heart's content. 
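From userspace the new knob looks as follows; a sketch that opens an intel_pt event with BranchEn deliberately off. The PMU type must really be read from /sys/bus/event_source/devices/intel_pt/type; the constant below is a placeholder, and per the erratum handling in the patch the open is expected to fail on Broadwell:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 8;		/* placeholder: intel_pt PMU type from sysfs */
	attr.config = 1UL << 0;	/* "pt" passthrough bit; "branch" (bit 13)
				   left clear, so BranchEn stays off */
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open(intel_pt)");
	else
		close(fd);
	return 0;
}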
Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: vince@deater.net Link: http://lkml.kernel.org/r/20170206144140.14402-1-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/pt.c | 74 ++++++++++++++++++++++++++++++++++++++++++++-- arch/x86/events/intel/pt.h | 1 + 2 files changed, 73 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 354e9ff2978c..ae8324d65e61 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "../perf_event.h" #include "pt.h" @@ -98,6 +99,7 @@ static struct attribute_group pt_cap_group = { .name = "caps", }; +PMU_FORMAT_ATTR(pt, "config:0" ); PMU_FORMAT_ATTR(cyc, "config:1" ); PMU_FORMAT_ATTR(pwr_evt, "config:4" ); PMU_FORMAT_ATTR(fup_on_ptw, "config:5" ); @@ -105,11 +107,13 @@ PMU_FORMAT_ATTR(mtc, "config:9" ); PMU_FORMAT_ATTR(tsc, "config:10" ); PMU_FORMAT_ATTR(noretcomp, "config:11" ); PMU_FORMAT_ATTR(ptw, "config:12" ); +PMU_FORMAT_ATTR(branch, "config:13" ); PMU_FORMAT_ATTR(mtc_period, "config:14-17" ); PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" ); PMU_FORMAT_ATTR(psb_period, "config:24-27" ); static struct attribute *pt_formats_attr[] = { + &format_attr_pt.attr, &format_attr_cyc.attr, &format_attr_pwr_evt.attr, &format_attr_fup_on_ptw.attr, @@ -117,6 +121,7 @@ static struct attribute *pt_formats_attr[] = { &format_attr_tsc.attr, &format_attr_noretcomp.attr, &format_attr_ptw.attr, + &format_attr_branch.attr, &format_attr_mtc_period.attr, &format_attr_cyc_thresh.attr, &format_attr_psb_period.attr, @@ -197,6 +202,19 @@ static int __init pt_pmu_hw_init(void) pt_pmu.tsc_art_den = eax; } + /* model-specific quirks */ + switch (boot_cpu_data.x86_model) { + case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL_XEON_D: + case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_BROADWELL_X: + /* not setting BRANCH_EN will #GP, erratum BDM106 */ + pt_pmu.branch_en_always_on = true; + break; + default: + break; + } + if (boot_cpu_has(X86_FEATURE_VMX)) { /* * Intel SDM, 36.5 "Tracing post-VMXON" says that @@ -263,8 +281,20 @@ fail: #define RTIT_CTL_PTW (RTIT_CTL_PTW_EN | \ RTIT_CTL_FUP_ON_PTW) -#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | \ +/* + * Bit 0 (TraceEn) in the attr.config is meaningless as the + * corresponding bit in the RTIT_CTL can only be controlled + * by the driver; therefore, repurpose it to mean: pass + * through the bit that was previously assumed to be always + * on for PT, thereby allowing the user to *not* set it if + * they so wish. See also pt_event_valid() and pt_config(). + */ +#define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN + +#define PT_CONFIG_MASK (RTIT_CTL_TRACEEN | \ + RTIT_CTL_TSC_EN | \ RTIT_CTL_DISRETC | \ + RTIT_CTL_BRANCH_EN | \ RTIT_CTL_CYC_PSB | \ RTIT_CTL_MTC | \ RTIT_CTL_PWR_EVT_EN | \ @@ -332,6 +362,33 @@ static bool pt_event_valid(struct perf_event *event) return false; } + /* + * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config + * clears the assumption that BranchEn must always be enabled, + * as was the case with the first implementation of PT. + * If this bit is not set, the legacy behavior is preserved + * for compatibility with the older userspace.
+ * + * Re-using bit 0 for this purpose is fine because it is never + * directly set by the user; previous attempts at setting it in + * the attr.config resulted in -EINVAL. + */ + if (config & RTIT_CTL_PASSTHROUGH) { + /* + * Disallow not setting BRANCH_EN where BRANCH_EN is + * always required. + */ + if (pt_pmu.branch_en_always_on && + !(config & RTIT_CTL_BRANCH_EN)) + return false; + } else { + /* + * Disallow BRANCH_EN without the PASSTHROUGH. + */ + if (config & RTIT_CTL_BRANCH_EN) + return false; + } + return true; } @@ -420,7 +477,20 @@ static void pt_config(struct perf_event *event) } reg = pt_config_filters(event); - reg |= RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN; + reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN; + + /* + * Previously, we had BRANCH_EN on by default, but now that PT has + * grown features outside of branch tracing, it is useful to allow + * the user to disable it. Setting bit 0 in the event's attr.config + * allows BRANCH_EN to pass through instead of being always on. See + * also the comment in pt_event_valid(). + */ + if (event->attr.config & BIT(0)) { + reg |= event->attr.config & RTIT_CTL_BRANCH_EN; + } else { + reg |= RTIT_CTL_BRANCH_EN; + } if (!event->attr.exclude_kernel) reg |= RTIT_CTL_OS; diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index b528e8f373e4..0eb41d07b79a 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -110,6 +110,7 @@ struct pt_pmu { struct pmu pmu; u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; bool vmx; + bool branch_en_always_on; unsigned long max_nonturbo_ratio; unsigned int tsc_art_num; unsigned int tsc_art_den; -- cgit v1.2.1 From f9573e53f123ee487cca737139f3a43897a6383e Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:13 -0600 Subject: x86/events/amd/iommu: Declare pr_fmt() format MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Declare pr_fmt() format for perf/amd_iommu and remove unnecessary pr_debug() calls. Also check return value when _init_events_attrs() fails and issue an error message. Signed-off-by: Suravee Suthikulpanit Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-2-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index b28200dea715..8d8ed40613fa 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -11,6 +11,8 @@ * published by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) "perf/amd_iommu: " fmt + #include #include #include @@ -298,7 +300,6 @@ static void perf_iommu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; - pr_debug("perf: amd_iommu:perf_iommu_start\n"); if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; @@ -323,7 +324,6 @@ static void perf_iommu_read(struct perf_event *event) u64 prev_raw_count = 0ULL; u64 delta = 0ULL; struct hw_perf_event *hwc = &event->hw; - pr_debug("perf: amd_iommu:perf_iommu_read\n"); amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), _GET_BANK(event), _GET_CNTR(event), @@ -349,8 +349,6 @@ static void perf_iommu_stop(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; u64 config; - pr_debug("perf: amd_iommu:perf_iommu_stop\n"); - if (hwc->state & PERF_HES_UPTODATE) return; @@ -372,7 +370,6 @@ static int perf_iommu_add(struct perf_event *event, int flags) struct perf_amd_iommu *perf_iommu = container_of(event->pmu, struct perf_amd_iommu, pmu); - pr_debug("perf: amd_iommu:perf_iommu_add\n"); event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; /* request an iommu bank/counter */ @@ -393,7 +390,6 @@ static void perf_iommu_del(struct perf_event *event, int flags) struct perf_amd_iommu *perf_iommu = container_of(event->pmu, struct perf_amd_iommu, pmu); - pr_debug("perf: amd_iommu:perf_iommu_del\n"); perf_iommu_stop(event, PERF_EF_UPDATE); /* clear the assigned iommu bank/counter */ @@ -444,27 +440,27 @@ static __init int _init_perf_amd_iommu( raw_spin_lock_init(&perf_iommu->lock); - /* Init format attributes */ perf_iommu->format_group = &amd_iommu_format_group; /* Init cpumask attributes to only core 0 */ cpumask_set_cpu(0, &iommu_cpumask); perf_iommu->cpumask_group = &amd_iommu_cpumask_group; - /* Init events attributes */ - if (_init_events_attrs(perf_iommu) != 0) - pr_err("perf: amd_iommu: Only support raw events.\n"); + ret = _init_events_attrs(perf_iommu); + if (ret) { + pr_err("Error initializing AMD IOMMU perf events.\n"); + return ret; + } - /* Init null attributes */ perf_iommu->null_group = NULL; perf_iommu->pmu.attr_groups = perf_iommu->attr_groups; ret = perf_pmu_register(&perf_iommu->pmu, name, -1); if (ret) { - pr_err("perf: amd_iommu: Failed to initialized.\n"); + pr_err("Error initializing AMD IOMMU perf counters.\n"); amd_iommu_pc_exit(); } else { - pr_info("perf: amd_iommu: Detected. (%d banks, %d counters/bank)\n", + pr_info("Detected AMD IOMMU (%d banks, %d counters/bank).\n", amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID), amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID)); } -- cgit v1.2.1 From 6aad0c6269052a6114259deaf664ce350bf64fa2 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:14 -0600 Subject: x86/events/amd/iommu: Clean up bitwise operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up register initialization and make use of BIT_ULL(x) where appropriate. This should not affect logic and functionality. 
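To show what the conversion relies on, here is a stand-alone sketch with the macros reproduced approximately as the kernel defines them; the point of BIT_ULL() is that the shift is always performed in a 64-bit type, which matters for a u64 mask like cntr_assign_mask on 32-bit builds:

#include <stdio.h>

#define BIT(nr)         (1UL << (nr))   /* width of unsigned long varies */
#define BIT_ULL(nr)     (1ULL << (nr))  /* always 64 bits wide */

int main(void)
{
        unsigned long long mask = 0;
        int shift = 40; /* bank/counter shifts can exceed 31 */

        mask |= BIT_ULL(shift); /* well-defined even where long is 32-bit */
        printf("%#llx\n", mask);
        return 0;
}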
Signed-off-by: Suravee Suthikulpanit Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-3-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 8d8ed40613fa..e112f498a019 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -164,11 +164,11 @@ static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu) for (bank = 0, shift = 0; bank < max_banks; bank++) { for (cntr = 0; cntr < max_cntrs; cntr++) { shift = bank + (bank*3) + cntr; - if (perf_iommu->cntr_assign_mask & (1ULL<cntr_assign_mask & BIT_ULL(shift)) { continue; } else { - perf_iommu->cntr_assign_mask |= (1ULL<cntr_assign_mask |= BIT_ULL(shift); + retval = ((bank & 0xFF) << 8) | (cntr & 0xFF); goto out; } } @@ -265,23 +265,23 @@ static void perf_iommu_enable_event(struct perf_event *ev) _GET_BANK(ev), _GET_CNTR(ev) , IOMMU_PC_COUNTER_SRC_REG, ®, true); - reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32); + reg = devid | (_GET_DEVID_MASK(ev) << 32); if (reg) - reg |= (1UL << 31); + reg |= BIT(31); amd_iommu_pc_get_set_reg_val(devid, _GET_BANK(ev), _GET_CNTR(ev) , IOMMU_PC_DEVID_MATCH_REG, ®, true); - reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32); + reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32); if (reg) - reg |= (1UL << 31); + reg |= BIT(31); amd_iommu_pc_get_set_reg_val(devid, _GET_BANK(ev), _GET_CNTR(ev) , IOMMU_PC_PASID_MATCH_REG, ®, true); - reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32); + reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32); if (reg) - reg |= (1UL << 31); + reg |= BIT(31); amd_iommu_pc_get_set_reg_val(devid, _GET_BANK(ev), _GET_CNTR(ev) , IOMMU_PC_DOMID_MATCH_REG, ®, true); -- cgit v1.2.1 From dc6ca5e47d44c11a111807208595ff6a8fcd2a83 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:15 -0600 Subject: x86/events/amd/iommu: Clean up perf_iommu_read() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix coding style and use GENMASK_ULL(). 
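The two idioms in the cleaned-up perf_iommu_read() below are worth a stand-alone illustration: GENMASK_ULL(47, 0) builds the 48-bit counter mask, and shifting both samples up by COUNTER_SHIFT (16) before subtracting makes a 48-bit counter wrap come out as a small positive delta. A sketch, with the kernel macro reproduced approximately:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define COUNTER_SHIFT   16      /* 64 minus the 48-bit counter width */

int main(void)
{
        uint64_t prev  = GENMASK_ULL(47, 0);    /* counter just before a wrap */
        uint64_t count = 5;                     /* sample taken after the wrap */
        uint64_t delta;

        /* Subtract in the top 48 bits so 64-bit wrap-around fixes the sign. */
        delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
        delta >>= COUNTER_SHIFT;

        printf("delta = %llu\n", (unsigned long long)delta);    /* prints 6 */
        return 0;
}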
Signed-off-by: Suravee Suthikulpanit Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-4-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index e112f498a019..d4375dadd4e9 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -320,9 +320,7 @@ static void perf_iommu_start(struct perf_event *event, int flags) static void perf_iommu_read(struct perf_event *event) { - u64 count = 0ULL; - u64 prev_raw_count = 0ULL; - u64 delta = 0ULL; + u64 count, prev, delta; struct hw_perf_event *hwc = &event->hw; amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), @@ -330,18 +328,16 @@ static void perf_iommu_read(struct perf_event *event) IOMMU_PC_COUNTER_REG, &count, false); /* IOMMU pc counter register is only 48 bits */ - count &= 0xFFFFFFFFFFFFULL; + count &= GENMASK_ULL(47, 0); - prev_raw_count = local64_read(&hwc->prev_count); - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - count) != prev_raw_count) + prev = local64_read(&hwc->prev_count); + if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev) return; - /* Handling 48-bit counter overflowing */ - delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT); + /* Handle 48-bit counter overflow */ + delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT); delta >>= COUNTER_SHIFT; local64_add(delta, &event->count); - } static void perf_iommu_stop(struct perf_event *event, int flags) -- cgit v1.2.1 From 6b9376e30f42b902260371245f009bc05eb3fdfb Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:17 -0600 Subject: x86/events, drivers/iommu/amd: Introduce amd_iommu_get_num_iommus() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce amd_iommu_get_num_iommus(), which returns the value of amd_iommus_present. The function is used to replace direct access to the variable, which is now declared as static. This function will also be used by AMD IOMMU perf driver. 
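The header hunk below only declares the accessor; the driver-side definition is not part of this diff. Based on the changelog, it amounts to a sketch like this (the exact body is an assumption):

/* drivers/iommu/amd_iommu_init.c, sketch only */
static int amd_iommus_present;  /* no longer directly visible to other files */

int amd_iommu_get_num_iommus(void)
{
        return amd_iommus_present;
}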
Signed-off-by: Suravee Suthikulpanit Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-6-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h index 845d173278e3..5c5c9329e571 100644 --- a/arch/x86/events/amd/iommu.h +++ b/arch/x86/events/amd/iommu.h @@ -28,6 +28,8 @@ #define IOMMU_BASE_DEVID 0x0000 /* amd_iommu_init.c external support functions */ +extern int amd_iommu_get_num_iommus(void); + extern bool amd_iommu_pc_supported(void); extern u8 amd_iommu_pc_get_max_banks(u16 devid); -- cgit v1.2.1 From f5863a00e73c432b91e4efe1d68778b4ace6a892 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:18 -0600 Subject: x86/events/amd/iommu.c: Modify functions to query max banks and counters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, amd_iommu_pc_get_max_[banks|counters]() use end-point device ID to locate an IOMMU and check the reported max banks/counters. The logic assumes that the IOMMU_BASE_DEVID belongs to the first IOMMU, and uses it to acquire a reference to the first IOMMU, which does not work on certain systems. Instead, modify the function to take an IOMMU index, and use it to query the corresponding AMD IOMMU instance. Currently, hardcode the IOMMU index to 0 since the current AMD IOMMU perf implementation supports only a single IOMMU. A subsequent patch will add support for multiple IOMMUs, and will use a proper IOMMU index. 
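The corresponding driver-side lookup is outside this diff; from the changelog it plausibly takes the shape below, where get_amd_iommu() resolves the index to an instance (the field names on struct amd_iommu are assumptions here):

/* drivers/iommu side, sketch only */
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
        struct amd_iommu *iommu = get_amd_iommu(idx);

        return iommu ? iommu->max_banks : 0;
}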
Signed-off-by: Suravee Suthikulpanit Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-7-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 17 +++++++---------- arch/x86/events/amd/iommu.h | 9 ++++----- 2 files changed, 11 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index d4375dadd4e9..10f67d39cac5 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -239,14 +239,6 @@ static int perf_iommu_event_init(struct perf_event *event) return -EINVAL; } - /* integrate with iommu base devid (0000), assume one iommu */ - perf_iommu->max_banks = - amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID); - perf_iommu->max_counters = - amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID); - if ((perf_iommu->max_banks == 0) || (perf_iommu->max_counters == 0)) - return -EINVAL; - /* update the hw_perf_event struct with the iommu config data */ hwc->config = config; hwc->extra_reg.config = config1; @@ -448,6 +440,11 @@ static __init int _init_perf_amd_iommu( return ret; } + perf_iommu->max_banks = amd_iommu_pc_get_max_banks(0); + perf_iommu->max_counters = amd_iommu_pc_get_max_counters(0); + if (!perf_iommu->max_banks || !perf_iommu->max_counters) + return -EINVAL; + perf_iommu->null_group = NULL; perf_iommu->pmu.attr_groups = perf_iommu->attr_groups; @@ -457,8 +454,8 @@ static __init int _init_perf_amd_iommu( amd_iommu_pc_exit(); } else { pr_info("Detected AMD IOMMU (%d banks, %d counters/bank).\n", - amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID), - amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID)); + amd_iommu_pc_get_max_banks(0), + amd_iommu_pc_get_max_counters(0)); } return ret; diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h index 5c5c9329e571..b775107c221e 100644 --- a/arch/x86/events/amd/iommu.h +++ b/arch/x86/events/amd/iommu.h @@ -24,19 +24,18 @@ #define PC_MAX_SPEC_BNKS 64 #define PC_MAX_SPEC_CNTRS 16 -/* iommu pc reg masks*/ -#define IOMMU_BASE_DEVID 0x0000 - /* amd_iommu_init.c external support functions */ extern int amd_iommu_get_num_iommus(void); extern bool amd_iommu_pc_supported(void); -extern u8 amd_iommu_pc_get_max_banks(u16 devid); +extern u8 amd_iommu_pc_get_max_banks(unsigned int idx); -extern u8 amd_iommu_pc_get_max_counters(u16 devid); +extern u8 amd_iommu_pc_get_max_counters(unsigned int idx); extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, u64 *value, bool is_write); +extern struct amd_iommu *get_amd_iommu(int idx); + #endif /*_PERF_EVENT_AMD_IOMMU_H_*/ -- cgit v1.2.1 From 1650dfd1a9bcde8fcfaab776887bb6f4e91830c3 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:19 -0600 Subject: x86/events, drivers/amd/iommu: Prepare for multiple IOMMUs support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, amd_iommu_pc_get_set_reg_val() cannot support multiple IOMMUs. Modify it to allow callers to specify an IOMMU. This is in preparation for supporting multiple IOMMUs. 
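The before/after shape of a call site, mirroring the hunks below (illustrative fragment; bank, cntr, fxn and val stand in for the usual arguments):

        /* old: a device ID picks the IOMMU implicitly, is_write picks the direction */
        amd_iommu_pc_get_set_reg_val(devid, bank, cntr, fxn, &val, true);

        /* new: the caller resolves the IOMMU instance once and passes it down */
        struct amd_iommu *iommu = get_amd_iommu(0);

        amd_iommu_pc_set_reg(iommu, bank, cntr, fxn, &val);
        amd_iommu_pc_get_reg(iommu, bank, cntr, fxn, &val);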
Signed-off-by: Suravee Suthikulpanit Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-8-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 38 +++++++++++++++++--------------------- arch/x86/events/amd/iommu.h | 9 +++++++-- 2 files changed, 24 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 10f67d39cac5..88fbc8001460 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -248,49 +248,45 @@ static int perf_iommu_event_init(struct perf_event *event) static void perf_iommu_enable_event(struct perf_event *ev) { + struct amd_iommu *iommu = get_amd_iommu(0); u8 csource = _GET_CSOURCE(ev); u16 devid = _GET_DEVID(ev); + u8 bank = _GET_BANK(ev); + u8 cntr = _GET_CNTR(ev); u64 reg = 0ULL; reg = csource; - amd_iommu_pc_get_set_reg_val(devid, - _GET_BANK(ev), _GET_CNTR(ev) , - IOMMU_PC_COUNTER_SRC_REG, ®, true); + amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, ®); reg = devid | (_GET_DEVID_MASK(ev) << 32); if (reg) reg |= BIT(31); - amd_iommu_pc_get_set_reg_val(devid, - _GET_BANK(ev), _GET_CNTR(ev) , - IOMMU_PC_DEVID_MATCH_REG, ®, true); + amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, ®); reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32); if (reg) reg |= BIT(31); - amd_iommu_pc_get_set_reg_val(devid, - _GET_BANK(ev), _GET_CNTR(ev) , - IOMMU_PC_PASID_MATCH_REG, ®, true); + amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, ®); reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32); if (reg) reg |= BIT(31); - amd_iommu_pc_get_set_reg_val(devid, - _GET_BANK(ev), _GET_CNTR(ev) , - IOMMU_PC_DOMID_MATCH_REG, ®, true); + amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, ®); } static void perf_iommu_disable_event(struct perf_event *event) { + struct amd_iommu *iommu = get_amd_iommu(0); u64 reg = 0ULL; - amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), - _GET_BANK(event), _GET_CNTR(event), - IOMMU_PC_COUNTER_SRC_REG, ®, true); + amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event), + IOMMU_PC_COUNTER_SRC_REG, ®); } static void perf_iommu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; + struct amd_iommu *iommu = get_amd_iommu(0); if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; @@ -300,9 +296,8 @@ static void perf_iommu_start(struct perf_event *event, int flags) if (flags & PERF_EF_RELOAD) { u64 prev_raw_count = local64_read(&hwc->prev_count); - amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), - _GET_BANK(event), _GET_CNTR(event), - IOMMU_PC_COUNTER_REG, &prev_raw_count, true); + amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event), + IOMMU_PC_COUNTER_REG, &prev_raw_count); } perf_iommu_enable_event(event); @@ -314,10 +309,11 @@ static void perf_iommu_read(struct perf_event *event) { u64 count, prev, delta; struct hw_perf_event *hwc = &event->hw; + struct amd_iommu *iommu = get_amd_iommu(0); - amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), - _GET_BANK(event), _GET_CNTR(event), - IOMMU_PC_COUNTER_REG, &count, false); + if (amd_iommu_pc_get_reg(iommu, _GET_BANK(event), _GET_CNTR(event), + IOMMU_PC_COUNTER_REG, &count)) + return; /* IOMMU pc counter 
register is only 48 bits */ count &= GENMASK_ULL(47, 0); diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h index b775107c221e..62e0702c4374 100644 --- a/arch/x86/events/amd/iommu.h +++ b/arch/x86/events/amd/iommu.h @@ -24,6 +24,8 @@ #define PC_MAX_SPEC_BNKS 64 #define PC_MAX_SPEC_CNTRS 16 +struct amd_iommu; + /* amd_iommu_init.c external support functions */ extern int amd_iommu_get_num_iommus(void); @@ -33,8 +35,11 @@ extern u8 amd_iommu_pc_get_max_banks(unsigned int idx); extern u8 amd_iommu_pc_get_max_counters(unsigned int idx); -extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, - u8 fxn, u64 *value, bool is_write); +extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, + u8 fxn, u64 *value); + +extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, + u8 fxn, u64 *value); extern struct amd_iommu *get_amd_iommu(int idx); -- cgit v1.2.1 From 51686546304fd7f778bb31bf7e2ae9bad6b1d21c Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:20 -0600 Subject: x86/events/amd/iommu: Fix sysfs perf attribute groups MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce static amd_iommu_attr_groups to simplify the sysfs attributes initialization code. Signed-off-by: Suravee Suthikulpanit Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-9-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 81 ++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 49 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 88fbc8001460..7ac8138023cc 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -43,14 +43,8 @@ struct perf_amd_iommu { u8 max_counters; u64 cntr_assign_mask; raw_spinlock_t lock; - const struct attribute_group *attr_groups[4]; }; -#define format_group attr_groups[0] -#define cpumask_group attr_groups[1] -#define events_group attr_groups[2] -#define null_group attr_groups[3] - /*--------------------------------------------- * sysfs format attributes *---------------------------------------------*/ @@ -81,6 +75,10 @@ static struct attribute_group amd_iommu_format_group = { /*--------------------------------------------- * sysfs events attributes *---------------------------------------------*/ +static struct attribute_group amd_iommu_events_group = { + .name = "events", +}; + struct amd_iommu_event_desc { struct kobj_attribute attr; const char *event; @@ -384,76 +382,60 @@ static void perf_iommu_del(struct perf_event *event, int flags) perf_event_update_userpage(event); } -static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu) +static __init int _init_events_attrs(void) { - struct attribute **attrs; - struct attribute_group *attr_group; int i = 0, j; + struct attribute **attrs; while (amd_iommu_v2_event_descs[i].attr.attr.name) i++; - attr_group = kzalloc(sizeof(struct attribute *) - * (i + 1) + sizeof(*attr_group), GFP_KERNEL); - if (!attr_group) + attrs = kzalloc(sizeof(struct attribute **) * (i + 1), GFP_KERNEL); + if (!attrs) return -ENOMEM; - attrs = (struct attribute **)(attr_group + 1); for (j = 
0; j < i; j++) attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr; - attr_group->name = "events"; - attr_group->attrs = attrs; - perf_iommu->events_group = attr_group; - + amd_iommu_events_group.attrs = attrs; return 0; } static __init void amd_iommu_pc_exit(void) { - if (__perf_iommu.events_group != NULL) { - kfree(__perf_iommu.events_group); - __perf_iommu.events_group = NULL; - } + kfree(amd_iommu_events_group.attrs); } -static __init int _init_perf_amd_iommu( - struct perf_amd_iommu *perf_iommu, char *name) +const struct attribute_group *amd_iommu_attr_groups[] = { + &amd_iommu_format_group, + &amd_iommu_cpumask_group, + &amd_iommu_events_group, + NULL, +}; + +static __init int +_init_perf_amd_iommu(struct perf_amd_iommu *perf_iommu, char *name) { int ret; raw_spin_lock_init(&perf_iommu->lock); - perf_iommu->format_group = &amd_iommu_format_group; - /* Init cpumask attributes to only core 0 */ cpumask_set_cpu(0, &iommu_cpumask); - perf_iommu->cpumask_group = &amd_iommu_cpumask_group; - - ret = _init_events_attrs(perf_iommu); - if (ret) { - pr_err("Error initializing AMD IOMMU perf events.\n"); - return ret; - } perf_iommu->max_banks = amd_iommu_pc_get_max_banks(0); perf_iommu->max_counters = amd_iommu_pc_get_max_counters(0); if (!perf_iommu->max_banks || !perf_iommu->max_counters) return -EINVAL; - perf_iommu->null_group = NULL; - perf_iommu->pmu.attr_groups = perf_iommu->attr_groups; - + perf_iommu->pmu.attr_groups = amd_iommu_attr_groups; ret = perf_pmu_register(&perf_iommu->pmu, name, -1); - if (ret) { + if (ret) pr_err("Error initializing AMD IOMMU perf counters.\n"); - amd_iommu_pc_exit(); - } else { + else pr_info("Detected AMD IOMMU (%d banks, %d counters/bank).\n", amd_iommu_pc_get_max_banks(0), amd_iommu_pc_get_max_counters(0)); - } - return ret; } @@ -467,24 +449,25 @@ static struct perf_amd_iommu __perf_iommu = { .stop = perf_iommu_stop, .read = perf_iommu_read, }, - .max_banks = 0x00, - .max_counters = 0x00, - .cntr_assign_mask = 0ULL, - .format_group = NULL, - .cpumask_group = NULL, - .events_group = NULL, - .null_group = NULL, }; static __init int amd_iommu_pc_init(void) { + int ret; + /* Make sure the IOMMU PC resource is available */ if (!amd_iommu_pc_supported()) return -ENODEV; - _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); + ret = _init_events_attrs(); + if (ret) + return ret; - return 0; + ret = _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); + if (ret) + amd_iommu_pc_exit(); + + return ret; } device_initcall(amd_iommu_pc_init); -- cgit v1.2.1 From cf25f904ef75aa7c25097eb4981bbc634bf5ff9e Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Fri, 24 Feb 2017 02:48:21 -0600 Subject: x86/events/amd/iommu: Add IOMMU-specific hw_perf_event struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Current AMD IOMMU perf PMU inappropriately uses the hardware struct inside the union in struct hw_perf_event, extra_reg in particular. Instead, introduce an AMD IOMMU-specific struct with required parameters to be programmed into the IOMMU performance counter control register. Update the pasid field from 16 to 20 bits while at it. Signed-off-by: Suravee Suthikulpanit [ Fixup macros, shorten get_next_avail_iommu_bnk_cntr() local vars, massage commit message. 
] Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1487926102-13073-10-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 113 ++++++++++++++++++++------------------------ 1 file changed, 50 insertions(+), 63 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 7ac8138023cc..f0d94c8b382a 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -23,17 +23,16 @@ #define COUNTER_SHIFT 16 -#define _GET_BANK(ev) ((u8)(ev->hw.extra_reg.reg >> 8)) -#define _GET_CNTR(ev) ((u8)(ev->hw.extra_reg.reg)) - -/* iommu pmu config masks */ -#define _GET_CSOURCE(ev) ((ev->hw.config & 0xFFULL)) -#define _GET_DEVID(ev) ((ev->hw.config >> 8) & 0xFFFFULL) -#define _GET_PASID(ev) ((ev->hw.config >> 24) & 0xFFFFULL) -#define _GET_DOMID(ev) ((ev->hw.config >> 40) & 0xFFFFULL) -#define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config) & 0xFFFFULL) -#define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL) -#define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL) +/* iommu pmu conf masks */ +#define GET_CSOURCE(x) ((x)->conf & 0xFFULL) +#define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL) +#define GET_DOMID(x) (((x)->conf >> 24) & 0xFFFFULL) +#define GET_PASID(x) (((x)->conf >> 40) & 0xFFFFFULL) + +/* iommu pmu conf1 masks */ +#define GET_DEVID_MASK(x) ((x)->conf1 & 0xFFFFULL) +#define GET_DOMID_MASK(x) (((x)->conf1 >> 16) & 0xFFFFULL) +#define GET_PASID_MASK(x) (((x)->conf1 >> 32) & 0xFFFFFULL) static struct perf_amd_iommu __perf_iommu; @@ -50,11 +49,11 @@ struct perf_amd_iommu { *---------------------------------------------*/ PMU_FORMAT_ATTR(csource, "config:0-7"); PMU_FORMAT_ATTR(devid, "config:8-23"); -PMU_FORMAT_ATTR(pasid, "config:24-39"); -PMU_FORMAT_ATTR(domid, "config:40-55"); +PMU_FORMAT_ATTR(domid, "config:24-39"); +PMU_FORMAT_ATTR(pasid, "config:40-59"); PMU_FORMAT_ATTR(devid_mask, "config1:0-15"); -PMU_FORMAT_ATTR(pasid_mask, "config1:16-31"); -PMU_FORMAT_ATTR(domid_mask, "config1:32-47"); +PMU_FORMAT_ATTR(domid_mask, "config1:16-31"); +PMU_FORMAT_ATTR(pasid_mask, "config1:32-51"); static struct attribute *iommu_format_attrs[] = { &format_attr_csource.attr, @@ -150,30 +149,34 @@ static struct attribute_group amd_iommu_cpumask_group = { /*---------------------------------------------*/ -static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu) +static int get_next_avail_iommu_bnk_cntr(struct perf_event *event) { + struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu); + int max_cntrs = piommu->max_counters; + int max_banks = piommu->max_banks; + u32 shift, bank, cntr; unsigned long flags; - int shift, bank, cntr, retval; - int max_banks = perf_iommu->max_banks; - int max_cntrs = perf_iommu->max_counters; + int retval; - raw_spin_lock_irqsave(&perf_iommu->lock, flags); + raw_spin_lock_irqsave(&piommu->lock, flags); for (bank = 0, shift = 0; bank < max_banks; bank++) { for (cntr = 0; cntr < max_cntrs; cntr++) { shift = bank + (bank*3) + cntr; - if (perf_iommu->cntr_assign_mask & BIT_ULL(shift)) { + if (piommu->cntr_assign_mask & BIT_ULL(shift)) { continue; } else { - perf_iommu->cntr_assign_mask |= BIT_ULL(shift); - retval = ((bank & 0xFF) 
<< 8) | (cntr & 0xFF); + piommu->cntr_assign_mask |= BIT_ULL(shift); + event->hw.iommu_bank = bank; + event->hw.iommu_cntr = cntr; + retval = 0; goto out; } } } retval = -ENOSPC; out: - raw_spin_unlock_irqrestore(&perf_iommu->lock, flags); + raw_spin_unlock_irqrestore(&piommu->lock, flags); return retval; } @@ -202,8 +205,6 @@ static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu, static int perf_iommu_event_init(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - struct perf_amd_iommu *perf_iommu; - u64 config, config1; /* test the event attr type check for PMU enumeration */ if (event->attr.type != event->pmu->type) @@ -225,21 +226,9 @@ static int perf_iommu_event_init(struct perf_event *event) if (event->cpu < 0) return -EINVAL; - perf_iommu = &__perf_iommu; - - if (event->pmu != &perf_iommu->pmu) - return -ENOENT; - - if (perf_iommu) { - config = event->attr.config; - config1 = event->attr.config1; - } else { - return -EINVAL; - } - /* update the hw_perf_event struct with the iommu config data */ - hwc->config = config; - hwc->extra_reg.config = config1; + hwc->conf = event->attr.config; + hwc->conf1 = event->attr.config1; return 0; } @@ -247,26 +236,28 @@ static int perf_iommu_event_init(struct perf_event *event) static void perf_iommu_enable_event(struct perf_event *ev) { struct amd_iommu *iommu = get_amd_iommu(0); - u8 csource = _GET_CSOURCE(ev); - u16 devid = _GET_DEVID(ev); - u8 bank = _GET_BANK(ev); - u8 cntr = _GET_CNTR(ev); + struct hw_perf_event *hwc = &ev->hw; + u8 bank = hwc->iommu_bank; + u8 cntr = hwc->iommu_cntr; u64 reg = 0ULL; - reg = csource; + reg = GET_CSOURCE(hwc); amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, ®); - reg = devid | (_GET_DEVID_MASK(ev) << 32); + reg = GET_DEVID_MASK(hwc); + reg = GET_DEVID(hwc) | (reg << 32); if (reg) reg |= BIT(31); amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, ®); - reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32); + reg = GET_PASID_MASK(hwc); + reg = GET_PASID(hwc) | (reg << 32); if (reg) reg |= BIT(31); amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, ®); - reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32); + reg = GET_DOMID_MASK(hwc); + reg = GET_DOMID(hwc) | (reg << 32); if (reg) reg |= BIT(31); amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, ®); @@ -275,16 +266,16 @@ static void perf_iommu_enable_event(struct perf_event *ev) static void perf_iommu_disable_event(struct perf_event *event) { struct amd_iommu *iommu = get_amd_iommu(0); + struct hw_perf_event *hwc = &event->hw; u64 reg = 0ULL; - amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event), + amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, IOMMU_PC_COUNTER_SRC_REG, ®); } static void perf_iommu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; - struct amd_iommu *iommu = get_amd_iommu(0); if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; @@ -293,8 +284,10 @@ static void perf_iommu_start(struct perf_event *event, int flags) hwc->state = 0; if (flags & PERF_EF_RELOAD) { - u64 prev_raw_count = local64_read(&hwc->prev_count); - amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event), + u64 prev_raw_count = local64_read(&hwc->prev_count); + struct amd_iommu *iommu = get_amd_iommu(0); + + amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, IOMMU_PC_COUNTER_REG, &prev_raw_count); } @@ -309,7 +302,7 @@ static void perf_iommu_read(struct perf_event *event) struct hw_perf_event *hwc = 
&event->hw; struct amd_iommu *iommu = get_amd_iommu(0); - if (amd_iommu_pc_get_reg(iommu, _GET_BANK(event), _GET_CNTR(event), + if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, IOMMU_PC_COUNTER_REG, &count)) return; @@ -329,7 +322,6 @@ static void perf_iommu_read(struct perf_event *event) static void perf_iommu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; - u64 config; if (hwc->state & PERF_HES_UPTODATE) return; @@ -341,7 +333,6 @@ static void perf_iommu_stop(struct perf_event *event, int flags) if (hwc->state & PERF_HES_UPTODATE) return; - config = hwc->config; perf_iommu_read(event); hwc->state |= PERF_HES_UPTODATE; } @@ -349,16 +340,12 @@ static void perf_iommu_stop(struct perf_event *event, int flags) static int perf_iommu_add(struct perf_event *event, int flags) { int retval; - struct perf_amd_iommu *perf_iommu = - container_of(event->pmu, struct perf_amd_iommu, pmu); event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; /* request an iommu bank/counter */ - retval = get_next_avail_iommu_bnk_cntr(perf_iommu); - if (retval != -ENOSPC) - event->hw.extra_reg.reg = (u16)retval; - else + retval = get_next_avail_iommu_bnk_cntr(event); + if (retval) return retval; if (flags & PERF_EF_START) @@ -369,6 +356,7 @@ static int perf_iommu_add(struct perf_event *event, int flags) static void perf_iommu_del(struct perf_event *event, int flags) { + struct hw_perf_event *hwc = &event->hw; struct perf_amd_iommu *perf_iommu = container_of(event->pmu, struct perf_amd_iommu, pmu); @@ -376,8 +364,7 @@ static void perf_iommu_del(struct perf_event *event, int flags) /* clear the assigned iommu bank/counter */ clear_avail_iommu_bnk_cntr(perf_iommu, - _GET_BANK(event), - _GET_CNTR(event)); + hwc->iommu_bank, hwc->iommu_cntr); perf_event_update_userpage(event); } -- cgit v1.2.1 From 25df39f2cfd06a4b49ad592c5b7cba0cbf24e27f Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Wed, 22 Mar 2017 02:02:42 -0500 Subject: x86/events/amd/iommu: Enable support for multiple IOMMUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for multiple IOMMUs to perf by exposing an AMD IOMMU PMU for each IOMMU found in the system via: /bus/event_source/devices/amd_iommu_x where x is the IOMMU index. This allows users to specify different events to be programmed into the performance counters of each IOMMU. Signed-off-by: Suravee Suthikulpanit [ Improve readability, shorten names. 
] Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jörg Rödel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/1490166162-10002-11-git-send-email-Suravee.Suthikulpanit@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/iommu.c | 112 ++++++++++++++++++++++++++++---------------- 1 file changed, 71 insertions(+), 41 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index f0d94c8b382a..3641e24fdac5 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -34,16 +34,21 @@ #define GET_DOMID_MASK(x) (((x)->conf1 >> 16) & 0xFFFFULL) #define GET_PASID_MASK(x) (((x)->conf1 >> 32) & 0xFFFFFULL) -static struct perf_amd_iommu __perf_iommu; +#define IOMMU_NAME_SIZE 16 struct perf_amd_iommu { + struct list_head list; struct pmu pmu; + struct amd_iommu *iommu; + char name[IOMMU_NAME_SIZE]; u8 max_banks; u8 max_counters; u64 cntr_assign_mask; raw_spinlock_t lock; }; +static LIST_HEAD(perf_amd_iommu_list); + /*--------------------------------------------- * sysfs format attributes *---------------------------------------------*/ @@ -233,9 +238,14 @@ static int perf_iommu_event_init(struct perf_event *event) return 0; } +static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev) +{ + return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu; +} + static void perf_iommu_enable_event(struct perf_event *ev) { - struct amd_iommu *iommu = get_amd_iommu(0); + struct amd_iommu *iommu = perf_event_2_iommu(ev); struct hw_perf_event *hwc = &ev->hw; u8 bank = hwc->iommu_bank; u8 cntr = hwc->iommu_cntr; @@ -265,7 +275,7 @@ static void perf_iommu_enable_event(struct perf_event *ev) static void perf_iommu_disable_event(struct perf_event *event) { - struct amd_iommu *iommu = get_amd_iommu(0); + struct amd_iommu *iommu = perf_event_2_iommu(event); struct hw_perf_event *hwc = &event->hw; u64 reg = 0ULL; @@ -285,7 +295,7 @@ static void perf_iommu_start(struct perf_event *event, int flags) if (flags & PERF_EF_RELOAD) { u64 prev_raw_count = local64_read(&hwc->prev_count); - struct amd_iommu *iommu = get_amd_iommu(0); + struct amd_iommu *iommu = perf_event_2_iommu(event); amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, IOMMU_PC_COUNTER_REG, &prev_raw_count); @@ -300,7 +310,7 @@ static void perf_iommu_read(struct perf_event *event) { u64 count, prev, delta; struct hw_perf_event *hwc = &event->hw; - struct amd_iommu *iommu = get_amd_iommu(0); + struct amd_iommu *iommu = perf_event_2_iommu(event); if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, IOMMU_PC_COUNTER_REG, &count)) @@ -388,11 +398,6 @@ static __init int _init_events_attrs(void) return 0; } -static __init void amd_iommu_pc_exit(void) -{ - kfree(amd_iommu_events_group.attrs); -} - const struct attribute_group *amd_iommu_attr_groups[] = { &amd_iommu_format_group, &amd_iommu_cpumask_group, @@ -400,46 +405,57 @@ const struct attribute_group *amd_iommu_attr_groups[] = { NULL, }; -static __init int -_init_perf_amd_iommu(struct perf_amd_iommu *perf_iommu, char *name) +static struct pmu iommu_pmu = { + .event_init = perf_iommu_event_init, + .add = perf_iommu_add, + .del = perf_iommu_del, + .start = perf_iommu_start, + .stop = perf_iommu_stop, + .read = perf_iommu_read, + .task_ctx_nr = perf_invalid_context, + .attr_groups = 
amd_iommu_attr_groups, +}; + +static __init int init_one_iommu(unsigned int idx) { + struct perf_amd_iommu *perf_iommu; int ret; + perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL); + if (!perf_iommu) + return -ENOMEM; + raw_spin_lock_init(&perf_iommu->lock); - /* Init cpumask attributes to only core 0 */ - cpumask_set_cpu(0, &iommu_cpumask); + perf_iommu->pmu = iommu_pmu; + perf_iommu->iommu = get_amd_iommu(idx); + perf_iommu->max_banks = amd_iommu_pc_get_max_banks(idx); + perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx); - perf_iommu->max_banks = amd_iommu_pc_get_max_banks(0); - perf_iommu->max_counters = amd_iommu_pc_get_max_counters(0); - if (!perf_iommu->max_banks || !perf_iommu->max_counters) + if (!perf_iommu->iommu || + !perf_iommu->max_banks || + !perf_iommu->max_counters) { + kfree(perf_iommu); return -EINVAL; + } - perf_iommu->pmu.attr_groups = amd_iommu_attr_groups; - ret = perf_pmu_register(&perf_iommu->pmu, name, -1); - if (ret) - pr_err("Error initializing AMD IOMMU perf counters.\n"); - else - pr_info("Detected AMD IOMMU (%d banks, %d counters/bank).\n", - amd_iommu_pc_get_max_banks(0), - amd_iommu_pc_get_max_counters(0)); + snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx); + + ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1); + if (!ret) { + pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n", + idx, perf_iommu->max_banks, perf_iommu->max_counters); + list_add_tail(&perf_iommu->list, &perf_amd_iommu_list); + } else { + pr_warn("Error initializing IOMMU %d.\n", idx); + kfree(perf_iommu); + } return ret; } -static struct perf_amd_iommu __perf_iommu = { - .pmu = { - .task_ctx_nr = perf_invalid_context, - .event_init = perf_iommu_event_init, - .add = perf_iommu_add, - .del = perf_iommu_del, - .start = perf_iommu_start, - .stop = perf_iommu_stop, - .read = perf_iommu_read, - }, -}; - static __init int amd_iommu_pc_init(void) { + unsigned int i, cnt = 0; int ret; /* Make sure the IOMMU PC resource is available */ @@ -450,11 +466,25 @@ static __init int amd_iommu_pc_init(void) if (ret) return ret; - ret = _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); - if (ret) - amd_iommu_pc_exit(); + /* + * An IOMMU PMU is specific to an IOMMU, and can function independently. + * So we go through all IOMMUs and ignore the one that fails init + * unless all IOMMU are failing. + */ + for (i = 0; i < amd_iommu_get_num_iommus(); i++) { + ret = init_one_iommu(i); + if (!ret) + cnt++; + } - return ret; + if (!cnt) { + kfree(amd_iommu_events_group.attrs); + return -ENODEV; + } + + /* Init cpumask attributes to only core 0 */ + cpumask_set_cpu(0, &iommu_cpumask); + return 0; } device_initcall(amd_iommu_pc_init); -- cgit v1.2.1 From c2628f90c9964881a62dd8e9f7372ca05cb6fe32 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 10 Apr 2017 14:20:45 +0200 Subject: perf/amd/uncore: Do feature check first, before assignments ... and save some unnecessary work. Remove now unused label while at it. 
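Reduced to its pattern, the change below is a pair of guard clauses ahead of any setup work, so failing a feature test needs no unwinding and no label (condensed sketch, not the full function):

static int __init amd_uncore_init(void)
{
        /* cheap hardware checks first: nothing to free on failure */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return -ENODEV;

        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                return -ENODEV;

        /* ... per-family assignments and allocations follow ... */
        return 0;
}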
Signed-off-by: Borislav Petkov Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Suravee Suthikulpanit Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/20170410122047.3026-2-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/events/amd/uncore.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 4d1f7f2d9aff..abd4b9064fba 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -509,7 +509,10 @@ static int __init amd_uncore_init(void) int ret = -ENODEV; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) - goto fail_nodev; + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) + return -ENODEV; switch(boot_cpu_data.x86) { case 23: @@ -552,9 +555,6 @@ static int __init amd_uncore_init(void) amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df; amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3; - if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) - goto fail_nodev; - if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) { amd_uncore_nb = alloc_percpu(struct amd_uncore *); if (!amd_uncore_nb) { @@ -615,7 +615,6 @@ fail_nb: if (amd_uncore_nb) free_percpu(amd_uncore_nb); -fail_nodev: return ret; } device_initcall(amd_uncore_init); -- cgit v1.2.1 From 68e8038048f44e7782079e79338506246393a876 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 10 Apr 2017 14:20:46 +0200 Subject: perf/amd/uncore: Clean up per-family setup Fam16h is the same as the default one, remove it. Turn the switch-case into a simple if-else. No functionality change. Signed-off-by: Borislav Petkov Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Suravee Suthikulpanit Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/20170410122047.3026-3-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/events/amd/uncore.c | 59 ++++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index abd4b9064fba..975f24f6e238 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -514,45 +514,28 @@ static int __init amd_uncore_init(void) if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) return -ENODEV; - switch(boot_cpu_data.x86) { - case 23: - /* Family 17h: */ - num_counters_nb = NUM_COUNTERS_NB; - num_counters_llc = NUM_COUNTERS_L3; - /* - * For Family17h, the NorthBridge counters are - * re-purposed as Data Fabric counters. Also, support is - * added for L3 counters. The pmus are exported based on - * family as either L2 or L3 and NB or DF. 
- */ - amd_nb_pmu.name = "amd_df"; - amd_llc_pmu.name = "amd_l3"; - format_attr_event_df.show = &event_show_df; - format_attr_event_l3.show = &event_show_l3; - break; - case 22: - /* Family 16h - may change: */ - num_counters_nb = NUM_COUNTERS_NB; - num_counters_llc = NUM_COUNTERS_L2; - amd_nb_pmu.name = "amd_nb"; - amd_llc_pmu.name = "amd_l2"; - format_attr_event_df = format_attr_event; - format_attr_event_l3 = format_attr_event; - break; - default: - /* - * All prior families have the same number of - * NorthBridge and Last Level Cache counters - */ - num_counters_nb = NUM_COUNTERS_NB; - num_counters_llc = NUM_COUNTERS_L2; - amd_nb_pmu.name = "amd_nb"; - amd_llc_pmu.name = "amd_l2"; - format_attr_event_df = format_attr_event; - format_attr_event_l3 = format_attr_event; - break; + if (boot_cpu_data.x86 == 0x17) { + /* + * For F17h, the Northbridge counters are repurposed as Data + * Fabric counters. Also, L3 counters are supported too. The PMUs + * are exported based on family as either L2 or L3 and NB or DF. + */ + num_counters_nb = NUM_COUNTERS_NB; + num_counters_llc = NUM_COUNTERS_L3; + amd_nb_pmu.name = "amd_df"; + amd_llc_pmu.name = "amd_l3"; + format_attr_event_df.show = &event_show_df; + format_attr_event_l3.show = &event_show_l3; + } else { + num_counters_nb = NUM_COUNTERS_NB; + num_counters_llc = NUM_COUNTERS_L2; + amd_nb_pmu.name = "amd_nb"; + amd_llc_pmu.name = "amd_l2"; + format_attr_event_df = format_attr_event; + format_attr_event_l3 = format_attr_event; } - amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df; + + amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df; amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3; if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) { -- cgit v1.2.1 From 9df9078ef2086652647248ee6e82ca8f661cb3f5 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 10 Apr 2017 14:20:47 +0200 Subject: perf/amd/uncore: Fix pr_fmt() prefix Make it "perf/amd/uncore: ", i.e., something more specific than "perf: ". Signed-off-by: Borislav Petkov Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Suravee Suthikulpanit Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/20170410122047.3026-4-bp@alien8.de [ Changed it to perf/amd/uncore/ ] Signed-off-by: Ingo Molnar --- arch/x86/events/amd/uncore.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 975f24f6e238..ad44af0dd667 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -30,6 +30,9 @@ #define COUNTER_SHIFT 16 +#undef pr_fmt +#define pr_fmt(fmt) "amd_uncore: " fmt + static int num_counters_llc; static int num_counters_nb; @@ -548,7 +551,7 @@ static int __init amd_uncore_init(void) if (ret) goto fail_nb; - pr_info("perf: AMD NB counters detected\n"); + pr_info("AMD NB counters detected\n"); ret = 0; } @@ -562,7 +565,7 @@ static int __init amd_uncore_init(void) if (ret) goto fail_llc; - pr_info("perf: AMD LLC counters detected\n"); + pr_info("AMD LLC counters detected\n"); ret = 0; } -- cgit v1.2.1 From bd0b90676c30fe640e7ead919b3e38846ac88ab7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Mar 2017 13:56:56 +0900 Subject: kprobes/x86: Fix kprobe-booster not to boost far call instructions Fix the kprobe-booster not to boost far call instruction, because a call may store the address in the single-step execution buffer to the stack, which should be modified after single stepping. 
Currently, this instruction will be filtered as not boostable in resume_execution(), so this is not a critical issue. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Andrey Ryabinin Cc: Anil S Keshavamurthy Cc: Borislav Petkov Cc: Brian Gerst Cc: David S . Miller Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ye Xiaolong Link: http://lkml.kernel.org/r/149076340615.22469.14066273186134229909.stgit@devbox Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 993fa4fe4f68..9eae5a6c5870 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -200,6 +200,8 @@ retry: return (opcode != 0x62 && opcode != 0x67); case 0x70: return 0; /* can't boost conditional jump */ + case 0x90: + return opcode != 0x9a; /* can't boost call far */ case 0xc0: /* can't boost software-interruptions */ return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; -- cgit v1.2.1 From 129d17e8e8daf50f8aff4941fb4a9cda027ab9cf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Mar 2017 13:58:06 +0900 Subject: kprobes/x86: Fix the description of __copy_instruction() Fix the description comment of __copy_instruction() function since it has already been changed to return the length of the copied instruction. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Andrey Ryabinin Cc: Anil S Keshavamurthy Cc: Borislav Petkov Cc: Brian Gerst Cc: David S . Miller Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ye Xiaolong Link: http://lkml.kernel.org/r/149076347582.22469.3775133607244923462.stgit@devbox Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 9eae5a6c5870..81d4dc786dae 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -350,11 +350,10 @@ static int is_IF_modifier(kprobe_opcode_t *insn) } /* - * Copy an instruction and adjust the displacement if the instruction - * uses the %rip-relative addressing mode. - * If it does, Return the address of the 32-bit displacement word. - * If not, return null. - * Only applicable to 64-bit x86. + * Copy an instruction with recovering modified instruction by kprobes + * and adjust the displacement if the instruction uses the %rip-relative + * addressing mode. + * This returns the length of copied instruction, or 0 if it has an error. */ int __copy_instruction(u8 *dest, u8 *src) { @@ -376,6 +375,7 @@ int __copy_instruction(u8 *dest, u8 *src) memcpy(dest, insn.kaddr, length); #ifdef CONFIG_X86_64 + /* Only x86_64 has RIP relative instructions */ if (insn_rip_relative(&insn)) { s64 newdisp; u8 *disp; -- cgit v1.2.1 From 17880e4d5777df4770081ecf0750471cda57f86b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Mar 2017 13:59:15 +0900 Subject: kprobes/x86: Use instruction decoder for booster Use x86 instruction decoder for checking whether the probed instruction is able to boost or not, instead of hand-written code. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Andrey Ryabinin Cc: Anil S Keshavamurthy Cc: Borislav Petkov Cc: Brian Gerst Cc: David S . Miller Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ye Xiaolong Link: http://lkml.kernel.org/r/149076354563.22469.13379472209338986858.stgit@devbox Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 39 ++++++++++++++++----------------------- 1 file changed, 16 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 81d4dc786dae..6327f95832a0 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -169,35 +169,33 @@ NOKPROBE_SYMBOL(skip_prefixes); */ int can_boost(kprobe_opcode_t *opcodes, void *addr) { + struct insn insn; kprobe_opcode_t opcode; - kprobe_opcode_t *orig_opcodes = opcodes; if (search_exception_tables((unsigned long)addr)) return 0; /* Page fault may occur on this address. */ -retry: - if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) - return 0; - opcode = *(opcodes++); + kernel_insn_init(&insn, (void *)opcodes, MAX_INSN_SIZE); + insn_get_opcode(&insn); /* 2nd-byte opcode */ - if (opcode == 0x0f) { - if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) - return 0; - return test_bit(*opcodes, + if (insn.opcode.nbytes == 2) + return test_bit(insn.opcode.bytes[1], (unsigned long *)twobyte_is_boostable); - } + + if (insn.opcode.nbytes != 1) + return 0; + + /* Can't boost Address-size override prefix */ + if (unlikely(inat_is_address_size_prefix(insn.attr))) + return 0; + + opcode = insn.opcode.bytes[0]; switch (opcode & 0xf0) { -#ifdef CONFIG_X86_64 - case 0x40: - goto retry; /* REX prefix is boostable */ -#endif case 0x60: - if (0x63 < opcode && opcode < 0x67) - goto retry; /* prefixes */ - /* can't boost Address-size override and bound */ - return (opcode != 0x62 && opcode != 0x67); + /* can't boost "bound" */ + return (opcode != 0x62); case 0x70: return 0; /* can't boost conditional jump */ case 0x90: @@ -212,14 +210,9 @@ retry: /* can boost in/out and absolute jmps */ return ((opcode & 0x04) || opcode == 0xea); case 0xf0: - if ((opcode & 0x0c) == 0 && opcode != 0xf1) - goto retry; /* lock/rep(ne) prefix */ /* clear and set flags are boostable */ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); default: - /* segment override prefixes are boostable */ - if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e) - goto retry; /* prefixes */ /* CS override prefix and call are not boostable */ return (opcode != 0x2e && opcode != 0x9a); } -- cgit v1.2.1 From 804dec5bda9b4fcdab5f67fe61db4a0498af5221 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Mar 2017 14:00:25 +0900 Subject: kprobes/x86: Do not modify singlestep buffer while resuming Do not modify the single-step execution buffer (kprobe.ainsn.insn) while resuming from single-stepping; instead, add the jump-back instruction to the buffer when the buffer is prepared. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Andrey Ryabinin Cc: Anil S Keshavamurthy Cc: Borislav Petkov Cc: Brian Gerst Cc: David S . Miller Cc: Denys Vlasenko Cc: H.
From 804dec5bda9b4fcdab5f67fe61db4a0498af5221 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:00:25 +0900
Subject: kprobes/x86: Do not modify singlestep buffer while resuming

Do not modify the single-step execution buffer (kprobe.ainsn.insn)
while resuming from single-stepping; instead, modify the buffer to add
the jump-back instruction when the buffer is prepared.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S . Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076361560.22469.1610155860343077495.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 42 ++++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6327f95832a0..a654054eae7e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -399,23 +399,36 @@ int __copy_instruction(u8 *dest, u8 *src)
 	return length;
 }
 
+/* Prepare reljump right after instruction to boost */
+static void prepare_boost(struct kprobe *p, int length)
+{
+	if (can_boost(p->ainsn.insn, p->addr) &&
+	    MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
+		/*
+		 * These instructions can be executed directly if it
+		 * jumps back to correct address.
+		 */
+		synthesize_reljump(p->ainsn.insn + length, p->addr + length);
+		p->ainsn.boostable = 1;
+	} else {
+		p->ainsn.boostable = -1;
+	}
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
-	int ret;
+	int len;
 
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	ret = __copy_instruction(p->ainsn.insn, p->addr);
-	if (!ret)
+	len = __copy_instruction(p->ainsn.insn, p->addr);
+	if (!len)
 		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	if (can_boost(p->ainsn.insn, p->addr))
-		p->ainsn.boostable = 0;
-	else
-		p->ainsn.boostable = -1;
+	prepare_boost(p, len);
 
 	/* Check whether the instruction modifies Interrupt Flag or not */
 	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
@@ -878,21 +891,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 		break;
 	}
 
-	if (p->ainsn.boostable == 0) {
-		if ((regs->ip > copy_ip) &&
-		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
-			/*
-			 * These instructions can be executed directly if it
-			 * jumps back to correct address.
-			 */
-			synthesize_reljump((void *)regs->ip,
-				(void *)orig_ip + (regs->ip - copy_ip));
-			p->ainsn.boostable = 1;
-		} else {
-			p->ainsn.boostable = -1;
-		}
-	}
-
 	regs->ip += orig_ip - copy_ip;
 
 no_change:
--
cgit v1.2.1
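The buffer prepare_boost() builds is: [copied instruction][jmp rel32 back
into the original instruction stream]. Below is a self-contained sketch of
the displacement arithmetic behind such a relative jump (disp = target -
(jump address + 5)); the two byte arrays and the one-byte instruction
length are illustrative stand-ins, since the real code writes into an
executable instruction slot:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define RELATIVEJUMP_OPCODE 0xe9
	#define RELATIVEJUMP_SIZE   5

	/* Emit "jmp rel32" at FROM that lands on TO. */
	static void emit_reljump(uint8_t *from, const uint8_t *to)
	{
		int32_t disp = (int32_t)(to - (from + RELATIVEJUMP_SIZE));

		from[0] = RELATIVEJUMP_OPCODE;
		memcpy(from + 1, &disp, sizeof(disp));
	}

	int main(void)
	{
		uint8_t probed[16];	/* stands in for p->addr */
		uint8_t slot[16];	/* stands in for p->ainsn.insn */
		int len = 1;		/* pretend the copied insn is 1 byte */

		slot[0] = 0x90;		/* the copied instruction (nop here) */
		emit_reljump(slot + len, probed + len);	/* jump back after it */
		printf("slot: %02x %02x ...\n", slot[0], slot[1]);
		return 0;
	}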
From 490154bc68d15de9e38fbb850fe470e32cc66407 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:01:35 +0900
Subject: kprobes/x86: Make boostable flag boolean

Make arch_specific_insn.boostable a boolean, since it has only two
states: boostable or not. A boolean is clearer from the viewpoint of
code readability.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S . Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076368566.22469.6322906866458231844.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/kprobes.h   |  7 +++----
 arch/x86/kernel/kprobes/core.c   | 12 ++++++------
 arch/x86/kernel/kprobes/ftrace.c |  2 +-
 3 files changed, 10 insertions(+), 11 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 200581691c6e..34b984c60790 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -72,14 +72,13 @@ struct arch_specific_insn {
 	/* copy of the original instruction */
 	kprobe_opcode_t *insn;
 	/*
-	 * boostable = -1: This instruction type is not boostable.
-	 * boostable = 0: This instruction type is boostable.
-	 * boostable = 1: This instruction has been boosted: we have
+	 * boostable = false: This instruction type is not boostable.
+	 * boostable = true: This instruction has been boosted: we have
 	 * added a relative jump after the instruction copy in insn,
 	 * so no single-step and fixup are needed (unless there's
 	 * a post_handler or break_handler).
 	 */
-	int boostable;
+	bool boostable;
 	bool if_modifier;
 };
 
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a654054eae7e..3f084a0ca722 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -409,9 +409,9 @@ static void prepare_boost(struct kprobe *p, int length)
 		 * jumps back to correct address.
 		 */
 		synthesize_reljump(p->ainsn.insn + length, p->addr + length);
-		p->ainsn.boostable = 1;
+		p->ainsn.boostable = true;
 	} else {
-		p->ainsn.boostable = -1;
+		p->ainsn.boostable = false;
 	}
 }
 
@@ -467,7 +467,7 @@ void arch_disarm_kprobe(struct kprobe *p)
 void arch_remove_kprobe(struct kprobe *p)
 {
 	if (p->ainsn.insn) {
-		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
 		p->ainsn.insn = NULL;
 	}
 }
@@ -539,7 +539,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 		return;
 
 #if !defined(CONFIG_PREEMPT)
-	if (p->ainsn.boostable == 1 && !p->post_handler) {
+	if (p->ainsn.boostable && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		if (!reenter)
 			reset_current_kprobe();
@@ -859,7 +859,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 	case 0xcf:
 	case 0xea:	/* jmp absolute -- ip is correct */
 		/* ip is already adjusted, no more changes required */
-		p->ainsn.boostable = 1;
+		p->ainsn.boostable = true;
 		goto no_change;
 	case 0xe8:	/* call relative - Fix return addr */
 		*tos = orig_ip + (*tos - copy_ip);
@@ -884,7 +884,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 			 * jmp near and far, absolute indirect
 			 * ip is correct.
			 * And this is boostable
			 */
-			p->ainsn.boostable = 1;
+			p->ainsn.boostable = true;
 			goto no_change;
 		}
 	default:
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 5f8f0b3cc674..041f7b6dfa0f 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -94,6 +94,6 @@ NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
 	p->ainsn.insn = NULL;
-	p->ainsn.boostable = -1;
+	p->ainsn.boostable = false;
 	return 0;
 }
--
cgit v1.2.1

From d0381c81c2f782fa2131178d11e0cfb23d50d631 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:02:46 +0900
Subject: kprobes/x86: Set kprobes pages read-only

Set the pages which are used for kprobes' single-step buffer and
optprobe's trampoline instruction buffer to read-only. This can prevent
unexpected (or unintended) instruction modification.

This also passes rodata_test as below.

Without this patch, rodata_test shows a warning:

  WARNING: CPU: 0 PID: 1 at arch/x86/mm/dump_pagetables.c:235 note_page+0x7a9/0xa20
  x86/mm: Found insecure W+X mapping at address ffffffffa0000000/0xffffffffa0000000

With this fix, no W+X pages are found:

  x86/mm: Checked W+X mappings: passed, no W+X pages found.
  rodata_test: all tests were successful

Reported-by: Andrey Ryabinin
Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S . Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076375592.22469.14174394514338612247.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 4 ++++
 arch/x86/kernel/kprobes/opt.c  | 3 +++
 2 files changed, 7 insertions(+)

(limited to 'arch')

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3f084a0ca722..0dc24e6cdd1e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -419,6 +419,8 @@ static int arch_copy_kprobe(struct kprobe *p)
 {
 	int len;
 
+	set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
 	len = __copy_instruction(p->ainsn.insn, p->addr);
 	if (!len)
@@ -430,6 +432,8 @@ static int arch_copy_kprobe(struct kprobe *p)
 	 */
 	prepare_boost(p, len);
 
+	set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+
 	/* Check whether the instruction modifies Interrupt Flag or not */
 	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
 
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 3e7c6e5a08ff..b121037739e4 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -350,6 +350,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 	}
 
 	buf = (u8 *)op->optinsn.insn;
+	set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
 
 	/* Copy instructions into the out-of-line buffer */
 	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
@@ -372,6 +373,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
 			   (u8 *)op->kp.addr + op->optinsn.size);
 
+	set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
+
 	flush_icache_range((unsigned long) buf,
 			   (unsigned long) buf + TMPL_END_IDX +
 			   op->optinsn.size + RELATIVEJUMP_SIZE);
--
cgit v1.2.1
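The pattern in both files is: make the page writable, patch it, make it
read-only again. A userspace analogue of that sequence, with mprotect()
standing in for set_memory_rw()/set_memory_ro() (the kernel helpers walk
kernel page tables, so this is only a model of the idea):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		/* Stand-in for an instruction slot page. */
		uint8_t *slot = mmap(NULL, page, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (slot == MAP_FAILED)
			return 1;

		mprotect(slot, page, PROT_READ);		/* steady state: read-only */
		mprotect(slot, page, PROT_READ | PROT_WRITE);	/* like set_memory_rw() */
		slot[0] = 0x90;					/* patch the buffer (nop) */
		mprotect(slot, page, PROT_READ);		/* like set_memory_ro() */

		printf("patched %#x; page is read-only again\n", slot[0]);
		return 0;
	}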
From ea1e34fc366b84e4449b37d86f2222935e29412d Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:03:56 +0900
Subject: kprobes/x86: Use probe_kernel_read() instead of memcpy()

Use probe_kernel_read() to avoid unexpected faults while copying kernel
text in __recover_probed_insn(), __recover_optprobed_insn() and
__copy_instruction().

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S . Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076382624.22469.10091613887942958518.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 12 +++++++++---
 arch/x86/kernel/kprobes/opt.c  |  5 ++++-
 2 files changed, 13 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0dc24e6cdd1e..722f54440e7e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -259,7 +259,10 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 	 * Fortunately, we know that the original code is the ideal 5-byte
 	 * long NOP.
 	 */
-	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (probe_kernel_read(buf, (void *)addr,
+		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+		return 0UL;
+
 	if (faddr)
 		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
 	else
@@ -271,7 +274,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 * Recover the probed instruction at addr for further analysis.
 * Caller must lock kprobes by kprobe_mutex, or disable preemption
 * for preventing to release referencing kprobes.
- * Returns zero if the instruction can not get recovered.
+ * Returns zero if the instruction can not get recovered (or access failed).
 */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -365,7 +368,10 @@ int __copy_instruction(u8 *dest, u8 *src)
 	/* Another subsystem puts a breakpoint, failed to recover */
 	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
-	memcpy(dest, insn.kaddr, length);
+
+	/* This can access kernel text if given address is not recovered */
+	if (probe_kernel_read(dest, insn.kaddr, length))
+		return 0;
 
 #ifdef CONFIG_X86_64
 	/* Only x86_64 has RIP relative instructions */
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index b121037739e4..5b5233441d30 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -65,7 +65,10 @@ found:
 	 * overwritten by jump destination address. In this case, original
 	 * bytes must be recovered from op->optinsn.copied_insn buffer.
 	 */
-	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (probe_kernel_read(buf, (void *)addr,
+		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+		return 0UL;
+
 	if (addr == (unsigned long)kp->addr) {
 		buf[0] = kp->opcode;
 		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
--
cgit v1.2.1
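The point of probe_kernel_read() is the contract: it returns an error
instead of faulting when the source is unmapped. A rough userspace
analogue of that contract, using process_vm_readv() on the current
process as the fault-tolerant copy primitive (illustrative only; the
kernel helper works very differently internally):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/uio.h>
	#include <unistd.h>

	/* Copy LEN bytes from SRC, reporting failure instead of faulting. */
	static int safe_read(void *dst, const void *src, size_t len)
	{
		struct iovec local = { .iov_base = dst, .iov_len = len };
		struct iovec remote = { .iov_base = (void *)src, .iov_len = len };

		return process_vm_readv(getpid(), &local, 1,
					&remote, 1, 0) == (ssize_t)len ? 0 : -1;
	}

	int main(void)
	{
		char buf[16];
		int ok = safe_read(buf, "hello", 6);		/* valid source */
		int bad = safe_read(buf, (void *)0x1, 6);	/* faulting source */

		printf("valid: %d, bad: %d\n", ok, bad);	/* valid: 0, bad: -1 */
		return 0;
	}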
From a8d11cd0714f51877587f5ec891013ca46e163ac Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:05:06 +0900
Subject: kprobes/x86: Consolidate insn decoder users for copying code

Consolidate x86 instruction decoder users on the path of copying
original code for kprobes. Kprobes decodes the same instruction a
maximum of 3 times when preparing the instruction buffer:

 - The first time for getting the length of the instruction,
 - the 2nd for adjusting displacement,
 - and the 3rd for checking whether the instruction is boostable or not.

Each time, the actual decoding target address is slightly different
(the 1st is the original address or the recovered instruction buffer;
the 2nd and 3rd point to the copied buffer), but all hold the same
instruction. Thus, this patch decodes from the copied buffer up front
and reuses the decoded "insn" for adjusting the displacement and
checking boostability, as the sketch after this patch illustrates.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S . Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076389643.22469.13151892839998777373.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/common.h |  4 +--
 arch/x86/kernel/kprobes/core.c   | 66 +++++++++++++++++++---------------------
 arch/x86/kernel/kprobes/opt.c    |  5 +--
 3 files changed, 36 insertions(+), 39 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index d688826e5736..db2182d63ed0 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
 #endif
 
 /* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction, void *addr);
+extern int can_boost(struct insn *insn, void *orig_addr);
 /* Recover instruction if given address is probed */
 extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
 					 unsigned long addr);
@@ -75,7 +75,7 @@ extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode.
 */
-extern int __copy_instruction(u8 *dest, u8 *src);
+extern int __copy_instruction(u8 *dest, u8 *src, struct insn *insn);
 /* Generate a relative-jump/call instruction */
 extern void synthesize_reljump(void *from, void *to);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 722f54440e7e..19e1f2a6d7b0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -164,33 +164,29 @@ static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
 NOKPROBE_SYMBOL(skip_prefixes);
 
 /*
- * Returns non-zero if opcode is boostable.
+ * Returns non-zero if INSN is boostable.
 * RIP relative instructions are adjusted at copying time in 64 bits mode
 */
-int can_boost(kprobe_opcode_t *opcodes, void *addr)
+int can_boost(struct insn *insn, void *addr)
 {
-	struct insn insn;
 	kprobe_opcode_t opcode;
 
 	if (search_exception_tables((unsigned long)addr))
 		return 0;	/* Page fault may occur on this address. */
 
-	kernel_insn_init(&insn, (void *)opcodes, MAX_INSN_SIZE);
-	insn_get_opcode(&insn);
-
 	/* 2nd-byte opcode */
-	if (insn.opcode.nbytes == 2)
-		return test_bit(insn.opcode.bytes[1],
+	if (insn->opcode.nbytes == 2)
+		return test_bit(insn->opcode.bytes[1],
 			(unsigned long *)twobyte_is_boostable);
 
-	if (insn.opcode.nbytes != 1)
+	if (insn->opcode.nbytes != 1)
 		return 0;
 
 	/* Can't boost Address-size override prefix */
-	if (unlikely(inat_is_address_size_prefix(insn.attr)))
+	if (unlikely(inat_is_address_size_prefix(insn->attr)))
 		return 0;
 
-	opcode = insn.opcode.bytes[0];
+	opcode = insn->opcode.bytes[0];
 
 	switch (opcode & 0xf0) {
 	case 0x60:
@@ -351,35 +347,31 @@ static int is_IF_modifier(kprobe_opcode_t *insn)
 * addressing mode.
 * This returns the length of copied instruction, or 0 if it has an error.
 */
-int __copy_instruction(u8 *dest, u8 *src)
+int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
 {
-	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
-	int length;
 	unsigned long recovered_insn =
 		recover_probed_instruction(buf, (unsigned long)src);
 
-	if (!recovered_insn)
+	if (!recovered_insn || !insn)
 		return 0;
-	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
-	insn_get_length(&insn);
-	length = insn.length;
 
-	/* Another subsystem puts a breakpoint, failed to recover */
-	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+	/* This can access kernel text if given address is not recovered */
+	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
 		return 0;
 
-	/* This can access kernel text if given address is not recovered */
-	if (probe_kernel_read(dest, insn.kaddr, length))
+	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
+	insn_get_length(insn);
+
+	/* Another subsystem puts a breakpoint, failed to recover */
+	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
 
 #ifdef CONFIG_X86_64
 	/* Only x86_64 has RIP relative instructions */
-	if (insn_rip_relative(&insn)) {
+	if (insn_rip_relative(insn)) {
 		s64 newdisp;
 		u8 *disp;
 
-		kernel_insn_init(&insn, dest, length);
-		insn_get_displacement(&insn);
 		/*
 		 * The copied instruction uses the %rip-relative addressing
 		 * mode. Adjust the displacement for the difference between
@@ -392,29 +384,32 @@ int __copy_instruction(u8 *dest, u8 *src)
 		 * extension of the original signed 32-bit displacement would
 		 * have given.
 		 */
-		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
+		newdisp = (u8 *) src + (s64) insn->displacement.value
+			  - (u8 *) dest;
 		if ((s64) (s32) newdisp != newdisp) {
 			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
-			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
+			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
+				src, dest, insn->displacement.value);
 			return 0;
 		}
-		disp = (u8 *) dest + insn_offset_displacement(&insn);
+		disp = (u8 *) dest + insn_offset_displacement(insn);
 		*(s32 *) disp = (s32) newdisp;
 	}
 #endif
-	return length;
+	return insn->length;
 }
 
 /* Prepare reljump right after instruction to boost */
-static void prepare_boost(struct kprobe *p, int length)
+static void prepare_boost(struct kprobe *p, struct insn *insn)
 {
-	if (can_boost(p->ainsn.insn, p->addr) &&
-	    MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
+	if (can_boost(insn, p->addr) &&
+	    MAX_INSN_SIZE - insn->length >= RELATIVEJUMP_SIZE) {
 		/*
 		 * These instructions can be executed directly if it
 		 * jumps back to correct address.
 		 */
-		synthesize_reljump(p->ainsn.insn + length, p->addr + length);
+		synthesize_reljump(p->ainsn.insn + insn->length,
+				   p->addr + insn->length);
 		p->ainsn.boostable = true;
 	} else {
 		p->ainsn.boostable = false;
 	}
@@ -423,12 +418,13 @@ static int arch_copy_kprobe(struct kprobe *p)
 {
+	struct insn insn;
 	int len;
 
 	set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
 
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	len = __copy_instruction(p->ainsn.insn, p->addr);
+	len = __copy_instruction(p->ainsn.insn, p->addr, &insn);
 	if (!len)
 		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	prepare_boost(p, len);
+	prepare_boost(p, &insn);
 
 	set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
 
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 5b5233441d30..9aadff3d0902 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -177,11 +177,12 @@ NOKPROBE_SYMBOL(optimized_callback);
 
 static int copy_optimized_instructions(u8 *dest, u8 *src)
 {
+	struct insn insn;
 	int len = 0, ret;
 
 	while (len < RELATIVEJUMP_SIZE) {
-		ret = __copy_instruction(dest + len, src + len);
-		if (!ret || !can_boost(dest + len, src + len))
+		ret = __copy_instruction(dest + len, src + len, &insn);
+		if (!ret || !can_boost(&insn, src + len))
 			return -EINVAL;
 		len += ret;
 	}
--
cgit v1.2.1
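The resulting pattern is: decode once into an out-parameter, then let
every consumer reuse the decoded form instead of re-decoding the bytes.
A minimal, self-contained model of that pattern — struct decoded, the
single-byte "decoder" and the call names are hypothetical stand-ins for
struct insn and the real kernel functions:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical decoded form, standing in for struct insn. */
	struct decoded {
		unsigned char opcode;
		int length;
	};

	/* Copy SRC and decode it once into OUT. */
	static int copy_and_decode(unsigned char *dest, const unsigned char *src,
				   struct decoded *out)
	{
		if (!out)
			return 0;
		out->opcode = src[0];
		out->length = 1;	/* a real decoder computes this */
		memcpy(dest, src, out->length);
		return out->length;
	}

	/* Consumers reuse the decoded form. */
	static bool boost_ok(const struct decoded *d)
	{
		return d->opcode != 0x9a;	/* e.g. call far is not boostable */
	}

	int main(void)
	{
		unsigned char src[] = { 0x90 }, dest[16];
		struct decoded d;

		if (copy_and_decode(dest, src, &d) && boost_ok(&d))
			printf("decoded once, reused twice (len=%d)\n", d.length);
		return 0;
	}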
From fd583ad1563bec5f00140e1f2444adbcd331caad Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Tue, 4 Apr 2017 15:14:06 -0400
Subject: perf/x86: Fix spurious NMI with PEBS Load Latency event

Spurious NMIs will be observed with the following command:

  while :; do
    perf record -bae "cpu/umask=0x01,event=0xcd,ldlat=0x80/pp" \
		-e "cpu/umask=0x03,event=0x0/" \
		-e "cpu/umask=0x02,event=0x0/" \
		-e cycles,branches,cache-misses \
		-e cache-references -- sleep 10
  done

The bug was introduced by commit:

  8077eca079a2 ("perf/x86/pebs: Add workaround for broken OVFL status on HSW+")

That commit clears the status bits for the counters used for PEBS
events, by masking the whole 64 bits pebs_enabled. However, only the
low 32 bits of both status and pebs_enabled are reserved for PEBS-able
counters. For status, bits 32-34 are fixed counter overflow bits. For
pebs_enabled, bits 32-34 are for PEBS Load Latency.

In the test case, the PEBS Load Latency event and a fixed counter event
could overflow at the same time. The fixed counter overflow bit will be
cleared by mistake. Once it is cleared, the fixed counter overflow will
never be processed, which finally triggers a spurious NMI.

Correct the PEBS enabled mask by ignoring the non-PEBS bits.

Signed-off-by: Kan Liang
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Fixes: 8077eca079a2 ("perf/x86/pebs: Add workaround for broken OVFL status on HSW+")
Link: http://lkml.kernel.org/r/1491333246-3965-1-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/core.c | 2 +-
 arch/x86/events/intel/ds.c   | 2 +-
 arch/x86/events/perf_event.h | 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 4244bed77824..a6d91d4e37a1 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2151,7 +2151,7 @@ again:
 	 * counters from the GLOBAL_STATUS mask and we always process PEBS
 	 * events via drain_pebs().
 	 */
-	status &= ~cpuc->pebs_enabled;
+	status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
 
 	/*
 	 * PEBS overflow sets bit 62 in the global status register
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 9dfeeeca0ea8..c6d23ffe422d 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1222,7 +1222,7 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
 
 			/* clear non-PEBS bit and re-check */
 			pebs_status = p->status & cpuc->pebs_enabled;
-			pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1;
+			pebs_status &= PEBS_COUNTER_MASK;
 			if (pebs_status == (1 << bit))
 				return at;
 		}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index bcbb1d2ae10b..be3d36254040 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -79,6 +79,7 @@ struct amd_nb {
 
 /* The maximal number of PEBS events: */
 #define MAX_PEBS_EVENTS		8
+#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
 
 /*
 * Flags PEBS can handle without an PMI.
--
cgit v1.2.1
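The arithmetic of the fix can be checked in isolation: with bit 0 (a
PEBS-able counter) and bit 32 (fixed counter 0 overflow / Load Latency
enable) both set, masking status with the full 64-bit pebs_enabled wipes
the fixed-counter overflow bit, while masking only the low PEBS bits
preserves it. The bit values below are illustrative, not read from real
MSRs:

	#include <inttypes.h>
	#include <stdio.h>

	#define MAX_PEBS_EVENTS   8
	#define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1)

	int main(void)
	{
		/* Load Latency in use: pebs_enabled bit 32 set alongside bit 0. */
		uint64_t pebs_enabled = (1ULL << 0) | (1ULL << 32);
		/* Overflow on PEBS counter 0 and on fixed counter 0 (bit 32). */
		uint64_t status = (1ULL << 0) | (1ULL << 32);

		uint64_t buggy = status & ~pebs_enabled;
		uint64_t fixed = status & ~(pebs_enabled & PEBS_COUNTER_MASK);

		/* buggy == 0 drops the fixed-counter overflow; fixed keeps bit 32 */
		printf("buggy: %#" PRIx64 "  fixed: %#" PRIx64 "\n", buggy, fixed);
		return 0;
	}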